feat: major platform expansion — Brain service, RSS reader, iOS app, AI assistants, Firefox extension
Brain Service:
- Playwright stealth crawler replacing browserless (og:image, Readability, Reddit JSON API)
- AI classification with tag definitions and folder assignment
- YouTube video download via yt-dlp
- Karakeep migration complete (96 items)
- Taxonomy management (folders with icons/colors, tags)
- Discovery shuffle, sort options, search (Meilisearch + pgvector)
- Item tag/folder editing, card color accents

RSS Reader Service:
- Custom FastAPI reader replacing Miniflux
- Feed management (add/delete/refresh), category support
- Full article extraction via Readability
- Background content fetching for new entries
- Mark all read with confirmation
- Infinite scroll, retention cleanup (30/60 day)
- 17 feeds migrated from Miniflux

iOS App (SwiftUI):
- Native iOS 17+ app with @Observable architecture
- Cookie-based auth, configurable gateway URL
- Dashboard with custom background photo + frosted glass widgets
- Full fitness module (today/templates/goals/food library)
- AI assistant chat (fitness + brain, raw JSON state management)
- 120fps ProMotion support

AI Assistants (Gateway):
- Unified dispatcher with fitness/brain domain detection
- Fitness: natural language food logging, photo analysis, multi-item splitting
- Brain: save/append/update/delete notes, search & answer, undo support
- Madiha user gets fitness-only (brain disabled)

Firefox Extension:
- One-click save to Brain from any page
- Login with platform credentials
- Right-click context menu (save page/link/image)
- Notes field for URL saves
- Signed and published on AMO

Other:
- Reader bookmark button routes to Brain (was Karakeep)
- Fitness food library with "Add" button + add-to-meal popup
- Kindle send file size check (25MB SMTP2GO limit)
- Atelier UI as default (useAtelierShell=true)
- Mobile upload box in nav drawer

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
@@ -13,10 +13,10 @@ from sqlalchemy.orm import selectinload

from app.api.deps import get_user_id, get_db_session
from app.config import FOLDERS, TAGS
from app.models.item import Item, ItemAsset
from app.models.item import Item, ItemAsset, ItemAddition
from app.models.schema import (
    ItemCreate, ItemUpdate, ItemOut, ItemList, SearchQuery, SemanticSearchQuery,
    HybridSearchQuery, SearchResult, ConfigOut,
    HybridSearchQuery, SearchResult, ConfigOut, ItemAdditionCreate, ItemAdditionOut,
)
from app.services.storage import storage
from fastapi.responses import Response
@@ -25,6 +25,46 @@ from app.worker.tasks import enqueue_process_item

router = APIRouter(prefix="/api", tags=["brain"])


async def refresh_item_search_state(db: AsyncSession, item: Item):
    """Recompute embedding + Meilisearch doc after assistant additions change."""
    from app.search.engine import index_item
    from app.services.embed import generate_embedding

    additions_result = await db.execute(
        select(ItemAddition)
        .where(ItemAddition.item_id == item.id, ItemAddition.user_id == item.user_id)
        .order_by(ItemAddition.created_at.asc())
    )
    additions = additions_result.scalars().all()
    additions_text = "\n\n".join(addition.content for addition in additions if addition.content.strip())

    searchable_text_parts = [item.raw_content or "", item.extracted_text or "", additions_text]
    searchable_text = "\n\n".join(part.strip() for part in searchable_text_parts if part and part.strip())

    embed_text = f"{item.title or ''}\n{item.summary or ''}\n{searchable_text}".strip()
    embedding = await generate_embedding(embed_text)
    if embedding:
        item.embedding = embedding

    item.updated_at = datetime.utcnow()
    await db.commit()
    await db.refresh(item)

    await index_item({
        "id": item.id,
        "user_id": item.user_id,
        "type": item.type,
        "title": item.title,
        "url": item.url,
        "folder": item.folder,
        "tags": item.tags or [],
        "summary": item.summary,
        "extracted_text": searchable_text[:10000],
        "processing_status": item.processing_status,
        "created_at": item.created_at.isoformat() if item.created_at else None,
    })


# ── Health ──

@router.get("/health")
@@ -201,14 +241,31 @@ async def update_item(
        item.title = body.title
    if body.folder is not None:
        item.folder = body.folder
        # Update folder_id FK
        from app.models.taxonomy import Folder as FolderModel
        folder_row = (await db.execute(
            select(FolderModel).where(FolderModel.user_id == user_id, FolderModel.name == body.folder)
        )).scalar_one_or_none()
        item.folder_id = folder_row.id if folder_row else None
    if body.tags is not None:
        item.tags = body.tags
        # Update item_tags relational entries
        from app.models.taxonomy import Tag as TagModel, ItemTag
        from sqlalchemy import delete as sa_delete
        await db.execute(sa_delete(ItemTag).where(ItemTag.item_id == item.id))
        for tag_name in body.tags:
            tag_row = (await db.execute(
                select(TagModel).where(TagModel.user_id == user_id, TagModel.name == tag_name)
            )).scalar_one_or_none()
            if tag_row:
                db.add(ItemTag(item_id=item.id, tag_id=tag_row.id))
    if body.raw_content is not None:
        item.raw_content = body.raw_content

    item.updated_at = datetime.utcnow()
    await db.commit()
    await db.refresh(item)
    await refresh_item_search_state(db, item)
    return item
@@ -238,6 +295,100 @@ async def delete_item(
    return {"status": "deleted"}


@router.get("/items/{item_id}/additions", response_model=list[ItemAdditionOut])
async def list_item_additions(
    item_id: str,
    user_id: str = Depends(get_user_id),
    db: AsyncSession = Depends(get_db_session),
):
    item = (await db.execute(
        select(Item).where(Item.id == item_id, Item.user_id == user_id)
    )).scalar_one_or_none()
    if not item:
        raise HTTPException(status_code=404, detail="Item not found")

    additions = (await db.execute(
        select(ItemAddition)
        .where(ItemAddition.item_id == item_id, ItemAddition.user_id == user_id)
        .order_by(ItemAddition.created_at.asc())
    )).scalars().all()
    return additions


@router.post("/items/{item_id}/additions", response_model=ItemAdditionOut, status_code=201)
async def create_item_addition(
    item_id: str,
    body: ItemAdditionCreate,
    user_id: str = Depends(get_user_id),
    db: AsyncSession = Depends(get_db_session),
):
    item = (await db.execute(
        select(Item).where(Item.id == item_id, Item.user_id == user_id)
    )).scalar_one_or_none()
    if not item:
        raise HTTPException(status_code=404, detail="Item not found")

    content = body.content.strip()
    if not content:
        raise HTTPException(status_code=400, detail="Addition content cannot be empty")

    addition = ItemAddition(
        id=str(uuid.uuid4()),
        item_id=item.id,
        user_id=user_id,
        source=(body.source or "assistant").strip() or "assistant",
        kind=(body.kind or "append").strip() or "append",
        content=content,
        metadata_json=body.metadata_json or {},
    )
    db.add(addition)
    item.updated_at = datetime.utcnow()
    await db.commit()
    await db.refresh(addition)

    result = await db.execute(
        select(Item).where(Item.id == item.id, Item.user_id == user_id)
    )
    fresh_item = result.scalar_one()
    await refresh_item_search_state(db, fresh_item)
    return addition


@router.delete("/items/{item_id}/additions/{addition_id}")
async def delete_item_addition(
    item_id: str,
    addition_id: str,
    user_id: str = Depends(get_user_id),
    db: AsyncSession = Depends(get_db_session),
):
    item = (await db.execute(
        select(Item).where(Item.id == item_id, Item.user_id == user_id)
    )).scalar_one_or_none()
    if not item:
        raise HTTPException(status_code=404, detail="Item not found")

    addition = (await db.execute(
        select(ItemAddition).where(
            ItemAddition.id == addition_id,
            ItemAddition.item_id == item_id,
            ItemAddition.user_id == user_id,
        )
    )).scalar_one_or_none()
    if not addition:
        raise HTTPException(status_code=404, detail="Addition not found")

    await db.delete(addition)
    item.updated_at = datetime.utcnow()
    await db.commit()

    result = await db.execute(
        select(Item).where(Item.id == item.id, Item.user_id == user_id)
    )
    fresh_item = result.scalar_one()
    await refresh_item_search_state(db, fresh_item)
    return {"status": "deleted"}


# ── Reprocess item ──

@router.post("/items/{item_id}/reprocess", response_model=ItemOut)
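For reference, a minimal client sketch of the new additions endpoints, assuming cookie-based platform auth and a placeholder gateway URL (neither is defined in this diff):

import httpx

BASE_URL = "https://gateway.example.com"   # placeholder; use your deployment's gateway
COOKIES = {"session": "..."}               # assumed cookie-based auth, per the iOS app notes

item_id = "00000000-0000-0000-0000-000000000000"  # an existing item's UUID

with httpx.Client(base_url=BASE_URL, cookies=COOKIES) as client:
    # Append an assistant note; the server re-embeds and re-indexes the item.
    created = client.post(
        f"/api/items/{item_id}/additions",
        json={"content": "Follow-up note from the assistant", "source": "assistant"},
    ).json()

    # List additions in chronological order.
    additions = client.get(f"/api/items/{item_id}/additions").json()

    # Undo support: deleting an addition also refreshes the item's search state.
    client.delete(f"/api/items/{item_id}/additions/{created['id']}")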
@@ -335,5 +486,7 @@ async def serve_asset(item_id: str, asset_type: str, filename: str):
    elif filename.endswith(".jpg") or filename.endswith(".jpeg"): ct = "image/jpeg"
    elif filename.endswith(".html"): ct = "text/html"
    elif filename.endswith(".pdf"): ct = "application/pdf"
    elif filename.endswith(".mp4"): ct = "video/mp4"
    elif filename.endswith(".webm"): ct = "video/webm"

    return Response(content=data, media_type=ct, headers={"Cache-Control": "public, max-age=3600"})
@@ -17,8 +17,8 @@ MEILI_URL = os.environ.get("MEILI_URL", "http://brain-meili:7700")
MEILI_KEY = os.environ.get("MEILI_MASTER_KEY", "brain-meili-key")
MEILI_INDEX = "items"

# ── Browserless ──
BROWSERLESS_URL = os.environ.get("BROWSERLESS_URL", "http://brain-browserless:3000")
# ── Crawler ──
CRAWLER_URL = os.environ.get("CRAWLER_URL", "http://brain-crawler:3100")

# ── OpenAI ──
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
@@ -42,14 +42,14 @@ DEBUG = os.environ.get("DEBUG", "").lower() in ("1", "true")

# ── Classification rules ──
FOLDERS = [
    "Home", "Family", "Work", "Travel", "Knowledge", "Faith", "Projects"
    "Home", "Family", "Work", "Travel", "Islam",
    "Homelab", "Vanlife", "3D Printing", "Documents",
]

TAGS = [
    "reference", "important", "legal", "financial", "insurance",
    "research", "idea", "guide", "tutorial", "setup", "how-to",
    "tools", "dev", "server", "selfhosted", "home-assistant",
    "shopping", "compare", "buy", "product",
    "family", "kids", "health", "travel", "faith",
    "video", "read-later", "books",
    "diy", "reference", "home-assistant", "shopping", "video",
    "tutorial", "server", "kids", "books", "travel",
    "churning", "lawn-garden", "piracy", "work", "3d-printing",
    "lectures", "vanlife", "yusuf", "madiha", "hafsa", "mustafa",
    "medical", "legal", "vehicle", "insurance", "financial", "homeschool",
]
@@ -31,7 +31,7 @@ app.include_router(taxonomy_router)
async def startup():
    from sqlalchemy import text as sa_text
    from app.database import engine, Base
    from app.models.item import Item, ItemAsset, AppLink  # noqa: import to register models
    from app.models.item import Item, ItemAsset, AppLink, ItemAddition  # noqa: import to register models
    from app.models.taxonomy import Folder, Tag, ItemTag  # noqa: register taxonomy tables

    # Enable pgvector extension before creating tables
@@ -45,6 +45,12 @@ class Item(Base):

    # Relationships
    assets = relationship("ItemAsset", back_populates="item", cascade="all, delete-orphan")
    additions = relationship(
        "ItemAddition",
        back_populates="item",
        cascade="all, delete-orphan",
        order_by="ItemAddition.created_at",
    )

    __table_args__ = (
        Index("ix_items_user_status", "user_id", "processing_status"),
@@ -79,3 +85,19 @@ class AppLink(Base):
    app = Column(String(64), nullable=False)  # trips|tasks|fitness|inventory
    app_entity_id = Column(String(128), nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)


class ItemAddition(Base):
    __tablename__ = "item_additions"

    id = Column(UUID(as_uuid=False), primary_key=True, default=new_id)
    item_id = Column(UUID(as_uuid=False), ForeignKey("items.id", ondelete="CASCADE"), nullable=False, index=True)
    user_id = Column(String(64), nullable=False, index=True)
    source = Column(String(32), nullable=False, default="assistant")  # assistant|manual
    kind = Column(String(32), nullable=False, default="append")  # append
    content = Column(Text, nullable=False)
    metadata_json = Column(JSONB, nullable=True, default=dict)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    item = relationship("Item", back_populates="additions")
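A small sketch of what the new relationship buys at the ORM level, assuming an open AsyncSession named db and an already-loaded item (this flow is not itself part of the diff):

import uuid

# Creating an addition directly, as the route above does:
addition = ItemAddition(
    id=str(uuid.uuid4()), item_id=item.id, user_id=item.user_id,
    content="appended by assistant",
)
db.add(addition)
await db.commit()

# cascade="all, delete-orphan" on Item.additions plus ondelete="CASCADE" on the FK
# mean deleting the parent Item removes its additions at both the ORM and DB level:
await db.delete(item)
await db.commit()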
@@ -26,6 +26,13 @@ class ItemUpdate(BaseModel):
    raw_content: Optional[str] = None


class ItemAdditionCreate(BaseModel):
    content: str
    source: Optional[str] = "assistant"
    kind: Optional[str] = "append"
    metadata_json: Optional[dict] = None


class SearchQuery(BaseModel):
    q: str
    folder: Optional[str] = None
@@ -63,6 +70,19 @@ class AssetOut(BaseModel):
    model_config = {"from_attributes": True}


class ItemAdditionOut(BaseModel):
    id: str
    item_id: str
    source: str
    kind: str
    content: str
    metadata_json: Optional[dict] = None
    created_at: datetime
    updated_at: datetime

    model_config = {"from_attributes": True}


class ItemOut(BaseModel):
    id: str
    type: str
@@ -70,23 +70,24 @@ class ItemTag(Base):

# Default folders with colors and icons
DEFAULT_FOLDERS = [
    {"name": "Home", "color": "#059669", "icon": "home"},
    {"name": "Family", "color": "#D97706", "icon": "heart"},
    {"name": "Work", "color": "#4338CA", "icon": "briefcase"},
    {"name": "Travel", "color": "#0EA5E9", "icon": "plane"},
    {"name": "Knowledge", "color": "#8B5CF6", "icon": "book-open"},
    {"name": "Faith", "color": "#10B981", "icon": "moon"},
    {"name": "Projects", "color": "#F43F5E", "icon": "folder"},
    {"name": "Home", "color": "#059669", "icon": "home"},
    {"name": "Family", "color": "#D97706", "icon": "heart"},
    {"name": "Work", "color": "#4338CA", "icon": "briefcase"},
    {"name": "Travel", "color": "#0EA5E9", "icon": "plane"},
    {"name": "Islam", "color": "#10B981", "icon": "moon"},
    {"name": "Homelab", "color": "#6366F1", "icon": "server"},
    {"name": "Vanlife", "color": "#F59E0B", "icon": "truck"},
    {"name": "3D Printing", "color": "#EC4899", "icon": "printer"},
    {"name": "Documents", "color": "#78716C", "icon": "file-text"},
]

# Default tags to seed for new users
DEFAULT_TAGS = [
    "reference", "important", "legal", "financial", "insurance",
    "research", "idea", "guide", "tutorial", "setup", "how-to",
    "tools", "dev", "server", "selfhosted", "home-assistant",
    "shopping", "compare", "buy", "product",
    "family", "kids", "health", "travel", "faith",
    "video", "read-later", "books",
    "diy", "reference", "home-assistant", "shopping", "video",
    "tutorial", "server", "kids", "books", "travel",
    "churning", "lawn-garden", "piracy", "work", "3d-printing",
    "lectures", "vanlife", "yusuf", "madiha", "hafsa", "mustafa",
    "medical", "legal", "vehicle", "insurance", "financial", "homeschool",
]
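How these defaults get applied isn't shown in this hunk; below is a hypothetical seeding helper, with the Folder and Tag constructor fields assumed from the dict shape above:

async def seed_taxonomy(db: AsyncSession, user_id: str) -> None:
    # Hypothetical: create one Folder row per default entry and one Tag row per name
    # for a brand-new user. Real code may also guard against re-seeding.
    for f in DEFAULT_FOLDERS:
        db.add(Folder(user_id=user_id, name=f["name"], color=f["color"], icon=f["icon"]))
    for name in DEFAULT_TAGS:
        db.add(Tag(user_id=user_id, name=name))
    await db.commit()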
@@ -9,20 +9,61 @@ from app.config import OPENAI_API_KEY, OPENAI_MODEL

log = logging.getLogger(__name__)

TAG_DEFINITIONS = {
    "home-assistant": "Home Assistant specific content (dashboards, ESPHome, automations, integrations, Lovelace cards)",
    "server": "Server/infrastructure content (Docker, backups, networking, self-hosted apps, Linux)",
    "kids": "Anything related to children, parenting, or educational content for kids",
    "shopping": "A product page, product review, or specific item you might want to buy (Amazon, stores, book reviews with purchase links). NOT general discussion threads or forums comparing many options.",
    "diy": "Physical hands-on projects around the house, yard, or vehicle — repairs, woodworking, crafts, building things. NOT software, dashboards, or digital projects.",
    "reference": "Lookup info like contacts, sizes, specs, measurements, settings to remember",
    "video": "Video content (YouTube, TikTok, etc)",
    "tutorial": "How-to guides, step-by-step instructions, learning content",
    "books": "Book recommendations, reviews, or reading lists",
    "travel": "Destinations, resorts, hotels, trip ideas, reviews, places to visit",
    "churning": "Credit card points, miles, award travel, hotel loyalty programs, points maximization, sign-up bonuses",
    "lawn-garden": "Lawn care, gardening, yard work, bug spraying, fertilizer, landscaping, plants, outdoor maintenance",
    "piracy": "Anything to do with downloading content like Audiobooks, games",
    "lectures": "Lecture notes, Islamic talks, sermon recordings, religious class notes",
    "3d-printing": "3D printer files (STL), printer mods, filament, slicer settings, 3D printed objects and projects",
    "work": "Work-related content",
    "vanlife": "Van conversion, Promaster van, van build projects, camping in vans, van electrical/solar, van life lifestyle",
    "yusuf": "Personal document belonging to family member Yusuf (look for name in title or content)",
    "madiha": "Personal document belonging to family member Madiha (look for name in title or content)",
    "hafsa": "Personal document belonging to family member Hafsa (look for name in title or content)",
    "mustafa": "Personal document belonging to family member Mustafa (look for name in title or content)",
    "medical": "Medical records, allergy results, prescriptions, lab work, vaccination records, doctor notes",
    "legal": "Birth certificates, passports, IDs, citizenship papers, contracts, legal agreements",
    "vehicle": "Car registration, license plates, insurance cards, vehicle titles, maintenance records",
    "insurance": "Insurance policies, insurance cards, coverage documents, claims",
    "financial": "Tax documents, bank statements, pay stubs, loan papers, credit reports",
    "homeschool": "Homeschooling resources, curriculum, lesson plans, educational materials for teaching kids at home, school projects, science experiments",
}


def build_system_prompt(folders: list[str], tags: list[str]) -> str:
    tag_defs = "\n".join(
        f" - '{t}': {TAG_DEFINITIONS[t]}" if t in TAG_DEFINITIONS else f" - '{t}'"
        for t in tags
    )
    return f"""You are a classification engine for a personal "second brain" knowledge management system.

Given an item (URL, note, document, or file), you must return structured JSON with:
- folder: exactly 1 from this list: {json.dumps(folders)}
- tags: exactly 2 or 3 from this list: {json.dumps(tags)}
- title: a concise, normalized title (max 80 chars)
- tags: ONLY from this predefined list. Do NOT create any new tags outside this list. If no tags fit, return an empty array.
- title: a concise, normalized title in Title Case with spaces (max 80 chars, e.g. 'Machine Learning', 'Web Development')
- summary: a 1-2 sentence summary of the content (for links/documents only)
- corrected_text: for NOTES ONLY — return the original note text with spelling/grammar fixed. Keep the original meaning, tone, and structure. Only fix typos and obvious errors. Return empty string for non-notes.
- confidence: a float 0.0-1.0 indicating how confident you are

Tag definitions (only assign tags that STRONGLY match the content):
{tag_defs}

Rules:
- NEVER invent folders or tags not in the lists above
- Only assign tags that STRONGLY match the content. 1-2 tags is perfectly fine.
- Do NOT pad with extra tags just to reach a target number. If only one tag fits, only use one.
- If NO tags fit the content, return an empty tags array.
- Name tags: 'yusuf', 'madiha', 'hafsa', or 'mustafa' ONLY when the content is a personal document belonging to that family member (look for their name in the title or content)
- NEVER skip classification
- NEVER return freeform text outside the schema
- For notes: do NOT summarize. Keep the original text. Only fix spelling.
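The call site isn't in this diff, but a minimal sketch of how build_system_prompt and build_response_schema would drive an OpenAI structured-output request (raw HTTP via httpx; this assumes build_response_schema returns a complete JSON Schema object, and the actual client code in app/services/classify.py may differ):

import json
import httpx

from app.config import OPENAI_API_KEY, OPENAI_MODEL
from app.services.classify import build_system_prompt, build_response_schema

async def classify_sketch(folders: list[str], tags: list[str], content: str) -> dict:
    payload = {
        "model": OPENAI_MODEL,
        "messages": [
            {"role": "system", "content": build_system_prompt(folders, tags)},
            {"role": "user", "content": content[:8000]},   # truncation limit is illustrative
        ],
        "response_format": {
            "type": "json_schema",
            "json_schema": {
                "name": "classification",
                "strict": True,
                "schema": build_response_schema(folders, tags),
            },
        },
    }
    async with httpx.AsyncClient(timeout=60) as client:
        resp = await client.post(
            "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {OPENAI_API_KEY}"},
            json=payload,
        )
        resp.raise_for_status()
        # The schema-constrained message content is itself a JSON document.
        return json.loads(resp.json()["choices"][0]["message"]["content"])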
@@ -43,7 +84,7 @@ def build_response_schema(folders: list[str], tags: list[str]) -> dict:
        "tags": {
            "type": "array",
            "items": {"type": "string", "enum": tags},
            "minItems": 2,
            "minItems": 0,
            "maxItems": 3,
        },
        "title": {"type": "string"},
@@ -88,8 +129,8 @@ async def classify_item(
    if not OPENAI_API_KEY:
        log.warning("No OPENAI_API_KEY set, returning defaults")
        return {
            "folder": "Knowledge",
            "tags": ["reference", "read-later"],
            "folder": "Home",
            "tags": ["reference"],
            "title": title or "Untitled",
            "summary": "No AI classification available",
            "confidence": 0.0,
@@ -122,10 +163,8 @@ async def classify_item(

        # Validate folder and tags are in allowed sets
        if result["folder"] not in folders:
            result["folder"] = folders[0] if folders else "Knowledge"
            result["folder"] = folders[0] if folders else "Home"
        result["tags"] = [t for t in result["tags"] if t in tags][:3]
        if len(result["tags"]) < 2:
            result["tags"] = (result["tags"] + ["reference", "read-later"])[:3]

        return result
@@ -133,8 +172,8 @@ async def classify_item(
        log.error(f"Classification attempt {attempt + 1} failed: {e}")
        if attempt == retries:
            return {
                "folder": "Knowledge",
                "tags": ["reference", "read-later"],
                "folder": "Home",
                "tags": ["reference"],
                "title": title or "Untitled",
                "summary": f"Classification failed: {e}",
                "confidence": 0.0,
@@ -1,162 +1,218 @@
"""Content ingestion — fetch, extract, screenshot, archive."""
"""Content ingestion — Playwright crawler for HTML, screenshots, og:image."""

import base64
import logging
import re
import uuid
from html.parser import HTMLParser
from io import StringIO
from urllib.parse import urlparse

import httpx

from app.config import BROWSERLESS_URL
from app.config import CRAWLER_URL
from app.services.storage import storage

log = logging.getLogger(__name__)


class _HTMLTextExtractor(HTMLParser):
    """Simple HTML to text converter."""
    def __init__(self):
        super().__init__()
        self._result = StringIO()
        self._skip = False
        self._skip_tags = {"script", "style", "noscript", "svg"}

# ── YouTube helpers ──

    def handle_starttag(self, tag, attrs):
        if tag in self._skip_tags:
            self._skip = True

    def handle_endtag(self, tag):
        if tag in self._skip_tags:
            self._skip = False
        if tag in ("p", "div", "br", "h1", "h2", "h3", "h4", "li", "tr"):
            self._result.write("\n")

    def handle_data(self, data):
        if not self._skip:
            self._result.write(data)

    def get_text(self) -> str:
        raw = self._result.getvalue()
        # Collapse whitespace
        lines = [line.strip() for line in raw.splitlines()]
        return "\n".join(line for line in lines if line)

def _extract_youtube_id(url: str) -> str | None:
    patterns = [
        r'(?:youtube\.com/watch\?.*v=|youtu\.be/|youtube\.com/shorts/|youtube\.com/embed/)([a-zA-Z0-9_-]{11})',
    ]
    for pat in patterns:
        m = re.search(pat, url)
        if m:
            return m.group(1)
    return None


def html_to_text(html: str) -> str:
    extractor = _HTMLTextExtractor()
    extractor.feed(html)
    return extractor.get_text()

def _is_youtube_url(url: str) -> bool:
    return bool(_extract_youtube_id(url))


def extract_title_from_html(html: str) -> str | None:
    match = re.search(r"<title[^>]*>(.*?)</title>", html, re.IGNORECASE | re.DOTALL)
    return match.group(1).strip() if match else None

async def fetch_youtube_metadata(url: str) -> dict | None:
    """Fetch YouTube video metadata via oEmbed. No API key needed."""
    video_id = _extract_youtube_id(url)
    if not video_id:
        return None

    result = {
        "title": None,
        "description": None,
        "author": None,
        "thumbnail_url": f"https://img.youtube.com/vi/{video_id}/maxresdefault.jpg",
        "video_id": video_id,
        "is_short": "/shorts/" in url,
    }

def extract_meta_description(html: str) -> str | None:
    match = re.search(
        r'<meta[^>]*name=["\']description["\'][^>]*content=["\'](.*?)["\']',
        html, re.IGNORECASE | re.DOTALL,
    )
    return match.group(1).strip() if match else None


async def fetch_url_content(url: str) -> dict:
    """Fetch URL content. Returns dict with html, text, title, description, used_browserless."""
    result = {"html": None, "text": None, "title": None, "description": None, "used_browserless": False}

    # Try HTTP-first extraction
    try:
        async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
            resp = await client.get(url, headers={
                "User-Agent": "Mozilla/5.0 (compatible; SecondBrain/1.0)"
            })
            resp.raise_for_status()
            html = resp.text
            result["html"] = html
            result["text"] = html_to_text(html)
            result["title"] = extract_title_from_html(html)
            result["description"] = extract_meta_description(html)

            # If extraction is weak (< 200 chars of text), try browserless
            if len(result["text"] or "") < 200:
                log.info(f"Weak extraction ({len(result['text'] or '')} chars), trying browserless")
                br = await fetch_with_browserless(url)
                if br and len(br.get("text", "")) > len(result["text"] or ""):
                    result.update(br)
                    result["used_browserless"] = True
        async with httpx.AsyncClient(timeout=10) as client:
            oembed_url = f"https://www.youtube.com/oembed?url=https://www.youtube.com/watch?v={video_id}&format=json"
            resp = await client.get(oembed_url)
            if resp.status_code == 200:
                data = resp.json()
                result["title"] = data.get("title")
                result["author"] = data.get("author_name")

            noembed_url = f"https://noembed.com/embed?url=https://www.youtube.com/watch?v={video_id}"
            resp2 = await client.get(noembed_url)
            if resp2.status_code == 200:
                data2 = resp2.json()
                if not result["title"]:
                    result["title"] = data2.get("title")
                if not result["author"]:
                    result["author"] = data2.get("author_name")
    except Exception as e:
        log.warning(f"HTTP fetch failed for {url}: {e}, trying browserless")
        try:
            br = await fetch_with_browserless(url)
            if br:
                result.update(br)
                result["used_browserless"] = True
        except Exception as e2:
            log.error(f"Browserless also failed for {url}: {e2}")
        log.warning(f"YouTube metadata fetch failed: {e}")

    return result


async def fetch_with_browserless(url: str) -> dict | None:
    """Use browserless/chrome to render JS-heavy pages."""
async def download_youtube_thumbnail(url: str, item_id: str) -> str | None:
    """Download YouTube thumbnail and save as screenshot asset."""
    video_id = _extract_youtube_id(url)
    if not video_id:
        return None

    urls_to_try = [
        f"https://img.youtube.com/vi/{video_id}/maxresdefault.jpg",
        f"https://img.youtube.com/vi/{video_id}/hqdefault.jpg",
    ]
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(
                f"{BROWSERLESS_URL}/content",
                json={"url": url, "waitForTimeout": 3000},
            )
            if resp.status_code == 200:
                html = resp.text
                return {
                    "html": html,
                    "text": html_to_text(html),
                    "title": extract_title_from_html(html),
                    "description": extract_meta_description(html),
                }
        async with httpx.AsyncClient(timeout=10) as client:
            for thumb_url in urls_to_try:
                resp = await client.get(thumb_url)
                if resp.status_code == 200 and len(resp.content) > 1000:
                    path = storage.save(
                        item_id=item_id, asset_type="screenshot",
                        filename="thumbnail.jpg", data=resp.content,
                    )
                    return path
    except Exception as e:
        log.error(f"Browserless fetch failed: {e}")
        log.warning(f"YouTube thumbnail download failed: {e}")
    return None


async def take_screenshot(url: str, item_id: str) -> str | None:
    """Take a screenshot of a URL using browserless. Returns storage path or None."""
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(
                f"{BROWSERLESS_URL}/screenshot",
                json={
                    "url": url,
                    "options": {"type": "png", "fullPage": False},
                    "waitForTimeout": 3000,
                },
async def download_youtube_video(url: str, item_id: str) -> tuple[str | None, dict]:
    """Download YouTube video via yt-dlp."""
    import asyncio
    import subprocess
    import tempfile
    import os

    video_id = _extract_youtube_id(url)
    if not video_id:
        return None, {}

    with tempfile.TemporaryDirectory() as tmpdir:
        outpath = os.path.join(tmpdir, "%(id)s.%(ext)s")
        cmd = [
            "yt-dlp", "--no-playlist",
            "-f", "bestvideo[height<=720][ext=mp4]+bestaudio[ext=m4a]/best[height<=720][ext=mp4]/best[height<=720]",
            "--merge-output-format", "mp4",
            "--write-info-json", "--no-write-playlist-metafiles",
            "-o", outpath, url,
        ]
        try:
            proc = await asyncio.to_thread(
                subprocess.run, cmd, capture_output=True, text=True, timeout=120,
            )
            if proc.returncode != 0:
                log.warning(f"yt-dlp failed: {proc.stderr[:300]}")
                return None, {}

            video_file = None
            info = {}
            for f in os.listdir(tmpdir):
                if f.endswith(".mp4"):
                    video_file = os.path.join(tmpdir, f)
                elif f.endswith(".info.json"):
                    import json as _json
                    with open(os.path.join(tmpdir, f)) as fh:
                        info = _json.load(fh)

            if not video_file:
                return None, {}

            file_data = open(video_file, "rb").read()
            path = storage.save(
                item_id=item_id, asset_type="video",
                filename=f"{video_id}.mp4", data=file_data,
            )
            log.info(f"Downloaded YouTube video: {len(file_data)} bytes -> {path}")
            return path, info
        except subprocess.TimeoutExpired:
            log.warning(f"yt-dlp timed out for {url}")
            return None, {}
        except Exception as e:
            log.error(f"YouTube download failed: {e}")
            return None, {}


# ── Main crawler (Playwright stealth service) ──

async def crawl_url(url: str) -> dict:
    """Call the Playwright crawler service. Returns dict with html, text, title,
    description, author, og_image_url, screenshot (base64), status_code, error."""
    try:
        async with httpx.AsyncClient(timeout=45) as client:
            resp = await client.post(f"{CRAWLER_URL}/crawl", json={"url": url})
            if resp.status_code == 200:
                return resp.json()
            log.warning(f"Crawler returned {resp.status_code} for {url}")
    except Exception as e:
        log.error(f"Crawler request failed for {url}: {e}")
    return {"url": url, "html": None, "text": None, "title": None,
            "description": None, "og_image_url": None, "screenshot": None, "error": str(e) if 'e' in dir() else "unknown"}


async def save_screenshot_from_base64(b64: str, item_id: str) -> str | None:
    """Decode base64 screenshot and save to storage."""
    try:
        data = base64.b64decode(b64)
        if len(data) < 500:
            return None
        path = storage.save(
            item_id=item_id, asset_type="screenshot",
            filename="screenshot.jpg", data=data,
        )
        return path
    except Exception as e:
        log.error(f"Screenshot save failed: {e}")
        return None


async def download_og_image(og_url: str, item_id: str) -> str | None:
    """Download an og:image and save as asset."""
    # Clean HTML entities from URL
    og_url = og_url.replace("&amp;", "&")
    try:
        async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
            resp = await client.get(og_url, headers={
                "User-Agent": "Mozilla/5.0 (compatible; SecondBrain/1.0)"
            })
            if resp.status_code == 200 and len(resp.content) > 1000:
                ct = resp.headers.get("content-type", "image/jpeg")
                ext = "png" if "png" in ct else "jpg"
                path = storage.save(
                    item_id=item_id,
                    asset_type="screenshot",
                    filename="screenshot.png",
                    data=resp.content,
                    item_id=item_id, asset_type="og_image",
                    filename=f"og_image.{ext}", data=resp.content,
                )
                log.info(f"Downloaded og:image ({len(resp.content)} bytes) for {item_id}")
                return path
    except Exception as e:
        log.error(f"Screenshot failed for {url}: {e}")
        log.warning(f"og:image download failed: {e}")
    return None


async def archive_html(html: str, item_id: str) -> str | None:
    """Save the full HTML as an archived asset."""
    """Save full HTML as an archived asset."""
    if not html:
        return None
    try:
        path = storage.save(
            item_id=item_id,
            asset_type="archived_html",
            filename="page.html",
            data=html.encode("utf-8"),
            item_id=item_id, asset_type="archived_html",
            filename="page.html", data=html.encode("utf-8"),
        )
        return path
    except Exception as e:
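From crawl_url's docstring and the fields the worker reads below, the crawler service's /crawl response appears to have the following shape. This is an inferred sketch for reference, not a definitive contract; the actual Playwright service may return more fields:

from typing import Optional, TypedDict

class CrawlResult(TypedDict, total=False):
    # Inferred from crawl_url()'s docstring and the keys consumed in the worker.
    url: str
    html: Optional[str]
    text: Optional[str]
    title: Optional[str]
    description: Optional[str]
    author: Optional[str]
    og_image_url: Optional[str]
    screenshot: Optional[str]   # base64-encoded JPEG
    status_code: Optional[int]
    error: Optional[str]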
@@ -12,6 +12,7 @@ from sqlalchemy.orm import selectinload

from app.config import REDIS_URL, DATABASE_URL_SYNC
from app.models.item import Item, ItemAsset
from app.models.taxonomy import Folder, Tag, ItemTag  # noqa: F401 — register FK targets

log = logging.getLogger(__name__)
@@ -34,7 +35,7 @@ async def _process_item(item_id: str):
    """Full processing pipeline for a saved item."""
    from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
    from app.config import DATABASE_URL
    from app.services.ingest import fetch_url_content, take_screenshot, archive_html
    from app.services.ingest import crawl_url, save_screenshot_from_base64, download_og_image, archive_html
    from app.services.classify import classify_item
    from app.services.embed import generate_embedding
    from app.search.engine import index_item, ensure_meili_index
@@ -62,42 +63,96 @@ async def _process_item(item_id: str):

        # ── Step 1: Fetch content for URLs ──
        if item.type == "link" and item.url:
            log.info(f"Fetching URL: {item.url}")
            content = await fetch_url_content(item.url)
            html_content = content.get("html")
            extracted_text = content.get("text") or extracted_text
            if not title:
                title = content.get("title")
            from app.services.ingest import (
                _is_youtube_url, download_youtube_thumbnail,
                download_youtube_video, fetch_youtube_metadata,
            )

            item.metadata_json = item.metadata_json or {}
            item.metadata_json["description"] = content.get("description")
            item.metadata_json["used_browserless"] = content.get("used_browserless", False)
            is_yt = _is_youtube_url(item.url)

            # Take screenshot
            screenshot_path = await take_screenshot(item.url, item.id)
            if screenshot_path:
                asset = ItemAsset(
                    id=str(uuid.uuid4()),
                    item_id=item.id,
                    asset_type="screenshot",
                    filename="screenshot.png",
                    content_type="image/png",
                    storage_path=screenshot_path,
                )
                db.add(asset)
            if is_yt:
                # YouTube: use oEmbed + thumbnail + yt-dlp (no crawler needed)
                log.info(f"Processing YouTube URL: {item.url}")
                yt_meta = await fetch_youtube_metadata(item.url)
                if yt_meta:
                    if not title:
                        title = yt_meta.get("title")
                    extracted_text = f"YouTube: {yt_meta.get('title','')}\nBy: {yt_meta.get('author','')}"
                    item.metadata_json["youtube"] = {
                        "video_id": yt_meta.get("video_id"),
                        "author": yt_meta.get("author"),
                        "is_short": yt_meta.get("is_short", False),
                    }
                    item.metadata_json["description"] = f"YouTube video by {yt_meta.get('author','')}"

            # Archive HTML
            if html_content:
                html_path = await archive_html(html_content, item.id)
                if html_path:
                    asset = ItemAsset(
                        id=str(uuid.uuid4()),
                        item_id=item.id,
                        asset_type="archived_html",
                        filename="page.html",
                        content_type="text/html",
                        storage_path=html_path,
                    )
                    db.add(asset)
                # Download video
                log.info(f"Downloading YouTube video: {item.url}")
                video_path, yt_info = await download_youtube_video(item.url, item.id)
                if video_path:
                    db.add(ItemAsset(
                        id=str(uuid.uuid4()), item_id=item.id,
                        asset_type="video", filename=f"{yt_meta['video_id']}.mp4",
                        content_type="video/mp4", storage_path=video_path,
                    ))
                    if yt_info.get("duration"):
                        item.metadata_json["youtube"]["duration"] = yt_info["duration"]
                    if yt_info.get("description"):
                        item.metadata_json["youtube"]["description"] = yt_info["description"][:500]
                        extracted_text = f"YouTube: {title or ''}\nBy: {(yt_meta or {}).get('author','')}\n{yt_info['description'][:2000]}"

                # Thumbnail
                thumb_path = await download_youtube_thumbnail(item.url, item.id)
                if thumb_path:
                    db.add(ItemAsset(
                        id=str(uuid.uuid4()), item_id=item.id,
                        asset_type="screenshot", filename="thumbnail.jpg",
                        content_type="image/jpeg", storage_path=thumb_path,
                    ))

            else:
                # Regular URL: use Playwright crawler (stealth)
                log.info(f"Crawling URL: {item.url}")
                crawl = await crawl_url(item.url)
                html_content = crawl.get("html")
                extracted_text = crawl.get("text") or extracted_text
                if not title:
                    title = crawl.get("title")
                item.metadata_json["description"] = crawl.get("description")
                item.metadata_json["author"] = crawl.get("author")
                item.metadata_json["status_code"] = crawl.get("status_code")

                # Screenshot (from crawler, base64 JPEG)
                if crawl.get("screenshot"):
                    ss_path = await save_screenshot_from_base64(crawl["screenshot"], item.id)
                    if ss_path:
                        db.add(ItemAsset(
                            id=str(uuid.uuid4()), item_id=item.id,
                            asset_type="screenshot", filename="screenshot.jpg",
                            content_type="image/jpeg", storage_path=ss_path,
                        ))

                # og:image (extracted from rendered DOM by crawler)
                og_url = crawl.get("og_image_url")
                if og_url:
                    og_path = await download_og_image(og_url, item.id)
                    if og_path:
                        db.add(ItemAsset(
                            id=str(uuid.uuid4()), item_id=item.id,
                            asset_type="og_image", filename="og_image.jpg",
                            content_type="image/jpeg", storage_path=og_path,
                        ))
                    item.metadata_json["og_image_url"] = og_url

                # Archive HTML
                if html_content:
                    html_path = await archive_html(html_content, item.id)
                    if html_path:
                        db.add(ItemAsset(
                            id=str(uuid.uuid4()), item_id=item.id,
                            asset_type="archived_html", filename="page.html",
                            content_type="text/html", storage_path=html_path,
                        ))

        # ── Step 1b: Process uploaded files (PDF, image, document) ──
        if item.type in ("pdf", "image", "document", "file"):
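For context, the routes file imports enqueue_process_item from this module, so the save flow feeding this pipeline looks roughly like the sketch below. The create endpoint is not part of this diff, and the Item constructor fields and enqueue signature shown here are assumptions:

# Hypothetical create_item flow (not shown in this diff):
item = Item(
    id=str(uuid.uuid4()), user_id=user_id, type="link",
    url=body.url, processing_status="pending",
)
db.add(item)
await db.commit()

# Hands off to the worker, which runs _process_item: crawl (or yt-dlp for YouTube),
# classify, embed, then index into Meilisearch.
enqueue_process_item(item.id)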