Feed API

Signals via API

Feed API access for integrations is available on Business plans. Public endpoints return a limited or redacted payload; the Pro plan covers individual app access.

Below is a public sample from today's flagship detail endpoint; public payloads may be limited or redacted. The backend exposes read endpoints for signals, storylines, and briefings.

Public flagship sample

GET /v1/narratives/7dd3e867-1a56-4990-9923-d6f18c7ea35c?tenant=ai&run_id=78fe43c9-82ee-4adb-b6be-1fe7d7c841f6

{
  "id": "7dd3e867-1a56-4990-9923-d6f18c7ea35c",
  "title": "I reduced my token usage by 178x in Claude Code!!",
  "summary": "Okay so, I took the leaked Claude Code repo, around 14.3M tokens total. Queried a knowledge graph, got back \\~80K tokens for that query! **14.3M / 80K ≈ 178x.** Nice. I have officially solved AI, now you can use 20$ claude for 178 times longer!! Wait a min, JK hahah! This is also basically how *everyone* is explaining “token efficiency” on the internet right now. Take total possible context, divid",
  "first_seen_at": "2026-04-11T20:01:35Z",
  "last_seen_at": "2026-04-12T01:56:34Z",
  "created_at": "2026-04-12T05:01:47.589076Z",
  "updated_at": "2026-04-12T05:01:50.636357Z",
  "timeframe": "24h",
  "run_id": "78fe43c9-82ee-4adb-b6be-1fe7d7c841f6",
  "metrics": {
    "divergence": 11.547,
    "post_count": 3,
    "momentum_1h": 0,
    "momentum_7d": 3,
    "score_total": 1.244641,
    "shill_score": 10,
    "momentum_24h": 3,
    "market_movers": [],
    "evidence_score": 0.75,
    "unique_authors": 3,
    "coherence_score": 0.7806,
    "diversity_bonus": 0.1625,
    "duplicate_ratio": 0,
    "platforms_count": 1,
    "social_momentum": 3,
    "divergence_label": "chatter_without_pricing",
    "market_conviction": 0,
    "origin_share_top1": 0.333333,
    "unique_publishers": 3,
    "origin_layer_posts": 0,
    "source_types_count": 1,
    "unique_authors_24h": 3,
    "unique_origin_urls": 3,
    "amplifier_share_top1": 0.333333,
    "author_concentration": 0.3333,
    "publisher_share_top1": 0.333333,
    "amplifier_layer_posts": 3,
    "concentration_penalty": 0,
    "unique_origin_domains": 3,
    "source_diversity_bonus": 0,
    "unique_publisher_types": 1,
    "unique_origin_publishers": 3
  },
  "platforms": [
    "reddit"
  ],
  "top_tickers": [],
  "why_now": [],
  "display_title": "Open-source tools and token efficiency challenges in AI workflows",
  "display_summary": "Recent community developments highlight efforts to improve AI tooling and token usage efficiency.",
  "narrative_frame_display": null,
  "entities": {
    "projects": [
      "SurgicalFS MCP",
      "Claude Code",
      "Synapse AI"
    ]
  },
  "recurring_claims": [
    {
      "claim": "Token efficiency claims based on simple retrieval ratios are misleading because actual token usage includes inputs, outputs, cache operations, and memory management.",
      "evidence_urls": [
        "https://reddit.com/r/ollama/comments/1siyrzp/i_reduced_my_token_usage_by_178x_in_claude_code"
      ]
    },
    {
      "claim": "Current local file system access for AI tools is inefficient, slow, and token-wasteful, especially for non-coding users handling many files.",
      "evidence_urls": [
        "https://reddit.com/r/LLMDevs/comments/1sj1hbw/for_the_noncoding_ai_users_among_us_a_better_file"
      ]
    },
    {
      "claim": "Open-source AI agent orchestration platforms can streamline complex workflows by enabling multi-step task automation with customizable tools.",
      "evidence_urls": [
        "https://reddit.com/r/LangChain/comments/1sitbdg/i_spent_3_months_building_an_opensource_tool_to"
      ]
    }
  ],
  "stance_map": [
    {
      "stance": "neutral",
      "who": "AI developer analyzing token usage",
      "evidence_urls": [
        "https://reddit.com/r/ollama/comments/1siyrzp/i_reduced_my_token_usage_by_178x_in_claude_code"
      ]
    },
    {
      "stance": "promotes",
      "who": "Pharma/biotech consultant user",
      "evidence_urls": [
        "https://reddit.com/r/LLMDevs/comments/1sj1hbw/for_the_noncoding_ai_users_among_us_a_better_file"
      ]
    },
    {
      "stance": "promotes",
      "who": "Open-source developer",
      "evidence_urls": [
        "https://reddit.com/r/LangChain/comments/1sitbdg/i_spent_3_months_building_an_opensource_tool_to"
      ]
    }
  ],
  "quality_flags": {
    "mixed_topic_risk": "low",
    "promo_risk": "low",
    "source_quality": "medium"
  },
  "editor_note": "This narrative synthesizes recent community insights on AI tooling improvements and token management challenges, highlighting practical solutions and ongoing development efforts.",
  "display_tags": [
    "models",
    "tooling",
    "ai_infrastructure"
  ],
  "tags": [
    "models",
    "tooling",
    "ai_infrastructure"
  ],
  "cscope_tags": [
    "models",
    "tooling",
    "ai_infrastructure"
  ],
  "why_now_display": [
    "Growing AI adoption exposes token inefficiencies and workflow bottlenecks.",
    "Community-driven open-source projects accelerate innovation in AI tooling.",
    "Demand for multi-agent orchestration reflects increasing complexity in AI applications."
  ],
  "why_it_matters_display": [
    "Efficient token usage reduces costs and improves AI tool responsiveness.",
    "Better local file access enhances productivity for users handling large, complex datasets.",
    "Orchestration platforms enable automation of sophisticated AI workflows, increasing usability."
  ],
  "show_why": true,
  "sources_display": [
    {
      "url": "https://v.redd.it/qzr395uaamug1",
      "label": "v.redd.it: I spent 3 months building an open-source tool to orchestrate AI agents. Would love some brutal feedback. (via Reddit)",
      "published_at": "2026-04-11T20:01:35+00:00"
    },
    {
      "url": "https://www.reddit.com/r/LLMDevs/comments/1sj1hbw/for_the_noncoding_ai_users_among_us_a_better_file/",
      "label": "Reddit r/LLMDevs discussion on SurgicalFS MCP for file system access (via Reddit)",
      "published_at": "2026-04-12T01:56:34+00:00"
    }
  ],
  "trend_status": "flat",
  "trend_sparkline": "▁",
  "trend_points_n": 0,
  "trend_window": {
    "lookback_days": 14,
    "max_points": 36,
    "value_key": null
  },
  "llm_status": "accepted",
  "llm_meta": {
    "model": "gpt-4.1-mini-2025-04-14",
    "prompt_version": "enrich_v3",
    "input_hash": "c2723d760cbfa79113724ca8c2f9316774e6d86da44aa471fad3d3bb2714aa1f",
    "updated_at": "2026-04-12T05:03:02.794707+00:00",
    "llm_metadata": {
      "env": "prod",
      "host": "4cc2c7d05da3",
      "stage": "signals.enrich",
      "run_id": "78fe43c9-82ee-4adb-b6be-1fe7d7c841f6",
      "tenant": "ai",
      "service": "api",
      "pipeline": "pipeline_run",
      "correlation_id": "7dd3e867-1a56-4990-9923-d6f18c7ea35c"
    }
  },
  "story_id": "4b9dfcd9-7d16-47b5-8343-a7351d6826d4",
  "public_surface": {
    "eligible_for_flagship_sample": true,
    "eligible_for_top_public_brief": true,
    "eligible_for_public_index": false,
    "eligible_only_for_lower_or_internal_surfaces": false,
    "public_rank_score": 11,
    "structural_priority": true,
    "mostly_social": false,
    "community_chatter": false,
    "reader_label": null,
    "excluded_reasons": [],
    "source_signals": {
      "post_count": 3,
      "unique_origins": 3,
      "source_types_count": 1,
      "independent_non_social_count": 3,
      "social_source_count": 0,
      "non_social_source_count": 0,
      "why_now_count": 3,
      "why_it_matters_count": 3
    }
  },
  "current_cycle_open": true
}
Capabilities
  • Signals and storylines feed endpoints with filtering and rate limits
  • Briefing delivery endpoints for integrations
  • Evidence link payloads for auditability
Integrate in your workflow
  • Route top stories into Slack or Teams for morning and evening desk updates.
  • Sync storyline evidence into Notion, Airtable, or internal research trackers.
  • Feed metrics into BI dashboards for momentum, concentration, and source mix monitoring.

Quick start endpoints: /v1/feed/stories, /v1/signals, /v1/storylines/search, /v1/briefings/latest.

For product access, see Pricing.