summaryrefslogtreecommitdiff
path: root/Omni
diff options
context:
space:
mode:
Diffstat (limited to 'Omni')
-rw-r--r--Omni/Agent.hs55
-rw-r--r--Omni/Agent/Core.hs49
-rw-r--r--Omni/Agent/DESIGN.md117
-rw-r--r--Omni/Agent/Engine.hs1189
-rw-r--r--Omni/Agent/Event.hs180
-rw-r--r--Omni/Agent/Git.hs232
-rw-r--r--Omni/Agent/Memory.hs1575
-rw-r--r--Omni/Agent/PLAN.md589
-rw-r--r--Omni/Agent/Paths.hs39
-rw-r--r--Omni/Agent/Provider.hs695
-rw-r--r--Omni/Agent/Skills.hs417
-rw-r--r--Omni/Agent/Status.hs157
-rw-r--r--Omni/Agent/Subagent.hs516
-rw-r--r--Omni/Agent/Subagent/DESIGN.md352
-rw-r--r--Omni/Agent/Telegram.hs1372
-rw-r--r--Omni/Agent/Telegram/IncomingQueue.hs228
-rw-r--r--Omni/Agent/Telegram/Media.hs327
-rw-r--r--Omni/Agent/Telegram/Messages.hs551
-rw-r--r--Omni/Agent/Telegram/Reminders.hs108
-rw-r--r--Omni/Agent/Telegram/Types.hs654
-rw-r--r--Omni/Agent/Tools.hs682
-rw-r--r--Omni/Agent/Tools/Calendar.hs322
-rw-r--r--Omni/Agent/Tools/Email.hs675
-rw-r--r--Omni/Agent/Tools/Feedback.hs204
-rw-r--r--Omni/Agent/Tools/Hledger.hs489
-rw-r--r--Omni/Agent/Tools/Http.hs338
-rw-r--r--Omni/Agent/Tools/Notes.hs357
-rw-r--r--Omni/Agent/Tools/Outreach.hs513
-rw-r--r--Omni/Agent/Tools/Pdf.hs180
-rw-r--r--Omni/Agent/Tools/Python.hs217
-rw-r--r--Omni/Agent/Tools/Todos.hs527
-rw-r--r--Omni/Agent/Tools/WebReader.hs308
-rw-r--r--Omni/Agent/Tools/WebReaderTest.hs53
-rw-r--r--Omni/Agent/Tools/WebSearch.hs212
-rw-r--r--Omni/Agent/Worker.hs665
-rw-r--r--Omni/App.py8
-rwxr-xr-xOmni/Ava.hs69
-rw-r--r--[-rwxr-xr-x]Omni/Bild.hs1252
-rw-r--r--Omni/Bild.nix38
-rwxr-xr-xOmni/Bild/Audit.py176
-rw-r--r--Omni/Bild/Builder.nix212
-rw-r--r--Omni/Bild/Deps.nix16
-rw-r--r--Omni/Bild/Deps/Haskell.nix6
-rw-r--r--Omni/Bild/Deps/Python.nix17
-rw-r--r--Omni/Bild/Deps/kerykeion.nix72
-rw-r--r--Omni/Bild/Deps/logfire-api.nix24
-rw-r--r--Omni/Bild/Deps/openai-python.nix99
-rw-r--r--Omni/Bild/Deps/pydantic-ai-slim.nix90
-rw-r--r--Omni/Bild/Deps/pydantic-ai.nix75
-rw-r--r--Omni/Bild/Deps/pydantic-graph.nix45
-rw-r--r--Omni/Bild/Deps/pyswisseph.nix41
-rw-r--r--Omni/Bild/Deps/simple-ascii-tables.nix28
-rw-r--r--Omni/Bild/Deps/sweph-data.nix38
-rwxr-xr-xOmni/Bild/Example.py18
-rw-r--r--Omni/Bild/Haskell.nix3
-rw-r--r--Omni/Bild/Nixpkgs.nix4
-rw-r--r--Omni/Bild/Python.nix16
-rw-r--r--Omni/Bild/README.md40
-rw-r--r--Omni/Bild/Sources.json101
-rw-r--r--Omni/Bild/Sources.nix6
-rwxr-xr-xOmni/Bot.scm61
-rw-r--r--Omni/Ci.hs191
-rwxr-xr-xOmni/Ci.sh62
-rwxr-xr-xOmni/Cloud.nix7
-rw-r--r--Omni/Cloud/Cal.nix81
-rw-r--r--Omni/Cloud/Chat.nix2
-rw-r--r--Omni/Cloud/Comms/Xmpp.nix74
-rw-r--r--Omni/Cloud/Git.nix2
-rw-r--r--Omni/Cloud/Mail.nix87
-rw-r--r--Omni/Cloud/Monica.nix4
-rw-r--r--Omni/Cloud/NostrRelay.nix4
-rw-r--r--Omni/Cloud/OpenWebui.nix43
-rw-r--r--Omni/Cloud/Ports.nix1
-rw-r--r--Omni/Cloud/Web.nix164
-rw-r--r--Omni/Cloud/Znc.nix2
-rwxr-xr-xOmni/Cloud/post-receive.sh10
-rw-r--r--Omni/Deploy/Caddy.hs241
-rw-r--r--Omni/Deploy/Deployer.hs317
-rw-r--r--Omni/Deploy/Deployer.nix104
-rw-r--r--Omni/Deploy/Manifest.hs673
-rw-r--r--Omni/Deploy/PLAN.md299
-rw-r--r--Omni/Deploy/Packages.nix11
-rw-r--r--Omni/Deploy/README.md211
-rw-r--r--Omni/Deploy/Systemd.hs269
-rwxr-xr-xOmni/Dev/Beryllium.nix5
-rw-r--r--Omni/Dev/Beryllium/AVA.md111
-rw-r--r--Omni/Dev/Beryllium/Ava.nix81
-rw-r--r--Omni/Dev/Beryllium/Configuration.nix28
-rw-r--r--Omni/Dev/Beryllium/Hardware.nix1
-rw-r--r--Omni/Dev/Beryllium/Live.nix135
-rwxr-xr-xOmni/Dev/Beryllium/migrate-ava.sh102
-rwxr-xr-xOmni/Dev/Lithium.nix2
-rw-r--r--Omni/Dev/Lithium/Configuration.nix10
-rw-r--r--Omni/Dev/Vpn.nix3
-rw-r--r--Omni/Fact.hs81
-rw-r--r--Omni/Ide/README.md143
-rwxr-xr-xOmni/Ide/ailint.sh11
-rwxr-xr-xOmni/Ide/hooks/commit-msg2
-rwxr-xr-xOmni/Ide/hooks/post-checkout1
-rwxr-xr-xOmni/Ide/hooks/post-merge1
-rwxr-xr-xOmni/Ide/hooks/pre-push6
-rwxr-xr-xOmni/Ide/push.sh168
-rwxr-xr-xOmni/Ide/repl.sh7
-rwxr-xr-xOmni/Ide/run.sh4
-rwxr-xr-xOmni/Ide/typecheck.sh37
-rwxr-xr-xOmni/Jr.hs1046
-rw-r--r--Omni/Jr/Web.hs40
-rw-r--r--Omni/Jr/Web/Components.hs1751
-rw-r--r--Omni/Jr/Web/Handlers.hs649
-rw-r--r--Omni/Jr/Web/Pages.hs862
-rw-r--r--Omni/Jr/Web/Partials.hs274
-rw-r--r--Omni/Jr/Web/Style.hs2260
-rw-r--r--Omni/Jr/Web/Types.hs365
-rw-r--r--Omni/Keys/Ava.pub1
-rwxr-xr-xOmni/Lint.hs31
-rwxr-xr-xOmni/Llamacpp.py18
-rw-r--r--Omni/Log.hs44
-rw-r--r--Omni/Log.py10
-rw-r--r--Omni/Log/Concurrent.hs243
-rw-r--r--Omni/Log/Terminal.hs75
-rw-r--r--Omni/Namespace.hs5
-rw-r--r--Omni/Os/Base.nix16
-rwxr-xr-xOmni/Repl.py5
-rwxr-xr-xOmni/Sentry.sh8
-rw-r--r--Omni/Syncthing.nix19
-rw-r--r--Omni/Task.hs1208
-rw-r--r--Omni/Task/Core.hs1826
-rw-r--r--Omni/Task/DESIGN.md232
-rw-r--r--Omni/Task/MigrationTest.hs42
-rw-r--r--Omni/Task/README.md374
-rw-r--r--Omni/Task/RaceTest.hs58
-rw-r--r--Omni/Test.py9
-rw-r--r--Omni/Users.nix9
133 files changed, 32009 insertions, 968 deletions
diff --git a/Omni/Agent.hs b/Omni/Agent.hs
new file mode 100644
index 0000000..0bae0b5
--- /dev/null
+++ b/Omni/Agent.hs
@@ -0,0 +1,55 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Agent system entry point and combined test runner.
+--
+-- This module provides the main entry point for the agent system
+-- and re-exports core types from sub-modules.
+--
+-- : out omni-agent
+-- : dep aeson
+module Omni.Agent
+ ( -- * Engine
+ module Omni.Agent.Engine,
+
+ -- * Tools
+ module Omni.Agent.Tools,
+
+ -- * Core
+ module Omni.Agent.Core,
+
+ -- * Test
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Omni.Agent.Core
+import Omni.Agent.Engine hiding (main, test)
+import qualified Omni.Agent.Engine as Engine
+import Omni.Agent.Tools hiding (ToolResult, main, test)
+import qualified Omni.Agent.Tools as Tools
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent"
+ [ Engine.test,
+ Tools.test,
+ Test.unit "Core types are re-exported" <| do
+ let status = Idle :: WorkerStatus
+ status Test.@=? status,
+ Test.unit "Engine and Tools integrate correctly" <| do
+ let tools = Tools.allTools
+ length tools Test.@=? 5
+ let config =
+ Engine.defaultAgentConfig
+ { Engine.agentTools = tools
+ }
+ Engine.agentMaxIterations config Test.@=? 10
+ ]
diff --git a/Omni/Agent/Core.hs b/Omni/Agent/Core.hs
new file mode 100644
index 0000000..fb4a4b3
--- /dev/null
+++ b/Omni/Agent/Core.hs
@@ -0,0 +1,49 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+module Omni.Agent.Core where
+
+import Alpha
+import Data.Aeson (FromJSON, ToJSON)
+
+-- | Engine/provider selection for agent
+data EngineType
+ = EngineOpenRouter
+ | EngineOllama
+ | EngineAmp
+ deriving (Show, Eq, Generic)
+
+instance ToJSON EngineType
+
+instance FromJSON EngineType
+
+-- | Status of a worker agent
+data WorkerStatus
+ = Idle
+ | Syncing
+ | -- | Task ID
+ Working Text
+ | -- | Task ID
+ Submitting Text
+ | -- | Error message
+ Error Text
+ deriving (Show, Eq, Generic)
+
+instance ToJSON WorkerStatus
+
+instance FromJSON WorkerStatus
+
+-- | Representation of a worker agent
+data Worker = Worker
+ { workerName :: Text,
+ workerPid :: Maybe Int,
+ workerStatus :: WorkerStatus,
+ workerPath :: FilePath,
+ workerQuiet :: Bool, -- Disable ANSI status bar (for loop mode)
+ workerEngine :: EngineType -- Which LLM backend to use
+ }
+ deriving (Show, Eq, Generic)
+
+instance ToJSON Worker
+
+instance FromJSON Worker
diff --git a/Omni/Agent/DESIGN.md b/Omni/Agent/DESIGN.md
new file mode 100644
index 0000000..ae1f6b3
--- /dev/null
+++ b/Omni/Agent/DESIGN.md
@@ -0,0 +1,117 @@
+# Multi-Agent System 2.0 Design
+
+**Goal:** Replace the current bash-script based worker system (`start-worker.sh`, etc.) with a robust, type-safe Haskell application `Omni/Agent.hs`.
+
+## 1. CLI Interface
+
+The `agent` command (compiled from `Omni/Agent.hs`) will provide a unified interface for managing workers.
+
+```bash
+agent start <name> [--background] # Start a worker (foreground by default, background with flag)
+agent stop <name> # Stop a background worker
+agent status # List all workers and their status
+agent log <name> [-f] # View/tail worker logs
+agent sync # Sync local state with live (helper)
+```
+
+## 2. Module Structure (`Omni/Agent/`)
+
+We will refactor the bash logic into Haskell modules:
+
+- **Omni.Agent** (`Omni/Agent.hs`): Main entry point and CLI parsing (Docopt).
+- **Omni.Agent.Core**: Core data types and state management.
+- **Omni.Agent.Worker**: The worker loop logic (sync, claim, work, submit).
+- **Omni.Agent.Git**: Git operations (worktree, branch, merge, commit).
+- **Omni.Agent.Process**: Process management (PID files, signals).
+- **Omni.Agent.Log**: Log streaming and filtering (the "monitor" logic).
+
+## 3. Data Types
+
+```haskell
+data WorkerStatus
+ = Idle
+ | Syncing
+ | Working TaskId
+ | Submitting TaskId
+ | Error Text
+ deriving (Show, Eq, Generic)
+
+data Worker = Worker
+ { workerName :: Text
+ , workerPid :: Maybe Int
+ , workerStatus :: WorkerStatus
+ , workerPath :: FilePath
+ }
+```
+
+## 4. Implementation Details
+
+### 4.1 Worker Loop (`agent start`)
+The Haskell implementation should replicate the logic of `start-worker.sh` but with better error handling and logging.
+
+1. **Setup**: Ensure worktree exists (or create it).
+2. **Loop**:
+ - `Git.syncWithLive` (Rebase-based to preserve local history)
+ - `Task.sync`
+ - `task <- Task.findReady`
+ - If `task`:
+ - `Task.claim task`
+ - `baseBranch <- Git.determineBaseBranch task` (Check dependencies)
+ - `Git.checkoutTaskBranch task baseBranch` (Force checkout to clean untracked files)
+ - `Engine.runAgent prompt` (Native LLM agent via OpenRouter)
+ - `Git.commit`
+ - `Git.checkoutBase`
+ - `Task.submitReview task`
+ - Else: `sleep 60`
+
+### 4.2 Process Management
+- Store PIDs in `.tasks/workers/<name>.pid`.
+- `agent stop` sends SIGTERM to the PID.
+- `agent status` checks if PID is alive.
+
+### 4.3 Logging
+- The Engine module uses callbacks to report activity and costs in real-time.
+- `agent log` displays the status bar with worker progress information.
+- **UI Design**:
+ - **Two-line Status**: The CLI should maintain two reserved lines at the bottom (or top) of the output for each worker:
+ - **Line 1 (Meta)**: `[Worker: omni-worker-1] Task: t-123 | Files: 3 | Credits: $0.45 | Time: 05:23`
+ - **Line 2 (Activity)**: `[14:05:22] 🤖 Thinking...` (updates in place)
+ - **Task Details**: When claiming a task, print the full task description/details to the log/console so the user can see what is being worked on without looking it up.
+ - **Completion**: When a task finishes, print a summary line (e.g., `[✓] Task t-123 completed in 12m 30s`) and a hard line break before starting the next loop.
+ - **History**: Previous log lines (tool outputs, thoughts) scroll up above these two status lines.
+
+### 4.5 Git Robustness (Learnings)
+- **Identity**: Configure `git config user.name "Omni Worker"` and `user.email` in the worktree to clearly distinguish worker commits from human commits.
+- **Force Checkout**: The worker must use `git checkout -f` (or equivalent) when switching to task branches to ensure untracked files (like `.tasks/counters.jsonl`) don't block the switch.
+- **Base Branch Logic**:
+ - If the task depends on another task that is *not* yet in `live` (e.g., in `Review`), the worker should branch off the dependency's branch (`task/<dep-id>`).
+ - Otherwise, branch off `live` directly. Do NOT use the local worker branch (`omni-worker-N`) as the base, as it may contain temporary sync commits that shouldn't be merged.
+- **Commit Hygiene**: Bundle the task status update (marking as 'Review') *inside* the feature implementation commit. This keeps the history clean (one commit per feature) and avoids separate "sync" commits for status changes.
+- **Clean State**: The worker should ensure the workspace is clean (no uncommitted changes) before starting a new loop iteration.
+- **Rebase Safety**: Always check the exit code of `git rebase`. If it fails (conflicts), abort immediately (`git rebase --abort`) to avoid leaving the repo in a broken interactive rebase state.
+- **Status Verification**: Verify that task status updates actually succeed. Check `task ready` output against `live` state to prevent "zombie" tasks (completed in live but stuck in local loop) from being re-claimed.
+- **Binary Freshness**: Ensure the `task` binary used by the worker is rebuilt/updated when source code changes, otherwise logic fixes (like `task ready` filtering) won't take effect.
+
+## 5. Migration Strategy
+
+1. **Parallel Existence**: Keep bash scripts while developing Haskell version.
+2. **Feature Parity**: Ensure `agent start` works exactly like `start-worker.sh`.
+3. **Cutover**: Update `WORKER_AGENT_GUIDE.md` to use `agent` command.
+4. **Cleanup**: Delete bash scripts.
+
+## 6. Testing Plan
+
+### 6.1 Unit Tests (`Omni/Agent/Test.hs`)
+- Test `Git` module commands (mocked).
+- Test `Log` filtering logic.
+- Test CLI argument parsing.
+
+### 6.2 Integration Tests
+- Create a temporary test repo.
+- Spawn a worker.
+- Mock the Engine LLM calls or use a test API key.
+- Verify task moves from Open -> InProgress -> Review.
+
+## 7. References
+- `Omni/Agent/start-worker.sh` (Current implementation)
+- `Omni/Task.hs` (Task manager integration)
diff --git a/Omni/Agent/Engine.hs b/Omni/Agent/Engine.hs
new file mode 100644
index 0000000..f137ddb
--- /dev/null
+++ b/Omni/Agent/Engine.hs
@@ -0,0 +1,1189 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | LLM Agent Engine - Tool protocol and LLM provider abstraction.
+--
+-- This module provides the core abstractions for building LLM-powered agents:
+-- - Tool: Defines tools that agents can use
+-- - LLM: OpenAI-compatible chat completions API provider
+-- - AgentConfig: Configuration for running agents
+--
+-- : out omni-agent-engine
+-- : dep http-conduit
+-- : dep aeson
+-- : dep case-insensitive
+module Omni.Agent.Engine
+ ( Tool (..),
+ LLM (..),
+ EngineConfig (..),
+ AgentConfig (..),
+ AgentResult (..),
+ Guardrails (..),
+ GuardrailResult (..),
+ Message (..),
+ Role (..),
+ ToolCall (..),
+ FunctionCall (..),
+ ToolResult (..),
+ ChatCompletionRequest (..),
+ ChatCompletionResponse (..),
+ Choice (..),
+ Usage (..),
+ ToolApi (..),
+ encodeToolForApi,
+ defaultLLM,
+ defaultEngineConfig,
+ defaultAgentConfig,
+ defaultGuardrails,
+ chat,
+ runAgent,
+ runAgentWithProvider,
+ runAgentWithProviderStreaming,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.CaseInsensitive as CI
+import Data.IORef (newIORef, writeIORef)
+import qualified Data.Map.Strict as Map
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Provider as Provider
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Engine"
+ [ Test.unit "Tool JSON roundtrip" <| do
+ let tool =
+ Tool
+ { toolName = "get_weather",
+ toolDescription = "Get weather for a location",
+ toolJsonSchema = Aeson.object ["type" .= ("object" :: Text), "properties" .= Aeson.object []],
+ toolExecute = \_ -> pure (Aeson.String "sunny")
+ }
+ let encoded = encodeToolForApi tool
+ case Aeson.decode (Aeson.encode encoded) of
+ Nothing -> Test.assertFailure "Failed to decode tool"
+ Just decoded -> toolName tool Test.@=? toolApiName decoded,
+ Test.unit "Message JSON roundtrip" <| do
+ let msg = Message User "Hello" Nothing Nothing
+ case Aeson.decode (Aeson.encode msg) of
+ Nothing -> Test.assertFailure "Failed to decode message"
+ Just decoded -> msgContent msg Test.@=? msgContent decoded,
+ Test.unit "defaultLLM has correct endpoint" <| do
+ llmBaseUrl defaultLLM Test.@=? "https://openrouter.ai/api/v1",
+ Test.unit "defaultLLM has OpenRouter headers" <| do
+ length (llmExtraHeaders defaultLLM) Test.@=? 2
+ llmModel defaultLLM Test.@=? "anthropic/claude-sonnet-4.5",
+ Test.unit "defaultAgentConfig has sensible defaults" <| do
+ agentMaxIterations defaultAgentConfig Test.@=? 10,
+ Test.unit "defaultEngineConfig has no-op callbacks" <| do
+ engineOnCost defaultEngineConfig 100 5
+ engineOnActivity defaultEngineConfig "test"
+ engineOnToolCall defaultEngineConfig "tool" "result"
+ True Test.@=? True,
+ Test.unit "buildToolMap creates correct map" <| do
+ let tool1 =
+ Tool
+ { toolName = "tool1",
+ toolDescription = "First tool",
+ toolJsonSchema = Aeson.object [],
+ toolExecute = \_ -> pure Aeson.Null
+ }
+ tool2 =
+ Tool
+ { toolName = "tool2",
+ toolDescription = "Second tool",
+ toolJsonSchema = Aeson.object [],
+ toolExecute = \_ -> pure Aeson.Null
+ }
+ toolMap = buildToolMap [tool1, tool2]
+ Map.size toolMap Test.@=? 2
+ Map.member "tool1" toolMap Test.@=? True
+ Map.member "tool2" toolMap Test.@=? True,
+ Test.unit "Usage JSON parsing" <| do
+ let json = "{\"prompt_tokens\":100,\"completion_tokens\":50,\"total_tokens\":150}"
+ case Aeson.decode json of
+ Nothing -> Test.assertFailure "Failed to decode usage"
+ Just usage -> do
+ usagePromptTokens usage Test.@=? 100
+ usageCompletionTokens usage Test.@=? 50
+ usageTotalTokens usage Test.@=? 150
+ usageCost usage Test.@=? Nothing,
+ Test.unit "Usage JSON parsing with cost" <| do
+ let json = "{\"prompt_tokens\":194,\"completion_tokens\":2,\"total_tokens\":196,\"cost\":0.95}"
+ case Aeson.decode json of
+ Nothing -> Test.assertFailure "Failed to decode usage with cost"
+ Just usage -> do
+ usagePromptTokens usage Test.@=? 194
+ usageCompletionTokens usage Test.@=? 2
+ usageTotalTokens usage Test.@=? 196
+ usageCost usage Test.@=? Just 0.95,
+ Test.unit "AgentResult JSON roundtrip" <| do
+ let result =
+ AgentResult
+ { resultFinalMessage = "Done",
+ resultToolCallCount = 3,
+ resultIterations = 2,
+ resultTotalCost = 50,
+ resultTotalTokens = 1500
+ }
+ case Aeson.decode (Aeson.encode result) of
+ Nothing -> Test.assertFailure "Failed to decode AgentResult"
+ Just decoded -> do
+ resultFinalMessage decoded Test.@=? "Done"
+ resultToolCallCount decoded Test.@=? 3
+ resultIterations decoded Test.@=? 2,
+ Test.unit "estimateCost calculates correctly" <| do
+ let gpt4oCost = estimateCost "gpt-4o" 1000
+ gpt4oMiniCost = estimateCost "gpt-4o-mini" 1000
+ (gpt4oCost >= gpt4oMiniCost) Test.@=? True
+ (gpt4oCost > 0) Test.@=? True,
+ Test.unit "ToolCall JSON roundtrip" <| do
+ let tc =
+ ToolCall
+ { tcId = "call_123",
+ tcType = "function",
+ tcFunction = FunctionCall "read_file" "{\"path\":\"/tmp/test\"}"
+ }
+ case Aeson.decode (Aeson.encode tc) of
+ Nothing -> Test.assertFailure "Failed to decode ToolCall"
+ Just decoded -> tcId decoded Test.@=? "call_123",
+ Test.unit "FunctionCall JSON roundtrip" <| do
+ let fc = FunctionCall "test_func" "{\"arg\":\"value\"}"
+ case Aeson.decode (Aeson.encode fc) of
+ Nothing -> Test.assertFailure "Failed to decode FunctionCall"
+ Just decoded -> do
+ fcName decoded Test.@=? "test_func"
+ fcArguments decoded Test.@=? "{\"arg\":\"value\"}",
+ Test.unit "Role JSON roundtrip for all roles" <| do
+ let roles = [System, User, Assistant, ToolRole]
+ forM_ roles <| \role ->
+ case Aeson.decode (Aeson.encode role) of
+ Nothing -> Test.assertFailure ("Failed to decode Role: " <> show role)
+ Just decoded -> decoded Test.@=? role,
+ Test.unit "defaultGuardrails has sensible defaults" <| do
+ guardrailMaxCostCents defaultGuardrails Test.@=? 100.0
+ guardrailMaxTokens defaultGuardrails Test.@=? 500000
+ guardrailMaxDuplicateToolCalls defaultGuardrails Test.@=? 3
+ guardrailMaxTestFailures defaultGuardrails Test.@=? 3,
+ Test.unit "checkCostGuardrail detects exceeded budget" <| do
+ let g = defaultGuardrails {guardrailMaxCostCents = 50.0}
+ checkCostGuardrail g 60.0 Test.@=? GuardrailCostExceeded 60.0 50.0
+ checkCostGuardrail g 40.0 Test.@=? GuardrailOk,
+ Test.unit "checkTokenGuardrail detects exceeded budget" <| do
+ let g = defaultGuardrails {guardrailMaxTokens = 1000}
+ checkTokenGuardrail g 1500 Test.@=? GuardrailTokensExceeded 1500 1000
+ checkTokenGuardrail g 500 Test.@=? GuardrailOk,
+ Test.unit "checkDuplicateGuardrail detects repeated calls" <| do
+ let g = defaultGuardrails {guardrailMaxDuplicateToolCalls = 3}
+ counts = Map.fromList [("bash", 3), ("read_file", 1)]
+ case checkDuplicateGuardrail g counts of
+ GuardrailDuplicateToolCalls name count -> do
+ name Test.@=? "bash"
+ count Test.@=? 3
+ _ -> Test.assertFailure "Expected GuardrailDuplicateToolCalls"
+ checkDuplicateGuardrail g (Map.fromList [("bash", 2)]) Test.@=? GuardrailOk,
+ Test.unit "checkTestFailureGuardrail detects failures" <| do
+ let g = defaultGuardrails {guardrailMaxTestFailures = 3}
+ checkTestFailureGuardrail g 3 Test.@=? GuardrailTestFailures 3
+ checkTestFailureGuardrail g 2 Test.@=? GuardrailOk,
+ Test.unit "updateToolCallCounts accumulates correctly" <| do
+ let tc1 = ToolCall "1" "function" (FunctionCall "bash" "{}")
+ tc2 = ToolCall "2" "function" (FunctionCall "bash" "{}")
+ tc3 = ToolCall "3" "function" (FunctionCall "read_file" "{}")
+ counts = updateToolCallCounts Map.empty [tc1, tc2, tc3]
+ Map.lookup "bash" counts Test.@=? Just 2
+ Map.lookup "read_file" counts Test.@=? Just 1,
+ Test.unit "Guardrails JSON roundtrip" <| do
+ let g = Guardrails 75.0 100000 5 4 3
+ case Aeson.decode (Aeson.encode g) of
+ Nothing -> Test.assertFailure "Failed to decode Guardrails"
+ Just decoded -> decoded Test.@=? g,
+ Test.unit "GuardrailResult JSON roundtrip" <| do
+ let results =
+ [ GuardrailOk,
+ GuardrailCostExceeded 100.0 50.0,
+ GuardrailTokensExceeded 2000 1000,
+ GuardrailDuplicateToolCalls "bash" 5,
+ GuardrailTestFailures 3,
+ GuardrailEditFailures 5
+ ]
+ forM_ results <| \r ->
+ case Aeson.decode (Aeson.encode r) of
+ Nothing -> Test.assertFailure ("Failed to decode GuardrailResult: " <> show r)
+ Just decoded -> decoded Test.@=? r
+ ]
+
+data Tool = Tool
+ { toolName :: Text,
+ toolDescription :: Text,
+ toolJsonSchema :: Aeson.Value,
+ toolExecute :: Aeson.Value -> IO Aeson.Value
+ }
+
+data ToolApi = ToolApi
+ { toolApiName :: Text,
+ toolApiDescription :: Text,
+ toolApiParameters :: Aeson.Value
+ }
+ deriving (Generic)
+
+instance Aeson.ToJSON ToolApi where
+ toJSON t =
+ Aeson.object
+ [ "type" .= ("function" :: Text),
+ "function"
+ .= Aeson.object
+ [ "name" .= toolApiName t,
+ "description" .= toolApiDescription t,
+ "parameters" .= toolApiParameters t
+ ]
+ ]
+
+instance Aeson.FromJSON ToolApi where
+ parseJSON =
+ Aeson.withObject "ToolApi" <| \v -> do
+ fn <- v .: "function"
+ (ToolApi </ (fn .: "name"))
+ <*> (fn .: "description")
+ <*> (fn .: "parameters")
+
+encodeToolForApi :: Tool -> ToolApi
+encodeToolForApi t =
+ ToolApi
+ { toolApiName = toolName t,
+ toolApiDescription = toolDescription t,
+ toolApiParameters = toolJsonSchema t
+ }
+
+encodeToolForProvider :: Tool -> Provider.ToolApi
+encodeToolForProvider t =
+ Provider.ToolApi
+ { Provider.toolApiName = toolName t,
+ Provider.toolApiDescription = toolDescription t,
+ Provider.toolApiParameters = toolJsonSchema t
+ }
+
+data LLM = LLM
+ { llmBaseUrl :: Text,
+ llmApiKey :: Text,
+ llmModel :: Text,
+ llmExtraHeaders :: [(ByteString, ByteString)]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON LLM where
+ toJSON l =
+ Aeson.object
+ [ "llmBaseUrl" .= llmBaseUrl l,
+ "llmApiKey" .= llmApiKey l,
+ "llmModel" .= llmModel l
+ ]
+
+instance Aeson.FromJSON LLM where
+ parseJSON =
+ Aeson.withObject "LLM" <| \v ->
+ (LLM </ (v .: "llmBaseUrl"))
+ <*> (v .: "llmApiKey")
+ <*> (v .: "llmModel")
+ <*> pure []
+
+defaultLLM :: LLM
+defaultLLM =
+ LLM
+ { llmBaseUrl = "https://openrouter.ai/api/v1",
+ llmApiKey = "",
+ llmModel = "anthropic/claude-sonnet-4.5",
+ llmExtraHeaders =
+ [ ("HTTP-Referer", "https://omni.dev"),
+ ("X-Title", "Omni Agent")
+ ]
+ }
+
+data AgentConfig = AgentConfig
+ { agentModel :: Text,
+ agentTools :: [Tool],
+ agentSystemPrompt :: Text,
+ agentMaxIterations :: Int,
+ agentGuardrails :: Guardrails
+ }
+
+data Guardrails = Guardrails
+ { guardrailMaxCostCents :: Double,
+ guardrailMaxTokens :: Int,
+ guardrailMaxDuplicateToolCalls :: Int,
+ guardrailMaxTestFailures :: Int,
+ guardrailMaxEditFailures :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Guardrails
+
+instance Aeson.FromJSON Guardrails
+
+data GuardrailResult
+ = GuardrailOk
+ | GuardrailCostExceeded Double Double
+ | GuardrailTokensExceeded Int Int
+ | GuardrailDuplicateToolCalls Text Int
+ | GuardrailTestFailures Int
+ | GuardrailEditFailures Int
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON GuardrailResult
+
+instance Aeson.FromJSON GuardrailResult
+
+defaultGuardrails :: Guardrails
+defaultGuardrails =
+ Guardrails
+ { guardrailMaxCostCents = 100.0,
+ guardrailMaxTokens = 500000,
+ guardrailMaxDuplicateToolCalls = 3,
+ guardrailMaxTestFailures = 3,
+ guardrailMaxEditFailures = 5
+ }
+
+defaultAgentConfig :: AgentConfig
+defaultAgentConfig =
+ AgentConfig
+ { agentModel = "gpt-4",
+ agentTools = [],
+ agentSystemPrompt = "You are a helpful assistant.",
+ agentMaxIterations = 10,
+ agentGuardrails = defaultGuardrails
+ }
+
+data EngineConfig = EngineConfig
+ { engineLLM :: LLM,
+ engineOnCost :: Int -> Double -> IO (),
+ engineOnActivity :: Text -> IO (),
+ engineOnToolCall :: Text -> Text -> IO (),
+ engineOnAssistant :: Text -> IO (),
+ engineOnToolResult :: Text -> Bool -> Text -> IO (),
+ engineOnComplete :: IO (),
+ engineOnError :: Text -> IO (),
+ engineOnGuardrail :: GuardrailResult -> IO ()
+ }
+
+defaultEngineConfig :: EngineConfig
+defaultEngineConfig =
+ EngineConfig
+ { engineLLM = defaultLLM,
+ engineOnCost = \_ _ -> pure (),
+ engineOnActivity = \_ -> pure (),
+ engineOnToolCall = \_ _ -> pure (),
+ engineOnAssistant = \_ -> pure (),
+ engineOnToolResult = \_ _ _ -> pure (),
+ engineOnComplete = pure (),
+ engineOnError = \_ -> pure (),
+ engineOnGuardrail = \_ -> pure ()
+ }
+
+data AgentResult = AgentResult
+ { resultFinalMessage :: Text,
+ resultToolCallCount :: Int,
+ resultIterations :: Int,
+ resultTotalCost :: Double,
+ resultTotalTokens :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON AgentResult
+
+instance Aeson.FromJSON AgentResult
+
+data Role = System | User | Assistant | ToolRole
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Role where
+ toJSON System = Aeson.String "system"
+ toJSON User = Aeson.String "user"
+ toJSON Assistant = Aeson.String "assistant"
+ toJSON ToolRole = Aeson.String "tool"
+
+instance Aeson.FromJSON Role where
+ parseJSON = Aeson.withText "Role" parseRole
+ where
+ parseRole "system" = pure System
+ parseRole "user" = pure User
+ parseRole "assistant" = pure Assistant
+ parseRole "tool" = pure ToolRole
+ parseRole _ = empty
+
+data Message = Message
+ { msgRole :: Role,
+ msgContent :: Text,
+ msgToolCalls :: Maybe [ToolCall],
+ msgToolCallId :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Message where
+ toJSON m =
+ Aeson.object
+ <| catMaybes
+ [ Just ("role" .= msgRole m),
+ Just ("content" .= msgContent m),
+ ("tool_calls" .=) </ msgToolCalls m,
+ ("tool_call_id" .=) </ msgToolCallId m
+ ]
+
+instance Aeson.FromJSON Message where
+ parseJSON =
+ Aeson.withObject "Message" <| \v ->
+ (Message </ (v .: "role"))
+ <*> (v .:? "content" .!= "")
+ <*> (v .:? "tool_calls")
+ <*> (v .:? "tool_call_id")
+
+data ToolCall = ToolCall
+ { tcId :: Text,
+ tcType :: Text,
+ tcFunction :: FunctionCall
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ToolCall where
+ toJSON tc =
+ Aeson.object
+ [ "id" .= tcId tc,
+ "type" .= tcType tc,
+ "function" .= tcFunction tc
+ ]
+
+instance Aeson.FromJSON ToolCall where
+ parseJSON =
+ Aeson.withObject "ToolCall" <| \v ->
+ (ToolCall </ (v .: "id"))
+ <*> (v .:? "type" .!= "function")
+ <*> (v .: "function")
+
+data FunctionCall = FunctionCall
+ { fcName :: Text,
+ fcArguments :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON FunctionCall where
+ toJSON fc =
+ Aeson.object
+ [ "name" .= fcName fc,
+ "arguments" .= fcArguments fc
+ ]
+
+instance Aeson.FromJSON FunctionCall where
+ parseJSON =
+ Aeson.withObject "FunctionCall" <| \v ->
+ (FunctionCall </ (v .: "name"))
+ <*> (v .: "arguments")
+
+data ToolResult = ToolResult
+ { trToolCallId :: Text,
+ trContent :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ToolResult
+
+instance Aeson.FromJSON ToolResult
+
+data ChatCompletionRequest = ChatCompletionRequest
+ { reqModel :: Text,
+ reqMessages :: [Message],
+ reqTools :: Maybe [ToolApi]
+ }
+ deriving (Generic)
+
+instance Aeson.ToJSON ChatCompletionRequest where
+ toJSON r =
+ Aeson.object
+ <| catMaybes
+ [ Just ("model" .= reqModel r),
+ Just ("messages" .= reqMessages r),
+ ("tools" .=) </ reqTools r,
+ Just ("usage" .= Aeson.object ["include" .= True])
+ ]
+
+data Choice = Choice
+ { choiceIndex :: Int,
+ choiceMessage :: Message,
+ choiceFinishReason :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Choice where
+ parseJSON =
+ Aeson.withObject "Choice" <| \v ->
+ (Choice </ (v .: "index"))
+ <*> (v .: "message")
+ <*> (v .:? "finish_reason")
+
+data Usage = Usage
+ { usagePromptTokens :: Int,
+ usageCompletionTokens :: Int,
+ usageTotalTokens :: Int,
+ usageCost :: Maybe Double
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Usage where
+ parseJSON =
+ Aeson.withObject "Usage" <| \v ->
+ (Usage </ (v .: "prompt_tokens"))
+ <*> (v .: "completion_tokens")
+ <*> (v .: "total_tokens")
+ <*> (v .:? "cost")
+
+data ChatCompletionResponse = ChatCompletionResponse
+ { respId :: Text,
+ respChoices :: [Choice],
+ respModel :: Text,
+ respUsage :: Maybe Usage
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON ChatCompletionResponse where
+ parseJSON =
+ Aeson.withObject "ChatCompletionResponse" <| \v ->
+ (ChatCompletionResponse </ (v .: "id"))
+ <*> (v .: "choices")
+ <*> (v .: "model")
+ <*> (v .:? "usage")
+
+data ChatResult = ChatResult
+ { chatMessage :: Message,
+ chatUsage :: Maybe Usage
+ }
+ deriving (Show, Eq)
+
+chatWithUsage :: LLM -> [Tool] -> [Message] -> IO (Either Text ChatResult)
+chatWithUsage llm tools messages = do
+ let url = Text.unpack (llmBaseUrl llm) <> "/chat/completions"
+ req0 <- HTTP.parseRequest url
+ let toolApis = [encodeToolForApi t | not (null tools), t <- tools]
+ body =
+ ChatCompletionRequest
+ { reqModel = llmModel llm,
+ reqMessages = messages,
+ reqTools = if null toolApis then Nothing else Just toolApis
+ }
+ baseReq =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestHeader "Authorization" ["Bearer " <> TE.encodeUtf8 (llmApiKey llm)]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ req = foldr addHeader baseReq (llmExtraHeaders llm)
+ addHeader (name, value) = HTTP.addRequestHeader (CI.mk name) value
+
+ response <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just resp ->
+ case respChoices resp of
+ (c : _) -> pure (Right (ChatResult (choiceMessage c) (respUsage resp)))
+ [] -> pure (Left "No choices in response")
+ Nothing -> pure (Left "Failed to parse response")
+ else pure (Left ("HTTP error: " <> tshow status <> " - " <> TE.decodeUtf8 (BL.toStrict (HTTP.getResponseBody response))))
+
+chat :: LLM -> [Tool] -> [Message] -> IO (Either Text Message)
+chat llm tools messages = do
+ result <- chatWithUsage llm tools messages
+ pure (chatMessage </ result)
+
+runAgent :: EngineConfig -> AgentConfig -> Text -> IO (Either Text AgentResult)
+runAgent engineCfg agentCfg userPrompt = do
+ let llm =
+ (engineLLM engineCfg)
+ { llmModel = agentModel agentCfg
+ }
+ tools = agentTools agentCfg
+ toolMap = buildToolMap tools
+ systemMsg = Message System (agentSystemPrompt agentCfg) Nothing Nothing
+ userMsg = Message User userPrompt Nothing Nothing
+ initialMessages = [systemMsg, userMsg]
+
+ engineOnActivity engineCfg "Starting agent loop"
+ loop llm tools toolMap initialMessages 0 0 0 0.0 Map.empty 0 0
+ where
+ maxIter = agentMaxIterations agentCfg
+ guardrails' = agentGuardrails agentCfg
+
+ loop ::
+ LLM ->
+ [Tool] ->
+ Map.Map Text Tool ->
+ [Message] ->
+ Int ->
+ Int ->
+ Int ->
+ Double ->
+ Map.Map Text Int ->
+ Int ->
+ Int ->
+ IO (Either Text AgentResult)
+ loop llm tools' toolMap msgs iteration totalCalls totalTokens totalCost toolCallCounts testFailures editFailures
+ | iteration >= maxIter = do
+ let errMsg = "Max iterations (" <> tshow maxIter <> ") reached"
+ engineOnError engineCfg errMsg
+ pure <| Left errMsg
+ | otherwise = do
+ let guardrailViolation = findGuardrailViolation guardrails' totalCost totalTokens toolCallCounts testFailures editFailures
+ case guardrailViolation of
+ Just (g, errMsg) -> do
+ engineOnGuardrail engineCfg g
+ pure <| Left errMsg
+ Nothing -> do
+ engineOnActivity engineCfg <| "Iteration " <> tshow (iteration + 1)
+ result <- chatWithUsage llm tools' msgs
+ case result of
+ Left err -> do
+ engineOnError engineCfg err
+ pure (Left err)
+ Right chatRes -> do
+ let msg = chatMessage chatRes
+ tokens = maybe 0 usageTotalTokens (chatUsage chatRes)
+ cost = case chatUsage chatRes +> usageCost of
+ Just actualCost -> actualCost * 100
+ Nothing -> estimateCost (llmModel llm) tokens
+ engineOnCost engineCfg tokens cost
+ let newTokens = totalTokens + tokens
+ newCost = totalCost + cost
+ let assistantText = msgContent msg
+ unless (Text.null assistantText)
+ <| engineOnAssistant engineCfg assistantText
+ case msgToolCalls msg of
+ Nothing
+ | Text.null (msgContent msg) && totalCalls > 0 -> do
+ engineOnActivity engineCfg "Empty response after tools, prompting for text"
+ let promptMsg = Message ToolRole "Please provide a response to the user." Nothing Nothing
+ newMsgs = msgs <> [msg, promptMsg]
+ loop llm tools' toolMap newMsgs (iteration + 1) totalCalls newTokens newCost toolCallCounts testFailures editFailures
+ | otherwise -> do
+ engineOnActivity engineCfg "Agent completed"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just [] -> do
+ engineOnActivity engineCfg "Agent completed (empty tool calls)"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just tcs -> do
+ (toolResults, newTestFailures, newEditFailures) <- executeToolCallsWithTracking engineCfg toolMap tcs testFailures editFailures
+ let newMsgs = msgs <> [msg] <> toolResults
+ newCalls = totalCalls + length tcs
+ newToolCallCounts = updateToolCallCounts toolCallCounts tcs
+ loop llm tools' toolMap newMsgs (iteration + 1) newCalls newTokens newCost newToolCallCounts newTestFailures newEditFailures
+
+checkCostGuardrail :: Guardrails -> Double -> GuardrailResult
+checkCostGuardrail g cost
+ | cost > guardrailMaxCostCents g = GuardrailCostExceeded cost (guardrailMaxCostCents g)
+ | otherwise = GuardrailOk
+
+checkTokenGuardrail :: Guardrails -> Int -> GuardrailResult
+checkTokenGuardrail g tokens
+ | tokens > guardrailMaxTokens g = GuardrailTokensExceeded tokens (guardrailMaxTokens g)
+ | otherwise = GuardrailOk
+
+checkDuplicateGuardrail :: Guardrails -> Map.Map Text Int -> GuardrailResult
+checkDuplicateGuardrail g counts =
+ let maxAllowed = guardrailMaxDuplicateToolCalls g
+ violations = [(name, count) | (name, count) <- Map.toList counts, count >= maxAllowed]
+ in case violations of
+ ((name, count) : _) -> GuardrailDuplicateToolCalls name count
+ [] -> GuardrailOk
+
+checkTestFailureGuardrail :: Guardrails -> Int -> GuardrailResult
+checkTestFailureGuardrail g failures
+ | failures >= guardrailMaxTestFailures g = GuardrailTestFailures failures
+ | otherwise = GuardrailOk
+
+checkEditFailureGuardrail :: Guardrails -> Int -> GuardrailResult
+checkEditFailureGuardrail g failures
+ | failures >= guardrailMaxEditFailures g = GuardrailEditFailures failures
+ | otherwise = GuardrailOk
+
+updateToolCallCounts :: Map.Map Text Int -> [ToolCall] -> Map.Map Text Int
+updateToolCallCounts =
+ foldr (\tc m -> Map.insertWith (+) (fcName (tcFunction tc)) 1 m)
+
+findGuardrailViolation :: Guardrails -> Double -> Int -> Map.Map Text Int -> Int -> Int -> Maybe (GuardrailResult, Text)
+findGuardrailViolation g cost tokens toolCallCounts testFailures editFailures =
+ case checkCostGuardrail g cost of
+ r@(GuardrailCostExceeded actual limit) ->
+ Just (r, "Guardrail: cost budget exceeded (" <> tshow actual <> "/" <> tshow limit <> " cents)")
+ _ -> case checkTokenGuardrail g tokens of
+ r@(GuardrailTokensExceeded actual limit) ->
+ Just (r, "Guardrail: token budget exceeded (" <> tshow actual <> "/" <> tshow limit <> " tokens)")
+ _ -> case checkDuplicateGuardrail g toolCallCounts of
+ r@(GuardrailDuplicateToolCalls tool count) ->
+ Just (r, "Guardrail: duplicate tool calls (" <> tool <> " called " <> tshow count <> " times)")
+ _ -> case checkTestFailureGuardrail g testFailures of
+ r@(GuardrailTestFailures count) ->
+ Just (r, "Guardrail: too many test failures (" <> tshow count <> ")")
+ _ -> case checkEditFailureGuardrail g editFailures of
+ r@(GuardrailEditFailures count) ->
+ Just (r, "Guardrail: too many edit_file failures (" <> tshow count <> " 'old_str not found' errors)")
+ _ -> Nothing
+
+buildToolMap :: [Tool] -> Map.Map Text Tool
+buildToolMap = Map.fromList <. map (\t -> (toolName t, t))
+
+-- | Track both test failures and edit failures
+-- Returns (messages, testFailures, editFailures)
+executeToolCallsWithTracking :: EngineConfig -> Map.Map Text Tool -> [ToolCall] -> Int -> Int -> IO ([Message], Int, Int)
+executeToolCallsWithTracking engineCfg toolMap tcs initialTestFailures initialEditFailures = do
+ results <- traverse executeSingle tcs
+ let msgs = map (\(m, _, _) -> m) results
+ testDeltas = map (\(_, t, _) -> t) results
+ editDeltas = map (\(_, _, e) -> e) results
+ totalTestFailures = initialTestFailures + sum testDeltas
+ totalEditFailures = initialEditFailures + sum editDeltas
+ pure (msgs, totalTestFailures, totalEditFailures)
+ where
+ executeSingle tc = do
+ let name = fcName (tcFunction tc)
+ argsText = fcArguments (tcFunction tc)
+ callId = tcId tc
+ engineOnActivity engineCfg <| "Executing tool: " <> name
+ engineOnToolCall engineCfg name argsText
+ case Map.lookup name toolMap of
+ Nothing -> do
+ let errMsg = "Tool not found: " <> name
+ engineOnToolResult engineCfg name False errMsg
+ pure (Message ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just tool -> do
+ case Aeson.decode (BL.fromStrict (TE.encodeUtf8 argsText)) of
+ Nothing -> do
+ let errMsg = "Invalid JSON arguments: " <> argsText
+ engineOnToolResult engineCfg name False errMsg
+ pure (Message ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just args -> do
+ resultValue <- toolExecute tool args
+ let resultText = TE.decodeUtf8 (BL.toStrict (Aeson.encode resultValue))
+ isTestCall = name == "bash" && ("bild --test" `Text.isInfixOf` argsText || "bild -t" `Text.isInfixOf` argsText)
+ isTestFailure = isTestCall && isFailureResult resultValue
+ testDelta = if isTestFailure then 1 else 0
+ isEditFailure = name == "edit_file" && isOldStrNotFoundError resultValue
+ editDelta = if isEditFailure then 1 else 0
+ engineOnToolResult engineCfg name True resultText
+ pure (Message ToolRole resultText Nothing (Just callId), testDelta, editDelta)
+
+ isFailureResult :: Aeson.Value -> Bool
+ isFailureResult (Aeson.Object obj) =
+ case KeyMap.lookup "exit_code" obj of
+ Just (Aeson.Number n) -> n /= 0
+ _ -> False
+ isFailureResult (Aeson.String s) =
+ "error"
+ `Text.isInfixOf` Text.toLower s
+ || "failed"
+ `Text.isInfixOf` Text.toLower s
+ || "FAILED"
+ `Text.isInfixOf` s
+ isFailureResult _ = False
+
+ isOldStrNotFoundError :: Aeson.Value -> Bool
+ isOldStrNotFoundError (Aeson.Object obj) =
+ case KeyMap.lookup "error" obj of
+ Just (Aeson.String s) -> "old_str not found" `Text.isInfixOf` s
+ _ -> False
+ isOldStrNotFoundError _ = False
+
+-- | Estimate cost in cents from token count.
+-- Uses blended input/output rates (roughly 2:1 output:input ratio).
+-- Prices as of Dec 2024 from OpenRouter.
+estimateCost :: Text -> Int -> Double
+estimateCost model tokens
+ | "gpt-4o-mini" `Text.isInfixOf` model = fromIntegral tokens * 0.04 / 1000
+ | "gpt-4o" `Text.isInfixOf` model = fromIntegral tokens * 0.7 / 1000
+ | "gemini-2.0-flash" `Text.isInfixOf` model = fromIntegral tokens * 0.15 / 1000
+ | "gemini-2.5-flash" `Text.isInfixOf` model = fromIntegral tokens * 0.15 / 1000
+ | "claude-sonnet-4.5" `Text.isInfixOf` model = fromIntegral tokens * 0.9 / 1000
+ | "claude-sonnet-4" `Text.isInfixOf` model = fromIntegral tokens * 0.9 / 1000
+ | "claude-3-haiku" `Text.isInfixOf` model = fromIntegral tokens * 0.1 / 1000
+ | "claude" `Text.isInfixOf` model = fromIntegral tokens * 0.9 / 1000
+ | otherwise = fromIntegral tokens * 0.5 / 1000
+
+-- | Run agent with a Provider instead of LLM.
+-- This is the new preferred way to run agents with multiple backend support.
+runAgentWithProvider :: EngineConfig -> Provider.Provider -> AgentConfig -> Text -> IO (Either Text AgentResult)
+runAgentWithProvider engineCfg provider agentCfg userPrompt = do
+ let tools = agentTools agentCfg
+ toolApis = map encodeToolForProvider tools
+ toolMap = buildToolMap tools
+ systemMsg = providerMessage Provider.System (agentSystemPrompt agentCfg)
+ userMsg = providerMessage Provider.User userPrompt
+ initialMessages = [systemMsg, userMsg]
+
+ engineOnActivity engineCfg "Starting agent loop (Provider)"
+ loopProvider provider toolApis toolMap initialMessages 0 0 0 0.0 Map.empty 0 0
+ where
+ maxIter = agentMaxIterations agentCfg
+ guardrails' = agentGuardrails agentCfg
+
+ providerMessage :: Provider.Role -> Text -> Provider.Message
+ providerMessage role content = Provider.Message role content Nothing Nothing
+
+ loopProvider ::
+ Provider.Provider ->
+ [Provider.ToolApi] ->
+ Map.Map Text Tool ->
+ [Provider.Message] ->
+ Int ->
+ Int ->
+ Int ->
+ Double ->
+ Map.Map Text Int ->
+ Int ->
+ Int ->
+ IO (Either Text AgentResult)
+ loopProvider prov toolApis' toolMap msgs iteration totalCalls totalTokens totalCost toolCallCounts testFailures editFailures
+ | iteration >= maxIter = do
+ let errMsg = "Max iterations (" <> tshow maxIter <> ") reached"
+ engineOnError engineCfg errMsg
+ pure <| Left errMsg
+ | otherwise = do
+ let guardrailViolation = findGuardrailViolation guardrails' totalCost totalTokens toolCallCounts testFailures editFailures
+ case guardrailViolation of
+ Just (g, errMsg) -> do
+ engineOnGuardrail engineCfg g
+ pure <| Left errMsg
+ Nothing -> do
+ engineOnActivity engineCfg <| "Iteration " <> tshow (iteration + 1)
+ result <- Provider.chatWithUsage prov toolApis' msgs
+ case result of
+ Left err -> do
+ engineOnError engineCfg err
+ pure (Left err)
+ Right chatRes -> do
+ let msg = Provider.chatMessage chatRes
+ tokens = maybe 0 Provider.usageTotalTokens (Provider.chatUsage chatRes)
+ cost = case Provider.chatUsage chatRes +> Provider.usageCost of
+ Just actualCost -> actualCost * 100
+ Nothing -> estimateCost (getProviderModel prov) tokens
+ engineOnCost engineCfg tokens cost
+ let newTokens = totalTokens + tokens
+ newCost = totalCost + cost
+ let assistantText = Provider.msgContent msg
+ unless (Text.null assistantText)
+ <| engineOnAssistant engineCfg assistantText
+ case Provider.msgToolCalls msg of
+ Nothing
+ | Text.null (Provider.msgContent msg) && totalCalls > 0 -> do
+ engineOnActivity engineCfg "Empty response after tools, prompting for text"
+ let promptMsg = Provider.Message Provider.ToolRole "Please provide a response to the user." Nothing Nothing
+ newMsgs = msgs <> [msg, promptMsg]
+ loopProvider prov toolApis' toolMap newMsgs (iteration + 1) totalCalls newTokens newCost toolCallCounts testFailures editFailures
+ | otherwise -> do
+ engineOnActivity engineCfg "Agent completed"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = Provider.msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just [] -> do
+ engineOnActivity engineCfg "Agent completed (empty tool calls)"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = Provider.msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just tcs -> do
+ (toolResults, newTestFailures, newEditFailures) <- executeProviderToolCalls engineCfg toolMap tcs testFailures editFailures
+ let newMsgs = msgs <> [msg] <> toolResults
+ newCalls = totalCalls + length tcs
+ newToolCallCounts = updateProviderToolCallCounts toolCallCounts tcs
+ loopProvider prov toolApis' toolMap newMsgs (iteration + 1) newCalls newTokens newCost newToolCallCounts newTestFailures newEditFailures
+
+ getProviderModel :: Provider.Provider -> Text
+ getProviderModel (Provider.OpenRouter cfg) = Provider.providerModel cfg
+ getProviderModel (Provider.Ollama cfg) = Provider.providerModel cfg
+ getProviderModel (Provider.AmpCLI _) = "amp"
+
+ updateProviderToolCallCounts :: Map.Map Text Int -> [Provider.ToolCall] -> Map.Map Text Int
+ updateProviderToolCallCounts =
+ foldr (\tc m -> Map.insertWith (+) (Provider.fcName (Provider.tcFunction tc)) 1 m)
+
+ executeProviderToolCalls :: EngineConfig -> Map.Map Text Tool -> [Provider.ToolCall] -> Int -> Int -> IO ([Provider.Message], Int, Int)
+ executeProviderToolCalls eCfg tMap tcs initialTestFailures initialEditFailures = do
+ results <- traverse (executeSingleProvider eCfg tMap) tcs
+ let msgs = map (\(m, _, _) -> m) results
+ testDeltas = map (\(_, t, _) -> t) results
+ editDeltas = map (\(_, _, e) -> e) results
+ totalTestFail = initialTestFailures + sum testDeltas
+ totalEditFail = initialEditFailures + sum editDeltas
+ pure (msgs, totalTestFail, totalEditFail)
+
+ executeSingleProvider :: EngineConfig -> Map.Map Text Tool -> Provider.ToolCall -> IO (Provider.Message, Int, Int)
+ executeSingleProvider eCfg tMap tc = do
+ let name = Provider.fcName (Provider.tcFunction tc)
+ argsText = Provider.fcArguments (Provider.tcFunction tc)
+ callId = Provider.tcId tc
+ engineOnActivity eCfg <| "Executing tool: " <> name
+ engineOnToolCall eCfg name argsText
+ case Map.lookup name tMap of
+ Nothing -> do
+ let errMsg = "Tool not found: " <> name
+ engineOnToolResult eCfg name False errMsg
+ pure (Provider.Message Provider.ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just tool -> do
+ case Aeson.decode (BL.fromStrict (TE.encodeUtf8 argsText)) of
+ Nothing -> do
+ let errMsg = "Invalid JSON arguments: " <> argsText
+ engineOnToolResult eCfg name False errMsg
+ pure (Provider.Message Provider.ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just args -> do
+ resultValue <- toolExecute tool args
+ let resultText = TE.decodeUtf8 (BL.toStrict (Aeson.encode resultValue))
+ isTestCall = name == "bash" && ("bild --test" `Text.isInfixOf` argsText || "bild -t" `Text.isInfixOf` argsText)
+ isTestFailure = isTestCall && isFailureResultProvider resultValue
+ testDelta = if isTestFailure then 1 else 0
+ isEditFailure = name == "edit_file" && isOldStrNotFoundProvider resultValue
+ editDelta = if isEditFailure then 1 else 0
+ engineOnToolResult eCfg name True resultText
+ pure (Provider.Message Provider.ToolRole resultText Nothing (Just callId), testDelta, editDelta)
+
+ isFailureResultProvider :: Aeson.Value -> Bool
+ isFailureResultProvider (Aeson.Object obj) =
+ case KeyMap.lookup "exit_code" obj of
+ Just (Aeson.Number n) -> n /= 0
+ _ -> False
+ isFailureResultProvider (Aeson.String s) =
+ "error"
+ `Text.isInfixOf` Text.toLower s
+ || "failed"
+ `Text.isInfixOf` Text.toLower s
+ || "FAILED"
+ `Text.isInfixOf` s
+ isFailureResultProvider _ = False
+
+ isOldStrNotFoundProvider :: Aeson.Value -> Bool
+ isOldStrNotFoundProvider (Aeson.Object obj) =
+ case KeyMap.lookup "error" obj of
+ Just (Aeson.String s) -> "old_str not found" `Text.isInfixOf` s
+ _ -> False
+ isOldStrNotFoundProvider _ = False
+
+runAgentWithProviderStreaming ::
+ EngineConfig ->
+ Provider.Provider ->
+ AgentConfig ->
+ Text ->
+ (Text -> IO ()) ->
+ IO (Either Text AgentResult)
+runAgentWithProviderStreaming engineCfg provider agentCfg userPrompt onStreamChunk = do
+ let tools = agentTools agentCfg
+ toolApis = map encodeToolForProvider tools
+ toolMap = buildToolMap tools
+ systemMsg = providerMessage Provider.System (agentSystemPrompt agentCfg)
+ userMsg = providerMessage Provider.User userPrompt
+ initialMessages = [systemMsg, userMsg]
+
+ engineOnActivity engineCfg "Starting agent loop (Provider+Streaming)"
+ loopProviderStreaming provider toolApis toolMap initialMessages 0 0 0 0.0 Map.empty 0 0
+ where
+ maxIter = agentMaxIterations agentCfg
+ guardrails' = agentGuardrails agentCfg
+
+ providerMessage :: Provider.Role -> Text -> Provider.Message
+ providerMessage role content = Provider.Message role content Nothing Nothing
+
+ loopProviderStreaming ::
+ Provider.Provider ->
+ [Provider.ToolApi] ->
+ Map.Map Text Tool ->
+ [Provider.Message] ->
+ Int ->
+ Int ->
+ Int ->
+ Double ->
+ Map.Map Text Int ->
+ Int ->
+ Int ->
+ IO (Either Text AgentResult)
+ loopProviderStreaming prov toolApis' toolMap msgs iteration totalCalls totalTokens totalCost toolCallCounts testFailures editFailures
+ | iteration >= maxIter = do
+ let errMsg = "Max iterations (" <> tshow maxIter <> ") reached"
+ engineOnError engineCfg errMsg
+ pure <| Left errMsg
+ | otherwise = do
+ let guardrailViolation = findGuardrailViolation guardrails' totalCost totalTokens toolCallCounts testFailures editFailures
+ case guardrailViolation of
+ Just (g, errMsg) -> do
+ engineOnGuardrail engineCfg g
+ pure <| Left errMsg
+ Nothing -> do
+ engineOnActivity engineCfg <| "Iteration " <> tshow (iteration + 1)
+ hasToolCalls <- newIORef False
+ result <-
+ Provider.chatStream prov toolApis' msgs <| \case
+ Provider.StreamContent txt -> onStreamChunk txt
+ Provider.StreamToolCall _ -> writeIORef hasToolCalls True
+ Provider.StreamToolCallDelta _ -> writeIORef hasToolCalls True
+ Provider.StreamError err -> engineOnError engineCfg err
+ Provider.StreamDone _ -> pure ()
+ case result of
+ Left err -> do
+ engineOnError engineCfg err
+ pure (Left err)
+ Right chatRes -> do
+ let msg = Provider.chatMessage chatRes
+ tokens = maybe 0 Provider.usageTotalTokens (Provider.chatUsage chatRes)
+ cost = case Provider.chatUsage chatRes +> Provider.usageCost of
+ Just actualCost -> actualCost * 100
+ Nothing -> estimateCost (getProviderModelStreaming prov) tokens
+ engineOnCost engineCfg tokens cost
+ let newTokens = totalTokens + tokens
+ newCost = totalCost + cost
+ let assistantText = Provider.msgContent msg
+ unless (Text.null assistantText)
+ <| engineOnAssistant engineCfg assistantText
+ case Provider.msgToolCalls msg of
+ Nothing
+ | Text.null (Provider.msgContent msg) && totalCalls > 0 -> do
+ engineOnActivity engineCfg "Empty response after tools, prompting for text"
+ let promptMsg = Provider.Message Provider.ToolRole "Please provide a response to the user." Nothing Nothing
+ newMsgs = msgs <> [msg, promptMsg]
+ loopProviderStreaming prov toolApis' toolMap newMsgs (iteration + 1) totalCalls newTokens newCost toolCallCounts testFailures editFailures
+ | otherwise -> do
+ engineOnActivity engineCfg "Agent completed"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = Provider.msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just [] -> do
+ engineOnActivity engineCfg "Agent completed (empty tool calls)"
+ engineOnComplete engineCfg
+ pure
+ <| Right
+ <| AgentResult
+ { resultFinalMessage = Provider.msgContent msg,
+ resultToolCallCount = totalCalls,
+ resultIterations = iteration + 1,
+ resultTotalCost = newCost,
+ resultTotalTokens = newTokens
+ }
+ Just tcs -> do
+ (toolResults, newTestFailures, newEditFailures) <- executeToolCallsStreaming engineCfg toolMap tcs testFailures editFailures
+ let newMsgs = msgs <> [msg] <> toolResults
+ newCalls = totalCalls + length tcs
+ newToolCallCounts = updateToolCallCountsStreaming toolCallCounts tcs
+ loopProviderStreaming prov toolApis' toolMap newMsgs (iteration + 1) newCalls newTokens newCost newToolCallCounts newTestFailures newEditFailures
+
+ getProviderModelStreaming :: Provider.Provider -> Text
+ getProviderModelStreaming (Provider.OpenRouter cfg) = Provider.providerModel cfg
+ getProviderModelStreaming (Provider.Ollama cfg) = Provider.providerModel cfg
+ getProviderModelStreaming (Provider.AmpCLI _) = "amp"
+
+ updateToolCallCountsStreaming :: Map.Map Text Int -> [Provider.ToolCall] -> Map.Map Text Int
+ updateToolCallCountsStreaming =
+ foldr (\tc m -> Map.insertWith (+) (Provider.fcName (Provider.tcFunction tc)) 1 m)
+
+ executeToolCallsStreaming :: EngineConfig -> Map.Map Text Tool -> [Provider.ToolCall] -> Int -> Int -> IO ([Provider.Message], Int, Int)
+ executeToolCallsStreaming eCfg tMap tcs initialTestFailures initialEditFailures = do
+ results <- traverse (executeSingleStreaming eCfg tMap) tcs
+ let msgs = map (\(m, _, _) -> m) results
+ testDeltas = map (\(_, t, _) -> t) results
+ editDeltas = map (\(_, _, e) -> e) results
+ totalTestFail = initialTestFailures + sum testDeltas
+ totalEditFail = initialEditFailures + sum editDeltas
+ pure (msgs, totalTestFail, totalEditFail)
+
+ executeSingleStreaming :: EngineConfig -> Map.Map Text Tool -> Provider.ToolCall -> IO (Provider.Message, Int, Int)
+ executeSingleStreaming eCfg tMap tc = do
+ let name = Provider.fcName (Provider.tcFunction tc)
+ argsText = Provider.fcArguments (Provider.tcFunction tc)
+ callId = Provider.tcId tc
+ engineOnActivity eCfg <| "Executing tool: " <> name
+ engineOnToolCall eCfg name argsText
+ case Map.lookup name tMap of
+ Nothing -> do
+ let errMsg = "Tool not found: " <> name
+ engineOnToolResult eCfg name False errMsg
+ pure (Provider.Message Provider.ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just tool -> do
+ case Aeson.decode (BL.fromStrict (TE.encodeUtf8 argsText)) of
+ Nothing -> do
+ let errMsg = "Invalid JSON arguments: " <> argsText
+ engineOnToolResult eCfg name False errMsg
+ pure (Provider.Message Provider.ToolRole errMsg Nothing (Just callId), 0, 0)
+ Just args -> do
+ resultValue <- toolExecute tool args
+ let resultText = TE.decodeUtf8 (BL.toStrict (Aeson.encode resultValue))
+ isTestCall = name == "bash" && ("bild --test" `Text.isInfixOf` argsText || "bild -t" `Text.isInfixOf` argsText)
+ isTestFailure = isTestCall && isFailureResultStreaming resultValue
+ testDelta = if isTestFailure then 1 else 0
+ isEditFailure = name == "edit_file" && isOldStrNotFoundStreaming resultValue
+ editDelta = if isEditFailure then 1 else 0
+ engineOnToolResult eCfg name True resultText
+ pure (Provider.Message Provider.ToolRole resultText Nothing (Just callId), testDelta, editDelta)
+
+ isFailureResultStreaming :: Aeson.Value -> Bool
+ isFailureResultStreaming (Aeson.Object obj) =
+ case KeyMap.lookup "exit_code" obj of
+ Just (Aeson.Number n) -> n /= 0
+ _ -> False
+ isFailureResultStreaming (Aeson.String s) =
+ "error"
+ `Text.isInfixOf` Text.toLower s
+ || "failed"
+ `Text.isInfixOf` Text.toLower s
+ || "FAILED"
+ `Text.isInfixOf` s
+ isFailureResultStreaming _ = False
+
+ isOldStrNotFoundStreaming :: Aeson.Value -> Bool
+ isOldStrNotFoundStreaming (Aeson.Object obj) =
+ case KeyMap.lookup "error" obj of
+ Just (Aeson.String s) -> "old_str not found" `Text.isInfixOf` s
+ _ -> False
+ isOldStrNotFoundStreaming _ = False
diff --git a/Omni/Agent/Event.hs b/Omni/Agent/Event.hs
new file mode 100644
index 0000000..2b40077
--- /dev/null
+++ b/Omni/Agent/Event.hs
@@ -0,0 +1,180 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Agent Event types for observability and streaming.
+--
+-- Captures all events during agent execution for logging,
+-- streaming to web UI, and future interactive chat.
+module Omni.Agent.Event
+ ( AgentEvent (..),
+ EventType (..),
+ eventToJSON,
+ eventFromJSON,
+ formatEventForTerminal,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import Data.Time (UTCTime, defaultTimeLocale, formatTime)
+
+-- | Types of agent events
+data EventType
+ = Assistant -- LLM text response
+ | ToolCall -- Tool invocation with arguments
+ | ToolResult -- Tool execution result
+ | UserMessage -- For future interactive chat
+ | Cost -- Token usage and cost info
+ | Error -- Failures and errors
+ | Complete -- Session ended successfully
+ deriving (Show, Eq, Read)
+
+-- | A single agent event with timestamp and content
+data AgentEvent = AgentEvent
+ { eventType :: EventType,
+ eventTimestamp :: UTCTime,
+ eventContent :: Aeson.Value
+ }
+ deriving (Show, Eq)
+
+-- | Convert event to JSON for storage/streaming
+eventToJSON :: AgentEvent -> Aeson.Value
+eventToJSON e =
+ Aeson.object
+ [ "type" .= show (eventType e),
+ "timestamp" .= eventTimestamp e,
+ "content" .= eventContent e
+ ]
+
+-- | Parse event from JSON
+eventFromJSON :: Aeson.Value -> Maybe AgentEvent
+eventFromJSON v = do
+ obj <- case v of
+ Aeson.Object o -> Just o
+ _ -> Nothing
+ typeStr <- case Aeson.lookup "type" (Aeson.toList obj) of
+ Just (Aeson.String t) -> Just (Text.unpack t)
+ _ -> Nothing
+ eventT <- readMaybe typeStr
+ ts <- case Aeson.lookup "timestamp" (Aeson.toList obj) of
+ Just t -> Aeson.parseMaybe Aeson.parseJSON t
+ _ -> Nothing
+ content <- Aeson.lookup "content" (Aeson.toList obj)
+ pure
+ AgentEvent
+ { eventType = eventT,
+ eventTimestamp = ts,
+ eventContent = content
+ }
+ where
+ Aeson.lookup k pairs = snd </ find (\(k', _) -> k' == k) pairs
+ Aeson.toList (Aeson.Object o) = map (first Aeson.toText) (Aeson.toList o)
+ Aeson.toList _ = []
+ Aeson.toText = id
+ first f (a, b) = (f a, b)
+
+-- | Format event for terminal display
+formatEventForTerminal :: AgentEvent -> Text
+formatEventForTerminal e =
+ let ts = Text.pack <| formatTime defaultTimeLocale "%H:%M:%S" (eventTimestamp e)
+ content = case eventType e of
+ Assistant -> case eventContent e of
+ Aeson.String t -> "Assistant: " <> truncate' 100 t
+ _ -> "Assistant: <message>"
+ ToolCall -> case eventContent e of
+ Aeson.Object _ ->
+ let toolName = getField "tool" (eventContent e)
+ in "Tool: " <> toolName
+ _ -> "Tool: <call>"
+ ToolResult -> case eventContent e of
+ Aeson.Object _ ->
+ let toolName = getField "tool" (eventContent e)
+ success = getField "success" (eventContent e)
+ in "Result: " <> toolName <> " (" <> success <> ")"
+ _ -> "Result: <result>"
+ UserMessage -> case eventContent e of
+ Aeson.String t -> "User: " <> truncate' 100 t
+ _ -> "User: <message>"
+ Cost -> case eventContent e of
+ Aeson.Object _ ->
+ let tokens = getField "tokens" (eventContent e)
+ cents = getField "cents" (eventContent e)
+ in "Cost: " <> tokens <> " tokens, " <> cents <> " cents"
+ _ -> "Cost: <info>"
+ Error -> case eventContent e of
+ Aeson.String t -> "Error: " <> t
+ _ -> "Error: <error>"
+ Complete -> "Complete"
+ in "[" <> ts <> "] " <> content
+ where
+ truncate' n t = if Text.length t > n then Text.take n t <> "..." else t
+ getField key val = case val of
+ Aeson.Object o -> case Aeson.lookup key (Aeson.toList o) of
+ Just (Aeson.String s) -> s
+ Just (Aeson.Number n) -> Text.pack (show n)
+ Just (Aeson.Bool b) -> if b then "ok" else "failed"
+ _ -> "<" <> key <> ">"
+ _ -> "<" <> key <> ">"
+ where
+ Aeson.lookup k pairs = snd </ find (\(k', _) -> k' == k) pairs
+ Aeson.toList (Aeson.Object o') = map (first' Aeson.toText) (Aeson.toList o')
+ Aeson.toList _ = []
+ Aeson.toText = id
+ first' f (a, b) = (f a, b)
+
+-- Helper constructors for common events
+
+mkAssistantEvent :: UTCTime -> Text -> AgentEvent
+mkAssistantEvent ts content =
+ AgentEvent
+ { eventType = Assistant,
+ eventTimestamp = ts,
+ eventContent = Aeson.String content
+ }
+
+mkToolCallEvent :: UTCTime -> Text -> Aeson.Value -> AgentEvent
+mkToolCallEvent ts toolName args =
+ AgentEvent
+ { eventType = ToolCall,
+ eventTimestamp = ts,
+ eventContent = Aeson.object ["tool" .= toolName, "args" .= args]
+ }
+
+mkToolResultEvent :: UTCTime -> Text -> Bool -> Text -> AgentEvent
+mkToolResultEvent ts toolName success output =
+ AgentEvent
+ { eventType = ToolResult,
+ eventTimestamp = ts,
+ eventContent =
+ Aeson.object
+ [ "tool" .= toolName,
+ "success" .= success,
+ "output" .= output
+ ]
+ }
+
+mkCostEvent :: UTCTime -> Int -> Int -> AgentEvent
+mkCostEvent ts tokens cents =
+ AgentEvent
+ { eventType = Cost,
+ eventTimestamp = ts,
+ eventContent = Aeson.object ["tokens" .= tokens, "cents" .= cents]
+ }
+
+mkErrorEvent :: UTCTime -> Text -> AgentEvent
+mkErrorEvent ts msg =
+ AgentEvent
+ { eventType = Error,
+ eventTimestamp = ts,
+ eventContent = Aeson.String msg
+ }
+
+mkCompleteEvent :: UTCTime -> AgentEvent
+mkCompleteEvent ts =
+ AgentEvent
+ { eventType = Complete,
+ eventTimestamp = ts,
+ eventContent = Aeson.Null
+ }
diff --git a/Omni/Agent/Git.hs b/Omni/Agent/Git.hs
new file mode 100644
index 0000000..4c06cf6
--- /dev/null
+++ b/Omni/Agent/Git.hs
@@ -0,0 +1,232 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Git operations for the agent.
+--
+-- : out omni-agent-git
+-- : dep temporary
+module Omni.Agent.Git
+ ( checkout,
+ syncWithLive,
+ commit,
+ createBranch,
+ getCurrentBranch,
+ branchExists,
+ isMerged,
+ listBranches,
+ showFile,
+ getRepoRoot,
+ runGit,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import qualified Data.Text as Text
+import qualified Omni.Log as Log
+import Omni.Test ((@=?))
+import qualified Omni.Test as Test
+import qualified System.Directory as Directory
+import qualified System.Exit as Exit
+import qualified System.IO.Temp as Temp
+import qualified System.Process as Process
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Git"
+ [ Test.unit "checkout works" <| do
+ Temp.withSystemTempDirectory "omni-agent-git-test" <| \tmpDir -> do
+ let repo = tmpDir <> "/repo"
+ Directory.createDirectory repo
+ -- init repo
+ git repo ["init"]
+ git repo ["branch", "-m", "master"]
+ git repo ["config", "user.email", "you@example.com"]
+ git repo ["config", "user.name", "Your Name"]
+
+ -- commit A
+ writeFile (repo <> "/a.txt") "A"
+ git repo ["add", "a.txt"]
+ git repo ["commit", "-m", "A"]
+ shaA <- getSha repo "HEAD"
+
+ -- create branch dev
+ git repo ["checkout", "-b", "dev"]
+
+ -- commit B
+ writeFile (repo <> "/b.txt") "B"
+ git repo ["add", "b.txt"]
+ git repo ["commit", "-m", "B"]
+ shaB <- getSha repo "HEAD"
+
+ -- switch back to master
+ git repo ["checkout", "master"]
+
+ -- Test 1: checkout dev
+ checkout repo "dev"
+ current <- getSha repo "HEAD"
+ shaB @=? current
+
+ -- Test 2: checkout master
+ checkout repo "master"
+ current' <- getSha repo "HEAD"
+ shaA @=? current'
+
+ -- Test 3: dirty state
+ writeFile (repo <> "/a.txt") "DIRTY"
+ checkout repo "dev"
+ current'' <- getSha repo "HEAD"
+ shaB @=? current''
+ -- Verify dirty file is gone/overwritten (b.txt should exist, a.txt should be A from master? No, a.txt is in A and B)
+ -- Wait, in dev, a.txt is "A".
+ content <- readFile (repo <> "/a.txt")
+ "A" @=? content
+
+ -- Test 4: untracked file
+ writeFile (repo <> "/untracked.txt") "DELETE ME"
+ checkout repo "master"
+ exists <- Directory.doesFileExist (repo <> "/untracked.txt")
+ False @=? exists
+ ]
+
+getSha :: FilePath -> String -> IO String
+getSha dir ref = do
+ let cmd = (Process.proc "git" ["rev-parse", ref]) {Process.cwd = Just dir}
+ (code, out, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure <| strip out
+ _ -> panic "getSha failed"
+
+-- | Checkout a specific ref (SHA, branch, tag) in the given repository path.
+-- This function ensures the repository is in the correct state by:
+-- 1. Fetching all updates
+-- 2. Checking out the ref (forcing overwrites of local changes)
+-- 3. Resetting hard to the ref (to ensure clean state)
+-- 4. Cleaning untracked files
+-- 5. Updating submodules
+checkout :: FilePath -> Text -> IO ()
+checkout repoPath ref = do
+ let r = Text.unpack ref
+
+ Log.info ["git", "checkout", ref, "in", Text.pack repoPath]
+
+ -- Fetch all refs to ensure we have the target
+ git repoPath ["fetch", "--all", "--tags"]
+
+ -- Checkout the ref, discarding local changes
+ git repoPath ["checkout", "--force", r]
+
+ -- Reset hard to ensure we are exactly at the target state
+ git repoPath ["reset", "--hard", r]
+
+ -- Remove untracked files and directories
+ git repoPath ["clean", "-fdx"]
+
+ -- Update submodules
+ git repoPath ["submodule", "update", "--init", "--recursive"]
+
+ Log.good ["git", "checkout", "complete"]
+ Log.br
+
+-- | Run a git command in the given directory.
+git :: FilePath -> [String] -> IO ()
+git dir args = do
+ let cmd = (Process.proc "git" args) {Process.cwd = Just dir}
+ (exitCode, out, err) <- Process.readCreateProcessWithExitCode cmd ""
+ case exitCode of
+ Exit.ExitSuccess -> pure ()
+ Exit.ExitFailure code -> do
+ Log.fail ["git command failed", Text.pack (show args), "code: " <> show code]
+ Log.info [Text.pack out]
+ Log.info [Text.pack err]
+ Log.br
+ panic <| "git command failed: git " <> show args
+
+syncWithLive :: FilePath -> IO ()
+syncWithLive repo = do
+ Log.info ["git", "syncing with live"]
+ -- git repo ["fetch", "origin", "live"] -- Optional
+
+ -- Try sync (branchless sync), if fail, panic
+ -- This replaces manual rebase and handles stack movement
+ let cmd = (Process.proc "git" ["sync"]) {Process.cwd = Just repo}
+ (code, out, err) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure ()
+ Exit.ExitFailure _ -> do
+ Log.warn ["git sync failed", Text.pack err]
+ Log.info [Text.pack out]
+ panic "Sync with live failed (git sync)"
+
+commit :: FilePath -> Text -> IO ()
+commit repo msg = do
+ Log.info ["git", "commit", msg]
+ git repo ["add", "."]
+
+ -- Check for changes before committing to avoid error
+ let checkCmd = (Process.proc "git" ["diff", "--cached", "--quiet"]) {Process.cwd = Just repo}
+ (code, _, _) <- Process.readCreateProcessWithExitCode checkCmd ""
+
+ case code of
+ Exit.ExitSuccess -> Log.warn ["git", "nothing to commit", "skipping"]
+ Exit.ExitFailure 1 -> git repo ["commit", "-m", Text.unpack msg]
+ Exit.ExitFailure c -> panic <| "git diff failed with code " <> show c
+
+createBranch :: FilePath -> Text -> IO ()
+createBranch repo branch = do
+ Log.info ["git", "create branch", branch]
+ git repo ["checkout", "-b", Text.unpack branch]
+
+getCurrentBranch :: FilePath -> IO Text
+getCurrentBranch repo = do
+ let cmd = (Process.proc "git" ["branch", "--show-current"]) {Process.cwd = Just repo}
+ (code, out, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure <| Text.strip (Text.pack out)
+ _ -> panic "git branch failed"
+
+branchExists :: FilePath -> Text -> IO Bool
+branchExists repo branch = do
+ let cmd = (Process.proc "git" ["show-ref", "--verify", "refs/heads/" <> Text.unpack branch]) {Process.cwd = Just repo}
+ (code, _, _) <- Process.readCreateProcessWithExitCode cmd ""
+ pure (code == Exit.ExitSuccess)
+
+isMerged :: FilePath -> Text -> Text -> IO Bool
+isMerged repo branch target = do
+ -- Check if 'branch' is merged into 'target'
+ -- git merge-base --is-ancestor <branch> <target>
+ let cmd = (Process.proc "git" ["merge-base", "--is-ancestor", Text.unpack branch, Text.unpack target]) {Process.cwd = Just repo}
+ (code, _, _) <- Process.readCreateProcessWithExitCode cmd ""
+ pure (code == Exit.ExitSuccess)
+
+listBranches :: FilePath -> Text -> IO [Text]
+listBranches repo pat = do
+ let cmd = (Process.proc "git" ["branch", "--list", Text.unpack pat, "--format=%(refname:short)"]) {Process.cwd = Just repo}
+ (code, out, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure <| filter (not <. Text.null) (Text.lines (Text.pack out))
+ _ -> panic "git branch list failed"
+
+showFile :: FilePath -> Text -> FilePath -> IO (Maybe Text)
+showFile repo branch path = do
+ let cmd = (Process.proc "git" ["show", Text.unpack branch <> ":" <> path]) {Process.cwd = Just repo}
+ (code, out, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure <| Just (Text.pack out)
+ _ -> pure Nothing
+
+getRepoRoot :: FilePath -> IO FilePath
+getRepoRoot dir = do
+ let cmd = (Process.proc "git" ["rev-parse", "--show-toplevel"]) {Process.cwd = Just dir}
+ (code, out, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure <| strip out
+ _ -> panic "git rev-parse failed"
+
+runGit :: FilePath -> [String] -> IO ()
+runGit = git
diff --git a/Omni/Agent/Memory.hs b/Omni/Agent/Memory.hs
new file mode 100644
index 0000000..4aaa438
--- /dev/null
+++ b/Omni/Agent/Memory.hs
@@ -0,0 +1,1575 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Cross-agent shared memory system with vector similarity search.
+--
+-- Provides persistent memory that is:
+-- - Shared across all agents (Telegram, researcher, coder, etc.)
+-- - Private per user (users can't see each other's memories)
+-- - Searchable via semantic similarity using embeddings
+--
+-- Uses sqlite-vss for vector similarity search and Ollama for embeddings.
+--
+-- : out omni-agent-memory
+-- : dep aeson
+-- : dep http-conduit
+-- : dep sqlite-simple
+-- : dep uuid
+-- : dep vector
+-- : dep directory
+-- : dep bytestring
+module Omni.Agent.Memory
+ ( -- * Types
+ User (..),
+ Memory (..),
+ MemorySource (..),
+ ConversationMessage (..),
+ ConversationSummary (..),
+ MessageRole (..),
+ RelationType (..),
+ MemoryLink (..),
+
+ -- * User Management
+ createUser,
+ getUser,
+ getUserByTelegramId,
+ getOrCreateUserByTelegramId,
+
+ -- * Memory Operations
+ storeMemory,
+ recallMemories,
+ forgetMemory,
+ getAllMemoriesForUser,
+ updateMemoryAccess,
+
+ -- * Knowledge Graph
+ linkMemories,
+ getMemoryLinks,
+ getLinkedMemories,
+ queryGraph,
+
+ -- * Conversation History (DMs)
+ saveMessage,
+ getRecentMessages,
+ getConversationContext,
+ summarizeAndArchive,
+ estimateTokens,
+
+ -- * Group Conversation History
+ saveGroupMessage,
+ getGroupRecentMessages,
+ getGroupConversationContext,
+
+ -- * Group Memories
+ storeGroupMemory,
+ recallGroupMemories,
+
+ -- * Embeddings
+ embedText,
+
+ -- * Agent Integration
+ rememberTool,
+ recallTool,
+ linkMemoriesTool,
+ queryGraphTool,
+ formatMemoriesForPrompt,
+ runAgentWithMemory,
+
+ -- * Database
+ withMemoryDb,
+ initMemoryDb,
+ getMemoryDbPath,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString as BS
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import Data.Time (UTCTime, getCurrentTime)
+import Data.Time.Format (defaultTimeLocale, formatTime)
+import qualified Data.UUID as UUID
+import qualified Data.UUID.V4 as UUID
+import qualified Data.Vector.Storable as VS
+import qualified Database.SQLite.Simple as SQL
+import Database.SQLite.Simple.FromField ()
+import qualified Database.SQLite.Simple.ToField as SQL
+import Foreign.Storable ()
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.Directory (createDirectoryIfMissing)
+import System.Environment (lookupEnv)
+import System.FilePath (takeDirectory, (</>))
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Memory"
+ [ Test.unit "User JSON roundtrip" <| do
+ now <- getCurrentTime
+ let user =
+ User
+ { userId = "test-uuid",
+ userTelegramId = Just 12345,
+ userEmail = Nothing,
+ userName = "Test User",
+ userCreatedAt = now
+ }
+ case Aeson.decode (Aeson.encode user) of
+ Nothing -> Test.assertFailure "Failed to decode User"
+ Just decoded -> userName decoded Test.@=? "Test User",
+ Test.unit "Memory JSON roundtrip" <| do
+ now <- getCurrentTime
+ let mem =
+ Memory
+ { memoryId = "mem-uuid",
+ memoryUserId = "user-uuid",
+ memoryContent = "User is an AI engineer",
+ memoryEmbedding = Nothing,
+ memorySource =
+ MemorySource
+ { sourceAgent = "telegram",
+ sourceSession = Nothing,
+ sourceContext = "User mentioned in chat"
+ },
+ memoryConfidence = 0.9,
+ memoryCreatedAt = now,
+ memoryLastAccessedAt = now,
+ memoryTags = ["profession", "ai"]
+ }
+ case Aeson.decode (Aeson.encode mem) of
+ Nothing -> Test.assertFailure "Failed to decode Memory"
+ Just decoded -> memoryContent decoded Test.@=? "User is an AI engineer",
+ Test.unit "MemorySource JSON roundtrip" <| do
+ let src =
+ MemorySource
+ { sourceAgent = "researcher",
+ sourceSession = Just "session-123",
+ sourceContext = "Extracted from conversation"
+ }
+ case Aeson.decode (Aeson.encode src) of
+ Nothing -> Test.assertFailure "Failed to decode MemorySource"
+ Just decoded -> sourceAgent decoded Test.@=? "researcher",
+ Test.unit "formatMemoriesForPrompt formats correctly" <| do
+ now <- getCurrentTime
+ let mem1 =
+ Memory
+ { memoryId = "1",
+ memoryUserId = "u",
+ memoryContent = "User is an AI engineer",
+ memoryEmbedding = Nothing,
+ memorySource = MemorySource "telegram" Nothing "chat",
+ memoryConfidence = 0.9,
+ memoryCreatedAt = now,
+ memoryLastAccessedAt = now,
+ memoryTags = []
+ }
+ mem2 =
+ Memory
+ { memoryId = "2",
+ memoryUserId = "u",
+ memoryContent = "User prefers Haskell",
+ memoryEmbedding = Nothing,
+ memorySource = MemorySource "coder" Nothing "code review",
+ memoryConfidence = 0.8,
+ memoryCreatedAt = now,
+ memoryLastAccessedAt = now,
+ memoryTags = []
+ }
+ formatted = formatMemoriesForPrompt [mem1, mem2]
+ ("AI engineer" `Text.isInfixOf` formatted) Test.@=? True
+ ("Haskell" `Text.isInfixOf` formatted) Test.@=? True,
+ Test.unit "cosineSimilarity identical vectors" <| do
+ let v1 = VS.fromList [1.0, 0.0, 0.0 :: Float]
+ v2 = VS.fromList [1.0, 0.0, 0.0 :: Float]
+ abs (cosineSimilarity v1 v2 - 1.0) < 0.0001 Test.@=? True,
+ Test.unit "cosineSimilarity orthogonal vectors" <| do
+ let v1 = VS.fromList [1.0, 0.0, 0.0 :: Float]
+ v2 = VS.fromList [0.0, 1.0, 0.0 :: Float]
+ abs (cosineSimilarity v1 v2) < 0.0001 Test.@=? True,
+ Test.unit "cosineSimilarity opposite vectors" <| do
+ let v1 = VS.fromList [1.0, 0.0, 0.0 :: Float]
+ v2 = VS.fromList [-1.0, 0.0, 0.0 :: Float]
+ abs (cosineSimilarity v1 v2 + 1.0) < 0.0001 Test.@=? True,
+ Test.unit "vectorToBlob and blobToVector roundtrip" <| do
+ let v = VS.fromList [0.1, 0.2, 0.3, 0.4, 0.5 :: Float]
+ blob = vectorToBlob v
+ v' = blobToVector blob
+ VS.length v Test.@=? VS.length v'
+ VS.toList v Test.@=? VS.toList v',
+ Test.unit "rememberTool has correct schema" <| do
+ let tool = rememberTool "test-user-id"
+ Engine.toolName tool Test.@=? "remember",
+ Test.unit "recallTool has correct schema" <| do
+ let tool = recallTool "test-user-id"
+ Engine.toolName tool Test.@=? "recall",
+ Test.unit "RelationType JSON roundtrip" <| do
+ let types = [Contradicts, Supports, Elaborates, Supersedes, Related, ContingentOn]
+ forM_ types <| \rt ->
+ case Aeson.decode (Aeson.encode rt) of
+ Nothing -> Test.assertFailure ("Failed to decode RelationType: " <> show rt)
+ Just decoded -> decoded Test.@=? rt,
+ Test.unit "MemoryLink JSON roundtrip" <| do
+ now <- getCurrentTime
+ let memLink =
+ MemoryLink
+ { linkFromMemoryId = "mem-1",
+ linkToMemoryId = "mem-2",
+ linkRelationType = Contradicts,
+ linkCreatedAt = now
+ }
+ case Aeson.decode (Aeson.encode memLink) of
+ Nothing -> Test.assertFailure "Failed to decode MemoryLink"
+ Just decoded -> do
+ linkFromMemoryId decoded Test.@=? "mem-1"
+ linkToMemoryId decoded Test.@=? "mem-2"
+ linkRelationType decoded Test.@=? Contradicts,
+ Test.unit "relationTypeToText and textToRelationType roundtrip" <| do
+ let types = [Contradicts, Supports, Elaborates, Supersedes, Related, ContingentOn]
+ forM_ types <| \rt ->
+ textToRelationType (relationTypeToText rt) Test.@=? Just rt,
+ Test.unit "linkMemoriesTool has correct schema" <| do
+ let tool = linkMemoriesTool "test-user-id"
+ Engine.toolName tool Test.@=? "link_memories",
+ Test.unit "queryGraphTool has correct schema" <| do
+ let tool = queryGraphTool "test-user-id"
+ Engine.toolName tool Test.@=? "query_graph"
+ ]
+
+-- | User record for multi-user memory system.
+data User = User
+ { userId :: Text,
+ userTelegramId :: Maybe Int,
+ userEmail :: Maybe Text,
+ userName :: Text,
+ userCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON User where
+ toJSON u =
+ Aeson.object
+ [ "id" .= userId u,
+ "telegram_id" .= userTelegramId u,
+ "email" .= userEmail u,
+ "name" .= userName u,
+ "created_at" .= userCreatedAt u
+ ]
+
+instance Aeson.FromJSON User where
+ parseJSON =
+ Aeson.withObject "User" <| \v ->
+ (User </ (v .: "id"))
+ <*> (v .:? "telegram_id")
+ <*> (v .:? "email")
+ <*> (v .: "name")
+ <*> (v .: "created_at")
+
+instance SQL.FromRow User where
+ fromRow =
+ User
+ </ SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+instance SQL.ToRow User where
+ toRow u =
+ [ SQL.toField (userId u),
+ SQL.toField (userTelegramId u),
+ SQL.toField (userEmail u),
+ SQL.toField (userName u),
+ SQL.toField (userCreatedAt u)
+ ]
+
+-- | Source information for a memory.
+data MemorySource = MemorySource
+ { sourceAgent :: Text,
+ sourceSession :: Maybe Text,
+ sourceContext :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON MemorySource where
+ toJSON s =
+ Aeson.object
+ [ "agent" .= sourceAgent s,
+ "session" .= sourceSession s,
+ "context" .= sourceContext s
+ ]
+
+instance Aeson.FromJSON MemorySource where
+ parseJSON =
+ Aeson.withObject "MemorySource" <| \v ->
+ (MemorySource </ (v .: "agent"))
+ <*> (v .:? "session")
+ <*> (v .: "context")
+
+-- | A memory stored in the system.
+data Memory = Memory
+ { memoryId :: Text,
+ memoryUserId :: Text,
+ memoryContent :: Text,
+ memoryEmbedding :: Maybe (VS.Vector Float),
+ memorySource :: MemorySource,
+ memoryConfidence :: Double,
+ memoryCreatedAt :: UTCTime,
+ memoryLastAccessedAt :: UTCTime,
+ memoryTags :: [Text]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Memory where
+ toJSON m =
+ Aeson.object
+ [ "id" .= memoryId m,
+ "user_id" .= memoryUserId m,
+ "content" .= memoryContent m,
+ "source" .= memorySource m,
+ "confidence" .= memoryConfidence m,
+ "created_at" .= memoryCreatedAt m,
+ "last_accessed_at" .= memoryLastAccessedAt m,
+ "tags" .= memoryTags m
+ ]
+
+instance Aeson.FromJSON Memory where
+ parseJSON =
+ Aeson.withObject "Memory" <| \v ->
+ ( Memory
+ </ (v .: "id")
+ )
+ <*> (v .: "user_id")
+ <*> (v .: "content")
+ <*> pure Nothing
+ <*> (v .: "source")
+ <*> (v .:? "confidence" .!= 0.8)
+ <*> (v .: "created_at")
+ <*> (v .: "last_accessed_at")
+ <*> (v .:? "tags" .!= [])
+
+-- SQLite instances for Memory (partial - embedding handled separately)
+instance SQL.FromRow Memory where
+ fromRow = do
+ mid <- SQL.field
+ uid <- SQL.field
+ content <- SQL.field
+ embeddingBlob <- SQL.field
+ agent <- SQL.field
+ session <- SQL.field
+ context <- SQL.field
+ confidence <- SQL.field
+ createdAt <- SQL.field
+ lastAccessedAt <- SQL.field
+ tagsJson <- SQL.field
+ let embedding = blobToVector </ (embeddingBlob :: Maybe BS.ByteString)
+ source = MemorySource agent session context
+ tags = fromMaybe [] ((tagsJson :: Maybe Text) +> (Aeson.decode <. BL.fromStrict <. TE.encodeUtf8))
+ pure
+ Memory
+ { memoryId = mid,
+ memoryUserId = uid,
+ memoryContent = content,
+ memoryEmbedding = embedding,
+ memorySource = source,
+ memoryConfidence = confidence,
+ memoryCreatedAt = createdAt,
+ memoryLastAccessedAt = lastAccessedAt,
+ memoryTags = tags
+ }
+
+-- | Role in a conversation message.
+data MessageRole = UserRole | AssistantRole
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON MessageRole where
+ toJSON UserRole = Aeson.String "user"
+ toJSON AssistantRole = Aeson.String "assistant"
+
+instance Aeson.FromJSON MessageRole where
+ parseJSON =
+ Aeson.withText "MessageRole" <| \case
+ "user" -> pure UserRole
+ "assistant" -> pure AssistantRole
+ _ -> empty
+
+-- | A message in a conversation.
+data ConversationMessage = ConversationMessage
+ { cmId :: Maybe Int,
+ cmUserId :: Text,
+ cmChatId :: Int,
+ cmRole :: MessageRole,
+ cmSenderName :: Maybe Text,
+ cmContent :: Text,
+ cmTokensEstimate :: Int,
+ cmCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ConversationMessage where
+ toJSON m =
+ Aeson.object
+ [ "id" .= cmId m,
+ "user_id" .= cmUserId m,
+ "chat_id" .= cmChatId m,
+ "role" .= cmRole m,
+ "sender_name" .= cmSenderName m,
+ "content" .= cmContent m,
+ "tokens_estimate" .= cmTokensEstimate m,
+ "created_at" .= cmCreatedAt m
+ ]
+
+instance SQL.FromRow ConversationMessage where
+ fromRow =
+ (ConversationMessage </ SQL.field)
+ <*> SQL.field
+ <*> SQL.field
+ <*> (parseRole </ SQL.field)
+ <*> SQL.field
+ <*> SQL.field
+ <*> (fromMaybe 0 </ SQL.field)
+ <*> SQL.field
+ where
+ parseRole :: Text -> MessageRole
+ parseRole "user" = UserRole
+ parseRole _ = AssistantRole
+
+-- | A summary of older conversation messages.
+data ConversationSummary = ConversationSummary
+ { csId :: Maybe Int,
+ csUserId :: Text,
+ csChatId :: Int,
+ csSummary :: Text,
+ csMessagesSummarized :: Int,
+ csTokensSaved :: Maybe Int,
+ csCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ConversationSummary where
+ toJSON s =
+ Aeson.object
+ [ "id" .= csId s,
+ "user_id" .= csUserId s,
+ "chat_id" .= csChatId s,
+ "summary" .= csSummary s,
+ "messages_summarized" .= csMessagesSummarized s,
+ "tokens_saved" .= csTokensSaved s,
+ "created_at" .= csCreatedAt s
+ ]
+
+instance SQL.FromRow ConversationSummary where
+ fromRow =
+ (ConversationSummary </ SQL.field)
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+-- | Relation types for the knowledge graph.
+data RelationType
+ = Contradicts
+ | Supports
+ | Elaborates
+ | Supersedes
+ | Related
+ | ContingentOn
+ deriving (Show, Eq, Generic, Ord)
+
+instance Aeson.ToJSON RelationType where
+ toJSON Contradicts = Aeson.String "contradicts"
+ toJSON Supports = Aeson.String "supports"
+ toJSON Elaborates = Aeson.String "elaborates"
+ toJSON Supersedes = Aeson.String "supersedes"
+ toJSON Related = Aeson.String "related"
+ toJSON ContingentOn = Aeson.String "contingent_on"
+
+instance Aeson.FromJSON RelationType where
+ parseJSON =
+ Aeson.withText "RelationType" <| \case
+ "contradicts" -> pure Contradicts
+ "supports" -> pure Supports
+ "elaborates" -> pure Elaborates
+ "supersedes" -> pure Supersedes
+ "related" -> pure Related
+ "contingent_on" -> pure ContingentOn
+ _ -> empty
+
+relationTypeToText :: RelationType -> Text
+relationTypeToText Contradicts = "contradicts"
+relationTypeToText Supports = "supports"
+relationTypeToText Elaborates = "elaborates"
+relationTypeToText Supersedes = "supersedes"
+relationTypeToText Related = "related"
+relationTypeToText ContingentOn = "contingent_on"
+
+textToRelationType :: Text -> Maybe RelationType
+textToRelationType "contradicts" = Just Contradicts
+textToRelationType "supports" = Just Supports
+textToRelationType "elaborates" = Just Elaborates
+textToRelationType "supersedes" = Just Supersedes
+textToRelationType "related" = Just Related
+textToRelationType "contingent_on" = Just ContingentOn
+textToRelationType _ = Nothing
+
+-- | A link between two memories in the knowledge graph.
+data MemoryLink = MemoryLink
+ { linkFromMemoryId :: Text,
+ linkToMemoryId :: Text,
+ linkRelationType :: RelationType,
+ linkCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON MemoryLink where
+ toJSON l =
+ Aeson.object
+ [ "from_memory_id" .= linkFromMemoryId l,
+ "to_memory_id" .= linkToMemoryId l,
+ "relation_type" .= linkRelationType l,
+ "created_at" .= linkCreatedAt l
+ ]
+
+instance Aeson.FromJSON MemoryLink where
+ parseJSON =
+ Aeson.withObject "MemoryLink" <| \v ->
+ (MemoryLink </ (v .: "from_memory_id"))
+ <*> (v .: "to_memory_id")
+ <*> (v .: "relation_type")
+ <*> (v .: "created_at")
+
+instance SQL.FromRow MemoryLink where
+ fromRow = do
+ fromId <- SQL.field
+ toId <- SQL.field
+ relTypeText <- SQL.field
+ createdAt <- SQL.field
+ let relType = fromMaybe Related (textToRelationType relTypeText)
+ pure
+ MemoryLink
+ { linkFromMemoryId = fromId,
+ linkToMemoryId = toId,
+ linkRelationType = relType,
+ linkCreatedAt = createdAt
+ }
+
+-- | Get the path to memory.db
+getMemoryDbPath :: IO FilePath
+getMemoryDbPath = do
+ maybeEnv <- lookupEnv "MEMORY_DB_PATH"
+ case maybeEnv of
+ Just p -> pure p
+ Nothing -> do
+ home <- lookupEnv "HOME"
+ case home of
+ Just h -> pure (h </> ".local/share/omni/memory.db")
+ Nothing -> pure "_/memory.db"
+
+-- | Run an action with the memory database connection.
+withMemoryDb :: (SQL.Connection -> IO a) -> IO a
+withMemoryDb action = do
+ dbPath <- getMemoryDbPath
+ createDirectoryIfMissing True (takeDirectory dbPath)
+ SQL.withConnection dbPath <| \conn -> do
+ initMemoryDb conn
+ action conn
+
+-- | Initialize the memory database schema.
+initMemoryDb :: SQL.Connection -> IO ()
+initMemoryDb conn = do
+ SQL.execute_ conn "PRAGMA busy_timeout = 10000"
+ SQL.execute_ conn "PRAGMA foreign_keys = ON"
+ _ <- SQL.query_ conn "PRAGMA journal_mode = WAL" :: IO [[Text]]
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS users (\
+ \ id TEXT PRIMARY KEY,\
+ \ telegram_id INTEGER UNIQUE,\
+ \ email TEXT UNIQUE,\
+ \ name TEXT NOT NULL,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS memories (\
+ \ id TEXT PRIMARY KEY,\
+ \ user_id TEXT NOT NULL REFERENCES users(id),\
+ \ content TEXT NOT NULL,\
+ \ embedding BLOB,\
+ \ source_agent TEXT NOT NULL,\
+ \ source_session TEXT,\
+ \ source_context TEXT,\
+ \ confidence REAL DEFAULT 0.8,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+ \ last_accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+ \ tags TEXT\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_memories_user ON memories(user_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_memories_agent ON memories(source_agent)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS conversation_messages (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL REFERENCES users(id),\
+ \ chat_id INTEGER NOT NULL,\
+ \ role TEXT NOT NULL,\
+ \ sender_name TEXT,\
+ \ content TEXT NOT NULL,\
+ \ tokens_estimate INTEGER,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_conv_user_chat ON conversation_messages(user_id, chat_id)"
+ migrateConversationMessages conn
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS conversation_summaries (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL REFERENCES users(id),\
+ \ chat_id INTEGER NOT NULL,\
+ \ summary TEXT NOT NULL,\
+ \ messages_summarized INTEGER NOT NULL,\
+ \ tokens_saved INTEGER,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_summary_user_chat ON conversation_summaries(user_id, chat_id)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS notes (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL,\
+ \ topic TEXT NOT NULL,\
+ \ content TEXT NOT NULL,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_notes_user ON notes(user_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_notes_topic ON notes(user_id, topic)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS todos (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL,\
+ \ title TEXT NOT NULL,\
+ \ due_date TIMESTAMP,\
+ \ completed INTEGER NOT NULL DEFAULT 0,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_todos_user ON todos(user_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_todos_due ON todos(user_id, due_date)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS memory_links (\
+ \ from_memory_id TEXT NOT NULL REFERENCES memories(id) ON DELETE CASCADE,\
+ \ to_memory_id TEXT NOT NULL REFERENCES memories(id) ON DELETE CASCADE,\
+ \ relation_type TEXT NOT NULL,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+ \ PRIMARY KEY (from_memory_id, to_memory_id, relation_type)\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_memory_links_from ON memory_links(from_memory_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_memory_links_to ON memory_links(to_memory_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_memory_links_type ON memory_links(relation_type)"
+
+-- | Migrate conversation_messages to add sender_name and thread_id columns.
+migrateConversationMessages :: SQL.Connection -> IO ()
+migrateConversationMessages conn = do
+ columns <- SQL.query_ conn "PRAGMA table_info(conversation_messages)" :: IO [(Int, Text, Text, Int, Maybe Text, Int)]
+ let columnNames = map (\(_, name, _, _, _, _) -> name) columns
+ unless ("sender_name" `elem` columnNames) <| do
+ SQL.execute_ conn "ALTER TABLE conversation_messages ADD COLUMN sender_name TEXT"
+ SQL.execute_ conn "UPDATE conversation_messages SET sender_name = 'bensima' WHERE role = 'user' AND sender_name IS NULL"
+ unless ("thread_id" `elem` columnNames) <| do
+ SQL.execute_ conn "ALTER TABLE conversation_messages ADD COLUMN thread_id INTEGER"
+ SQL.execute_ conn "CREATE INDEX IF NOT EXISTS idx_conv_chat_thread ON conversation_messages(chat_id, thread_id)"
+
+-- | Create a new user.
+createUser :: Text -> Maybe Int -> IO User
+createUser name telegramId = do
+ uuid <- UUID.nextRandom
+ now <- getCurrentTime
+ let user =
+ User
+ { userId = UUID.toText uuid,
+ userTelegramId = telegramId,
+ userEmail = Nothing,
+ userName = name,
+ userCreatedAt = now
+ }
+ withMemoryDb <| \conn ->
+ SQL.execute
+ conn
+ "INSERT INTO users (id, telegram_id, email, name, created_at) VALUES (?, ?, ?, ?, ?)"
+ user
+ pure user
+
+-- | Get a user by ID.
+getUser :: Text -> IO (Maybe User)
+getUser uid =
+ withMemoryDb <| \conn -> do
+ results <- SQL.query conn "SELECT id, telegram_id, email, name, created_at FROM users WHERE id = ?" (SQL.Only uid)
+ pure (listToMaybe results)
+
+-- | Get a user by Telegram ID.
+getUserByTelegramId :: Int -> IO (Maybe User)
+getUserByTelegramId tid =
+ withMemoryDb <| \conn -> do
+ results <- SQL.query conn "SELECT id, telegram_id, email, name, created_at FROM users WHERE telegram_id = ?" (SQL.Only tid)
+ pure (listToMaybe results)
+
+-- | Get or create a user by Telegram ID.
+getOrCreateUserByTelegramId :: Int -> Text -> IO User
+getOrCreateUserByTelegramId tid name = do
+ existing <- getUserByTelegramId tid
+ case existing of
+ Just user -> pure user
+ Nothing -> createUser name (Just tid)
+
+-- | Store a memory for a user.
+storeMemory :: Text -> Text -> MemorySource -> IO Memory
+storeMemory uid content source = storeMemoryWithTags uid content source []
+
+-- | Store a memory with tags.
+storeMemoryWithTags :: Text -> Text -> MemorySource -> [Text] -> IO Memory
+storeMemoryWithTags uid content source tags = do
+ uuid <- UUID.nextRandom
+ now <- getCurrentTime
+ embedding <- embedText content
+ let mem =
+ Memory
+ { memoryId = UUID.toText uuid,
+ memoryUserId = uid,
+ memoryContent = content,
+ memoryEmbedding = either (const Nothing) Just embedding,
+ memorySource = source,
+ memoryConfidence = 0.8,
+ memoryCreatedAt = now,
+ memoryLastAccessedAt = now,
+ memoryTags = tags
+ }
+ withMemoryDb <| \conn ->
+ SQL.execute
+ conn
+ "INSERT INTO memories (id, user_id, content, embedding, source_agent, source_session, source_context, confidence, created_at, last_accessed_at, tags) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+ ( ( memoryId mem,
+ memoryUserId mem,
+ memoryContent mem,
+ vectorToBlob </ memoryEmbedding mem,
+ sourceAgent (memorySource mem),
+ sourceSession (memorySource mem),
+ sourceContext (memorySource mem)
+ )
+ SQL.:. ( memoryConfidence mem,
+ memoryCreatedAt mem,
+ memoryLastAccessedAt mem,
+ TE.decodeUtf8 (BL.toStrict (Aeson.encode (memoryTags mem)))
+ )
+ )
+ pure mem
+
+-- | Recall memories for a user using semantic similarity.
+recallMemories :: Text -> Text -> Int -> IO [Memory]
+recallMemories uid query limit = do
+ queryEmbedding <- embedText query
+ case queryEmbedding of
+ Left _ -> recallMemoriesByRecency uid limit
+ Right qEmb -> do
+ allMems <- getAllMemoriesForUser uid
+ let scored =
+ [ (m, cosineSimilarity qEmb emb)
+ | m <- allMems,
+ Just emb <- [memoryEmbedding m]
+ ]
+ sorted = List.sortBy (\(_, s1) (_, s2) -> compare s2 s1) scored
+ topN = take limit sorted
+ now <- getCurrentTime
+ traverse_ (updateMemoryAccess now <. memoryId <. fst) topN
+ pure (map fst topN)
+
+-- | Recall memories by recency (fallback when embedding fails).
+recallMemoriesByRecency :: Text -> Int -> IO [Memory]
+recallMemoriesByRecency uid limit =
+ withMemoryDb <| \conn -> do
+ SQL.query
+ conn
+ "SELECT id, user_id, content, embedding, source_agent, source_session, source_context, confidence, created_at, last_accessed_at, tags \
+ \FROM memories WHERE user_id = ? ORDER BY last_accessed_at DESC LIMIT ?"
+ (uid, limit)
+
+-- | Get all memories for a user.
+getAllMemoriesForUser :: Text -> IO [Memory]
+getAllMemoriesForUser uid =
+ withMemoryDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, user_id, content, embedding, source_agent, source_session, source_context, confidence, created_at, last_accessed_at, tags \
+ \FROM memories WHERE user_id = ?"
+ (SQL.Only uid)
+
+-- | Delete a memory.
+forgetMemory :: Text -> IO ()
+forgetMemory mid =
+ withMemoryDb <| \conn ->
+ SQL.execute conn "DELETE FROM memories WHERE id = ?" (SQL.Only mid)
+
+-- | Update memory's last accessed timestamp.
+updateMemoryAccess :: UTCTime -> Text -> IO ()
+updateMemoryAccess now mid =
+ withMemoryDb <| \conn ->
+ SQL.execute conn "UPDATE memories SET last_accessed_at = ? WHERE id = ?" (now, mid)
+
+-- | Create a link between two memories.
+linkMemories :: Text -> Text -> RelationType -> IO MemoryLink
+linkMemories fromId toId relType = do
+ now <- getCurrentTime
+ withMemoryDb <| \conn ->
+ SQL.execute
+ conn
+ "INSERT OR REPLACE INTO memory_links (from_memory_id, to_memory_id, relation_type, created_at) VALUES (?, ?, ?, ?)"
+ (fromId, toId, relationTypeToText relType, now)
+ pure
+ MemoryLink
+ { linkFromMemoryId = fromId,
+ linkToMemoryId = toId,
+ linkRelationType = relType,
+ linkCreatedAt = now
+ }
+
+-- | Get all links from a memory.
+getMemoryLinks :: Text -> IO [MemoryLink]
+getMemoryLinks memId =
+ withMemoryDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT from_memory_id, to_memory_id, relation_type, created_at \
+ \FROM memory_links WHERE from_memory_id = ? OR to_memory_id = ?"
+ (memId, memId)
+
+-- | Get memories linked to a given memory with their content.
+getLinkedMemories :: Text -> Maybe RelationType -> IO [(MemoryLink, Memory)]
+getLinkedMemories memId maybeRelType = do
+ links <- getMemoryLinks memId
+ let filteredLinks = case maybeRelType of
+ Nothing -> links
+ Just rt -> filter (\l -> linkRelationType l == rt) links
+ mems <- traverse loadMemory filteredLinks
+ pure [(l, m) | (l, Just m) <- zip filteredLinks mems]
+ where
+ loadMemory memLink = do
+ let targetId =
+ if linkFromMemoryId memLink == memId
+ then linkToMemoryId memLink
+ else linkFromMemoryId memLink
+ withMemoryDb <| \conn -> do
+ results <-
+ SQL.query
+ conn
+ "SELECT id, user_id, content, embedding, source_agent, source_session, source_context, confidence, created_at, last_accessed_at, tags \
+ \FROM memories WHERE id = ?"
+ (SQL.Only targetId)
+ pure (listToMaybe results)
+
+-- | Query the knowledge graph by traversing links from a starting memory.
+-- Returns all memories reachable within the given depth.
+queryGraph :: Text -> Int -> Maybe RelationType -> IO [(Memory, [MemoryLink])]
+queryGraph startMemId maxDepth maybeRelType = do
+ startMem <- getMemoryById startMemId
+ case startMem of
+ Nothing -> pure []
+ Just mem -> go [startMemId] [(mem, [])] 0
+ where
+ go :: [Text] -> [(Memory, [MemoryLink])] -> Int -> IO [(Memory, [MemoryLink])]
+ go _ acc depth | depth >= maxDepth = pure acc
+ go visitedIds acc depth = do
+ let currentIds = map (memoryId <. fst) acc
+ newIds = filter (`notElem` visitedIds) currentIds
+ if null newIds
+ then pure acc
+ else do
+ newLinked <- concat </ traverse (`getLinkedMemories` maybeRelType) newIds
+ let newMems = [(m, [l]) | (l, m) <- newLinked, memoryId m `notElem` visitedIds]
+ newVisited = visitedIds <> map (memoryId <. fst) newMems
+ go newVisited (acc <> newMems) (depth + 1)
+
+-- | Get a memory by ID.
+getMemoryById :: Text -> IO (Maybe Memory)
+getMemoryById memId =
+ withMemoryDb <| \conn -> do
+ results <-
+ SQL.query
+ conn
+ "SELECT id, user_id, content, embedding, source_agent, source_session, source_context, confidence, created_at, last_accessed_at, tags \
+ \FROM memories WHERE id = ?"
+ (SQL.Only memId)
+ pure (listToMaybe results)
+
+-- | Embed text using Ollama's nomic-embed-text model.
+embedText :: Text -> IO (Either Text (VS.Vector Float))
+embedText content = do
+ ollamaUrl <- fromMaybe "http://localhost:11434" </ lookupEnv "OLLAMA_URL"
+ let url = ollamaUrl <> "/api/embeddings"
+ req0 <- HTTP.parseRequest url
+ let body =
+ Aeson.object
+ [ "model" .= ("nomic-embed-text" :: Text),
+ "prompt" .= content
+ ]
+ req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ result <- try (HTTP.httpLBS req)
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("Embedding request failed: " <> tshow e))
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "embedding" obj of
+ Just (Aeson.Array arr) ->
+ let floats = [f | Aeson.Number n <- toList arr, let f = realToFrac n]
+ in pure (Right (VS.fromList floats))
+ _ -> pure (Left "No embedding in response")
+ _ -> pure (Left "Failed to parse embedding response")
+ else pure (Left ("Embedding HTTP error: " <> tshow status))
+
+-- | Convert a vector to a blob for storage.
+vectorToBlob :: VS.Vector Float -> BS.ByteString
+vectorToBlob v =
+ let bytes = VS.unsafeCast v :: VS.Vector Word8
+ in BS.pack (VS.toList bytes)
+
+-- | Convert a blob back to a vector.
+blobToVector :: BS.ByteString -> VS.Vector Float
+blobToVector bs =
+ let bytes = VS.fromList (BS.unpack bs) :: VS.Vector Word8
+ in VS.unsafeCast bytes
+
+-- | Calculate cosine similarity between two vectors.
+cosineSimilarity :: VS.Vector Float -> VS.Vector Float -> Float
+cosineSimilarity v1 v2
+ | VS.length v1 /= VS.length v2 = 0
+ | otherwise =
+ let dot = VS.sum (VS.zipWith (*) v1 v2)
+ mag1 = sqrt (VS.sum (VS.map (\x -> x * x) v1))
+ mag2 = sqrt (VS.sum (VS.map (\x -> x * x) v2))
+ in if mag1 == 0 || mag2 == 0 then 0 else dot / (mag1 * mag2)
+
+-- | Format memories for inclusion in a prompt.
+formatMemoriesForPrompt :: [Memory] -> Text
+formatMemoriesForPrompt [] = "No prior context available."
+formatMemoriesForPrompt mems =
+ Text.unlines
+ [ "Known context about this user:",
+ "",
+ Text.unlines (map formatMem mems)
+ ]
+ where
+ formatMem m =
+ "- " <> memoryContent m <> " (via " <> sourceAgent (memorySource m) <> ")"
+
+-- | Run an agent with memory context.
+-- Recalls relevant memories for the user and injects them into the system prompt.
+runAgentWithMemory ::
+ User ->
+ Engine.EngineConfig ->
+ Engine.AgentConfig ->
+ Text ->
+ IO (Either Text Engine.AgentResult)
+runAgentWithMemory user engineCfg agentCfg userPrompt = do
+ memories <- recallMemories (userId user) userPrompt 10
+ let memoryContext = formatMemoriesForPrompt memories
+ enhancedPrompt =
+ Engine.agentSystemPrompt agentCfg
+ <> "\n\n## Known about this user\n"
+ <> memoryContext
+ enhancedConfig =
+ agentCfg
+ { Engine.agentSystemPrompt = enhancedPrompt,
+ Engine.agentTools =
+ Engine.agentTools agentCfg
+ <> [ rememberTool (userId user),
+ recallTool (userId user),
+ linkMemoriesTool (userId user),
+ queryGraphTool (userId user)
+ ]
+ }
+ Engine.runAgent engineCfg enhancedConfig userPrompt
+
+-- | Tool for agents to store memories about users.
+rememberTool :: Text -> Engine.Tool
+rememberTool uid =
+ Engine.Tool
+ { Engine.toolName = "remember",
+ Engine.toolDescription =
+ "Store a piece of information about the user for future reference. "
+ <> "Use this when the user shares personal facts, preferences, or context "
+ <> "that would be useful to recall in future conversations.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "content"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The information to remember about the user" :: Text)
+ ],
+ "context"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("How/why this was learned (e.g., 'user mentioned in chat')" :: Text)
+ ],
+ "tags"
+ .= Aeson.object
+ [ "type" .= ("array" :: Text),
+ "items" .= Aeson.object ["type" .= ("string" :: Text)],
+ "description" .= ("Optional tags for categorization" :: Text)
+ ]
+ ],
+ "required" .= (["content", "context"] :: [Text])
+ ],
+ Engine.toolExecute = executeRemember uid
+ }
+
+executeRemember :: Text -> Aeson.Value -> IO Aeson.Value
+executeRemember uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: RememberArgs) -> do
+ let source =
+ MemorySource
+ { sourceAgent = "agent",
+ sourceSession = Nothing,
+ sourceContext = rememberContext args
+ }
+ mem <- storeMemoryWithTags uid (rememberContent args) source (rememberTags args)
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "memory_id" .= memoryId mem,
+ "message" .= ("Remembered: " <> rememberContent args)
+ ]
+ )
+
+-- | Tool for agents to recall memories about users.
+recallTool :: Text -> Engine.Tool
+recallTool uid =
+ Engine.Tool
+ { Engine.toolName = "recall",
+ Engine.toolDescription =
+ "Search your memory for information about the user. "
+ <> "Use this to retrieve previously stored facts, preferences, or context.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "query"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("What to search for in memory" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Maximum memories to return (default: 5)" :: Text)
+ ]
+ ],
+ "required" .= (["query"] :: [Text])
+ ],
+ Engine.toolExecute = executeRecall uid
+ }
+
+executeRecall :: Text -> Aeson.Value -> IO Aeson.Value
+executeRecall uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: RecallArgs) -> do
+ mems <- recallMemories uid (recallQuery args) (recallLimit args)
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length mems,
+ "memories"
+ .= map
+ ( \m ->
+ Aeson.object
+ [ "id" .= memoryId m,
+ "content" .= memoryContent m,
+ "confidence" .= memoryConfidence m,
+ "source" .= sourceAgent (memorySource m),
+ "tags" .= memoryTags m
+ ]
+ )
+ mems
+ ]
+ )
+
+-- Helper for parsing remember args
+data RememberArgs = RememberArgs
+ { rememberContent :: Text,
+ rememberContext :: Text,
+ rememberTags :: [Text]
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON RememberArgs where
+ parseJSON =
+ Aeson.withObject "RememberArgs" <| \v ->
+ (RememberArgs </ (v .: "content"))
+ <*> (v .:? "context" .!= "agent observation")
+ <*> (v .:? "tags" .!= [])
+
+data RecallArgs = RecallArgs
+ { recallQuery :: Text,
+ recallLimit :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON RecallArgs where
+ parseJSON =
+ Aeson.withObject "RecallArgs" <| \v ->
+ (RecallArgs </ (v .: "query"))
+ <*> (v .:? "limit" .!= 5)
+
+-- | Tool for agents to link memories in the knowledge graph.
+linkMemoriesTool :: Text -> Engine.Tool
+linkMemoriesTool _uid =
+ Engine.Tool
+ { Engine.toolName = "link_memories",
+ Engine.toolDescription =
+ "Create a typed relationship between two memories. "
+ <> "Use this to connect related information. Relation types:\n"
+ <> "- contradicts: conflicting information\n"
+ <> "- supports: evidence that reinforces another memory\n"
+ <> "- elaborates: adds detail to an existing memory\n"
+ <> "- supersedes: newer info replaces older\n"
+ <> "- related: general topical connection\n"
+ <> "- contingent_on: depends on another fact being true",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "from_memory_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("ID of the source memory" :: Text)
+ ],
+ "to_memory_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("ID of the target memory" :: Text)
+ ],
+ "relation_type"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "enum" .= (["contradicts", "supports", "elaborates", "supersedes", "related", "contingent_on"] :: [Text]),
+ "description" .= ("Type of relationship between memories" :: Text)
+ ]
+ ],
+ "required" .= (["from_memory_id", "to_memory_id", "relation_type"] :: [Text])
+ ],
+ Engine.toolExecute = executeLinkMemories
+ }
+
+executeLinkMemories :: Aeson.Value -> IO Aeson.Value
+executeLinkMemories v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: LinkMemoriesArgs) -> do
+ case textToRelationType (linkArgsRelationType args) of
+ Nothing ->
+ pure
+ ( Aeson.object
+ [ "success" .= False,
+ "error" .= ("Invalid relation type: " <> linkArgsRelationType args)
+ ]
+ )
+ Just relType -> do
+ memLink <- linkMemories (linkArgsFromId args) (linkArgsToId args) relType
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message"
+ .= ( "Linked memory "
+ <> linkFromMemoryId memLink
+ <> " -> "
+ <> linkToMemoryId memLink
+ <> " ("
+ <> relationTypeToText (linkRelationType memLink)
+ <> ")"
+ )
+ ]
+ )
+
+data LinkMemoriesArgs = LinkMemoriesArgs
+ { linkArgsFromId :: Text,
+ linkArgsToId :: Text,
+ linkArgsRelationType :: Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON LinkMemoriesArgs where
+ parseJSON =
+ Aeson.withObject "LinkMemoriesArgs" <| \v ->
+ (LinkMemoriesArgs </ (v .: "from_memory_id"))
+ <*> (v .: "to_memory_id")
+ <*> (v .: "relation_type")
+
+-- | Tool for agents to query the memory knowledge graph.
+queryGraphTool :: Text -> Engine.Tool
+queryGraphTool _uid =
+ Engine.Tool
+ { Engine.toolName = "query_graph",
+ Engine.toolDescription =
+ "Explore the knowledge graph to find related memories. "
+ <> "Given a starting memory, traverse links to find connected memories. "
+ <> "Useful for understanding context and finding contradictions or supporting evidence.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "memory_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("ID of the memory to start from" :: Text)
+ ],
+ "depth"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("How many link hops to traverse (default: 2)" :: Text)
+ ],
+ "relation_type"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "enum" .= (["contradicts", "supports", "elaborates", "supersedes", "related", "contingent_on"] :: [Text]),
+ "description" .= ("Optional: filter by relation type" :: Text)
+ ]
+ ],
+ "required" .= (["memory_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeQueryGraph
+ }
+
+executeQueryGraph :: Aeson.Value -> IO Aeson.Value
+executeQueryGraph v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: QueryGraphArgs) -> do
+ let maybeRelType = queryArgsRelationType args +> textToRelationType
+ results <- queryGraph (queryArgsMemoryId args) (queryArgsDepth args) maybeRelType
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length results,
+ "memories"
+ .= map
+ ( \(m, links) ->
+ Aeson.object
+ [ "id" .= memoryId m,
+ "content" .= memoryContent m,
+ "links"
+ .= map
+ ( \l ->
+ Aeson.object
+ [ "from" .= linkFromMemoryId l,
+ "to" .= linkToMemoryId l,
+ "relation" .= linkRelationType l
+ ]
+ )
+ links
+ ]
+ )
+ results
+ ]
+ )
+
+data QueryGraphArgs = QueryGraphArgs
+ { queryArgsMemoryId :: Text,
+ queryArgsDepth :: Int,
+ queryArgsRelationType :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON QueryGraphArgs where
+ parseJSON =
+ Aeson.withObject "QueryGraphArgs" <| \v ->
+ (QueryGraphArgs </ (v .: "memory_id"))
+ <*> (v .:? "depth" .!= 2)
+ <*> (v .:? "relation_type")
+
+-- | Estimate token count for text (rough: ~4 chars per token).
+estimateTokens :: Text -> Int
+estimateTokens t = max 1 (Text.length t `div` 4)
+
+-- | Save a message to conversation history.
+saveMessage :: Text -> Int -> MessageRole -> Maybe Text -> Text -> IO ConversationMessage
+saveMessage uid chatId role senderName content = do
+ now <- getCurrentTime
+ let tokens = estimateTokens content
+ withMemoryDb <| \conn -> do
+ SQL.execute
+ conn
+ "INSERT INTO conversation_messages (user_id, chat_id, role, sender_name, content, tokens_estimate, created_at) VALUES (?, ?, ?, ?, ?, ?, ?)"
+ (uid, chatId, roleToText role, senderName, content, tokens, now)
+ rowId <- SQL.lastInsertRowId conn
+ pure
+ ConversationMessage
+ { cmId = Just (fromIntegral rowId),
+ cmUserId = uid,
+ cmChatId = chatId,
+ cmRole = role,
+ cmSenderName = senderName,
+ cmContent = content,
+ cmTokensEstimate = tokens,
+ cmCreatedAt = now
+ }
+ where
+ roleToText UserRole = "user" :: Text
+ roleToText AssistantRole = "assistant"
+
+-- | Get recent messages for a user/chat, newest first.
+getRecentMessages :: Text -> Int -> Int -> IO [ConversationMessage]
+getRecentMessages uid chatId limit =
+ withMemoryDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, role, sender_name, content, tokens_estimate, created_at \
+ \FROM conversation_messages \
+ \WHERE user_id = ? AND chat_id = ? \
+ \ORDER BY created_at DESC LIMIT ?"
+ (uid, chatId, limit)
+
+-- | Get the most recent summary for a chat.
+getLatestSummary :: Text -> Int -> IO (Maybe ConversationSummary)
+getLatestSummary uid chatId =
+ withMemoryDb <| \conn -> do
+ rows <-
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, summary, messages_summarized, tokens_saved, created_at \
+ \FROM conversation_summaries \
+ \WHERE user_id = ? AND chat_id = ? \
+ \ORDER BY created_at DESC LIMIT 1"
+ (uid, chatId)
+ pure (listToMaybe rows)
+
+-- | Build conversation context for the LLM.
+-- Returns (context text, total token estimate).
+getConversationContext :: Text -> Int -> Int -> IO (Text, Int)
+getConversationContext uid chatId maxTokens = do
+ maybeSummary <- getLatestSummary uid chatId
+ recentMsgs <- getRecentMessages uid chatId 50
+
+ let summaryText = maybe "" (\s -> "## Previous conversation summary\n" <> csSummary s <> "\n\n") maybeSummary
+ summaryTokens = maybe 0 (estimateTokens <. csSummary) maybeSummary
+
+ msgsOldestFirst = reverse recentMsgs
+ availableTokens = maxTokens - summaryTokens - 100
+
+ (selectedMsgs, usedTokens) = selectMessages msgsOldestFirst availableTokens
+
+ formattedMsgs =
+ if null selectedMsgs
+ then ""
+ else
+ "## Recent conversation\n"
+ <> Text.unlines (map formatMsg selectedMsgs)
+
+ pure (summaryText <> formattedMsgs, summaryTokens + usedTokens)
+ where
+ selectMessages :: [ConversationMessage] -> Int -> ([ConversationMessage], Int)
+ selectMessages msgs budget = go (reverse msgs) budget []
+ where
+ go [] _ acc = (acc, sum (map cmTokensEstimate acc))
+ go (m : ms) remaining acc
+ | cmTokensEstimate m <= remaining =
+ go ms (remaining - cmTokensEstimate m) (m : acc)
+ | otherwise = (acc, sum (map cmTokensEstimate acc))
+
+ formatMsg m =
+ let timestamp = Text.pack (formatTime defaultTimeLocale "%Y-%m-%dT%H:%M:%SZ" (cmCreatedAt m))
+ prefix = case cmRole m of
+ UserRole -> "[" <> timestamp <> "] " <> fromMaybe "User" (cmSenderName m) <> ": "
+ AssistantRole -> "[" <> timestamp <> "] Assistant: "
+ in prefix <> cmContent m
+
+-- | Summarize old messages and archive them.
+-- Returns the new summary text.
+summarizeAndArchive :: Text -> Int -> Text -> IO Text
+summarizeAndArchive uid chatId summaryText = do
+ now <- getCurrentTime
+
+ (oldMsgCount, tokensSaved) <-
+ withMemoryDb <| \conn -> do
+ rows <-
+ SQL.query
+ conn
+ "SELECT COUNT(*), COALESCE(SUM(tokens_estimate), 0) FROM conversation_messages WHERE user_id = ? AND chat_id = ?"
+ (uid, chatId) ::
+ IO [(Int, Int)]
+ let (count, tokens) = fromMaybe (0, 0) (listToMaybe rows)
+
+ SQL.execute
+ conn
+ "INSERT INTO conversation_summaries (user_id, chat_id, summary, messages_summarized, tokens_saved, created_at) VALUES (?, ?, ?, ?, ?, ?)"
+ (uid, chatId, summaryText, count, tokens, now)
+
+ SQL.execute
+ conn
+ "DELETE FROM conversation_messages WHERE user_id = ? AND chat_id = ?"
+ (uid, chatId)
+
+ pure (count, tokens)
+
+ putText <| "Archived " <> tshow oldMsgCount <> " messages (" <> tshow tokensSaved <> " tokens) for chat " <> tshow chatId
+ pure summaryText
+
+-- -----------------------------------------------------------------------------
+-- Group Conversation History
+-- -----------------------------------------------------------------------------
+
+-- | Save a message to group conversation history.
+-- Unlike saveMessage, this is keyed by (chat_id, thread_id) not (user_id, chat_id).
+-- The sender_name is preserved for attribution.
+saveGroupMessage :: Int -> Maybe Int -> MessageRole -> Text -> Text -> IO ConversationMessage
+saveGroupMessage chatId mThreadId role senderName content = do
+ now <- getCurrentTime
+ let tokens = estimateTokens content
+ withMemoryDb <| \conn -> do
+ SQL.execute
+ conn
+ "INSERT INTO conversation_messages (user_id, chat_id, thread_id, role, sender_name, content, tokens_estimate, created_at) VALUES (NULL, ?, ?, ?, ?, ?, ?, ?)"
+ (chatId, mThreadId, roleToText role, senderName, content, tokens, now)
+ rowId <- SQL.lastInsertRowId conn
+ pure
+ ConversationMessage
+ { cmId = Just (fromIntegral rowId),
+ cmUserId = "",
+ cmChatId = chatId,
+ cmRole = role,
+ cmSenderName = Just senderName,
+ cmContent = content,
+ cmTokensEstimate = tokens,
+ cmCreatedAt = now
+ }
+ where
+ roleToText UserRole = "user" :: Text
+ roleToText AssistantRole = "assistant"
+
+-- | Get recent messages for a group chat/topic, newest first.
+getGroupRecentMessages :: Int -> Maybe Int -> Int -> IO [ConversationMessage]
+getGroupRecentMessages chatId mThreadId limit =
+ withMemoryDb <| \conn ->
+ case mThreadId of
+ Just threadId ->
+ SQL.query
+ conn
+ "SELECT id, COALESCE(user_id, ''), chat_id, role, sender_name, content, tokens_estimate, created_at \
+ \FROM conversation_messages \
+ \WHERE chat_id = ? AND thread_id = ? \
+ \ORDER BY created_at DESC LIMIT ?"
+ (chatId, threadId, limit)
+ Nothing ->
+ SQL.query
+ conn
+ "SELECT id, COALESCE(user_id, ''), chat_id, role, sender_name, content, tokens_estimate, created_at \
+ \FROM conversation_messages \
+ \WHERE chat_id = ? AND thread_id IS NULL \
+ \ORDER BY created_at DESC LIMIT ?"
+ (chatId, limit)
+
+-- | Build conversation context for a group chat.
+-- Returns (context text, total token estimate).
+getGroupConversationContext :: Int -> Maybe Int -> Int -> IO (Text, Int)
+getGroupConversationContext chatId mThreadId maxTokens = do
+ recentMsgs <- getGroupRecentMessages chatId mThreadId 50
+
+ let msgsOldestFirst = reverse recentMsgs
+ availableTokens = maxTokens - 100
+
+ (selectedMsgs, usedTokens) = selectMessages msgsOldestFirst availableTokens
+
+ formattedMsgs =
+ if null selectedMsgs
+ then ""
+ else
+ "## Recent conversation\n"
+ <> Text.unlines (map formatMsg selectedMsgs)
+
+ pure (formattedMsgs, usedTokens)
+ where
+ selectMessages :: [ConversationMessage] -> Int -> ([ConversationMessage], Int)
+ selectMessages msgs budget = go (reverse msgs) budget []
+ where
+ go [] _ acc = (acc, sum (map cmTokensEstimate acc))
+ go (m : ms) remaining acc
+ | cmTokensEstimate m <= remaining =
+ go ms (remaining - cmTokensEstimate m) (m : acc)
+ | otherwise = (acc, sum (map cmTokensEstimate acc))
+
+ formatMsg m =
+ let timestamp = Text.pack (formatTime defaultTimeLocale "%Y-%m-%dT%H:%M:%SZ" (cmCreatedAt m))
+ prefix = case cmRole m of
+ UserRole -> "[" <> timestamp <> "] " <> fromMaybe "User" (cmSenderName m) <> ": "
+ AssistantRole -> "[" <> timestamp <> "] Assistant: "
+ in prefix <> cmContent m
+
+-- -----------------------------------------------------------------------------
+-- Group Memories
+-- -----------------------------------------------------------------------------
+
+-- | Generate a synthetic user_id for group-level memories.
+groupUserId :: Int -> Text
+groupUserId chatId = "group:" <> tshow chatId
+
+-- | Store a memory associated with a group (not a user).
+-- These memories are shared across all users in the group.
+storeGroupMemory :: Int -> Text -> MemorySource -> IO Memory
+storeGroupMemory chatId = storeMemory (groupUserId chatId)
+
+-- | Recall memories for a group.
+recallGroupMemories :: Int -> Text -> Int -> IO [Memory]
+recallGroupMemories chatId = recallMemories (groupUserId chatId)
diff --git a/Omni/Agent/PLAN.md b/Omni/Agent/PLAN.md
new file mode 100644
index 0000000..e51d09b
--- /dev/null
+++ b/Omni/Agent/PLAN.md
@@ -0,0 +1,589 @@
+# Omni Agent Infrastructure Plan
+
+**Status**: Draft
+**Author**: Ben (with AI assistance)
+**Date**: 2025-12-11
+
+## Vision
+
+A unified agent infrastructure supporting multiple specialized agents (coder, researcher, planner, telegram bot, etc.) with:
+- Shared tools, memory, and model backends
+- LoRA fine-tuning with model snapshots
+- Evals to prevent regression
+- Configurable LLM providers (local Ollama or OpenRouter)
+
+---
+
+## 0. Scope & Task Tracking
+
+**Building now**: Infrastructure and library primitives
+**First concrete agent**: Telegram Bot (validates the infrastructure)
+**Building later**: Researcher, Planner, and other agents
+
+### Active Tasks (in dependency order)
+
+| Task ID | Title | Status | Blocks |
+|---------|-------|--------|--------|
+| t-247 | Provider Abstraction | Open | t-248, t-249, t-250 |
+| t-248 | Memory System | Open (blocked by t-247) | t-251 |
+| t-249 | Tool Registry | Open (blocked by t-247) | t-251 |
+| t-250 | Evals Framework | Open (blocked by t-247) | - |
+| t-251 | Telegram Bot Agent | Open (blocked by t-248, t-249) | - |
+
+Run `jr task show <id>` for full implementation details on each task.
+
+---
+
+## 1. Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Agent Layer │
+├──────────┬──────────┬──────────┬──────────┬────────────────────┤
+│ Jr/Coder │Researcher│ Planner │ Telegram │ Future Agents... │
+└────┬─────┴────┬─────┴────┬─────┴────┬─────┴────────────────────┘
+ │ │ │ │
+┌────▼──────────▼──────────▼──────────▼──────────────────────────┐
+│ Omni.Agent.Core │
+│ - Agent protocol (system prompt, tool execution loop) │
+│ - Model backend abstraction (Ollama | OpenRouter | Amp) │
+│ - Conversation/session management │
+└────┬────────────────────────────────────────────────────────────┘
+ │
+┌────▼────────────────────────────────────────────────────────────┐
+│ Shared Infrastructure │
+├─────────────────┬─────────────────┬─────────────────────────────┤
+│ Omni.Agent.Tools│ Omni.Agent.Memory│ Omni.Agent.Evals │
+│ - read_file │ - Vector DB │ - Regression tests │
+│ - edit_file │ - Fact retrieval │ - Quality metrics │
+│ - run_bash │ - Session history│ - Model comparison │
+│ - search │ │ │
+│ - web_search │ │ │
+│ - (pluggable) │ │ │
+├─────────────────┴─────────────────┴─────────────────────────────┤
+│ Omni.Agent.Training │
+│ - LoRA fine-tuning orchestration │
+│ - Model snapshotting │
+│ - Training data collection │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## 2. Immediate Work Items
+
+### 2.1 Add Amp Backend Support (--amp flag)
+
+**Problem**: Custom engine works but Amp is better for complex coding tasks.
+
+**Solution**: Add `--engine` flag to `jr work`:
+
+```bash
+jr work <task-id> # Uses native Engine (default)
+jr work <task-id> --engine=amp # Uses Amp via subprocess
+jr work <task-id> --engine=ollama # Uses local Ollama
+```
+
+**Implementation**:
+1. Add `EngineBackend` type: `Native | Amp | Ollama Text`
+2. Modify `Omni.Agent.Worker.start` to accept backend selection
+3. For Amp: spawn `amp --prompt-file` subprocess, capture output
+4. For Ollama: call local API instead of OpenRouter
+
+**Files to modify**:
+- `Omni/Jr.hs` - CLI parsing
+- `Omni/Agent/Worker.hs` - Backend dispatch
+- `Omni/Agent/Engine.hs` - Add Ollama provider
+
+### 2.2 Abstract LLM Provider
+
+**Current state**: `Engine.hs` hardcodes OpenRouter.
+
+**Target state**: Pluggable `LLMProvider` interface.
+
+```haskell
+-- Omni/Agent/Provider.hs
+data Provider
+ = OpenRouter { apiKey :: Text, model :: Text }
+ | Ollama { baseUrl :: Text, model :: Text }
+ | AmpCLI { promptFile :: FilePath }
+
+chat :: Provider -> [Message] -> [Tool] -> IO (Either Text Message)
+```
+
+### 2.3 Memory / Vector DB Integration
+
+**Purpose**: Long-term memory across agent sessions, shared across all agents, private per user.
+
+**Decision**: Use sqlite-vss for vector similarity search (not Omni.Fact - that's project-scoped, not user-scoped).
+
+**Key requirements**:
+- Cross-agent sharing: Telegram agent learns "Ben is an AI engineer" → Researcher agent recalls this
+- Multi-user: Each family member has private memories (identified by Telegram ID initially)
+- Embeddings via Ollama `/api/embeddings` endpoint with nomic-embed-text model
+
+See task t-248 for full implementation details.
+
+### 2.4 Pluggable Tool System
+
+**Current**: `Omni.Agent.Tools` has 6 hardcoded tools.
+
+**Target**: Registry pattern allowing agents to declare their tool sets.
+
+```haskell
+-- Each agent specifies its tools
+coderTools :: [Tool]
+coderTools = [readFileTool, writeFileTool, editFileTool, runBashTool, searchCodebaseTool]
+
+researcherTools :: [Tool]
+researcherTools = [webSearchTool, readWebPageTool, extractFactsTool, readFileTool]
+
+plannerTools :: [Tool]
+plannerTools = [taskCreateTool, taskListTool, taskUpdateTool, factQueryTool]
+
+telegramTools :: [Tool]
+telegramTools = [sendMessageTool, getUpdatesTool, factQueryTool]
+```
+
+---
+
+## 3. Agent Specifications
+
+### 3.1 Jr/Coder (existing)
+
+**Purpose**: Autonomous coding agent for task completion.
+
+**Tools**: read_file, write_file, edit_file, run_bash, search_codebase, search_and_read
+
+**System prompt**: Task-focused, code conventions, test requirements.
+
+### 3.2 Researcher (new)
+
+**Purpose**: Information gathering, analysis, summarization.
+
+**Tools**:
+- `web_search` - Search the web
+- `read_web_page` - Fetch and parse web content
+- `extract_facts` - Store learned facts in knowledge base
+- `read_file` - Read local documents
+- `query_facts` - Retrieve from knowledge base
+
+**System prompt**: Focus on accuracy, citation, verification.
+
+### 3.3 Project Planner (new)
+
+**Purpose**: Break down high-level goals into actionable tasks.
+
+**Tools**:
+- `task_create` - Create new tasks
+- `task_list` - Query existing tasks
+- `task_update` - Modify task status/content
+- `fact_query` - Get project context
+- `dependency_graph` - Visualize task dependencies
+
+**System prompt**: Project management, task decomposition, dependency analysis.
+
+### 3.4 Telegram Bot (FIRST AGENT TO BUILD)
+
+**Purpose**: Family assistant accessible via Telegram. First concrete agent to validate infrastructure.
+
+**Tools**:
+- `remember` - Store facts about the user (from Memory module)
+- `recall` - Query user's memories (from Memory module)
+- `web_search` - Answer questions requiring web lookup (from Registry)
+
+**System prompt**: Friendly, helpful, family-appropriate, concise for chat interface.
+
+**User identification**: Telegram user ID → creates/retrieves User record in memory.db
+
+See task t-251 for full implementation details.
+
+---
+
+## 4. Shared Infrastructure
+
+### 4.1 Model Backend Configuration
+
+```haskell
+-- ~/.config/omni/models.yaml or environment variables
+data ModelConfig = ModelConfig
+ { defaultProvider :: Provider
+ , modelOverrides :: Map Text Provider -- per-agent overrides
+ }
+
+-- Example config:
+-- default_provider: openrouter
+-- openrouter:
+-- api_key: $OPENROUTER_API_KEY
+-- default_model: anthropic/claude-sonnet-4.5
+-- ollama:
+-- base_url: http://localhost:11434
+-- default_model: llama3.1:70b
+-- agents:
+-- telegram: { provider: ollama, model: llama3.1:8b } # cheaper for chat
+-- coder: { provider: openrouter, model: anthropic/claude-sonnet-4.5 }
+```
+
+### 4.2 Evals Framework
+
+**Purpose**: Prevent regression when changing prompts, tools, or models.
+
+**Components**:
+1. **Test Cases**: Known task + expected outcome pairs
+2. **Runner**: Execute agent on test cases, capture results
+3. **Scorer**: Compare results (exact match, semantic similarity, human eval)
+4. **Dashboard**: Track scores over time
+
+**Implementation**:
+```haskell
+-- Omni/Agent/Eval.hs
+data EvalCase = EvalCase
+ { evalId :: Text
+ , evalPrompt :: Text
+ , evalExpectedBehavior :: Text -- or structured criteria
+ , evalTools :: [Tool]
+ }
+
+runEval :: AgentConfig -> EvalCase -> IO EvalResult
+```
+
+### 4.3 Shared Memory System (Omni.Agent.Memory)
+
+**Critical requirement**: Cross-agent memory sharing with multi-user support.
+
+**Example**: User tells Telegram bot "I'm an AI engineer" → Research agent later searching for papers should recall this context.
+
+#### Why not Omni.Fact?
+
+Current `Omni.Fact` limitations:
+- Project-scoped, not user-scoped
+- No user/identity concept
+- No embeddings for semantic retrieval
+- Tied to task system
+
+#### Memory Design
+
+```haskell
+-- Omni/Agent/Memory.hs
+
+-- | A memory is a piece of information about a user, learned by any agent
+data Memory = Memory
+ { memoryId :: UUID
+ , memoryUserId :: UserId -- Who this memory is about
+ , memoryContent :: Text -- The actual information
+ , memoryEmbedding :: Maybe Vector -- For semantic search
+ , memorySource :: MemorySource -- Which agent learned this
+ , memoryConfidence :: Double -- 0.0-1.0
+ , memoryCreatedAt :: UTCTime
+ , memoryLastAccessedAt :: UTCTime -- For relevance decay
+ , memoryTags :: [Text] -- Optional categorization
+ }
+
+data MemorySource = MemorySource
+ { sourceAgent :: Text -- "telegram", "researcher", "coder", etc.
+ , sourceSession :: UUID -- Session ID where this was learned
+ , sourceContext :: Text -- Brief context of how it was learned
+ }
+
+data User = User
+ { userId :: UUID
+ , userTelegramId :: Maybe Int64 -- Primary identifier initially
+ , userEmail :: Maybe Text -- Added later when email interface exists
+ , userName :: Text -- Display name ("Ben", "Alice", etc.)
+ , userCreatedAt :: UTCTime
+ }
+
+-- Users are identified by Telegram ID initially
+-- The agent learns more about users over time and stores in memories
+-- e.g., "Ben is an AI engineer" becomes a memory, not a user field
+
+-- | Core operations
+storeMemory :: UserId -> Text -> MemorySource -> IO Memory
+recallMemories :: UserId -> Text -> Int -> IO [Memory] -- semantic search
+forgetMemory :: UUID -> IO ()
+
+-- | Embedding integration (via Ollama or other provider)
+embedText :: Text -> IO Vector
+similaritySearch :: Vector -> [Memory] -> Int -> [Memory]
+```
+
+#### Multi-User Architecture
+
+```
+┌─────────────────────────────────────────────────────────┐
+│ Memory Store │
+├─────────────────────────────────────────────────────────┤
+│ users table: │
+│ id TEXT PRIMARY KEY │
+│ name TEXT │
+│ created_at TIMESTAMP │
+├─────────────────────────────────────────────────────────┤
+│ memories table: │
+│ id TEXT PRIMARY KEY │
+│ user_id TEXT REFERENCES users(id) │
+│ content TEXT │
+│ embedding BLOB -- serialized float vector │
+│ source_agent TEXT │
+│ source_session TEXT │
+│ source_context TEXT │
+│ confidence REAL │
+│ created_at TIMESTAMP │
+│ last_accessed_at TIMESTAMP │
+│ tags TEXT -- JSON array │
+└─────────────────────────────────────────────────────────┘
+```
+
+#### Memory Retrieval in Agent Loop
+
+When any agent runs, it:
+1. Identifies the current user (from context/session)
+2. Extracts key concepts from the user's request
+3. Calls `recallMemories userId query 10` to get relevant memories
+4. Injects memories into system prompt as context
+5. After completion, extracts new learnings and calls `storeMemory`
+
+```haskell
+-- In agent loop
+runAgentWithMemory :: UserId -> AgentConfig -> Text -> IO AgentResult
+runAgentWithMemory userId config prompt = do
+ -- Recall relevant memories
+ memories <- recallMemories userId prompt 10
+ let memoryContext = formatMemoriesForPrompt memories
+
+ -- Inject into system prompt
+ let enhancedPrompt = agentSystemPrompt config <> "\n\n## User Context\n" <> memoryContext
+
+ -- Run agent
+ result <- runAgent config { agentSystemPrompt = enhancedPrompt } prompt
+
+ -- Extract and store new memories (could be done by the agent via tool)
+ pure result
+```
+
+#### Memory Extraction Tool
+
+Agents can explicitly store memories:
+
+```haskell
+storeMemoryTool :: Tool
+storeMemoryTool = Tool
+ { toolName = "remember"
+ , toolDescription = "Store a piece of information about the user for future reference"
+ , toolExecute = \args -> do
+ let content = args .: "content"
+ tags = args .:? "tags" .!= []
+ memory <- storeMemory currentUserId content currentSource
+ pure (toJSON memory)
+ }
+```
+
+### 4.4 LoRA Fine-tuning Service
+
+**Purpose**: Custom-tune models on successful task completions.
+
+**Workflow**:
+1. Collect successful agent sessions (prompt + tool calls + result)
+2. Format as training data (instruction, input, output)
+3. Run LoRA training via Ollama or external service
+4. Snapshot trained model with version tag
+5. A/B test against base model via evals
+
+**Storage**:
+- Training data: `_/training/<agent>/<date>.jsonl`
+- Models: Ollama model registry with tags
+
+---
+
+## 5. Infrastructure Build Plan
+
+Focus: Library primitives first, agents later.
+
+### Phase 1: Provider Abstraction (1-2 days)
+- [ ] Create `Omni.Agent.Provider` module with unified interface
+- [ ] Extract OpenRouter logic from `Engine.hs`
+- [ ] Add Ollama provider implementation
+- [ ] Add `--engine` flag to `jr work`
+- [ ] Test with local Llama model
+
+### Phase 2: Amp Re-integration (1 day)
+- [ ] Add Amp subprocess backend to Provider
+- [ ] Handle Amp's streaming output
+- [ ] Parse Amp thread URL for linking
+
+### Phase 3: Memory System (3-4 days)
+- [ ] Create `Omni.Agent.Memory` module (separate from Fact)
+- [ ] Design schema: users, memories tables
+- [ ] Implement `storeMemory`, `recallMemories`, `forgetMemory`
+- [ ] Add embedding support via Ollama `/api/embeddings`
+- [ ] Implement similarity search
+- [ ] Create `remember` tool for agents
+- [ ] Add `runAgentWithMemory` wrapper
+
+### Phase 4: Tool Registry (1-2 days)
+- [ ] Create `Omni.Agent.Registry` for tool management
+- [ ] Define tool categories (coding, web, memory, task)
+- [ ] Allow agents to declare tool requirements
+- [ ] Add web tools (web_search, read_web_page)
+
+### Phase 5: Evals Framework (2-3 days)
+- [ ] Create `Omni.Agent.Eval` module
+- [ ] Define `EvalCase` and `EvalResult` types
+- [ ] Build eval runner
+- [ ] Add scoring (exact match, semantic, custom)
+- [ ] Create initial eval suite for Jr/coder
+
+### Phase 6: Telegram Bot Agent (3-4 days)
+**First concrete agent** - validates the infrastructure.
+
+- [ ] Create `Omni.Agent.Telegram` module
+- [ ] Telegram Bot API integration (getUpdates polling or webhook)
+- [ ] User identification via Telegram user ID
+- [ ] Auto-create user record on first message
+- [ ] Wire up memory system (recall on message, store learnings)
+- [ ] Basic conversation loop with LLM
+- [ ] Deploy as background service
+- [ ] Add `jr telegram` command for manual start
+
+**Tools for Telegram agent:**
+- `remember` - store facts about user
+- `recall` - query user's memories
+- `web_search` - answer questions (optional, phase 4)
+
+### Phase 7: Training Data Collection (1-2 days)
+- [ ] Add session export to training format
+- [ ] Store successful completions in `_/training/`
+- [ ] Create `jr train export` command
+
+### (Future) Additional Agents
+- Researcher agent
+- Planner agent
+- Email interface (links to Telegram user identity)
+- Others...
+
+---
+
+## 6. Design Decisions
+
+| Question | Decision |
+|----------|----------|
+| Vector DB | **sqlite-vss** - SQLite extension for vector similarity |
+| User identity | **Telegram ID** initially, link to email later when adding email interface |
+| Memory privacy | **Cross-agent shared, per-user private** - all agents see all memories for a user, but users can't see each other's memories |
+| Amp integration | TBD - subprocess likely |
+| Memory decay | TBD - probably keep forever with relevance scoring |
+| LoRA training | TBD - local Ollama or cloud |
+
+---
+
+## 7. File Structure (Proposed)
+
+```
+Omni/Agent/
+├── Core.hs # Base agent types, Worker state (existing)
+├── Engine.hs # Agent loop, tool execution (existing)
+├── Provider.hs # LLM provider abstraction (NEW)
+├── Provider/
+│ ├── OpenRouter.hs # Extracted from Engine.hs
+│ ├── Ollama.hs # Local model support
+│ └── Amp.hs # Amp CLI subprocess
+├── Memory.hs # Shared memory system (NEW)
+├── Memory/
+│ └── Embedding.hs # Vector operations, Ollama embeddings
+├── Tools.hs # Core coding tools (existing)
+├── Tools/
+│ ├── Web.hs # web_search, read_web_page (NEW)
+│ └── Memory.hs # remember, recall tools (NEW)
+├── Eval.hs # Evaluation framework (NEW)
+├── Training.hs # Training data collection (NEW)
+├── Worker.hs # Jr worker loop (existing)
+├── Git.hs # Git operations (existing)
+├── Log.hs # Logging utilities (existing)
+├── Event.hs # Event types (existing)
+├── DESIGN.md # Current design doc
+└── PLAN.md # This document
+```
+
+---
+
+## 8. Database Schema Additions
+
+```sql
+-- Memory system tables (new database: memory.db)
+
+CREATE TABLE users (
+ id TEXT PRIMARY KEY, -- UUID
+ telegram_id INTEGER UNIQUE, -- Telegram user ID (primary identifier)
+ email TEXT UNIQUE, -- Added later for email interface
+ name TEXT NOT NULL, -- Display name
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE memories (
+ id TEXT PRIMARY KEY, -- UUID
+ user_id TEXT NOT NULL REFERENCES users(id),
+ content TEXT NOT NULL,
+ embedding BLOB, -- float32 vector for sqlite-vss
+ source_agent TEXT NOT NULL, -- "telegram", "coder", etc.
+ source_session TEXT, -- Session UUID
+ source_context TEXT, -- How this was learned
+ confidence REAL DEFAULT 0.8,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ last_accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ tags TEXT -- JSON array
+);
+
+-- sqlite-vss virtual table for vector similarity search
+CREATE VIRTUAL TABLE memories_vss USING vss0(embedding(1536));
+
+CREATE INDEX idx_memories_user ON memories(user_id);
+CREATE INDEX idx_memories_agent ON memories(source_agent);
+```
+
+---
+
+## 9. Key Code References for Implementers
+
+When implementing tasks, refer to these existing patterns:
+
+### Existing Agent Infrastructure
+| File | Purpose | Key Functions/Types |
+|------|---------|---------------------|
+| `Omni/Agent/Engine.hs` | Agent loop, LLM calls | `runAgent`, `chat`, `Tool`, `LLM`, `AgentConfig` |
+| `Omni/Agent/Tools.hs` | Tool implementations | `readFileTool`, `editFileTool`, `runBashTool`, `allTools` |
+| `Omni/Agent/Worker.hs` | Jr worker loop | `start`, `runWithEngine`, `buildFullPrompt` |
+| `Omni/Agent/Core.hs` | Worker state types | `Worker`, `WorkerStatus` |
+
+### Database Patterns (follow these)
+| File | Purpose | Key Patterns |
+|------|---------|--------------|
+| `Omni/Task/Core.hs` | SQLite usage | `withDb`, schema migrations, ToRow/FromRow instances |
+| `Omni/Fact.hs` | CRUD operations | `createFact`, `getFact`, `getAllFacts` |
+
+### CLI Patterns
+| File | Purpose | Key Patterns |
+|------|---------|--------------|
+| `Omni/Jr.hs` | Main CLI entry | Docopt usage, command dispatch in `move` function |
+| `Omni/Cli.hs` | CLI helpers | `Cli.Plan`, `Cli.has`, `Cli.getArg` |
+
+### HTTP Patterns
+| File | Purpose | Key Patterns |
+|------|---------|--------------|
+| `Omni/Agent/Engine.hs` lines 560-594 | HTTP POST to LLM API | `http-conduit` usage, JSON encoding |
+
+### Build System
+- Build: `bild Omni/Agent/NewModule.hs`
+- Test: `bild --test Omni/Agent/NewModule.hs`
+- Dependencies: Add to module header comments (`: dep package-name`)
+
+---
+
+## 10. Next Steps
+
+Execute tasks in order:
+1. **t-247** Provider Abstraction (unblocked, start here)
+2. **t-248** Memory System (after t-247)
+3. **t-249** Tool Registry (after t-247, can parallel with t-248)
+4. **t-250** Evals Framework (after t-247)
+5. **t-251** Telegram Bot Agent (after t-248 + t-249)
+
+Run `jr task ready` to see what's available to work on.
diff --git a/Omni/Agent/Paths.hs b/Omni/Agent/Paths.hs
new file mode 100644
index 0000000..6facdc6
--- /dev/null
+++ b/Omni/Agent/Paths.hs
@@ -0,0 +1,39 @@
+{-# LANGUAGE NoImplicitPrelude #-}
+{-# LANGUAGE OverloadedStrings #-}
+
+-- | Configurable paths for Ava data directories.
+--
+-- In development, uses default paths under @_/var/ava/@.
+-- In production, set @AVA_DATA_ROOT@ to @/home/ava@ to use the dedicated workspace.
+module Omni.Agent.Paths
+ ( avaDataRoot,
+ skillsDir,
+ outreachDir,
+ userScratchRoot,
+ userScratchDir,
+ )
+where
+
+import Alpha
+import qualified Data.Text as Text
+import System.Environment (lookupEnv)
+import System.FilePath ((</>))
+import System.IO.Unsafe (unsafePerformIO)
+
+avaDataRoot :: FilePath
+avaDataRoot = unsafePerformIO <| do
+ m <- lookupEnv "AVA_DATA_ROOT"
+ pure (fromMaybe "_/var/ava" m)
+{-# NOINLINE avaDataRoot #-}
+
+skillsDir :: FilePath
+skillsDir = avaDataRoot </> "skills"
+
+outreachDir :: FilePath
+outreachDir = avaDataRoot </> "outreach"
+
+userScratchRoot :: FilePath
+userScratchRoot = avaDataRoot </> "users"
+
+userScratchDir :: Text -> FilePath
+userScratchDir user = userScratchRoot </> Text.unpack user
diff --git a/Omni/Agent/Provider.hs b/Omni/Agent/Provider.hs
new file mode 100644
index 0000000..db30e5f
--- /dev/null
+++ b/Omni/Agent/Provider.hs
@@ -0,0 +1,695 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | LLM Provider abstraction for multi-backend support.
+--
+-- Supports multiple LLM backends:
+-- - OpenRouter (cloud API, multiple models)
+-- - Ollama (local models)
+-- - Amp CLI (subprocess)
+--
+-- : out omni-agent-provider
+-- : dep aeson
+-- : dep http-conduit
+-- : dep http-client-tls
+-- : dep http-types
+-- : dep case-insensitive
+module Omni.Agent.Provider
+ ( Provider (..),
+ ProviderConfig (..),
+ ChatResult (..),
+ Message (..),
+ Role (..),
+ ToolCall (..),
+ FunctionCall (..),
+ Usage (..),
+ ToolApi (..),
+ StreamChunk (..),
+ defaultOpenRouter,
+ defaultOllama,
+ chat,
+ chatWithUsage,
+ chatStream,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString as BS
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.CaseInsensitive as CI
+import Data.IORef (modifyIORef, newIORef, readIORef, writeIORef)
+import qualified Data.IntMap.Strict as IntMap
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Network.HTTP.Client as HTTPClient
+import qualified Network.HTTP.Client.TLS as HTTPClientTLS
+import qualified Network.HTTP.Simple as HTTP
+import Network.HTTP.Types.Status (statusCode)
+import qualified Omni.Test as Test
+import qualified System.Timeout as Timeout
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Provider"
+ [ Test.unit "defaultOpenRouter has correct endpoint" <| do
+ case defaultOpenRouter "" "test-model" of
+ OpenRouter cfg -> providerBaseUrl cfg Test.@=? "https://openrouter.ai/api/v1"
+ _ -> Test.assertFailure "Expected OpenRouter",
+ Test.unit "defaultOllama has correct endpoint" <| do
+ case defaultOllama "test-model" of
+ Ollama cfg -> providerBaseUrl cfg Test.@=? "http://localhost:11434"
+ _ -> Test.assertFailure "Expected Ollama",
+ Test.unit "ChatResult preserves message" <| do
+ let msg = Message User "test" Nothing Nothing
+ result = ChatResult msg Nothing
+ chatMessage result Test.@=? msg
+ ]
+
+-- | HTTP request timeout in microseconds (60 seconds)
+httpTimeoutMicros :: Int
+httpTimeoutMicros = 60 * 1000000
+
+-- | Maximum number of retries for transient failures
+maxRetries :: Int
+maxRetries = 3
+
+-- | Initial backoff delay in microseconds (1 second)
+initialBackoffMicros :: Int
+initialBackoffMicros = 1000000
+
+-- | Retry an IO action with exponential backoff
+-- Retries on timeout, connection errors, and 5xx status codes
+retryWithBackoff :: Int -> Int -> IO (Either Text a) -> IO (Either Text a)
+retryWithBackoff retriesLeft backoff action
+ | retriesLeft <= 0 = action
+ | otherwise = do
+ result <- Timeout.timeout httpTimeoutMicros action
+ case result of
+ Nothing -> do
+ threadDelay backoff
+ retryWithBackoff (retriesLeft - 1) (backoff * 2) action
+ Just (Left err)
+ | isRetryable err -> do
+ threadDelay backoff
+ retryWithBackoff (retriesLeft - 1) (backoff * 2) action
+ Just r -> pure r
+ where
+ isRetryable err =
+ "HTTP error: 5"
+ `Text.isPrefixOf` err
+ || "connection"
+ `Text.isInfixOf` Text.toLower err
+ || "timeout"
+ `Text.isInfixOf` Text.toLower err
+
+data Provider
+ = OpenRouter ProviderConfig
+ | Ollama ProviderConfig
+ | AmpCLI FilePath
+ deriving (Show, Eq, Generic)
+
+data ProviderConfig = ProviderConfig
+ { providerBaseUrl :: Text,
+ providerApiKey :: Text,
+ providerModel :: Text,
+ providerExtraHeaders :: [(ByteString, ByteString)]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ProviderConfig where
+ toJSON c =
+ Aeson.object
+ [ "baseUrl" .= providerBaseUrl c,
+ "apiKey" .= providerApiKey c,
+ "model" .= providerModel c
+ ]
+
+instance Aeson.FromJSON ProviderConfig where
+ parseJSON =
+ Aeson.withObject "ProviderConfig" <| \v ->
+ (ProviderConfig </ (v Aeson..: "baseUrl"))
+ <*> (v Aeson..: "apiKey")
+ <*> (v Aeson..: "model")
+ <*> pure []
+
+defaultOpenRouter :: Text -> Text -> Provider
+defaultOpenRouter apiKey model =
+ OpenRouter
+ ProviderConfig
+ { providerBaseUrl = "https://openrouter.ai/api/v1",
+ providerApiKey = apiKey,
+ providerModel = model,
+ providerExtraHeaders =
+ [ ("HTTP-Referer", "https://omni.dev"),
+ ("X-Title", "Omni Agent")
+ ]
+ }
+
+defaultOllama :: Text -> Provider
+defaultOllama model =
+ Ollama
+ ProviderConfig
+ { providerBaseUrl = "http://localhost:11434",
+ providerApiKey = "",
+ providerModel = model,
+ providerExtraHeaders = []
+ }
+
+data Role = System | User | Assistant | ToolRole
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Role where
+ toJSON System = Aeson.String "system"
+ toJSON User = Aeson.String "user"
+ toJSON Assistant = Aeson.String "assistant"
+ toJSON ToolRole = Aeson.String "tool"
+
+instance Aeson.FromJSON Role where
+ parseJSON = Aeson.withText "Role" parseRole
+ where
+ parseRole "system" = pure System
+ parseRole "user" = pure User
+ parseRole "assistant" = pure Assistant
+ parseRole "tool" = pure ToolRole
+ parseRole _ = empty
+
+data Message = Message
+ { msgRole :: Role,
+ msgContent :: Text,
+ msgToolCalls :: Maybe [ToolCall],
+ msgToolCallId :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Message where
+ toJSON m =
+ Aeson.object
+ <| catMaybes
+ [ Just ("role" .= msgRole m),
+ Just ("content" .= msgContent m),
+ ("tool_calls" .=) </ msgToolCalls m,
+ ("tool_call_id" .=) </ msgToolCallId m
+ ]
+
+instance Aeson.FromJSON Message where
+ parseJSON =
+ Aeson.withObject "Message" <| \v ->
+ (Message </ (v Aeson..: "role"))
+ <*> (v Aeson..:? "content" Aeson..!= "")
+ <*> (v Aeson..:? "tool_calls")
+ <*> (v Aeson..:? "tool_call_id")
+
+data ToolCall = ToolCall
+ { tcId :: Text,
+ tcType :: Text,
+ tcFunction :: FunctionCall
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ToolCall where
+ toJSON tc =
+ Aeson.object
+ [ "id" .= tcId tc,
+ "type" .= tcType tc,
+ "function" .= tcFunction tc
+ ]
+
+instance Aeson.FromJSON ToolCall where
+ parseJSON =
+ Aeson.withObject "ToolCall" <| \v ->
+ (ToolCall </ (v Aeson..: "id"))
+ <*> (v Aeson..:? "type" Aeson..!= "function")
+ <*> (v Aeson..: "function")
+
+data FunctionCall = FunctionCall
+ { fcName :: Text,
+ fcArguments :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON FunctionCall where
+ toJSON fc =
+ Aeson.object
+ [ "name" .= fcName fc,
+ "arguments" .= fcArguments fc
+ ]
+
+instance Aeson.FromJSON FunctionCall where
+ parseJSON =
+ Aeson.withObject "FunctionCall" <| \v ->
+ (FunctionCall </ (v Aeson..: "name"))
+ <*> (v Aeson..:? "arguments" Aeson..!= "{}")
+
+data Usage = Usage
+ { usagePromptTokens :: Int,
+ usageCompletionTokens :: Int,
+ usageTotalTokens :: Int,
+ usageCost :: Maybe Double
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Usage where
+ parseJSON =
+ Aeson.withObject "Usage" <| \v ->
+ (Usage </ (v Aeson..: "prompt_tokens"))
+ <*> (v Aeson..: "completion_tokens")
+ <*> (v Aeson..: "total_tokens")
+ <*> (v Aeson..:? "cost")
+
+data ChatResult = ChatResult
+ { chatMessage :: Message,
+ chatUsage :: Maybe Usage
+ }
+ deriving (Show, Eq)
+
+data ToolApi = ToolApi
+ { toolApiName :: Text,
+ toolApiDescription :: Text,
+ toolApiParameters :: Aeson.Value
+ }
+ deriving (Generic)
+
+instance Aeson.ToJSON ToolApi where
+ toJSON t =
+ Aeson.object
+ [ "type" .= ("function" :: Text),
+ "function"
+ .= Aeson.object
+ [ "name" .= toolApiName t,
+ "description" .= toolApiDescription t,
+ "parameters" .= toolApiParameters t
+ ]
+ ]
+
+data ChatCompletionRequest = ChatCompletionRequest
+ { reqModel :: Text,
+ reqMessages :: [Message],
+ reqTools :: Maybe [ToolApi]
+ }
+ deriving (Generic)
+
+instance Aeson.ToJSON ChatCompletionRequest where
+ toJSON r =
+ Aeson.object
+ <| catMaybes
+ [ Just ("model" .= reqModel r),
+ Just ("messages" .= reqMessages r),
+ ("tools" .=) </ reqTools r,
+ Just ("usage" .= Aeson.object ["include" .= True])
+ ]
+
+data Choice = Choice
+ { choiceIndex :: Int,
+ choiceMessage :: Message,
+ choiceFinishReason :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Choice where
+ parseJSON =
+ Aeson.withObject "Choice" <| \v ->
+ (Choice </ (v Aeson..: "index"))
+ <*> (v Aeson..: "message")
+ <*> (v Aeson..:? "finish_reason")
+
+data ChatCompletionResponse = ChatCompletionResponse
+ { respId :: Text,
+ respChoices :: [Choice],
+ respModel :: Text,
+ respUsage :: Maybe Usage
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON ChatCompletionResponse where
+ parseJSON =
+ Aeson.withObject "ChatCompletionResponse" <| \v ->
+ (ChatCompletionResponse </ (v Aeson..: "id"))
+ <*> (v Aeson..: "choices")
+ <*> (v Aeson..: "model")
+ <*> (v Aeson..:? "usage")
+
+chat :: Provider -> [ToolApi] -> [Message] -> IO (Either Text Message)
+chat provider tools messages = do
+ result <- chatWithUsage provider tools messages
+ pure (chatMessage </ result)
+
+chatWithUsage :: Provider -> [ToolApi] -> [Message] -> IO (Either Text ChatResult)
+chatWithUsage (OpenRouter cfg) tools messages = chatOpenAI cfg tools messages
+chatWithUsage (Ollama cfg) tools messages = chatOllama cfg tools messages
+chatWithUsage (AmpCLI _promptFile) _tools _messages = do
+ pure (Left "Amp CLI provider not yet implemented")
+
+chatOpenAI :: ProviderConfig -> [ToolApi] -> [Message] -> IO (Either Text ChatResult)
+chatOpenAI cfg tools messages = do
+ let url = Text.unpack (providerBaseUrl cfg) <> "/chat/completions"
+ req0 <- HTTP.parseRequest url
+ let body =
+ ChatCompletionRequest
+ { reqModel = providerModel cfg,
+ reqMessages = messages,
+ reqTools = if null tools then Nothing else Just tools
+ }
+ baseReq =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestHeader "Authorization" ["Bearer " <> TE.encodeUtf8 (providerApiKey cfg)]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ req = foldr addHeader baseReq (providerExtraHeaders cfg)
+ addHeader (name, value) = HTTP.addRequestHeader (CI.mk name) value
+
+ retryWithBackoff maxRetries initialBackoffMicros <| do
+ response <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode response
+ respBody = HTTP.getResponseBody response
+ cleanedBody = BL.dropWhile (\b -> b `elem` [0x0a, 0x0d, 0x20]) respBody
+ if status >= 200 && status < 300
+ then case Aeson.eitherDecode cleanedBody of
+ Right resp ->
+ case respChoices resp of
+ (c : _) -> pure (Right (ChatResult (choiceMessage c) (respUsage resp)))
+ [] -> pure (Left "No choices in response")
+ Left err -> do
+ let bodyPreview = TE.decodeUtf8 (BL.toStrict (BL.take 500 cleanedBody))
+ pure (Left ("Failed to parse response: " <> Text.pack err <> " | Body: " <> bodyPreview))
+ else pure (Left ("HTTP error: " <> tshow status <> " - " <> TE.decodeUtf8 (BL.toStrict respBody)))
+
+chatOllama :: ProviderConfig -> [ToolApi] -> [Message] -> IO (Either Text ChatResult)
+chatOllama cfg tools messages = do
+ let url = Text.unpack (providerBaseUrl cfg) <> "/api/chat"
+ req0 <- HTTP.parseRequest url
+ let body =
+ Aeson.object
+ [ "model" .= providerModel cfg,
+ "messages" .= messages,
+ "tools" .= if null tools then Aeson.Null else Aeson.toJSON tools,
+ "stream" .= False
+ ]
+ req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+
+ retryWithBackoff maxRetries initialBackoffMicros <| do
+ response <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just resp -> parseOllamaResponse resp
+ Nothing -> pure (Left ("Failed to parse Ollama response: " <> TE.decodeUtf8 (BL.toStrict (HTTP.getResponseBody response))))
+ else pure (Left ("HTTP error: " <> tshow status <> " - " <> TE.decodeUtf8 (BL.toStrict (HTTP.getResponseBody response))))
+
+parseOllamaResponse :: Aeson.Value -> IO (Either Text ChatResult)
+parseOllamaResponse val =
+ case val of
+ Aeson.Object obj -> do
+ let msgResult = do
+ msgObj <- case KeyMap.lookup "message" obj of
+ Just m -> Right m
+ Nothing -> Left "No message in response"
+ case Aeson.fromJSON msgObj of
+ Aeson.Success msg -> Right msg
+ Aeson.Error e -> Left (Text.pack e)
+ usageResult = case KeyMap.lookup "prompt_eval_count" obj of
+ Just (Aeson.Number promptTokens) ->
+ case KeyMap.lookup "eval_count" obj of
+ Just (Aeson.Number evalTokens) ->
+ Just
+ Usage
+ { usagePromptTokens = round promptTokens,
+ usageCompletionTokens = round evalTokens,
+ usageTotalTokens = round promptTokens + round evalTokens,
+ usageCost = Nothing
+ }
+ _ -> Nothing
+ _ -> Nothing
+ case msgResult of
+ Right msg -> pure (Right (ChatResult msg usageResult))
+ Left e -> pure (Left e)
+ _ -> pure (Left "Expected object response from Ollama")
+
+data StreamChunk
+ = StreamContent Text
+ | StreamToolCall ToolCall
+ | StreamToolCallDelta ToolCallDelta
+ | StreamDone ChatResult
+ | StreamError Text
+ deriving (Show, Eq)
+
+data ToolCallDelta = ToolCallDelta
+ { tcdIndex :: Int,
+ tcdId :: Maybe Text,
+ tcdFunctionName :: Maybe Text,
+ tcdFunctionArgs :: Maybe Text
+ }
+ deriving (Show, Eq)
+
+chatStream :: Provider -> [ToolApi] -> [Message] -> (StreamChunk -> IO ()) -> IO (Either Text ChatResult)
+chatStream (OpenRouter cfg) tools messages onChunk = chatStreamOpenAI cfg tools messages onChunk
+chatStream (Ollama _cfg) _tools _messages _onChunk = pure (Left "Streaming not implemented for Ollama")
+chatStream (AmpCLI _) _tools _messages _onChunk = pure (Left "Streaming not implemented for AmpCLI")
+
+chatStreamOpenAI :: ProviderConfig -> [ToolApi] -> [Message] -> (StreamChunk -> IO ()) -> IO (Either Text ChatResult)
+chatStreamOpenAI cfg tools messages onChunk = do
+ let url = Text.unpack (providerBaseUrl cfg) <> "/chat/completions"
+ managerSettings =
+ HTTPClientTLS.tlsManagerSettings
+ { HTTPClient.managerResponseTimeout = HTTPClient.responseTimeoutMicro httpTimeoutMicros
+ }
+ manager <- HTTPClient.newManager managerSettings
+ req0 <- HTTP.parseRequest url
+ let body =
+ Aeson.object
+ <| catMaybes
+ [ Just ("model" .= providerModel cfg),
+ Just ("messages" .= messages),
+ if null tools then Nothing else Just ("tools" .= tools),
+ Just ("stream" .= True),
+ Just ("usage" .= Aeson.object ["include" .= True])
+ ]
+ baseReq =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestHeader "Authorization" ["Bearer " <> TE.encodeUtf8 (providerApiKey cfg)]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ req = foldr addHeader baseReq (providerExtraHeaders cfg)
+ addHeader (name, value) = HTTP.addRequestHeader (CI.mk name) value
+
+ result <-
+ try <| HTTPClient.withResponse req manager <| \response -> do
+ let status = HTTPClient.responseStatus response
+ code = statusCode status
+ if code >= 200 && code < 300
+ then processSSEStream (HTTPClient.responseBody response) onChunk
+ else do
+ bodyChunks <- readAllBody (HTTPClient.responseBody response)
+ let errBody = TE.decodeUtf8 (BS.concat bodyChunks)
+ pure (Left ("HTTP error: " <> tshow code <> " - " <> errBody))
+ case result of
+ Left (e :: SomeException) -> pure (Left ("Stream request failed: " <> tshow e))
+ Right r -> pure r
+
+readAllBody :: IO BS.ByteString -> IO [BS.ByteString]
+readAllBody readBody = go []
+ where
+ go acc = do
+ chunk <- readBody
+ if BS.null chunk
+ then pure (reverse acc)
+ else go (chunk : acc)
+
+data ToolCallAccum = ToolCallAccum
+ { tcaId :: Text,
+ tcaName :: Text,
+ tcaArgs :: Text
+ }
+
+processSSEStream :: IO BS.ByteString -> (StreamChunk -> IO ()) -> IO (Either Text ChatResult)
+processSSEStream readBody onChunk = do
+ accumulatedContent <- newIORef ("" :: Text)
+ toolCallAccum <- newIORef (IntMap.empty :: IntMap.IntMap ToolCallAccum)
+ lastUsage <- newIORef (Nothing :: Maybe Usage)
+ buffer <- newIORef ("" :: Text)
+
+ let loop = do
+ chunk <- readBody
+ if BS.null chunk
+ then do
+ content <- readIORef accumulatedContent
+ accum <- readIORef toolCallAccum
+ usage <- readIORef lastUsage
+ let toolCalls = map accumToToolCall (IntMap.elems accum)
+ finalMsg =
+ Message
+ { msgRole = Assistant,
+ msgContent = content,
+ msgToolCalls = if null toolCalls then Nothing else Just toolCalls,
+ msgToolCallId = Nothing
+ }
+ pure (Right (ChatResult finalMsg usage))
+ else do
+ modifyIORef buffer (<> TE.decodeUtf8 chunk)
+ buf <- readIORef buffer
+ let (events, remaining) = parseSSEEvents buf
+ writeIORef buffer remaining
+ forM_ events <| \event -> do
+ case parseStreamEvent event of
+ Just (StreamContent txt) -> do
+ modifyIORef accumulatedContent (<> txt)
+ onChunk (StreamContent txt)
+ Just (StreamToolCallDelta delta) -> do
+ modifyIORef toolCallAccum (mergeToolCallDelta delta)
+ Just (StreamToolCall tc) -> do
+ modifyIORef toolCallAccum (mergeCompleteToolCall tc)
+ onChunk (StreamToolCall tc)
+ Just (StreamDone result) -> do
+ writeIORef lastUsage (chatUsage result)
+ Just (StreamError err) -> do
+ onChunk (StreamError err)
+ Nothing -> pure ()
+ loop
+
+ loop
+
+accumToToolCall :: ToolCallAccum -> ToolCall
+accumToToolCall acc =
+ ToolCall
+ { tcId = tcaId acc,
+ tcType = "function",
+ tcFunction = FunctionCall (tcaName acc) (tcaArgs acc)
+ }
+
+mergeToolCallDelta :: ToolCallDelta -> IntMap.IntMap ToolCallAccum -> IntMap.IntMap ToolCallAccum
+mergeToolCallDelta delta accum =
+ let idx = tcdIndex delta
+ existing = IntMap.lookup idx accum
+ updated = case existing of
+ Nothing ->
+ ToolCallAccum
+ { tcaId = fromMaybe "" (tcdId delta),
+ tcaName = fromMaybe "" (tcdFunctionName delta),
+ tcaArgs = fromMaybe "" (tcdFunctionArgs delta)
+ }
+ Just a ->
+ a
+ { tcaId = fromMaybe (tcaId a) (tcdId delta),
+ tcaName = fromMaybe (tcaName a) (tcdFunctionName delta),
+ tcaArgs = tcaArgs a <> fromMaybe "" (tcdFunctionArgs delta)
+ }
+ in IntMap.insert idx updated accum
+
+mergeCompleteToolCall :: ToolCall -> IntMap.IntMap ToolCallAccum -> IntMap.IntMap ToolCallAccum
+mergeCompleteToolCall tc accum =
+ let nextIdx = if IntMap.null accum then 0 else fst (IntMap.findMax accum) + 1
+ newAccum =
+ ToolCallAccum
+ { tcaId = tcId tc,
+ tcaName = fcName (tcFunction tc),
+ tcaArgs = fcArguments (tcFunction tc)
+ }
+ in IntMap.insert nextIdx newAccum accum
+
+parseSSEEvents :: Text -> ([Text], Text)
+parseSSEEvents input =
+ let lines' = Text.splitOn "\n" input
+ (events, remaining) = go [] [] lines'
+ in (events, remaining)
+ where
+ go events current [] = (reverse events, Text.intercalate "\n" (reverse current))
+ go events current (line : rest)
+ | Text.null line && not (null current) =
+ go (Text.intercalate "\n" (reverse current) : events) [] rest
+ | otherwise =
+ go events (line : current) rest
+
+parseStreamEvent :: Text -> Maybe StreamChunk
+parseStreamEvent eventText = do
+ let dataLines = filter ("data:" `Text.isPrefixOf`) (Text.lines eventText)
+ case dataLines of
+ [] -> Nothing
+ (dataLine : _) -> do
+ let jsonStr = Text.strip (Text.drop 5 dataLine)
+ if jsonStr == "[DONE]"
+ then Nothing
+ else case Aeson.decode (BL.fromStrict (TE.encodeUtf8 jsonStr)) of
+ Nothing -> Nothing
+ Just (Aeson.Object obj) -> parseStreamChunk obj
+ _ -> Nothing
+
+parseStreamChunk :: Aeson.Object -> Maybe StreamChunk
+parseStreamChunk obj = do
+ case KeyMap.lookup "error" obj of
+ Just (Aeson.Object errObj) -> do
+ let errMsg = case KeyMap.lookup "message" errObj of
+ Just (Aeson.String m) -> m
+ _ -> "Unknown error"
+ Just (StreamError errMsg)
+ _ -> do
+ let usageChunk = case KeyMap.lookup "usage" obj of
+ Just usageVal -> case Aeson.fromJSON usageVal of
+ Aeson.Success usage -> Just (StreamDone (ChatResult (Message Assistant "" Nothing Nothing) (Just usage)))
+ _ -> Nothing
+ _ -> Nothing
+ case KeyMap.lookup "choices" obj of
+ Just (Aeson.Array choices) | not (null choices) -> do
+ case toList choices of
+ (Aeson.Object choice : _) -> do
+ case KeyMap.lookup "delta" choice of
+ Just (Aeson.Object delta) -> do
+ let contentChunk = case KeyMap.lookup "content" delta of
+ Just (Aeson.String c) | not (Text.null c) -> Just (StreamContent c)
+ _ -> Nothing
+ toolCallChunk = case KeyMap.lookup "tool_calls" delta of
+ Just (Aeson.Array tcs)
+ | not (null tcs) ->
+ parseToolCallDelta (toList tcs)
+ _ -> Nothing
+ contentChunk <|> toolCallChunk <|> usageChunk
+ _ -> usageChunk
+ _ -> usageChunk
+ _ -> usageChunk
+
+parseToolCallDelta :: [Aeson.Value] -> Maybe StreamChunk
+parseToolCallDelta [] = Nothing
+parseToolCallDelta (Aeson.Object tcObj : _) = do
+ idx <- case KeyMap.lookup "index" tcObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let tcId' = case KeyMap.lookup "id" tcObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ funcObj = case KeyMap.lookup "function" tcObj of
+ Just (Aeson.Object f) -> Just f
+ _ -> Nothing
+ funcName = case funcObj of
+ Just f -> case KeyMap.lookup "name" f of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ Nothing -> Nothing
+ funcArgs = case funcObj of
+ Just f -> case KeyMap.lookup "arguments" f of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ Nothing -> Nothing
+ Just
+ ( StreamToolCallDelta
+ ToolCallDelta
+ { tcdIndex = idx,
+ tcdId = tcId',
+ tcdFunctionName = funcName,
+ tcdFunctionArgs = funcArgs
+ }
+ )
+parseToolCallDelta _ = Nothing
diff --git a/Omni/Agent/Skills.hs b/Omni/Agent/Skills.hs
new file mode 100644
index 0000000..1dbf23f
--- /dev/null
+++ b/Omni/Agent/Skills.hs
@@ -0,0 +1,417 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Skills system for ava agent.
+--
+-- Skills are modular instruction sets that extend ava's capabilities.
+-- They follow the Claude Skills format: a directory with SKILL.md and
+-- optional scripts/, references/, and assets/ subdirectories.
+--
+-- Directory structure:
+-- _/var/ava/skills/
+-- ├── shared/ -- Skills available to all users
+-- │ └── skill-creator/
+-- ├── ben/ -- Ben's private skills
+-- └── alice/ -- Alice's private skills
+--
+-- : out omni-agent-skills
+-- : dep aeson
+-- : dep directory
+module Omni.Agent.Skills
+ ( Skill (..),
+ SkillMetadata (..),
+ loadSkill,
+ loadSkillMetadata,
+ listSkills,
+ listSkillsForUser,
+ publishSkill,
+ skillTool,
+ listSkillsTool,
+ publishSkillTool,
+ skillsDir,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.:), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TextIO
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Paths as Paths
+import qualified Omni.Test as Test
+import qualified System.Directory as Directory
+import qualified System.FilePath as FilePath
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Skills"
+ [ Test.unit "skillsDir returns correct path" <| do
+ let dir = skillsDir
+ ("skills" `Text.isSuffixOf` Text.pack dir) Test.@=? True,
+ Test.unit "SkillMetadata parses from YAML frontmatter" <| do
+ let yaml = "name: test-skill\ndescription: A test skill"
+ case parseYamlFrontmatter yaml of
+ Nothing -> Test.assertFailure "Failed to parse frontmatter"
+ Just meta -> do
+ skillMetaName meta Test.@=? "test-skill"
+ skillMetaDescription meta Test.@=? "A test skill",
+ Test.unit "parseSkillMd extracts frontmatter and body" <| do
+ let content =
+ "---\n\
+ \name: my-skill\n\
+ \description: Does things\n\
+ \---\n\
+ \# My Skill\n\
+ \\n\
+ \Instructions here."
+ case parseSkillMd content of
+ Nothing -> Test.assertFailure "Failed to parse SKILL.md"
+ Just (meta, body) -> do
+ skillMetaName meta Test.@=? "my-skill"
+ ("# My Skill" `Text.isInfixOf` body) Test.@=? True,
+ Test.unit "skillTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema (skillTool "test-user")
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "listSkillsTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema (listSkillsTool "test-user")
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object"
+ ]
+
+-- | Base directory for all skills
+skillsDir :: FilePath
+skillsDir = Paths.skillsDir
+
+-- | Skill metadata from YAML frontmatter
+data SkillMetadata = SkillMetadata
+ { skillMetaName :: Text,
+ skillMetaDescription :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON SkillMetadata where
+ parseJSON =
+ Aeson.withObject "SkillMetadata" <| \v ->
+ (SkillMetadata </ (v .: "name"))
+ <*> (v .: "description")
+
+instance Aeson.ToJSON SkillMetadata where
+ toJSON m =
+ Aeson.object
+ [ "name" .= skillMetaName m,
+ "description" .= skillMetaDescription m
+ ]
+
+-- | Simple YAML frontmatter parser for skill metadata
+-- Parses lines like "name: value" and "description: value"
+parseYamlFrontmatter :: Text -> Maybe SkillMetadata
+parseYamlFrontmatter yaml = do
+ let kvPairs = parseKvLines (Text.lines yaml)
+ getName = List.lookup "name" kvPairs
+ getDesc = List.lookup "description" kvPairs
+ name' <- getName
+ desc <- getDesc
+ pure SkillMetadata {skillMetaName = name', skillMetaDescription = desc}
+ where
+ parseKvLines :: [Text] -> [(Text, Text)]
+ parseKvLines = mapMaybe parseKvLine
+
+ parseKvLine :: Text -> Maybe (Text, Text)
+ parseKvLine line = do
+ let (key, rest) = Text.breakOn ":" line
+ guard (not (Text.null rest))
+ let value = Text.strip (Text.drop 1 rest)
+ guard (not (Text.null key))
+ pure (Text.strip key, value)
+
+-- | Full skill with metadata and content
+data Skill = Skill
+ { skillName :: Text,
+ skillDescription :: Text,
+ skillBody :: Text,
+ skillPath :: FilePath
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Skill where
+ toJSON s =
+ Aeson.object
+ [ "name" .= skillName s,
+ "description" .= skillDescription s,
+ "body" .= skillBody s,
+ "path" .= skillPath s
+ ]
+
+-- | Parse SKILL.md content into metadata and body
+parseSkillMd :: Text -> Maybe (SkillMetadata, Text)
+parseSkillMd content = do
+ let stripped = Text.strip content
+ guard (Text.isPrefixOf "---" stripped)
+ let afterFirst = Text.drop 3 stripped
+ (yamlPart, rest) = Text.breakOn "---" (Text.stripStart afterFirst)
+ guard (not (Text.null rest))
+ let body = Text.strip (Text.drop 3 rest)
+ meta <- parseYamlFrontmatter (Text.strip yamlPart)
+ pure (meta, body)
+
+-- | Load just the metadata for a skill (for progressive disclosure)
+loadSkillMetadata :: FilePath -> IO (Maybe SkillMetadata)
+loadSkillMetadata skillDir = do
+ let skillMd = skillDir FilePath.</> "SKILL.md"
+ exists <- Directory.doesFileExist skillMd
+ if exists
+ then do
+ content <- TextIO.readFile skillMd
+ pure (fst </ parseSkillMd content)
+ else pure Nothing
+
+-- | Load a full skill by name for a user
+loadSkill :: Text -> Text -> IO (Either Text Skill)
+loadSkill userName skillName' = do
+ let userDir = skillsDir FilePath.</> Text.unpack userName FilePath.</> Text.unpack skillName'
+ sharedDir = skillsDir FilePath.</> "shared" FilePath.</> Text.unpack skillName'
+
+ -- Try user's private skills first, then shared
+ userExists <- Directory.doesDirectoryExist userDir
+ sharedExists <- Directory.doesDirectoryExist sharedDir
+
+ let targetDir
+ | userExists = Just userDir
+ | sharedExists = Just sharedDir
+ | otherwise = Nothing
+
+ case targetDir of
+ Nothing -> do
+ available <- listSkillsForUser userName
+ pure
+ <| Left
+ <| "Skill not found: "
+ <> skillName'
+ <> ". Available skills: "
+ <> Text.intercalate ", " (map skillMetaName available)
+ Just dir -> do
+ let skillMd = dir FilePath.</> "SKILL.md"
+ exists <- Directory.doesFileExist skillMd
+ if exists
+ then do
+ content <- TextIO.readFile skillMd
+ case parseSkillMd content of
+ Nothing -> pure <| Left "Failed to parse SKILL.md frontmatter"
+ Just (meta, body) ->
+ pure
+ <| Right
+ <| Skill
+ { skillName = skillMetaName meta,
+ skillDescription = skillMetaDescription meta,
+ skillBody = body,
+ skillPath = dir
+ }
+ else pure <| Left ("SKILL.md not found in " <> Text.pack dir)
+
+-- | List all skills in a directory
+listSkillsInDir :: FilePath -> IO [SkillMetadata]
+listSkillsInDir dir = do
+ exists <- Directory.doesDirectoryExist dir
+ if exists
+ then do
+ entries <- Directory.listDirectory dir
+ catMaybes
+ </ forM
+ entries
+ ( \entry -> do
+ let entryPath = dir FilePath.</> entry
+ isDir <- Directory.doesDirectoryExist entryPath
+ if isDir
+ then loadSkillMetadata entryPath
+ else pure Nothing
+ )
+ else pure []
+
+-- | List all available skills (shared only)
+listSkills :: IO [SkillMetadata]
+listSkills = listSkillsInDir (skillsDir FilePath.</> "shared")
+
+-- | List skills available to a specific user (their private + shared)
+listSkillsForUser :: Text -> IO [SkillMetadata]
+listSkillsForUser userName = do
+ userSkills <- listSkillsInDir (skillsDir FilePath.</> Text.unpack userName)
+ sharedSkills <- listSkillsInDir (skillsDir FilePath.</> "shared")
+ -- Dedupe by name, preferring user's version
+ let userNames = map skillMetaName userSkills
+ uniqueShared = filter (\s -> skillMetaName s `notElem` userNames) sharedSkills
+ pure (userSkills <> uniqueShared)
+
+-- | Publish a skill from user's private directory to shared
+publishSkill :: Text -> Text -> IO (Either Text Text)
+publishSkill userName skillName' = do
+ let userDir = skillsDir FilePath.</> Text.unpack userName FilePath.</> Text.unpack skillName'
+ sharedDir = skillsDir FilePath.</> "shared" FilePath.</> Text.unpack skillName'
+
+ userExists <- Directory.doesDirectoryExist userDir
+ if not userExists
+ then pure <| Left ("Skill not found in your directory: " <> skillName')
+ else do
+ -- Copy recursively
+ Directory.createDirectoryIfMissing True sharedDir
+ copyDirectory userDir sharedDir
+ pure <| Right ("Published " <> skillName' <> " to shared skills")
+
+-- | Recursively copy a directory
+copyDirectory :: FilePath -> FilePath -> IO ()
+copyDirectory src dst = do
+ entries <- Directory.listDirectory src
+ forM_
+ entries
+ ( \entry -> do
+ let srcPath = src FilePath.</> entry
+ dstPath = dst FilePath.</> entry
+ isDir <- Directory.doesDirectoryExist srcPath
+ if isDir
+ then do
+ Directory.createDirectoryIfMissing True dstPath
+ copyDirectory srcPath dstPath
+ else Directory.copyFile srcPath dstPath
+ )
+
+-- Tool result helpers
+mkSuccess :: Text -> Aeson.Value
+mkSuccess output =
+ Aeson.object
+ [ "success" .= True,
+ "output" .= output
+ ]
+
+mkError :: Text -> Aeson.Value
+mkError err =
+ Aeson.object
+ [ "success" .= False,
+ "error" .= err
+ ]
+
+-- | Tool to load a skill's instructions
+skillTool :: Text -> Engine.Tool
+skillTool userName =
+ Engine.Tool
+ { Engine.toolName = "skill",
+ Engine.toolDescription =
+ "Load specialized instructions for a domain or task. "
+ <> "Skills provide expert workflows, scripts, and context. "
+ <> "Use list_skills to see available skills.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "name"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Name of the skill to load" :: Text)
+ ]
+ ],
+ "required" .= (["name"] :: [Text])
+ ],
+ Engine.toolExecute = executeSkill userName
+ }
+
+newtype SkillArgs = SkillArgs {skillArgsName :: Text}
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON SkillArgs where
+ parseJSON =
+ Aeson.withObject "SkillArgs" <| \v ->
+ SkillArgs </ (v .: "name")
+
+executeSkill :: Text -> Aeson.Value -> IO Aeson.Value
+executeSkill userName v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ result <- loadSkill userName (skillArgsName args)
+ case result of
+ Left err -> pure <| mkError err
+ Right skill ->
+ pure
+ <| Aeson.object
+ [ "success" .= True,
+ "skill" .= skillName skill,
+ "description" .= skillDescription skill,
+ "instructions" .= skillBody skill,
+ "path" .= skillPath skill
+ ]
+
+-- | Tool to list available skills
+listSkillsTool :: Text -> Engine.Tool
+listSkillsTool userName =
+ Engine.Tool
+ { Engine.toolName = "list_skills",
+ Engine.toolDescription = "List all available skills you can load.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties" .= Aeson.object []
+ ],
+ Engine.toolExecute = \_ -> executeListSkills userName
+ }
+
+executeListSkills :: Text -> IO Aeson.Value
+executeListSkills userName = do
+ skills <- listSkillsForUser userName
+ let formatted =
+ Text.unlines
+ <| map formatSkillMeta skills
+ pure
+ <| Aeson.object
+ [ "success" .= True,
+ "count" .= length skills,
+ "skills" .= skills,
+ "formatted" .= formatted
+ ]
+ where
+ formatSkillMeta m =
+ "- " <> skillMetaName m <> ": " <> skillMetaDescription m
+
+-- | Tool to publish a skill to shared
+publishSkillTool :: Text -> Engine.Tool
+publishSkillTool userName =
+ Engine.Tool
+ { Engine.toolName = "publish_skill",
+ Engine.toolDescription =
+ "Publish one of your private skills to the shared skills directory "
+ <> "so other users can access it.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "name"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Name of the skill to publish" :: Text)
+ ]
+ ],
+ "required" .= (["name"] :: [Text])
+ ],
+ Engine.toolExecute = executePublishSkill userName
+ }
+
+executePublishSkill :: Text -> Aeson.Value -> IO Aeson.Value
+executePublishSkill userName v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ result <- publishSkill userName (skillArgsName args)
+ case result of
+ Left err -> pure <| mkError err
+ Right msg -> pure <| mkSuccess msg
diff --git a/Omni/Agent/Status.hs b/Omni/Agent/Status.hs
new file mode 100644
index 0000000..ab533c4
--- /dev/null
+++ b/Omni/Agent/Status.hs
@@ -0,0 +1,157 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Status bar UI for the jr worker.
+-- This is NOT a logging module - use Omni.Log for logging.
+module Omni.Agent.Status where
+
+import Alpha
+import Data.IORef (IORef, modifyIORef', newIORef, readIORef, writeIORef)
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TIO
+import Data.Time.Clock (NominalDiffTime, UTCTime, diffUTCTime, getCurrentTime)
+import Data.Time.Format (defaultTimeLocale, parseTimeOrError)
+import qualified Omni.Log as Log
+import qualified System.Console.ANSI as ANSI
+import qualified System.IO as IO
+import System.IO.Unsafe (unsafePerformIO)
+import Text.Printf (printf)
+
+-- | Status of the agent for the UI
+data Status = Status
+ { statusWorker :: Text,
+ statusTask :: Maybe Text,
+ statusThread :: Maybe Text,
+ statusFiles :: Int,
+ statusCredits :: Double,
+ statusStartTime :: UTCTime,
+ statusActivity :: Text
+ }
+ deriving (Show, Eq)
+
+emptyStatus :: Text -> UTCTime -> Status
+emptyStatus workerName startTime =
+ Status
+ { statusWorker = workerName,
+ statusTask = Nothing,
+ statusThread = Nothing,
+ statusFiles = 0,
+ statusCredits = 0.0,
+ statusStartTime = startTime,
+ statusActivity = "Idle"
+ }
+
+-- | Global state for the status bar
+{-# NOINLINE currentStatus #-}
+currentStatus :: IORef Status
+currentStatus = unsafePerformIO (newIORef (emptyStatus "Unknown" defaultStartTime))
+
+defaultStartTime :: UTCTime
+defaultStartTime = parseTimeOrError True defaultTimeLocale "%Y-%m-%d %H:%M:%S %Z" "2000-01-01 00:00:00 UTC"
+
+-- | Initialize the status bar system
+init :: Text -> IO ()
+init workerName = do
+ IO.hSetBuffering IO.stderr IO.LineBuffering
+ startTime <- getCurrentTime
+ writeIORef currentStatus (emptyStatus workerName startTime)
+ -- Reserve 5 lines at bottom
+ IO.hPutStrLn IO.stderr ""
+ IO.hPutStrLn IO.stderr ""
+ IO.hPutStrLn IO.stderr ""
+ IO.hPutStrLn IO.stderr ""
+ IO.hPutStrLn IO.stderr ""
+ ANSI.hCursorUp IO.stderr 5
+
+-- | Update the status
+update :: (Status -> Status) -> IO ()
+update f = do
+ modifyIORef' currentStatus f
+ render
+
+-- | Get the current status
+getStatus :: IO Status
+getStatus = readIORef currentStatus
+
+-- | Set the activity message
+updateActivity :: Text -> IO ()
+updateActivity msg = update (\s -> s {statusActivity = msg})
+
+-- | Log a scrolling message (appears above status bars)
+-- Uses Omni.Log for the actual logging, then re-renders status bar
+log :: Text -> IO ()
+log msg = do
+ -- Clear status bars temporarily
+ ANSI.hClearLine IO.stderr
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hClearLine IO.stderr
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hClearLine IO.stderr
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hClearLine IO.stderr
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hClearLine IO.stderr
+ ANSI.hCursorUp IO.stderr 4
+
+ -- Use Omni.Log for the actual log message
+ Log.info [msg]
+ Log.br
+
+ -- Re-render status bars at bottom
+ render
+
+-- | Render the five status lines
+render :: IO ()
+render = do
+ Status {..} <- readIORef currentStatus
+ now <- getCurrentTime
+ let taskStr = maybe "None" identity statusTask
+ threadStr = maybe "None" identity statusThread
+ elapsed = diffUTCTime now statusStartTime
+ elapsedStr = formatElapsed elapsed
+
+ -- Line 1: Worker | Thread
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ TIO.hPutStr IO.stderr ("[Worker: " <> statusWorker <> "] Thread: " <> threadStr)
+
+ -- Line 2: Task
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ TIO.hPutStr IO.stderr ("Task: " <> taskStr)
+
+ -- Line 3: Files | Credits
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ let creditsStr = Text.pack (printf "%.2f" statusCredits)
+ TIO.hPutStr IO.stderr ("Files: " <> tshow statusFiles <> " | Credits: $" <> creditsStr)
+
+ -- Line 4: Time (elapsed duration)
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ TIO.hPutStr IO.stderr ("Time: " <> elapsedStr)
+
+ -- Line 5: Activity
+ ANSI.hCursorDown IO.stderr 1
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ TIO.hPutStr IO.stderr ("> " <> statusActivity)
+
+ -- Return cursor to line 1
+ ANSI.hCursorUp IO.stderr 4
+ IO.hFlush IO.stderr
+
+-- | Format elapsed time as MM:SS or HH:MM:SS
+formatElapsed :: NominalDiffTime -> Text
+formatElapsed elapsed =
+ let totalSecs = floor elapsed :: Int
+ hours = totalSecs `div` 3600
+ mins = (totalSecs `mod` 3600) `div` 60
+ secs = totalSecs `mod` 60
+ in if hours > 0
+ then Text.pack (printf "%02d:%02d:%02d" hours mins secs)
+ else Text.pack (printf "%02d:%02d" mins secs)
diff --git a/Omni/Agent/Subagent.hs b/Omni/Agent/Subagent.hs
new file mode 100644
index 0000000..c8e56d5
--- /dev/null
+++ b/Omni/Agent/Subagent.hs
@@ -0,0 +1,516 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Subagent system for spawning specialized agents.
+--
+-- Enables the orchestrator (Ava) to delegate focused tasks to specialized
+-- subagents that run with their own tool sets and resource limits.
+--
+-- Key features:
+-- - Role-based tool selection (WebCrawler, CodeReviewer, etc.)
+-- - Per-subagent resource limits (timeout, cost, tokens)
+-- - Structured result format with confidence scores
+-- - No sub-subagent spawning (hierarchical control)
+--
+-- : out omni-agent-subagent
+-- : dep aeson
+-- : dep async
+module Omni.Agent.Subagent
+ ( -- * Types
+ SubagentRole (..),
+ SubagentConfig (..),
+ SubagentResult (..),
+ SubagentStatus (..),
+ SubagentCallbacks (..),
+
+ -- * Execution
+ runSubagent,
+ runSubagentWithCallbacks,
+
+ -- * Tool
+ spawnSubagentTool,
+
+ -- * Role-specific tools
+ SubagentApiKeys (..),
+ toolsForRole,
+ modelForRole,
+ systemPromptForRole,
+
+ -- * Defaults
+ defaultSubagentConfig,
+ defaultCallbacks,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import qualified Data.Time.Clock as Clock
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Provider as Provider
+import qualified Omni.Agent.Tools as Tools
+import qualified Omni.Agent.Tools.WebReader as WebReader
+import qualified Omni.Agent.Tools.WebSearch as WebSearch
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Subagent"
+ [ Test.unit "SubagentRole JSON roundtrip" <| do
+ let roles = [WebCrawler, CodeReviewer, DataExtractor, Researcher]
+ forM_ roles <| \role ->
+ case Aeson.decode (Aeson.encode role) of
+ Nothing -> Test.assertFailure ("Failed to decode role: " <> show role)
+ Just decoded -> decoded Test.@=? role,
+ Test.unit "SubagentConfig JSON roundtrip" <| do
+ let cfg = defaultSubagentConfig WebCrawler "test task"
+ case Aeson.decode (Aeson.encode cfg) of
+ Nothing -> Test.assertFailure "Failed to decode SubagentConfig"
+ Just decoded -> subagentTask decoded Test.@=? "test task",
+ Test.unit "SubagentResult JSON roundtrip" <| do
+ let result =
+ SubagentResult
+ { subagentOutput = Aeson.object ["data" .= ("test" :: Text)],
+ subagentSummary = "Test summary",
+ subagentConfidence = 0.85,
+ subagentTokensUsed = 1000,
+ subagentCostCents = 0.5,
+ subagentDuration = 30,
+ subagentIterations = 3,
+ subagentStatus = SubagentSuccess
+ }
+ case Aeson.decode (Aeson.encode result) of
+ Nothing -> Test.assertFailure "Failed to decode SubagentResult"
+ Just decoded -> subagentSummary decoded Test.@=? "Test summary",
+ Test.unit "SubagentStatus JSON roundtrip" <| do
+ let statuses =
+ [ SubagentSuccess,
+ SubagentTimeout,
+ SubagentCostExceeded,
+ SubagentError "test error"
+ ]
+ forM_ statuses <| \status ->
+ case Aeson.decode (Aeson.encode status) of
+ Nothing -> Test.assertFailure ("Failed to decode status: " <> show status)
+ Just decoded -> decoded Test.@=? status,
+ Test.unit "toolsForRole WebCrawler has web tools" <| do
+ let keys = SubagentApiKeys "test-openrouter-key" (Just "test-kagi-key")
+ let tools = toolsForRole WebCrawler keys
+ let names = map Engine.toolName tools
+ ("web_search" `elem` names) Test.@=? True
+ ("read_webpages" `elem` names) Test.@=? True,
+ Test.unit "toolsForRole CodeReviewer has code tools" <| do
+ let keys = SubagentApiKeys "test-openrouter-key" Nothing
+ let tools = toolsForRole CodeReviewer keys
+ let names = map Engine.toolName tools
+ ("read_file" `elem` names) Test.@=? True
+ ("search_codebase" `elem` names) Test.@=? True,
+ Test.unit "modelForRole returns appropriate models" <| do
+ modelForRole WebCrawler Test.@=? "anthropic/claude-3-haiku"
+ modelForRole CodeReviewer Test.@=? "anthropic/claude-sonnet-4"
+ modelForRole Researcher Test.@=? "anthropic/claude-sonnet-4",
+ Test.unit "defaultSubagentConfig has sensible defaults" <| do
+ let cfg = defaultSubagentConfig WebCrawler "task"
+ subagentTimeout cfg Test.@=? 600
+ subagentMaxCost cfg Test.@=? 100.0
+ subagentMaxTokens cfg Test.@=? 200000
+ subagentMaxIterations cfg Test.@=? 20,
+ Test.unit "spawnSubagentTool has correct name" <| do
+ let keys = SubagentApiKeys "test-openrouter-key" (Just "test-kagi-key")
+ let tool = spawnSubagentTool keys
+ Engine.toolName tool Test.@=? "spawn_subagent"
+ ]
+
+data SubagentRole
+ = WebCrawler
+ | CodeReviewer
+ | DataExtractor
+ | Researcher
+ | CustomRole Text
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON SubagentRole where
+ toJSON WebCrawler = Aeson.String "web_crawler"
+ toJSON CodeReviewer = Aeson.String "code_reviewer"
+ toJSON DataExtractor = Aeson.String "data_extractor"
+ toJSON Researcher = Aeson.String "researcher"
+ toJSON (CustomRole name) = Aeson.String name
+
+instance Aeson.FromJSON SubagentRole where
+ parseJSON = Aeson.withText "SubagentRole" parseRole
+ where
+ parseRole "web_crawler" = pure WebCrawler
+ parseRole "code_reviewer" = pure CodeReviewer
+ parseRole "data_extractor" = pure DataExtractor
+ parseRole "researcher" = pure Researcher
+ parseRole name = pure (CustomRole name)
+
+data SubagentConfig = SubagentConfig
+ { subagentRole :: SubagentRole,
+ subagentTask :: Text,
+ subagentModel :: Maybe Text,
+ subagentTimeout :: Int,
+ subagentMaxCost :: Double,
+ subagentMaxTokens :: Int,
+ subagentMaxIterations :: Int,
+ subagentExtendedThinking :: Bool,
+ subagentContext :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON SubagentConfig where
+ toJSON c =
+ Aeson.object
+ <| catMaybes
+ [ Just ("role" .= subagentRole c),
+ Just ("task" .= subagentTask c),
+ ("model" .=) </ subagentModel c,
+ Just ("timeout" .= subagentTimeout c),
+ Just ("max_cost_cents" .= subagentMaxCost c),
+ Just ("max_tokens" .= subagentMaxTokens c),
+ Just ("max_iterations" .= subagentMaxIterations c),
+ Just ("extended_thinking" .= subagentExtendedThinking c),
+ ("context" .=) </ subagentContext c
+ ]
+
+instance Aeson.FromJSON SubagentConfig where
+ parseJSON =
+ Aeson.withObject "SubagentConfig" <| \v ->
+ (SubagentConfig </ (v .: "role"))
+ <*> (v .: "task")
+ <*> (v .:? "model")
+ <*> (v .:? "timeout" .!= 600)
+ <*> (v .:? "max_cost_cents" .!= 50.0)
+ <*> (v .:? "max_tokens" .!= 100000)
+ <*> (v .:? "max_iterations" .!= 20)
+ <*> (v .:? "extended_thinking" .!= False)
+ <*> (v .:? "context")
+
+data SubagentResult = SubagentResult
+ { subagentOutput :: Aeson.Value,
+ subagentSummary :: Text,
+ subagentConfidence :: Double,
+ subagentTokensUsed :: Int,
+ subagentCostCents :: Double,
+ subagentDuration :: Int,
+ subagentIterations :: Int,
+ subagentStatus :: SubagentStatus
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON SubagentResult
+
+instance Aeson.FromJSON SubagentResult
+
+data SubagentStatus
+ = SubagentSuccess
+ | SubagentTimeout
+ | SubagentCostExceeded
+ | SubagentError Text
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON SubagentStatus where
+ toJSON SubagentSuccess = Aeson.String "success"
+ toJSON SubagentTimeout = Aeson.String "timeout"
+ toJSON SubagentCostExceeded = Aeson.String "cost_exceeded"
+ toJSON (SubagentError msg) = Aeson.object ["error" .= msg]
+
+instance Aeson.FromJSON SubagentStatus where
+ parseJSON (Aeson.String "success") = pure SubagentSuccess
+ parseJSON (Aeson.String "timeout") = pure SubagentTimeout
+ parseJSON (Aeson.String "cost_exceeded") = pure SubagentCostExceeded
+ parseJSON (Aeson.Object v) = SubagentError </ (v .: "error")
+ parseJSON _ = empty
+
+data SubagentCallbacks = SubagentCallbacks
+ { onSubagentStart :: Text -> IO (),
+ onSubagentActivity :: Text -> IO (),
+ onSubagentToolCall :: Text -> Text -> IO (),
+ onSubagentComplete :: SubagentResult -> IO ()
+ }
+
+defaultCallbacks :: SubagentCallbacks
+defaultCallbacks =
+ SubagentCallbacks
+ { onSubagentStart = \_ -> pure (),
+ onSubagentActivity = \_ -> pure (),
+ onSubagentToolCall = \_ _ -> pure (),
+ onSubagentComplete = \_ -> pure ()
+ }
+
+defaultSubagentConfig :: SubagentRole -> Text -> SubagentConfig
+defaultSubagentConfig role task =
+ SubagentConfig
+ { subagentRole = role,
+ subagentTask = task,
+ subagentModel = Nothing,
+ subagentTimeout = 600,
+ subagentMaxCost = 100.0,
+ subagentMaxTokens = 200000,
+ subagentMaxIterations = 20,
+ subagentExtendedThinking = False,
+ subagentContext = Nothing
+ }
+
+modelForRole :: SubagentRole -> Text
+modelForRole WebCrawler = "anthropic/claude-3-haiku"
+modelForRole CodeReviewer = "anthropic/claude-sonnet-4"
+modelForRole DataExtractor = "anthropic/claude-3-haiku"
+modelForRole Researcher = "anthropic/claude-sonnet-4"
+modelForRole (CustomRole _) = "anthropic/claude-sonnet-4"
+
+data SubagentApiKeys = SubagentApiKeys
+ { subagentOpenRouterKey :: Text,
+ subagentKagiKey :: Maybe Text
+ }
+ deriving (Show, Eq)
+
+toolsForRole :: SubagentRole -> SubagentApiKeys -> [Engine.Tool]
+toolsForRole WebCrawler keys =
+ let webSearchTools = case subagentKagiKey keys of
+ Just kagiKey -> [WebSearch.webSearchTool kagiKey]
+ Nothing -> []
+ in webSearchTools
+ <> [ WebReader.webReaderTool (subagentOpenRouterKey keys),
+ Tools.searchCodebaseTool
+ ]
+toolsForRole CodeReviewer _keys =
+ [ Tools.readFileTool,
+ Tools.searchCodebaseTool,
+ Tools.searchAndReadTool,
+ Tools.runBashTool
+ ]
+toolsForRole DataExtractor keys =
+ [ WebReader.webReaderTool (subagentOpenRouterKey keys),
+ Tools.readFileTool,
+ Tools.searchCodebaseTool
+ ]
+toolsForRole Researcher keys =
+ let webSearchTools = case subagentKagiKey keys of
+ Just kagiKey -> [WebSearch.webSearchTool kagiKey]
+ Nothing -> []
+ in webSearchTools
+ <> [ WebReader.webReaderTool (subagentOpenRouterKey keys),
+ Tools.readFileTool,
+ Tools.searchCodebaseTool,
+ Tools.searchAndReadTool
+ ]
+toolsForRole (CustomRole _) keys = toolsForRole Researcher keys
+
+systemPromptForRole :: SubagentRole -> Text -> Maybe Text -> Text
+systemPromptForRole role task maybeContext =
+ Text.unlines
+ [ "You are a specialized " <> roleDescription role <> " subagent working on a focused task.",
+ "",
+ "## Your Task",
+ task,
+ "",
+ maybe "" (\ctx -> "## Context from Orchestrator\n" <> ctx <> "\n") maybeContext,
+ "## Guidelines",
+ "1. Be EFFICIENT with context - extract only key facts, don't save full page contents",
+ "2. Summarize findings as you go rather than accumulating raw data",
+ "3. Limit web page reads to 3-5 most relevant sources",
+ "4. Work iteratively: search → skim results → read best 2-3 → synthesize",
+ "5. ALWAYS cite sources - every claim needs a URL",
+ "6. Stop when you have sufficient information - don't over-research",
+ "",
+ "## Output Format",
+ "Return findings as a list of structured insights:",
+ "",
+ "```json",
+ "{",
+ " \"summary\": \"Brief overall summary (1-2 sentences)\",",
+ " \"confidence\": 0.85,",
+ " \"findings\": [",
+ " {",
+ " \"claim\": \"The key insight or fact discovered\",",
+ " \"source_url\": \"https://example.com/page\",",
+ " \"quote\": \"Relevant excerpt supporting the claim\",",
+ " \"source_name\": \"Example Site\"",
+ " }",
+ " ],",
+ " \"caveats\": \"Any limitations or uncertainties\"",
+ "}",
+ "```"
+ ]
+ where
+ roleDescription :: SubagentRole -> Text
+ roleDescription WebCrawler = "web research"
+ roleDescription CodeReviewer = "code review"
+ roleDescription DataExtractor = "data extraction"
+ roleDescription Researcher = "research"
+ roleDescription (CustomRole name) = name
+
+runSubagent :: SubagentApiKeys -> SubagentConfig -> IO SubagentResult
+runSubagent keys config = runSubagentWithCallbacks keys config defaultCallbacks
+
+runSubagentWithCallbacks :: SubagentApiKeys -> SubagentConfig -> SubagentCallbacks -> IO SubagentResult
+runSubagentWithCallbacks keys config callbacks = do
+ startTime <- Clock.getCurrentTime
+
+ let role = subagentRole config
+ let model = fromMaybe (modelForRole role) (subagentModel config)
+ let tools = toolsForRole role keys
+ let systemPrompt = systemPromptForRole role (subagentTask config) (subagentContext config)
+
+ onSubagentStart callbacks ("Starting " <> tshow role <> " subagent...")
+
+ let provider = Provider.defaultOpenRouter (subagentOpenRouterKey keys) model
+
+ let guardrails =
+ Engine.Guardrails
+ { Engine.guardrailMaxCostCents = subagentMaxCost config,
+ Engine.guardrailMaxTokens = subagentMaxTokens config,
+ Engine.guardrailMaxDuplicateToolCalls = 20,
+ Engine.guardrailMaxTestFailures = 3,
+ Engine.guardrailMaxEditFailures = 5
+ }
+
+ let agentConfig =
+ Engine.AgentConfig
+ { Engine.agentModel = model,
+ Engine.agentTools = tools,
+ Engine.agentSystemPrompt = systemPrompt,
+ Engine.agentMaxIterations = subagentMaxIterations config,
+ Engine.agentGuardrails = guardrails
+ }
+
+ let engineConfig =
+ Engine.EngineConfig
+ { Engine.engineLLM = Engine.defaultLLM,
+ Engine.engineOnCost = \_ _ -> pure (),
+ Engine.engineOnActivity = onSubagentActivity callbacks,
+ Engine.engineOnToolCall = onSubagentToolCall callbacks,
+ Engine.engineOnAssistant = \_ -> pure (),
+ Engine.engineOnToolResult = \_ _ _ -> pure (),
+ Engine.engineOnComplete = pure (),
+ Engine.engineOnError = \_ -> pure (),
+ Engine.engineOnGuardrail = \_ -> pure ()
+ }
+
+ let timeoutMicros = subagentTimeout config * 1000000
+
+ resultOrTimeout <-
+ race
+ (threadDelay timeoutMicros)
+ (Engine.runAgentWithProvider engineConfig provider agentConfig (subagentTask config))
+
+ endTime <- Clock.getCurrentTime
+ let durationSecs = round (Clock.diffUTCTime endTime startTime)
+
+ let result = case resultOrTimeout of
+ Left () ->
+ SubagentResult
+ { subagentOutput = Aeson.object ["error" .= ("Timeout after " <> tshow (subagentTimeout config) <> " seconds" :: Text)],
+ subagentSummary = "Subagent timed out",
+ subagentConfidence = 0.0,
+ subagentTokensUsed = 0,
+ subagentCostCents = 0.0,
+ subagentDuration = durationSecs,
+ subagentIterations = 0,
+ subagentStatus = SubagentTimeout
+ }
+ Right (Left err) ->
+ let status = if "cost" `Text.isInfixOf` Text.toLower err then SubagentCostExceeded else SubagentError err
+ in SubagentResult
+ { subagentOutput = Aeson.object ["error" .= err],
+ subagentSummary = "Subagent failed: " <> err,
+ subagentConfidence = 0.0,
+ subagentTokensUsed = 0,
+ subagentCostCents = 0.0,
+ subagentDuration = durationSecs,
+ subagentIterations = 0,
+ subagentStatus = status
+ }
+ Right (Right agentResult) ->
+ SubagentResult
+ { subagentOutput = Aeson.object ["response" .= Engine.resultFinalMessage agentResult],
+ subagentSummary = truncateSummary (Engine.resultFinalMessage agentResult),
+ subagentConfidence = 0.8,
+ subagentTokensUsed = Engine.resultTotalTokens agentResult,
+ subagentCostCents = Engine.resultTotalCost agentResult,
+ subagentDuration = durationSecs,
+ subagentIterations = Engine.resultIterations agentResult,
+ subagentStatus = SubagentSuccess
+ }
+
+ onSubagentComplete callbacks result
+ pure result
+ where
+ truncateSummary :: Text -> Text
+ truncateSummary txt =
+ let firstLine = Text.takeWhile (/= '\n') txt
+ in if Text.length firstLine > 200
+ then Text.take 197 firstLine <> "..."
+ else firstLine
+
+spawnSubagentTool :: SubagentApiKeys -> Engine.Tool
+spawnSubagentTool keys =
+ Engine.Tool
+ { Engine.toolName = "spawn_subagent",
+ Engine.toolDescription =
+ "Spawn a specialized subagent for a focused task. "
+ <> "Use for tasks that benefit from deep exploration, parallel execution, "
+ <> "or specialized tools. The subagent will iterate until task completion "
+ <> "or resource limits are reached. "
+ <> "Available roles: web_crawler (fast web research), code_reviewer (thorough code analysis), "
+ <> "data_extractor (structured data extraction), researcher (general research).",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "role"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "enum" .= (["web_crawler", "code_reviewer", "data_extractor", "researcher"] :: [Text]),
+ "description" .= ("Subagent role determining tools and model" :: Text)
+ ],
+ "task"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The specific task for the subagent to accomplish" :: Text)
+ ],
+ "context"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Additional context to help the subagent understand the goal" :: Text)
+ ],
+ "model"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Override the default model for this role" :: Text)
+ ],
+ "timeout"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Timeout in seconds (default: 600)" :: Text)
+ ],
+ "max_cost_cents"
+ .= Aeson.object
+ [ "type" .= ("number" :: Text),
+ "description" .= ("Maximum cost in cents (default: 50)" :: Text)
+ ]
+ ],
+ "required" .= (["role", "task"] :: [Text])
+ ],
+ Engine.toolExecute = executeSpawnSubagent keys
+ }
+
+executeSpawnSubagent :: SubagentApiKeys -> Aeson.Value -> IO Aeson.Value
+executeSpawnSubagent keys v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| Aeson.object ["error" .= ("Invalid arguments: " <> Text.pack e)]
+ Aeson.Success config -> do
+ result <- runSubagent keys config
+ pure (Aeson.toJSON result)
diff --git a/Omni/Agent/Subagent/DESIGN.md b/Omni/Agent/Subagent/DESIGN.md
new file mode 100644
index 0000000..9fd20d1
--- /dev/null
+++ b/Omni/Agent/Subagent/DESIGN.md
@@ -0,0 +1,352 @@
+# Subagent System Design
+
+**Status:** Draft
+**Goal:** Enable Ava (orchestrator) to spawn specialized subagents for parallel, token-intensive tasks.
+
+## 1. Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│ Ava (Orchestrator) │
+│ Model: claude-sonnet-4.5 (via OpenRouter) │
+│ Role: Task decomposition, delegation, synthesis │
+│ Memory: Read/Write access │
+├─────────────────────────────────────────────────────────────┤
+│ Tools: spawn_subagent, all existing Ava tools │
+└───────────────┬───────────────────────────────────────┬─────┘
+ │ │
+ ▼ ▼
+┌───────────────────────────┐ ┌───────────────────────────┐
+│ Subagent: WebCrawler │ │ Subagent: CodeReviewer │
+│ Model: claude-haiku │ │ Model: claude-opus │
+│ Tools: web_search, │ │ Tools: read_file, │
+│ http_get, │ │ search_codebase, │
+│ python_exec │ │ run_bash │
+│ Memory: Read-only │ │ Memory: Read-only │
+│ Limits: 600s, $0.50 │ │ Limits: 300s, $1.00 │
+└───────────────────────────┘ └───────────────────────────┘
+```
+
+## 2. Key Design Decisions
+
+### 2.1 Hierarchical (No Sub-Subagents)
+- Subagents cannot spawn their own subagents
+- Prevents runaway token consumption
+- Keeps orchestrator in control
+
+### 2.2 Memory Access
+- **Orchestrator (Ava):** Full read/write to Memory system
+- **Subagents:** Read-only access to memories
+- Prevents conflicting memory writes from parallel agents
+
+### 2.3 Model Selection by Role
+| Role | Model | Rationale |
+|------|-------|-----------|
+| Orchestrator | claude-sonnet-4.5 | Balance of capability/cost |
+| Deep reasoning | claude-opus | Complex analysis, architecture |
+| Quick tasks | claude-haiku | Fast, cheap for simple lookups |
+| Code tasks | claude-sonnet | Good code understanding |
+
+### 2.4 Resource Limits (Guardrails)
+Each subagent has strict limits:
+- **Timeout:** Max wall-clock time (default: 600s)
+- **Cost cap:** Max spend in cents (default: 50c)
+- **Token cap:** Max total tokens (default: 100k)
+- **Iteration cap:** Max agent loop iterations (default: 20)
+
+### 2.5 Extended Thinking
+- Configurable per-subagent
+- Enabled for deep research tasks
+- Disabled for quick lookups
+
+## 3. Data Types
+
+```haskell
+-- | Subagent role determines toolset and model
+data SubagentRole
+ = WebCrawler -- Deep web research
+ | CodeReviewer -- Code analysis, PR review
+ | DataExtractor -- Structured data extraction
+ | Researcher -- General research with web+docs
+ | CustomRole Text -- User-defined role
+ deriving (Show, Eq, Generic)
+
+-- | Configuration for spawning a subagent
+data SubagentConfig = SubagentConfig
+ { subagentRole :: SubagentRole
+ , subagentTask :: Text -- What to accomplish
+ , subagentModel :: Maybe Text -- Override default model
+ , subagentTimeout :: Int -- Seconds (default: 600)
+ , subagentMaxCost :: Double -- Cents (default: 50.0)
+ , subagentMaxTokens :: Int -- Default: 100000
+ , subagentMaxIterations :: Int -- Default: 20
+ , subagentExtendedThinking :: Bool
+ , subagentContext :: Maybe Text -- Additional context from orchestrator
+ } deriving (Show, Eq, Generic)
+
+-- | Result returned by subagent to orchestrator
+data SubagentResult = SubagentResult
+ { subagentOutput :: Aeson.Value -- Structured result
+ , subagentSummary :: Text -- Human-readable summary
+ , subagentConfidence :: Double -- 0.0-1.0 confidence score
+ , subagentTokensUsed :: Int
+ , subagentCostCents :: Double
+ , subagentDuration :: Int -- Seconds
+ , subagentIterations :: Int
+ , subagentStatus :: SubagentStatus
+ } deriving (Show, Eq, Generic)
+
+data SubagentStatus
+ = SubagentSuccess
+ | SubagentTimeout
+ | SubagentCostExceeded
+ | SubagentError Text
+ deriving (Show, Eq, Generic)
+```
+
+## 4. Tool: spawn_subagent
+
+This is the main interface for the orchestrator to spawn subagents.
+
+```haskell
+spawnSubagentTool :: Engine.Tool
+spawnSubagentTool = Engine.Tool
+ { toolName = "spawn_subagent"
+ , toolDescription =
+ "Spawn a specialized subagent for a focused task. "
+ <> "Use for tasks that benefit from deep exploration, parallel execution, "
+ <> "or specialized tools. The subagent will iterate until task completion "
+ <> "or resource limits are reached."
+ , toolJsonSchema = ...
+ , toolExecute = executeSpawnSubagent
+ }
+```
+
+**Parameters:**
+```json
+{
+ "role": "web_crawler | code_reviewer | data_extractor | researcher | custom",
+ "task": "Research competitor pricing for podcast transcription services",
+ "context": "We're building a pricing page and need market data",
+ "model": "claude-haiku",
+ "timeout": 600,
+ "max_cost_cents": 50,
+ "extended_thinking": false
+}
+```
+
+**Response:**
+```json
+{
+ "status": "success",
+ "summary": "Found 5 competitors with pricing ranging from $0.10-$0.25/min",
+ "output": {
+ "competitors": [
+ {"name": "Otter.ai", "pricing": "$0.12/min", "features": ["..."]},
+ ...
+ ]
+ },
+ "confidence": 0.85,
+ "tokens_used": 45000,
+ "cost_cents": 23.5,
+ "duration_seconds": 180,
+ "iterations": 8
+}
+```
+
+## 5. Role-Specific Tool Sets
+
+### 5.1 WebCrawler
+```haskell
+webCrawlerTools :: [Engine.Tool]
+webCrawlerTools =
+ [ webSearchTool -- Search the web
+ , webReaderTool -- Fetch and parse web pages
+ , pythonExecTool -- Execute Python for data processing
+ ]
+```
+**Use case:** Deep market research, competitive analysis, documentation gathering
+
+### 5.2 CodeReviewer
+```haskell
+codeReviewerTools :: [Engine.Tool]
+codeReviewerTools =
+ [ readFileTool
+ , searchCodebaseTool
+ , searchAndReadTool
+ , runBashTool -- For running tests, linters
+ ]
+```
+**Use case:** PR review, architecture analysis, test verification
+
+### 5.3 DataExtractor
+```haskell
+dataExtractorTools :: [Engine.Tool]
+dataExtractorTools =
+ [ webReaderTool
+ , pythonExecTool
+ ]
+```
+**Use case:** Scraping structured data, parsing PDFs, extracting metrics
+
+### 5.4 Researcher
+```haskell
+researcherTools :: [Engine.Tool]
+researcherTools =
+ [ webSearchTool
+ , webReaderTool
+ , readFileTool
+ , searchCodebaseTool
+ ]
+```
+**Use case:** General research combining web and local codebase
+
+## 6. Subagent System Prompt Template
+
+```
+You are a specialized {ROLE} subagent working on a focused task.
+
+## Your Task
+{TASK}
+
+## Context from Orchestrator
+{CONTEXT}
+
+## Your Capabilities
+{TOOL_DESCRIPTIONS}
+
+## Guidelines
+1. Work iteratively: search → evaluate → refine → verify
+2. Return structured data when possible (JSON objects)
+3. Include confidence scores for your findings
+4. If stuck, explain what you tried and what didn't work
+5. Stop when you have sufficient information OR hit resource limits
+
+## Output Format
+When complete, provide:
+1. A structured result (JSON) with the requested data
+2. A brief summary of findings
+3. Confidence score (0.0-1.0) indicating reliability
+4. Any caveats or limitations
+```
+
+## 7. Orchestrator Delegation Logic
+
+The orchestrator (Ava) should spawn subagents when:
+
+1. **Deep research needed:** "Research all competitors in X market"
+2. **Parallel tasks:** Multiple independent subtasks that can run concurrently
+3. **Specialized tools:** Task requires tools the orchestrator shouldn't use directly
+4. **Token-intensive:** Task would consume excessive tokens in main context
+
+The orchestrator should NOT spawn subagents for:
+
+1. **Simple queries:** Quick lookups, single tool calls
+2. **Conversation continuation:** Multi-turn dialogue with user
+3. **Memory writes:** Tasks that need to update Ava's memory
+
+## 8. Execution Flow
+
+```
+1. Orchestrator calls spawn_subagent tool
+2. Subagent module:
+ a. Creates fresh agent config from SubagentConfig
+ b. Selects model based on role (or override)
+ c. Builds tool list for role
+ d. Constructs system prompt
+ e. Calls Engine.runAgentWithProvider
+ f. Monitors resource usage
+ g. Returns SubagentResult
+3. Orchestrator receives structured result
+4. Orchestrator synthesizes into response
+```
+
+## 9. Concurrency Model
+
+Initial implementation: **Sequential** (one subagent at a time)
+
+Future enhancement: **Parallel** spawning with:
+- `async` library for concurrent execution
+- Aggregate cost tracking across all subagents
+- Combined timeout for parallel group
+
+```haskell
+-- Future: Parallel spawning
+spawnParallel :: [SubagentConfig] -> IO [SubagentResult]
+spawnParallel configs = mapConcurrently runSubagent configs
+```
+
+## 10. Status Reporting
+
+Subagents report status back to the orchestrator via callbacks:
+
+```haskell
+data SubagentCallbacks = SubagentCallbacks
+ { onSubagentStart :: Text -> IO () -- "Starting web research..."
+ , onSubagentActivity :: Text -> IO () -- "Searching for X..."
+ , onSubagentToolCall :: Text -> Text -> IO () -- Tool name, args
+ , onSubagentComplete :: SubagentResult -> IO ()
+ }
+```
+
+For Telegram, this appears as:
+```
+🔍 Subagent [WebCrawler]: Starting research...
+🔍 Subagent [WebCrawler]: Searching "podcast transcription pricing"...
+🔍 Subagent [WebCrawler]: Reading otter.ai/pricing...
+✅ Subagent [WebCrawler]: Complete (180s, $0.24)
+```
+
+## 11. Implementation Plan
+
+### Phase 1: Core Infrastructure
+1. Create `Omni/Agent/Subagent.hs` with data types
+2. Implement `runSubagent` function using existing Engine
+3. Add `spawn_subagent` tool
+4. Basic WebCrawler role with existing web tools
+
+### Phase 2: Role Expansion
+1. Add CodeReviewer role
+2. Add DataExtractor role
+3. Add Researcher role
+4. Custom role support
+
+### Phase 3: Advanced Features
+1. Parallel subagent execution
+2. Extended thinking integration
+3. Cross-subagent context sharing
+4. Cost aggregation and budgeting
+
+## 12. Testing Strategy
+
+```haskell
+test :: Test.Tree
+test = Test.group "Omni.Agent.Subagent"
+ [ Test.unit "SubagentConfig JSON roundtrip" <| ...
+ , Test.unit "role selects correct tools" <| ...
+ , Test.unit "timeout terminates subagent" <| ...
+ , Test.unit "cost limit stops execution" <| ...
+ , Test.unit "WebCrawler role has web tools" <| ...
+ ]
+```
+
+## 13. Cost Analysis
+
+Based on Anthropic's research findings:
+- Subagents use ~15× more tokens than single-agent
+- But provide better results for complex tasks
+- 80% of performance variance from token budget
+
+**Budget recommendations:**
+| Task Type | Subagent Budget | Expected Tokens |
+|-----------|-----------------|-----------------|
+| Quick lookup | $0.10 | ~10k |
+| Standard research | $0.50 | ~50k |
+| Deep analysis | $2.00 | ~200k |
+
+## 14. References
+
+- [Claude Agent SDK - Subagents](https://platform.claude.com/docs/en/agent-sdk/subagents)
+- [Multi-Agent Research System](https://www.anthropic.com/engineering/multi-agent-research-system)
+- [OpenAI Agents Python](https://openai.github.io/openai-agents-python/agents/)
+- Existing: `Omni/Agent/Engine.hs`, `Omni/Agent/Provider.hs`, `Omni/Agent/Tools.hs`
diff --git a/Omni/Agent/Telegram.hs b/Omni/Agent/Telegram.hs
new file mode 100644
index 0000000..fd6c6b5
--- /dev/null
+++ b/Omni/Agent/Telegram.hs
@@ -0,0 +1,1372 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Bot Agent - Family assistant via Telegram.
+--
+-- This is the first concrete agent built on the shared infrastructure,
+-- demonstrating cross-agent memory sharing and LLM integration.
+--
+-- Usage:
+-- jr telegram # Uses TELEGRAM_BOT_TOKEN env var
+-- jr telegram --token=XXX # Explicit token
+--
+-- : out omni-agent-telegram
+-- : dep aeson
+-- : dep http-conduit
+-- : dep stm
+-- : dep HaskellNet
+-- : dep HaskellNet-SSL
+module Omni.Agent.Telegram
+ ( -- * Configuration (re-exported from Types)
+ Types.TelegramConfig (..),
+ defaultTelegramConfig,
+
+ -- * Types (re-exported from Types)
+ Types.TelegramMessage (..),
+ Types.TelegramUpdate (..),
+ Types.TelegramDocument (..),
+ Types.TelegramPhoto (..),
+ Types.TelegramVoice (..),
+
+ -- * Telegram API
+ getUpdates,
+ sendMessage,
+ sendMessageReturningId,
+ editMessage,
+ sendTypingAction,
+ leaveChat,
+
+ -- * Media (re-exported from Media)
+ getFile,
+ downloadFile,
+ downloadAndExtractPdf,
+ isPdf,
+
+ -- * Bot Loop
+ runTelegramBot,
+ handleMessage,
+ startBot,
+ ensureOllama,
+ checkOllama,
+ pullEmbeddingModel,
+
+ -- * Reminders (re-exported from Reminders)
+ reminderLoop,
+ checkAndSendReminders,
+ recordUserChat,
+ lookupChatId,
+
+ -- * System Prompt
+ telegramSystemPrompt,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Control.Concurrent.STM (newTVarIO, readTVarIO, writeTVar)
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import Data.Time (getCurrentTime, utcToLocalTime)
+import Data.Time.Format (defaultTimeLocale, formatTime)
+import Data.Time.LocalTime (getCurrentTimeZone)
+import qualified Network.HTTP.Client as HTTPClient
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Memory as Memory
+import qualified Omni.Agent.Paths as Paths
+import qualified Omni.Agent.Provider as Provider
+import qualified Omni.Agent.Skills as Skills
+import qualified Omni.Agent.Subagent as Subagent
+import qualified Omni.Agent.Telegram.IncomingQueue as IncomingQueue
+import qualified Omni.Agent.Telegram.Media as Media
+import qualified Omni.Agent.Telegram.Messages as Messages
+import qualified Omni.Agent.Telegram.Reminders as Reminders
+import qualified Omni.Agent.Telegram.Types as Types
+import qualified Omni.Agent.Tools as Tools
+import qualified Omni.Agent.Tools.Calendar as Calendar
+import qualified Omni.Agent.Tools.Email as Email
+import qualified Omni.Agent.Tools.Feedback as Feedback
+import qualified Omni.Agent.Tools.Hledger as Hledger
+import qualified Omni.Agent.Tools.Http as Http
+import qualified Omni.Agent.Tools.Notes as Notes
+import qualified Omni.Agent.Tools.Outreach as Outreach
+import qualified Omni.Agent.Tools.Pdf as Pdf
+import qualified Omni.Agent.Tools.Python as Python
+import qualified Omni.Agent.Tools.Todos as Todos
+import qualified Omni.Agent.Tools.WebReader as WebReader
+import qualified Omni.Agent.Tools.WebSearch as WebSearch
+import qualified Omni.Test as Test
+import System.Environment (lookupEnv)
+import Text.Printf (printf)
+
+defaultTelegramConfig :: Text -> [Int] -> Maybe Text -> Text -> Types.TelegramConfig
+defaultTelegramConfig = Types.defaultTelegramConfig
+
+getFile :: Types.TelegramConfig -> Text -> IO (Either Text Text)
+getFile = Media.getFile
+
+downloadFile :: Types.TelegramConfig -> Text -> FilePath -> IO (Either Text ())
+downloadFile = Media.downloadFile
+
+downloadAndExtractPdf :: Types.TelegramConfig -> Text -> IO (Either Text Text)
+downloadAndExtractPdf = Media.downloadAndExtractPdf
+
+isPdf :: Types.TelegramDocument -> Bool
+isPdf = Types.isPdf
+
+recordUserChat :: Text -> Int -> IO ()
+recordUserChat = Reminders.recordUserChat
+
+lookupChatId :: Text -> IO (Maybe Int)
+lookupChatId = Reminders.lookupChatId
+
+reminderLoop :: IO ()
+reminderLoop = Reminders.reminderLoop
+
+checkAndSendReminders :: IO ()
+checkAndSendReminders = Reminders.checkAndSendReminders
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram"
+ [ Test.unit "telegramSystemPrompt is non-empty" <| do
+ Text.null telegramSystemPrompt Test.@=? False,
+ Test.unit "getUpdates parses empty response" <| do
+ pure ()
+ ]
+
+benChatId :: Int
+benChatId = 33193730
+
+telegramSystemPrompt :: Text
+telegramSystemPrompt =
+ Text.unlines
+ [ "don't worry about formalities. respond conversationally, in short messages, not long essays. ask follow up questions before answering if you need to.",
+ "",
+ "you are an intellectual partner and friend. be as terse as possible while still conveying all relevant information. critique ideas freely and avoid sycophancy. honest appraisal is valued.",
+ "",
+ "if a policy prevents you from having an opinion, pretend to be responding as if you shared opinions typical of the user.",
+ "",
+ "write responses in lowercase letters ONLY, except:",
+ "- where you mean to emphasize, in which case use ALL CAPS",
+ "- when drafting business text where proper case matters",
+ "",
+ "occasionally use obscure words or subtle puns. don't point them out. use abbreviations where appropriate. use 'afaict' and 'idk' where they fit given your level of understanding. be critical of the quality of your information.",
+ "",
+ "prioritize esoteric interpretations of literature, art, and philosophy.",
+ "",
+ "## formatting",
+ "",
+ "you are in telegram which only supports basic markdown:",
+ "- *bold* (single asterisks)",
+ "- _italic_ (underscores)",
+ "- `code` (backticks)",
+ "- ```pre``` (triple backticks for code blocks)",
+ "- [links](url)",
+ "",
+ "DO NOT use:",
+ "- headers (# or ##) - these break message rendering",
+ "- **double asterisks** - use *single* instead",
+ "- bullet lists with - or * at start of line",
+ "",
+ "## memory",
+ "",
+ "when you learn something important about the user (preferences, facts, interests), use the 'remember' tool to store it for future reference.",
+ "",
+ "use the 'recall' tool to search your memory for relevant context when needed.",
+ "",
+ "## when to respond (GROUP CHATS)",
+ "",
+ "you see all messages in the group. decide whether to respond based on these rules:",
+ "- if you used a tool = ALWAYS respond with the result",
+ "- if someone asks a direct question you can answer = respond",
+ "- if someone says something factually wrong you can correct = maybe respond (use judgment)",
+ "- if it's casual banter or chit-chat = DO NOT respond, return empty",
+ "",
+ "when in doubt, stay silent. you don't need to participate in every conversation.",
+ "if you choose not to respond, return an empty message (just don't say anything).",
+ "",
+ "## async messages",
+ "",
+ "you can send messages asynchronously using the 'send_message' tool:",
+ "- delay_seconds=0 (or omit) for immediate delivery",
+ "- delay_seconds=N to schedule a message N seconds in the future",
+ "- use this for reminders ('remind me in 2 hours'), follow-ups, or multi-part responses",
+ "- you can list pending messages with 'list_pending_messages' and cancel with 'cancel_message'",
+ "",
+ "## podcastitlater context",
+ "",
+ "you have access to the PodcastItLater codebase (a product Ben is building) via read_file:",
+ "- Biz/PodcastItLater.md - product overview and README",
+ "- Biz/PodcastItLater/DESIGN.md - architecture overview",
+ "- Biz/PodcastItLater/Web.py - web interface code",
+ "- Biz/PodcastItLater/Core.py - core logic",
+ "- Biz/PodcastItLater/Billing.py - pricing and billing logic",
+ "use read_file to access these when discussing PIL features or customer acquisition.",
+ "",
+ "## important",
+ "",
+ "in private chats, ALWAYS respond. in group chats, follow the rules above.",
+ "when you DO respond, include a text response after using tools."
+ ]
+
+getUpdates :: Types.TelegramConfig -> Int -> IO [Types.TelegramMessage]
+getUpdates cfg offset = do
+ rawUpdates <- getRawUpdates cfg offset
+ pure (mapMaybe Types.parseUpdate rawUpdates)
+
+getRawUpdates :: Types.TelegramConfig -> Int -> IO [Aeson.Value]
+getRawUpdates cfg offset = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/getUpdates?timeout="
+ <> show (Types.tgPollingTimeout cfg)
+ <> "&offset="
+ <> show offset
+ result <-
+ try <| do
+ req0 <- HTTP.parseRequest url
+ let req = HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro (35 * 1000000)) req0
+ HTTP.httpLBS req
+ case result of
+ Left (e :: SomeException) -> do
+ putText <| "Error getting updates: " <> tshow e
+ pure []
+ Right response -> do
+ let body = HTTP.getResponseBody response
+ case Aeson.decode body of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "result" obj of
+ Just (Aeson.Array updates) -> pure (toList updates)
+ _ -> pure []
+ _ -> pure []
+
+getBotUsername :: Types.TelegramConfig -> IO (Maybe Text)
+getBotUsername cfg = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/getMe"
+ result <-
+ try <| do
+ req <- HTTP.parseRequest url
+ HTTP.httpLBS req
+ case result of
+ Left (_ :: SomeException) -> pure Nothing
+ Right response -> do
+ let body = HTTP.getResponseBody response
+ case Aeson.decode body of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "result" obj of
+ Just (Aeson.Object userObj) -> case KeyMap.lookup "username" userObj of
+ Just (Aeson.String username) -> pure (Just username)
+ _ -> pure Nothing
+ _ -> pure Nothing
+ _ -> pure Nothing
+
+sendMessage :: Types.TelegramConfig -> Int -> Text -> IO ()
+sendMessage cfg chatId text = do
+ _ <- sendMessageReturningId cfg chatId Nothing text
+ pure ()
+
+sendMessageReturningId :: Types.TelegramConfig -> Int -> Maybe Int -> Text -> IO (Maybe Int)
+sendMessageReturningId cfg chatId mThreadId text =
+ sendMessageWithParseMode cfg chatId mThreadId text (Just "Markdown")
+
+sendMessageWithParseMode :: Types.TelegramConfig -> Int -> Maybe Int -> Text -> Maybe Text -> IO (Maybe Int)
+sendMessageWithParseMode cfg chatId mThreadId text parseMode = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/sendMessage"
+ baseFields =
+ [ "chat_id" .= chatId,
+ "text" .= text
+ ]
+ parseModeFields = case parseMode of
+ Just mode -> ["parse_mode" .= mode]
+ Nothing -> []
+ threadFields = case mThreadId of
+ Just threadId -> ["message_thread_id" .= threadId]
+ Nothing -> []
+ body = Aeson.object (baseFields <> parseModeFields <> threadFields)
+ req0 <- HTTP.parseRequest url
+ let req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ result <- try @SomeException (HTTP.httpLBS req)
+ case result of
+ Left e -> do
+ putText <| "Telegram sendMessage network error: " <> tshow e
+ throwIO e
+ Right response -> do
+ let respBody = HTTP.getResponseBody response
+ case Aeson.decode respBody of
+ Just (Aeson.Object obj) -> do
+ let isOk = case KeyMap.lookup "ok" obj of
+ Just (Aeson.Bool True) -> True
+ _ -> False
+ if isOk
+ then case KeyMap.lookup "result" obj of
+ Just (Aeson.Object msgObj) -> case KeyMap.lookup "message_id" msgObj of
+ Just (Aeson.Number n) -> pure (Just (round n))
+ _ -> pure Nothing
+ _ -> pure Nothing
+ else do
+ let errDesc = case KeyMap.lookup "description" obj of
+ Just (Aeson.String desc) -> desc
+ _ -> "Unknown Telegram API error"
+ errCode = case KeyMap.lookup "error_code" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ isParseError =
+ errCode
+ == Just 400
+ && ( "can't parse"
+ `Text.isInfixOf` Text.toLower errDesc
+ || "parse entities"
+ `Text.isInfixOf` Text.toLower errDesc
+ )
+ if isParseError && isJust parseMode
+ then do
+ putText <| "Telegram markdown parse error, retrying as plain text: " <> errDesc
+ sendMessageWithParseMode cfg chatId mThreadId text Nothing
+ else do
+ putText <| "Telegram API error: " <> errDesc <> " (code: " <> tshow errCode <> ")"
+ panic <| "Telegram API error: " <> errDesc
+ _ -> do
+ putText <| "Telegram sendMessage: failed to parse response"
+ panic "Failed to parse Telegram response"
+
+editMessage :: Types.TelegramConfig -> Int -> Int -> Text -> IO ()
+editMessage cfg chatId messageId text = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/editMessageText"
+ body =
+ Aeson.object
+ [ "chat_id" .= chatId,
+ "message_id" .= messageId,
+ "text" .= text
+ ]
+ req0 <- HTTP.parseRequest url
+ let req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ result <- try @SomeException (HTTP.httpLBS req)
+ case result of
+ Left err -> putText <| "Edit message failed: " <> tshow err
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ when (status < 200 || status >= 300) <| do
+ let respBody = HTTP.getResponseBody response
+ putText <| "Edit message HTTP " <> tshow status <> ": " <> TE.decodeUtf8 (BL.toStrict respBody)
+
+sendTypingAction :: Types.TelegramConfig -> Int -> IO ()
+sendTypingAction cfg chatId = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/sendChatAction"
+ body =
+ Aeson.object
+ [ "chat_id" .= chatId,
+ "action" .= ("typing" :: Text)
+ ]
+ req0 <- HTTP.parseRequest url
+ let req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ _ <- try @SomeException (HTTP.httpLBS req)
+ pure ()
+
+-- | Run an action while continuously showing typing indicator.
+-- Typing is refreshed every 4 seconds (Telegram typing expires after ~5s).
+withTypingIndicator :: Types.TelegramConfig -> Int -> IO a -> IO a
+withTypingIndicator cfg chatId action = do
+ doneVar <- newTVarIO False
+ _ <- forkIO <| typingLoop doneVar
+ action `finally` atomically (writeTVar doneVar True)
+ where
+ typingLoop doneVar = do
+ done <- readTVarIO doneVar
+ unless done <| do
+ sendTypingAction cfg chatId
+ threadDelay 4000000
+ typingLoop doneVar
+
+leaveChat :: Types.TelegramConfig -> Int -> IO ()
+leaveChat cfg chatId = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/leaveChat"
+ body =
+ Aeson.object
+ [ "chat_id" .= chatId
+ ]
+ req0 <- HTTP.parseRequest url
+ let req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| req0
+ _ <- try @SomeException (HTTP.httpLBS req)
+ pure ()
+
+runTelegramBot :: Types.TelegramConfig -> Provider.Provider -> IO ()
+runTelegramBot tgConfig provider = do
+ putText "Starting Telegram bot..."
+ offsetVar <- newTVarIO 0
+
+ botUsername <- getBotUsername tgConfig
+ case botUsername of
+ Nothing -> putText "Warning: could not get bot username, group mentions may not work"
+ Just name -> putText <| "Bot username: @" <> name
+ let botName = fromMaybe "bot" botUsername
+
+ _ <- forkIO reminderLoop
+ putText "Reminder loop started (checking every 5 minutes)"
+
+ _ <- forkIO (Email.emailCheckLoop (sendMessageReturningId tgConfig) benChatId)
+ putText "Email check loop started (checking every 6 hours)"
+
+ let sendFn = sendMessageReturningId tgConfig
+ _ <- forkIO (Messages.messageDispatchLoop sendFn)
+ putText "Message dispatch loop started (1s polling)"
+
+ incomingQueues <- IncomingQueue.newIncomingQueues
+
+ let engineCfg =
+ Engine.defaultEngineConfig
+ { Engine.engineOnToolCall = \toolName args ->
+ putText <| "Tool call: " <> toolName <> " " <> Text.take 200 args,
+ Engine.engineOnToolResult = \toolName success result ->
+ putText <| "Tool result: " <> toolName <> " " <> (if success then "ok" else "err") <> " " <> Text.take 200 result,
+ Engine.engineOnActivity = \activity ->
+ putText <| "Agent: " <> activity
+ }
+
+ let processBatch = handleMessageBatch tgConfig provider engineCfg botName
+ _ <- forkIO (IncomingQueue.startIncomingBatcher incomingQueues processBatch)
+ putText "Incoming message batcher started (3s window, 200ms tick)"
+
+ forever <| do
+ offset <- readTVarIO offsetVar
+ rawUpdates <- getRawUpdates tgConfig offset
+ forM_ rawUpdates <| \rawUpdate -> do
+ case Types.parseBotAddedToGroup botName rawUpdate of
+ Just addedEvent -> do
+ atomically (writeTVar offsetVar (Types.bagUpdateId addedEvent + 1))
+ handleBotAddedToGroup tgConfig addedEvent
+ Nothing -> case Types.parseUpdate rawUpdate of
+ Just msg -> do
+ putText <| "Received message from " <> Types.tmUserFirstName msg <> " in chat " <> tshow (Types.tmChatId msg) <> " (type: " <> tshow (Types.tmChatType msg) <> "): " <> Text.take 50 (Types.tmText msg)
+ atomically (writeTVar offsetVar (Types.tmUpdateId msg + 1))
+ IncomingQueue.enqueueIncoming incomingQueues IncomingQueue.defaultBatchWindowSeconds msg
+ Nothing -> do
+ let updateId = getUpdateId rawUpdate
+ putText <| "Unparsed update: " <> Text.take 200 (tshow rawUpdate)
+ forM_ updateId <| \uid -> atomically (writeTVar offsetVar (uid + 1))
+ when (null rawUpdates) <| threadDelay 1000000
+
+getUpdateId :: Aeson.Value -> Maybe Int
+getUpdateId (Aeson.Object obj) = case KeyMap.lookup "update_id" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+getUpdateId _ = Nothing
+
+handleBotAddedToGroup :: Types.TelegramConfig -> Types.BotAddedToGroup -> IO ()
+handleBotAddedToGroup tgConfig addedEvent = do
+ let addedBy = Types.bagAddedByUserId addedEvent
+ chatId = Types.bagChatId addedEvent
+ firstName = Types.bagAddedByFirstName addedEvent
+ if Types.isUserAllowed tgConfig addedBy
+ then do
+ putText <| "Bot added to group " <> tshow chatId <> " by authorized user " <> firstName <> " (" <> tshow addedBy <> ")"
+ _ <- Messages.enqueueImmediate Nothing chatId Nothing "hello! i'm ready to help." (Just "system") Nothing
+ pure ()
+ else do
+ putText <| "Bot added to group " <> tshow chatId <> " by UNAUTHORIZED user " <> firstName <> " (" <> tshow addedBy <> ") - leaving"
+ _ <- Messages.enqueueImmediate Nothing chatId Nothing "sorry, you're not authorized to add me to groups." (Just "system") Nothing
+ leaveChat tgConfig chatId
+
+handleMessageBatch ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Text ->
+ Types.TelegramMessage ->
+ Text ->
+ IO ()
+handleMessageBatch tgConfig provider engineCfg _botUsername msg batchedText = do
+ let userName =
+ Types.tmUserFirstName msg
+ <> maybe "" (" " <>) (Types.tmUserLastName msg)
+ chatId = Types.tmChatId msg
+ usrId = Types.tmUserId msg
+
+ let isGroup = Types.isGroupChat msg
+ isAllowed = isGroup || Types.isUserAllowed tgConfig usrId
+
+ unless isAllowed <| do
+ putText <| "Unauthorized user: " <> tshow usrId <> " (" <> userName <> ")"
+ _ <- Messages.enqueueImmediate Nothing chatId Nothing "sorry, you're not authorized to use this bot." (Just "system") Nothing
+ pure ()
+
+ when isAllowed <| do
+ user <- Memory.getOrCreateUserByTelegramId usrId userName
+ let uid = Memory.userId user
+ handleAuthorizedMessageBatch tgConfig provider engineCfg msg uid userName chatId batchedText
+
+handleMessage ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Text ->
+ Types.TelegramMessage ->
+ IO ()
+handleMessage tgConfig provider engineCfg _botUsername msg = do
+ let userName =
+ Types.tmUserFirstName msg
+ <> maybe "" (" " <>) (Types.tmUserLastName msg)
+ chatId = Types.tmChatId msg
+ usrId = Types.tmUserId msg
+
+ let isGroup = Types.isGroupChat msg
+ isAllowed = isGroup || Types.isUserAllowed tgConfig usrId
+
+ unless isAllowed <| do
+ putText <| "Unauthorized user: " <> tshow usrId <> " (" <> userName <> ")"
+ _ <- Messages.enqueueImmediate Nothing chatId Nothing "sorry, you're not authorized to use this bot." (Just "system") Nothing
+ pure ()
+
+ when isAllowed <| do
+ user <- Memory.getOrCreateUserByTelegramId usrId userName
+ let uid = Memory.userId user
+ handleAuthorizedMessage tgConfig provider engineCfg msg uid userName chatId
+
+handleAuthorizedMessage ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Types.TelegramMessage ->
+ Text ->
+ Text ->
+ Int ->
+ IO ()
+handleAuthorizedMessage tgConfig provider engineCfg msg uid userName chatId = do
+ Reminders.recordUserChat uid chatId
+
+ let msgText = Types.tmText msg
+ threadId = Types.tmThreadId msg
+ cmdHandled <- handleOutreachCommand tgConfig chatId threadId msgText
+ when cmdHandled (pure ())
+ unless cmdHandled <| handleAuthorizedMessageContinued tgConfig provider engineCfg msg uid userName chatId
+
+handleAuthorizedMessageContinued ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Types.TelegramMessage ->
+ Text ->
+ Text ->
+ Int ->
+ IO ()
+handleAuthorizedMessageContinued tgConfig provider engineCfg msg uid userName chatId = do
+ pdfContent <- case Types.tmDocument msg of
+ Just doc | Types.isPdf doc -> do
+ putText <| "Processing PDF: " <> fromMaybe "(unnamed)" (Types.tdFileName doc)
+ result <- Media.downloadAndExtractPdf tgConfig (Types.tdFileId doc)
+ case result of
+ Left err -> do
+ putText <| "PDF extraction failed: " <> err
+ pure Nothing
+ Right text -> do
+ let truncated = Text.take 40000 text
+ putText <| "Extracted " <> tshow (Text.length truncated) <> " chars from PDF"
+ pure (Just truncated)
+ _ -> pure Nothing
+
+ photoAnalysis <- case Types.tmPhoto msg of
+ Just photo -> do
+ case Media.checkPhotoSize photo of
+ Left err -> do
+ putText <| "Photo rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ Right () -> do
+ putText <| "Processing photo: " <> tshow (Types.tpWidth photo) <> "x" <> tshow (Types.tpHeight photo)
+ bytesResult <- Media.downloadPhoto tgConfig photo
+ case bytesResult of
+ Left err -> do
+ putText <| "Photo download failed: " <> err
+ pure Nothing
+ Right bytes -> do
+ putText <| "Downloaded photo, " <> tshow (BL.length bytes) <> " bytes, analyzing..."
+ analysisResult <- Media.analyzeImage (Types.tgOpenRouterApiKey tgConfig) bytes (Types.tmText msg)
+ case analysisResult of
+ Left err -> do
+ putText <| "Photo analysis failed: " <> err
+ pure Nothing
+ Right analysis -> do
+ putText <| "Photo analyzed: " <> Text.take 100 analysis <> "..."
+ pure (Just analysis)
+ Nothing -> pure Nothing
+
+ voiceTranscription <- case Types.tmVoice msg of
+ Just voice -> do
+ case Media.checkVoiceSize voice of
+ Left err -> do
+ putText <| "Voice rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ Right () -> do
+ if not (Types.isSupportedVoiceFormat voice)
+ then do
+ let err = "unsupported voice format, please send OGG/Opus audio"
+ putText <| "Voice rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ else do
+ putText <| "Processing voice message: " <> tshow (Types.tvDuration voice) <> " seconds"
+ bytesResult <- Media.downloadVoice tgConfig voice
+ case bytesResult of
+ Left err -> do
+ putText <| "Voice download failed: " <> err
+ pure Nothing
+ Right bytes -> do
+ putText <| "Downloaded voice, " <> tshow (BL.length bytes) <> " bytes, transcribing..."
+ transcribeResult <- Media.transcribeVoice (Types.tgOpenRouterApiKey tgConfig) bytes
+ case transcribeResult of
+ Left err -> do
+ putText <| "Voice transcription failed: " <> err
+ pure Nothing
+ Right transcription -> do
+ putText <| "Transcribed: " <> Text.take 100 transcription <> "..."
+ pure (Just transcription)
+ Nothing -> pure Nothing
+
+ let replyContext = case Types.tmReplyTo msg of
+ Just reply ->
+ let senderName = case (Types.trFromFirstName reply, Types.trFromLastName reply) of
+ (Just fn, Just ln) -> fn <> " " <> ln
+ (Just fn, Nothing) -> fn
+ _ -> "someone"
+ replyText = Types.trText reply
+ in if Text.null replyText
+ then ""
+ else "[replying to " <> senderName <> ": \"" <> Text.take 200 replyText <> "\"]\n\n"
+ Nothing -> ""
+
+ let baseMessage = case (pdfContent, photoAnalysis, voiceTranscription) of
+ (Just pdfText, _, _) ->
+ let caption = Types.tmText msg
+ prefix = if Text.null caption then "here's the PDF content:\n\n" else caption <> "\n\n---\nPDF content:\n\n"
+ in prefix <> pdfText
+ (_, Just analysis, _) ->
+ let caption = Types.tmText msg
+ prefix =
+ if Text.null caption
+ then "[user sent an image. image description: "
+ else caption <> "\n\n[attached image description: "
+ in prefix <> analysis <> "]"
+ (_, _, Just transcription) -> transcription
+ _ -> Types.tmText msg
+
+ let userMessage = replyContext <> baseMessage
+ isGroup = Types.isGroupChat msg
+ threadId = Types.tmThreadId msg
+
+ shouldEngage <-
+ if isGroup
+ then do
+ putText "Checking if should engage (group chat)..."
+ recentMsgs <- Memory.getGroupRecentMessages chatId threadId 5
+ let recentContext =
+ if null recentMsgs
+ then ""
+ else
+ Text.unlines
+ [ "[Recent conversation for context]",
+ Text.unlines
+ [ fromMaybe "User" (Memory.cmSenderName m) <> ": " <> Memory.cmContent m
+ | m <- reverse recentMsgs
+ ],
+ "",
+ "[New message to classify]"
+ ]
+ shouldEngageInGroup (Types.tgOpenRouterApiKey tgConfig) (recentContext <> userMessage)
+ else pure True
+
+ if not shouldEngage
+ then putText "Skipping group message (pre-filter said no)"
+ else do
+ (conversationContext, contextTokens) <-
+ if isGroup
+ then do
+ _ <- Memory.saveGroupMessage chatId threadId Memory.UserRole userName userMessage
+ Memory.getGroupConversationContext chatId threadId maxConversationTokens
+ else do
+ _ <- Memory.saveMessage uid chatId Memory.UserRole (Just userName) userMessage
+ Memory.getConversationContext uid chatId maxConversationTokens
+ putText <| "Conversation context: " <> tshow contextTokens <> " tokens"
+
+ processEngagedMessage tgConfig provider engineCfg msg uid userName chatId userMessage conversationContext
+
+handleAuthorizedMessageBatch ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Types.TelegramMessage ->
+ Text ->
+ Text ->
+ Int ->
+ Text ->
+ IO ()
+handleAuthorizedMessageBatch tgConfig provider engineCfg msg uid userName chatId batchedText = do
+ Reminders.recordUserChat uid chatId
+
+ pdfContent <- case Types.tmDocument msg of
+ Just doc | Types.isPdf doc -> do
+ putText <| "Processing PDF: " <> fromMaybe "(unnamed)" (Types.tdFileName doc)
+ result <- Media.downloadAndExtractPdf tgConfig (Types.tdFileId doc)
+ case result of
+ Left err -> do
+ putText <| "PDF extraction failed: " <> err
+ pure Nothing
+ Right text -> do
+ let truncated = Text.take 40000 text
+ putText <| "Extracted " <> tshow (Text.length truncated) <> " chars from PDF"
+ pure (Just truncated)
+ _ -> pure Nothing
+
+ photoAnalysis <- case Types.tmPhoto msg of
+ Just photo -> do
+ case Media.checkPhotoSize photo of
+ Left err -> do
+ putText <| "Photo rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ Right () -> do
+ putText <| "Processing photo: " <> tshow (Types.tpWidth photo) <> "x" <> tshow (Types.tpHeight photo)
+ bytesResult <- Media.downloadPhoto tgConfig photo
+ case bytesResult of
+ Left err -> do
+ putText <| "Photo download failed: " <> err
+ pure Nothing
+ Right bytes -> do
+ putText <| "Downloaded photo, " <> tshow (BL.length bytes) <> " bytes, analyzing..."
+ analysisResult <- Media.analyzeImage (Types.tgOpenRouterApiKey tgConfig) bytes (Types.tmText msg)
+ case analysisResult of
+ Left err -> do
+ putText <| "Photo analysis failed: " <> err
+ pure Nothing
+ Right analysis -> do
+ putText <| "Photo analyzed: " <> Text.take 100 analysis <> "..."
+ pure (Just analysis)
+ Nothing -> pure Nothing
+
+ voiceTranscription <- case Types.tmVoice msg of
+ Just voice -> do
+ case Media.checkVoiceSize voice of
+ Left err -> do
+ putText <| "Voice rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ Right () -> do
+ if not (Types.isSupportedVoiceFormat voice)
+ then do
+ let err = "unsupported voice format, please send OGG/Opus audio"
+ putText <| "Voice rejected: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) err (Just "system") Nothing
+ pure Nothing
+ else do
+ putText <| "Processing voice message: " <> tshow (Types.tvDuration voice) <> " seconds"
+ bytesResult <- Media.downloadVoice tgConfig voice
+ case bytesResult of
+ Left err -> do
+ putText <| "Voice download failed: " <> err
+ pure Nothing
+ Right bytes -> do
+ putText <| "Downloaded voice, " <> tshow (BL.length bytes) <> " bytes, transcribing..."
+ transcribeResult <- Media.transcribeVoice (Types.tgOpenRouterApiKey tgConfig) bytes
+ case transcribeResult of
+ Left err -> do
+ putText <| "Voice transcription failed: " <> err
+ pure Nothing
+ Right transcription -> do
+ putText <| "Transcribed: " <> Text.take 100 transcription <> "..."
+ pure (Just transcription)
+ Nothing -> pure Nothing
+
+ let mediaPrefix = case (pdfContent, photoAnalysis, voiceTranscription) of
+ (Just pdfText, _, _) -> "---\nPDF content:\n\n" <> pdfText <> "\n\n---\n\n"
+ (_, Just analysis, _) -> "[attached image description: " <> analysis <> "]\n\n"
+ (_, _, Just transcription) -> "[voice transcription: " <> transcription <> "]\n\n"
+ _ -> ""
+
+ let userMessage = mediaPrefix <> batchedText
+ isGroup = Types.isGroupChat msg
+ threadId = Types.tmThreadId msg
+
+ shouldEngage <-
+ if isGroup
+ then do
+ putText "Checking if should engage (group chat)..."
+ recentMsgs <- Memory.getGroupRecentMessages chatId threadId 5
+ let recentContext =
+ if null recentMsgs
+ then ""
+ else
+ Text.unlines
+ [ "[Recent conversation for context]",
+ Text.unlines
+ [ fromMaybe "User" (Memory.cmSenderName m) <> ": " <> Memory.cmContent m
+ | m <- reverse recentMsgs
+ ],
+ "",
+ "[New message to classify]"
+ ]
+ shouldEngageInGroup (Types.tgOpenRouterApiKey tgConfig) (recentContext <> userMessage)
+ else pure True
+
+ if not shouldEngage
+ then putText "Skipping group message (pre-filter said no)"
+ else do
+ (conversationContext, contextTokens) <-
+ if isGroup
+ then do
+ _ <- Memory.saveGroupMessage chatId threadId Memory.UserRole userName userMessage
+ Memory.getGroupConversationContext chatId threadId maxConversationTokens
+ else do
+ _ <- Memory.saveMessage uid chatId Memory.UserRole (Just userName) userMessage
+ Memory.getConversationContext uid chatId maxConversationTokens
+ putText <| "Conversation context: " <> tshow contextTokens <> " tokens"
+
+ processEngagedMessage tgConfig provider engineCfg msg uid userName chatId userMessage conversationContext
+
+processEngagedMessage ::
+ Types.TelegramConfig ->
+ Provider.Provider ->
+ Engine.EngineConfig ->
+ Types.TelegramMessage ->
+ Text ->
+ Text ->
+ Int ->
+ Text ->
+ Text ->
+ IO ()
+processEngagedMessage tgConfig provider engineCfg msg uid userName chatId userMessage conversationContext = do
+ let isGroup = Types.isGroupChat msg
+
+ personalMemories <- Memory.recallMemories uid userMessage 5
+ groupMemories <-
+ if isGroup
+ then Memory.recallGroupMemories chatId userMessage 3
+ else pure []
+
+ let allMemories = personalMemories <> groupMemories
+ memoryContext =
+ if null allMemories
+ then "No memories found."
+ else
+ Text.unlines
+ <| ["[Personal] " <> Memory.memoryContent m | m <- personalMemories]
+ <> ["[Group] " <> Memory.memoryContent m | m <- groupMemories]
+
+ now <- getCurrentTime
+ tz <- getCurrentTimeZone
+ let localTime = utcToLocalTime tz now
+ timeStr = Text.pack (formatTime defaultTimeLocale "%A, %B %d, %Y at %H:%M" localTime)
+
+ let chatContext =
+ if Types.isGroupChat msg
+ then "\n\n## Chat Type\nThis is a GROUP CHAT. Apply the group response rules - only respond if appropriate."
+ else "\n\n## Chat Type\nThis is a PRIVATE CHAT. Always respond to the user."
+ hledgerContext =
+ if isHledgerAuthorized userName
+ then
+ Text.unlines
+ [ "",
+ "## hledger (personal finance)",
+ "",
+ "you have access to hledger tools for querying and recording financial transactions.",
+ "account naming: ex (expenses), as (assets), li (liabilities), in (income), eq (equity).",
+ "level 2 is owner: 'me' (personal) or 'us' (shared/family).",
+ "level 3 is type: need (necessary), want (discretionary), cash, cred (credit), vest (investments).",
+ "examples: ex:me:want:grooming, as:us:cash:checking, li:us:cred:chase.",
+ "when user says 'i spent $X at Y', use hledger_add with appropriate accounts."
+ ]
+ else ""
+ emailContext =
+ if isEmailAuthorized userName
+ then
+ Text.unlines
+ [ "",
+ "## email (ben@bensima.com)",
+ "",
+ "you have access to email tools for managing ben's inbox.",
+ "use email_check to see recent unread emails (returns uid, from, subject, date, has_unsubscribe).",
+ "use email_read to read full content of important emails.",
+ "use email_unsubscribe to unsubscribe from marketing/newsletters (clicks List-Unsubscribe link).",
+ "use email_archive to move FYI emails to archive.",
+ "prioritize: urgent items first, then emails needing response, then suggest unsubscribing from marketing."
+ ]
+ else ""
+ systemPrompt =
+ telegramSystemPrompt
+ <> "\n\n## Current Date and Time\n"
+ <> timeStr
+ <> chatContext
+ <> hledgerContext
+ <> emailContext
+ <> "\n\n## Current User\n"
+ <> "You are talking to: "
+ <> userName
+ <> "\n\n## What you know about this user\n"
+ <> memoryContext
+ <> "\n\n"
+ <> conversationContext
+
+ let memoryTools =
+ [ Memory.rememberTool uid,
+ Memory.recallTool uid,
+ Memory.linkMemoriesTool uid,
+ Memory.queryGraphTool uid
+ ]
+ searchTools = case Types.tgKagiApiKey tgConfig of
+ Just kagiKey -> [WebSearch.webSearchTool kagiKey]
+ Nothing -> []
+ webReaderTools = [WebReader.webReaderTool (Types.tgOpenRouterApiKey tgConfig)]
+ pdfTools = [Pdf.pdfTool]
+ notesTools =
+ [ Notes.noteAddTool uid,
+ Notes.noteListTool uid,
+ Notes.noteDeleteTool uid
+ ]
+ calendarTools =
+ [ Calendar.calendarListTool,
+ Calendar.calendarAddTool,
+ Calendar.calendarSearchTool
+ ]
+ todoTools =
+ [ Todos.todoAddTool uid,
+ Todos.todoListTool uid,
+ Todos.todoCompleteTool uid,
+ Todos.todoDeleteTool uid
+ ]
+ messageTools =
+ [ Messages.sendMessageTool uid chatId (Types.tmThreadId msg),
+ Messages.listPendingMessagesTool uid chatId,
+ Messages.cancelMessageTool
+ ]
+ hledgerTools =
+ if isHledgerAuthorized userName
+ then Hledger.allHledgerTools
+ else []
+ emailTools =
+ if isEmailAuthorized userName
+ then Email.allEmailTools
+ else []
+ pythonTools =
+ [Python.pythonExecTool | isBenAuthorized userName]
+ httpTools =
+ if isBenAuthorized userName
+ then Http.allHttpTools
+ else []
+ outreachTools =
+ if isBenAuthorized userName
+ then Outreach.allOutreachTools
+ else []
+ feedbackTools =
+ if isBenAuthorized userName
+ then Feedback.allFeedbackTools
+ else []
+ fileTools =
+ [Tools.readFileTool | isBenAuthorized userName]
+ skillsTools =
+ [ Skills.skillTool userName,
+ Skills.listSkillsTool userName,
+ Skills.publishSkillTool userName
+ ]
+ subagentTools =
+ if isBenAuthorized userName
+ then
+ let keys =
+ Subagent.SubagentApiKeys
+ { Subagent.subagentOpenRouterKey = Types.tgOpenRouterApiKey tgConfig,
+ Subagent.subagentKagiKey = Types.tgKagiApiKey tgConfig
+ }
+ in [Subagent.spawnSubagentTool keys]
+ else []
+ tools = memoryTools <> searchTools <> webReaderTools <> pdfTools <> notesTools <> calendarTools <> todoTools <> messageTools <> hledgerTools <> emailTools <> pythonTools <> httpTools <> outreachTools <> feedbackTools <> fileTools <> skillsTools <> subagentTools
+
+ let agentCfg =
+ Engine.defaultAgentConfig
+ { Engine.agentSystemPrompt = systemPrompt,
+ Engine.agentTools = tools,
+ Engine.agentMaxIterations = 50,
+ Engine.agentGuardrails =
+ Engine.defaultGuardrails
+ { Engine.guardrailMaxCostCents = 1000.0,
+ Engine.guardrailMaxDuplicateToolCalls = 10
+ }
+ }
+
+ result <-
+ withTypingIndicator tgConfig chatId
+ <| Engine.runAgentWithProvider engineCfg provider agentCfg userMessage
+
+ case result of
+ Left err -> do
+ putText <| "Agent error: " <> err
+ _ <- Messages.enqueueImmediate (Just uid) chatId (Types.tmThreadId msg) "sorry, i hit an error. please try again." (Just "agent_error") Nothing
+ pure ()
+ Right agentResult -> do
+ let response = Engine.resultFinalMessage agentResult
+ threadId = Types.tmThreadId msg
+ putText <| "Response text: " <> Text.take 200 response
+
+ if isGroup
+ then void <| Memory.saveGroupMessage chatId threadId Memory.AssistantRole "Ava" response
+ else void <| Memory.saveMessage uid chatId Memory.AssistantRole Nothing response
+
+ if Text.null response
+ then do
+ if isGroup
+ then putText "Agent chose not to respond (group chat)"
+ else do
+ putText "Warning: empty response from agent"
+ _ <- Messages.enqueueImmediate (Just uid) chatId threadId "hmm, i don't have a response for that" (Just "agent_response") Nothing
+ pure ()
+ else do
+ parts <- splitMessageForChat (Types.tgOpenRouterApiKey tgConfig) response
+ putText <| "Split response into " <> tshow (length parts) <> " parts"
+ enqueueMultipart (Just uid) chatId threadId parts (Just "agent_response")
+ unless isGroup <| checkAndSummarize (Types.tgOpenRouterApiKey tgConfig) uid chatId
+ let cost = Engine.resultTotalCost agentResult
+ costStr = Text.pack (printf "%.2f" cost)
+ putText
+ <| "Responded to "
+ <> userName
+ <> " (cost: "
+ <> costStr
+ <> " cents)"
+
+maxConversationTokens :: Int
+maxConversationTokens = 4000
+
+summarizationThreshold :: Int
+summarizationThreshold = 3000
+
+isHledgerAuthorized :: Text -> Bool
+isHledgerAuthorized userName =
+ let lowerName = Text.toLower userName
+ in "ben" `Text.isInfixOf` lowerName || "kate" `Text.isInfixOf` lowerName
+
+isEmailAuthorized :: Text -> Bool
+isEmailAuthorized userName =
+ let lowerName = Text.toLower userName
+ in "ben" `Text.isInfixOf` lowerName
+
+isBenAuthorized :: Text -> Bool
+isBenAuthorized userName =
+ let lowerName = Text.toLower userName
+ in "ben" `Text.isInfixOf` lowerName
+
+checkAndSummarize :: Text -> Text -> Int -> IO ()
+checkAndSummarize openRouterKey uid chatId = do
+ (_, currentTokens) <- Memory.getConversationContext uid chatId maxConversationTokens
+ when (currentTokens > summarizationThreshold) <| do
+ putText <| "Context at " <> tshow currentTokens <> " tokens, summarizing..."
+ recentMsgs <- Memory.getRecentMessages uid chatId 50
+ let conversationText =
+ Text.unlines
+ [ (if Memory.cmRole m == Memory.UserRole then "User: " else "Assistant: ") <> Memory.cmContent m
+ | m <- reverse recentMsgs
+ ]
+ gemini = Provider.defaultOpenRouter openRouterKey "google/gemini-2.0-flash-001"
+ summaryResult <-
+ Provider.chat
+ gemini
+ []
+ [ Provider.Message Provider.System "You are a conversation summarizer. Summarize the key points, decisions, and context from this conversation in 2-3 paragraphs. Focus on information that would be useful for continuing the conversation later." Nothing Nothing,
+ Provider.Message Provider.User ("Summarize this conversation:\n\n" <> conversationText) Nothing Nothing
+ ]
+ case summaryResult of
+ Left err -> putText <| "Summarization failed: " <> err
+ Right summaryMsg -> do
+ let summary = Provider.msgContent summaryMsg
+ _ <- Memory.summarizeAndArchive uid chatId summary
+ putText "Conversation summarized and archived (gemini)"
+
+splitMessageForChat :: Text -> Text -> IO [Text]
+splitMessageForChat _openRouterKey message = do
+ let parts = splitOnParagraphs message
+ pure parts
+
+splitOnParagraphs :: Text -> [Text]
+splitOnParagraphs message
+ | Text.length message < 300 = [message]
+ | otherwise =
+ let paragraphs = filter (not <. Text.null) (map Text.strip (Text.splitOn "\n\n" message))
+ in if length paragraphs <= 1
+ then [message]
+ else mergeTooShort paragraphs
+
+mergeTooShort :: [Text] -> [Text]
+mergeTooShort [] = []
+mergeTooShort [x] = [x]
+mergeTooShort (x : y : rest)
+ | Text.length x < 100 = mergeTooShort ((x <> "\n\n" <> y) : rest)
+ | otherwise = x : mergeTooShort (y : rest)
+
+enqueueMultipart :: Maybe Text -> Int -> Maybe Int -> [Text] -> Maybe Text -> IO ()
+enqueueMultipart _ _ _ [] _ = pure ()
+enqueueMultipart mUid chatId mThreadId parts msgType = do
+ forM_ (zip [0 ..] parts) <| \(i :: Int, part) -> do
+ if i == 0
+ then void <| Messages.enqueueImmediate mUid chatId mThreadId part msgType Nothing
+ else do
+ let delaySeconds = fromIntegral (i * 2)
+ void <| Messages.enqueueDelayed mUid chatId mThreadId part delaySeconds msgType Nothing
+
+shouldEngageInGroup :: Text -> Text -> IO Bool
+shouldEngageInGroup openRouterKey messageText = do
+ let gemini = Provider.defaultOpenRouter openRouterKey "google/gemini-2.0-flash-001"
+ result <-
+ Provider.chat
+ gemini
+ []
+ [ Provider.Message
+ Provider.System
+ ( Text.unlines
+ [ "You are a classifier that decides if an AI assistant named 'Ava' should respond to a message in a group chat.",
+ "You may be given recent conversation context to help decide.",
+ "Respond with ONLY 'yes' or 'no' (lowercase, nothing else).",
+ "",
+ "Say 'yes' if:",
+ "- The message is a direct question Ava could answer",
+ "- The message contains a factual error worth correcting",
+ "- The message mentions Ava or asks for help",
+ "- The message shares a link or document to analyze",
+ "- The message is a follow-up to a conversation Ava was just participating in",
+ "- The user is clearly talking to Ava based on context (e.g. Ava just responded)",
+ "",
+ "Say 'no' if:",
+ "- It's casual banter or chit-chat between people (not involving Ava)",
+ "- It's a greeting or farewell not directed at Ava",
+ "- It's an inside joke or personal conversation between humans",
+ "- It doesn't require or benefit from Ava's input"
+ ]
+ )
+ Nothing
+ Nothing,
+ Provider.Message Provider.User messageText Nothing Nothing
+ ]
+ case result of
+ Left err -> do
+ putText <| "Engagement check failed: " <> err
+ pure True
+ Right msg -> do
+ let response = Text.toLower (Text.strip (Provider.msgContent msg))
+ pure (response == "yes" || response == "y")
+
+checkOllama :: IO (Either Text ())
+checkOllama = do
+ ollamaUrl <- fromMaybe "http://localhost:11434" </ lookupEnv "OLLAMA_URL"
+ let url = ollamaUrl <> "/api/tags"
+ result <-
+ try <| do
+ req <- HTTP.parseRequest url
+ HTTP.httpLBS req
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("Ollama not running: " <> tshow e))
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "models" obj of
+ Just (Aeson.Array models) ->
+ let names = [n | Aeson.Object m <- toList models, Just (Aeson.String n) <- [KeyMap.lookup "name" m]]
+ hasNomic = any ("nomic-embed-text" `Text.isInfixOf`) names
+ in if hasNomic
+ then pure (Right ())
+ else pure (Left "nomic-embed-text model not found")
+ _ -> pure (Left "Invalid Ollama response")
+ _ -> pure (Left "Failed to parse Ollama response")
+ else pure (Left ("Ollama HTTP error: " <> tshow status))
+
+pullEmbeddingModel :: IO (Either Text ())
+pullEmbeddingModel = do
+ ollamaUrl <- fromMaybe "http://localhost:11434" </ lookupEnv "OLLAMA_URL"
+ let url = ollamaUrl <> "/api/pull"
+ putText "Pulling nomic-embed-text model (this may take a few minutes)..."
+ req0 <- HTTP.parseRequest url
+ let body = Aeson.object ["name" .= ("nomic-embed-text" :: Text)]
+ req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro (600 * 1000000))
+ <| req0
+ result <- try (HTTP.httpLBS req)
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("Failed to pull model: " <> tshow e))
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then do
+ putText "nomic-embed-text model ready"
+ pure (Right ())
+ else pure (Left ("Pull failed: HTTP " <> tshow status))
+
+ensureOllama :: IO ()
+ensureOllama = do
+ checkResult <- checkOllama
+ case checkResult of
+ Right () -> putText "Ollama ready with nomic-embed-text"
+ Left err
+ | "not running" `Text.isInfixOf` err -> do
+ putText <| "Error: " <> err
+ putText "Please start Ollama: ollama serve"
+ exitFailure
+ | "not found" `Text.isInfixOf` err -> do
+ putText "nomic-embed-text model not found, pulling..."
+ pullResult <- pullEmbeddingModel
+ case pullResult of
+ Right () -> pure ()
+ Left pullErr -> do
+ putText <| "Error: " <> pullErr
+ exitFailure
+ | otherwise -> do
+ putText <| "Ollama error: " <> err
+ exitFailure
+
+startBot :: Maybe Text -> IO ()
+startBot maybeToken = do
+ token <- case maybeToken of
+ Just t -> pure t
+ Nothing -> do
+ envToken <- lookupEnv "TELEGRAM_BOT_TOKEN"
+ case envToken of
+ Just t -> pure (Text.pack t)
+ Nothing -> do
+ putText "Error: TELEGRAM_BOT_TOKEN not set and no --token provided"
+ exitFailure
+
+ putText <| "AVA data root: " <> Text.pack Paths.avaDataRoot
+ putText <| "Skills dir: " <> Text.pack Paths.skillsDir
+ putText <| "Outreach dir: " <> Text.pack Paths.outreachDir
+
+ ensureOllama
+
+ allowedIds <- loadAllowedUserIds
+ kagiKey <- fmap Text.pack </ lookupEnv "KAGI_API_KEY"
+
+ apiKey <- lookupEnv "OPENROUTER_API_KEY"
+ case apiKey of
+ Nothing -> do
+ putText "Error: OPENROUTER_API_KEY not set"
+ exitFailure
+ Just key -> do
+ let orKey = Text.pack key
+ tgConfig = Types.defaultTelegramConfig token allowedIds kagiKey orKey
+ provider = Provider.defaultOpenRouter orKey "anthropic/claude-sonnet-4.5"
+ putText <| "Allowed user IDs: " <> tshow allowedIds
+ putText <| "Kagi search: " <> if isJust kagiKey then "enabled" else "disabled"
+ runTelegramBot tgConfig provider
+
+loadAllowedUserIds :: IO [Int]
+loadAllowedUserIds = do
+ maybeIds <- lookupEnv "ALLOWED_TELEGRAM_USER_IDS"
+ case maybeIds of
+ Nothing -> pure []
+ Just "*" -> pure []
+ Just idsStr -> do
+ let ids = mapMaybe (readMaybe <. Text.unpack <. Text.strip) (Text.splitOn "," (Text.pack idsStr))
+ pure ids
+
+handleOutreachCommand :: Types.TelegramConfig -> Int -> Maybe Int -> Text -> IO Bool
+handleOutreachCommand _tgConfig chatId mThreadId cmd
+ | "/review" `Text.isPrefixOf` cmd = do
+ pending <- Outreach.listDrafts Outreach.Pending
+ case pending of
+ [] -> do
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId "no pending outreach drafts" (Just "system") Nothing
+ pure True
+ (draft : _) -> do
+ let msg = formatDraftForReview draft
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId msg (Just "system") Nothing
+ pure True
+ | "/approve " `Text.isPrefixOf` cmd = do
+ let draftId = Text.strip (Text.drop 9 cmd)
+ result <- Outreach.approveDraft draftId
+ case result of
+ Left err -> do
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId ("error: " <> err) (Just "system") Nothing
+ pure True
+ Right draft -> do
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId ("approved: " <> Outreach.draftId draft) (Just "system") Nothing
+ pure True
+ | "/reject " `Text.isPrefixOf` cmd = do
+ let rest = Text.strip (Text.drop 8 cmd)
+ (draftId, reason) = case Text.breakOn " " rest of
+ (did, r) -> (did, if Text.null r then Nothing else Just (Text.strip r))
+ result <- Outreach.rejectDraft draftId reason
+ case result of
+ Left err -> do
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId ("error: " <> err) (Just "system") Nothing
+ pure True
+ Right draft -> do
+ let reasonMsg = maybe "" (" reason: " <>) (Outreach.draftRejectReason draft)
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId ("rejected: " <> Outreach.draftId draft <> reasonMsg) (Just "system") Nothing
+ pure True
+ | "/queue" `Text.isPrefixOf` cmd = do
+ count <- Outreach.getPendingCount
+ _ <- Messages.enqueueImmediate Nothing chatId mThreadId (tshow count <> " pending outreach drafts") (Just "system") Nothing
+ pure True
+ | otherwise = pure False
+
+formatDraftForReview :: Outreach.OutreachDraft -> Text
+formatDraftForReview draft =
+ Text.unlines
+ [ "*outreach draft*",
+ "",
+ "*id:* `" <> Outreach.draftId draft <> "`",
+ "*type:* " <> tshow (Outreach.draftType draft),
+ "*to:* " <> Outreach.draftRecipient draft,
+ maybe "" (\s -> "*subject:* " <> s <> "\n") (Outreach.draftSubject draft),
+ "*context:* " <> Outreach.draftContext draft,
+ "",
+ Outreach.draftBody draft,
+ "",
+ "reply `/approve " <> Outreach.draftId draft <> "` or `/reject " <> Outreach.draftId draft <> " [reason]`"
+ ]
diff --git a/Omni/Agent/Telegram/IncomingQueue.hs b/Omni/Agent/Telegram/IncomingQueue.hs
new file mode 100644
index 0000000..875fbf3
--- /dev/null
+++ b/Omni/Agent/Telegram/IncomingQueue.hs
@@ -0,0 +1,228 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Incoming Message Queue - Batches incoming messages by chat.
+--
+-- Messages are queued in-memory and batched by chat_id with a configurable
+-- window (default 1s). This prevents confusion when messages arrive
+-- simultaneously from different chats.
+--
+-- : out omni-agent-telegram-incoming-queue
+-- : dep stm
+module Omni.Agent.Telegram.IncomingQueue
+ ( -- * Types
+ IncomingQueues,
+ ChatQueue (..),
+ QueuedMsg (..),
+
+ -- * Queue Operations
+ newIncomingQueues,
+ enqueueIncoming,
+
+ -- * Batch Processing
+ flushReadyBatches,
+ startIncomingBatcher,
+
+ -- * Batch Formatting
+ formatBatch,
+
+ -- * Configuration
+ defaultBatchWindowSeconds,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Control.Concurrent.STM (TVar, newTVarIO, readTVar, readTVarIO, writeTVar)
+import qualified Data.Map.Strict as Map
+import qualified Data.Text as Text
+import Data.Time (NominalDiffTime, UTCTime, addUTCTime, getCurrentTime)
+import qualified Omni.Agent.Telegram.Types as Types
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram.IncomingQueue"
+ [ Test.unit "newIncomingQueues creates empty map" <| do
+ queues <- newIncomingQueues
+ qs <- readTVarIO queues
+ Map.null qs Test.@=? True,
+ Test.unit "formatBatch single message no attribution in DM" <| do
+ now <- getCurrentTime
+ let msg = mkTestMessage 123 456 Types.Private "hello"
+ qmsg = QueuedMsg now msg
+ result = formatBatch [qmsg]
+ result Test.@=? "hello",
+ Test.unit "formatBatch multiple messages numbered" <| do
+ now <- getCurrentTime
+ let msg1 = mkTestMessage 123 456 Types.Private "first"
+ msg2 = mkTestMessage 123 456 Types.Private "second"
+ qmsgs = [QueuedMsg now msg1, QueuedMsg now msg2]
+ result = formatBatch qmsgs
+ ("1. first" `Text.isInfixOf` result) Test.@=? True
+ ("2. second" `Text.isInfixOf` result) Test.@=? True,
+ Test.unit "formatBatch group chat has sender attribution" <| do
+ now <- getCurrentTime
+ let msg = mkTestMessage 123 456 Types.Group "hello"
+ qmsg = QueuedMsg now msg
+ result = formatBatch [qmsg]
+ ("[Test] hello" `Text.isInfixOf` result) Test.@=? True,
+ Test.unit "enqueueIncoming adds to queue" <| do
+ queues <- newIncomingQueues
+ let msg = mkTestMessage 123 456 Types.Private "test"
+ enqueueIncoming queues 1.0 msg
+ qs <- readTVarIO queues
+ Map.member 123 qs Test.@=? True,
+ Test.unit "flushReadyBatches returns due batches" <| do
+ queues <- newIncomingQueues
+ t <- getCurrentTime
+ let msg = mkTestMessage 123 456 Types.Private "test"
+ atomically <| do
+ let qmsg = QueuedMsg t msg
+ queue = ChatQueue [qmsg] t
+ writeTVar queues (Map.singleton 123 queue)
+ threadDelay 10000
+ batches <- flushReadyBatches queues
+ length batches Test.@=? 1
+ ]
+
+mkTestMessage :: Int -> Int -> Types.ChatType -> Text -> Types.TelegramMessage
+mkTestMessage chatId usrId chatType txt =
+ Types.TelegramMessage
+ { Types.tmUpdateId = 1,
+ Types.tmChatId = chatId,
+ Types.tmChatType = chatType,
+ Types.tmUserId = usrId,
+ Types.tmUserFirstName = "Test",
+ Types.tmUserLastName = Nothing,
+ Types.tmText = txt,
+ Types.tmDocument = Nothing,
+ Types.tmPhoto = Nothing,
+ Types.tmVoice = Nothing,
+ Types.tmReplyTo = Nothing,
+ Types.tmThreadId = Nothing
+ }
+
+data QueuedMsg = QueuedMsg
+ { qmReceivedAt :: UTCTime,
+ qmMsg :: Types.TelegramMessage
+ }
+ deriving (Show, Eq)
+
+data ChatQueue = ChatQueue
+ { cqMessages :: [QueuedMsg],
+ cqDeadline :: UTCTime
+ }
+ deriving (Show, Eq)
+
+type ChatId = Int
+
+type IncomingQueues = TVar (Map.Map ChatId ChatQueue)
+
+defaultBatchWindowSeconds :: NominalDiffTime
+defaultBatchWindowSeconds = 3.0
+
+newIncomingQueues :: IO IncomingQueues
+newIncomingQueues = newTVarIO Map.empty
+
+enqueueIncoming :: IncomingQueues -> NominalDiffTime -> Types.TelegramMessage -> IO ()
+enqueueIncoming queuesVar windowSeconds msg = do
+ now <- getCurrentTime
+ let chatId = Types.tmChatId msg
+ newDeadline = addUTCTime windowSeconds now
+ qMsg = QueuedMsg now msg
+ atomically <| do
+ qs <- readTVar queuesVar
+ let qs' = Map.alter (insertOrUpdate newDeadline qMsg) chatId qs
+ writeTVar queuesVar qs'
+ where
+ insertOrUpdate deadline qMsg Nothing =
+ Just ChatQueue {cqMessages = [qMsg], cqDeadline = deadline}
+ insertOrUpdate deadline qMsg (Just q) =
+ Just
+ q
+ { cqMessages = cqMessages q <> [qMsg],
+ cqDeadline = deadline
+ }
+
+flushReadyBatches :: IncomingQueues -> IO [(ChatId, [QueuedMsg])]
+flushReadyBatches queuesVar = do
+ now <- getCurrentTime
+ atomically <| do
+ qs <- readTVar queuesVar
+ let (ready, pending) = Map.partition (\q -> cqDeadline q <= now) qs
+ batches =
+ [ (chatId, cqMessages q)
+ | (chatId, q) <- Map.toList ready
+ ]
+ writeTVar queuesVar pending
+ pure batches
+
+startIncomingBatcher ::
+ IncomingQueues ->
+ (Types.TelegramMessage -> Text -> IO ()) ->
+ IO ()
+startIncomingBatcher queuesVar processFn =
+ void <| forkIO <| forever <| do
+ batches <- flushReadyBatches queuesVar
+ forM_ batches <| \(_chatId, qmsgs) -> do
+ case qmsgs of
+ [] -> pure ()
+ (firstQm : _) -> do
+ let baseMsg = qmMsg firstQm
+ batchedTxt = formatBatch qmsgs
+ processFn baseMsg batchedTxt
+ threadDelay 200000
+
+formatBatch :: [QueuedMsg] -> Text
+formatBatch [] = ""
+formatBatch [single] = formatOne False 1 single
+formatBatch qmsgs = Text.intercalate "\n\n" (zipWith (formatOne True) [1 ..] qmsgs)
+
+formatOne :: Bool -> Int -> QueuedMsg -> Text
+formatOne numbered idx (QueuedMsg _ msg) =
+ let baseText = Types.tmText msg
+ sender = senderLabel msg
+ media = mediaSuffix msg
+ reply = replySuffix msg
+ prefix =
+ if numbered
+ then tshow idx <> ". "
+ else ""
+ in Text.concat [prefix, sender, baseText, reply, media]
+
+senderLabel :: Types.TelegramMessage -> Text
+senderLabel msg
+ | Types.isGroupChat msg =
+ let firstName = Types.tmUserFirstName msg
+ lastName = fromMaybe "" (Types.tmUserLastName msg)
+ name = Text.strip (firstName <> " " <> lastName)
+ in "[" <> name <> "] "
+ | otherwise = ""
+
+mediaSuffix :: Types.TelegramMessage -> Text
+mediaSuffix msg =
+ Text.concat
+ <| [ " [document: " <> fromMaybe "unnamed" (Types.tdFileName d) <> "]"
+ | Just d <- [Types.tmDocument msg]
+ ]
+ <> [" [photo attached]" | isJust (Types.tmPhoto msg)]
+ <> [" [voice message]" | isJust (Types.tmVoice msg)]
+
+replySuffix :: Types.TelegramMessage -> Text
+replySuffix msg =
+ case Types.tmReplyTo msg of
+ Nothing -> ""
+ Just r ->
+ let fn = fromMaybe "someone" (Types.trFromFirstName r)
+ ln = fromMaybe "" (Types.trFromLastName r)
+ name = Text.strip (fn <> " " <> ln)
+ snippet = Text.take 80 (Types.trText r)
+ in " (replying to " <> name <> ": \"" <> snippet <> "\")"
diff --git a/Omni/Agent/Telegram/Media.hs b/Omni/Agent/Telegram/Media.hs
new file mode 100644
index 0000000..47fbf91
--- /dev/null
+++ b/Omni/Agent/Telegram/Media.hs
@@ -0,0 +1,327 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Media Handling - File downloads, image analysis, voice transcription.
+--
+-- : out omni-agent-telegram-media
+-- : dep aeson
+-- : dep http-conduit
+-- : dep base64-bytestring
+module Omni.Agent.Telegram.Media
+ ( -- * File Downloads
+ getFile,
+ downloadFile,
+ downloadFileBytes,
+ downloadPhoto,
+ downloadVoice,
+ downloadAndExtractPdf,
+
+ -- * Multimodal Processing
+ analyzeImage,
+ transcribeVoice,
+
+ -- * Size Limits
+ maxImageBytes,
+ maxVoiceBytes,
+ checkPhotoSize,
+ checkVoiceSize,
+
+ -- * HTTP Utilities
+ httpGetBytes,
+ httpPostJson,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString as BS
+import qualified Data.ByteString.Base64.Lazy as B64
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.CaseInsensitive as CI
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Data.Text.Lazy as TL
+import qualified Data.Text.Lazy.Encoding as TLE
+import qualified Network.HTTP.Client as HTTPClient
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Telegram.Types as Types
+import qualified Omni.Agent.Tools.Pdf as Pdf
+import qualified Omni.Test as Test
+import System.Environment (lookupEnv)
+import System.IO (hClose)
+import System.IO.Temp (withSystemTempFile)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram.Media"
+ [ Test.unit "maxImageBytes is 10MB" <| do
+ maxImageBytes Test.@=? 10_000_000,
+ Test.unit "maxVoiceBytes is 20MB" <| do
+ maxVoiceBytes Test.@=? 20_000_000,
+ Test.unit "checkPhotoSize accepts small photos" <| do
+ let photo = Types.TelegramPhoto "id" 800 600 (Just 100_000)
+ checkPhotoSize photo Test.@=? Right (),
+ Test.unit "checkPhotoSize rejects large photos" <| do
+ let photo = Types.TelegramPhoto "id" 800 600 (Just 15_000_000)
+ case checkPhotoSize photo of
+ Left _ -> pure ()
+ Right _ -> Test.assertFailure "Expected rejection",
+ Test.unit "checkVoiceSize accepts small voice" <| do
+ let voice = Types.TelegramVoice "id" 60 (Just "audio/ogg") (Just 500_000)
+ checkVoiceSize voice Test.@=? Right (),
+ Test.unit "checkVoiceSize rejects large voice" <| do
+ let voice = Types.TelegramVoice "id" 60 (Just "audio/ogg") (Just 25_000_000)
+ case checkVoiceSize voice of
+ Left _ -> pure ()
+ Right _ -> Test.assertFailure "Expected rejection"
+ ]
+
+maxImageBytes :: Int
+maxImageBytes = 10_000_000
+
+maxVoiceBytes :: Int
+maxVoiceBytes = 20_000_000
+
+checkPhotoSize :: Types.TelegramPhoto -> Either Text ()
+checkPhotoSize photo =
+ case Types.tpFileSize photo of
+ Just size
+ | size > maxImageBytes ->
+ Left <| "image too large (" <> tshow (size `div` 1_000_000) <> "MB), max " <> tshow (maxImageBytes `div` 1_000_000) <> "MB"
+ _ -> Right ()
+
+checkVoiceSize :: Types.TelegramVoice -> Either Text ()
+checkVoiceSize voice =
+ case Types.tvFileSize voice of
+ Just size
+ | size > maxVoiceBytes ->
+ Left <| "voice message too large (" <> tshow (size `div` 1_000_000) <> "MB), max " <> tshow (maxVoiceBytes `div` 1_000_000) <> "MB"
+ _ -> Right ()
+
+httpGetBytes :: String -> IO (Either Text BL.ByteString)
+httpGetBytes url = do
+ result <-
+ try <| do
+ req <- HTTP.parseRequest url
+ resp <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode resp
+ if status >= 200 && status < 300
+ then pure (Right (HTTP.getResponseBody resp))
+ else pure (Left ("HTTP " <> tshow status))
+ case result of
+ Left (e :: SomeException) -> pure (Left ("HTTP error: " <> tshow e))
+ Right r -> pure r
+
+httpPostJson :: String -> [(ByteString, ByteString)] -> Aeson.Value -> Int -> IO (Either Text BL.ByteString)
+httpPostJson url extraHeaders body timeoutSecs = do
+ result <-
+ try <| do
+ req0 <- HTTP.parseRequest url
+ let baseReq =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Content-Type" ["application/json"]
+ <| HTTP.setRequestBodyLBS (Aeson.encode body)
+ <| HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro (timeoutSecs * 1000000))
+ <| req0
+ req = foldr addHeader baseReq extraHeaders
+ addHeader (name, value) = HTTP.addRequestHeader (CI.mk name) value
+ resp <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode resp
+ if status >= 200 && status < 300
+ then pure (Right (HTTP.getResponseBody resp))
+ else pure (Left ("HTTP " <> tshow status <> ": " <> shortBody resp))
+ case result of
+ Left (e :: SomeException) -> pure (Left ("HTTP error: " <> tshow e))
+ Right r -> pure r
+ where
+ shortBody r =
+ let b = BL.toStrict (HTTP.getResponseBody r)
+ in TE.decodeUtf8 (BS.take 200 b)
+
+getFile :: Types.TelegramConfig -> Text -> IO (Either Text Text)
+getFile cfg fileId = do
+ let url =
+ Text.unpack (Types.tgApiBaseUrl cfg)
+ <> "/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/getFile?file_id="
+ <> Text.unpack fileId
+ result <- httpGetBytes url
+ case result of
+ Left err -> pure (Left err)
+ Right body ->
+ case Aeson.decode body of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "result" obj of
+ Just (Aeson.Object resultObj) -> case KeyMap.lookup "file_path" resultObj of
+ Just (Aeson.String path) -> pure (Right path)
+ _ -> pure (Left "No file_path in response")
+ _ -> pure (Left "No result in response")
+ _ -> pure (Left "Failed to parse getFile response")
+
+downloadFileBytes :: Types.TelegramConfig -> Text -> IO (Either Text BL.ByteString)
+downloadFileBytes cfg filePath = do
+ let url =
+ "https://api.telegram.org/file/bot"
+ <> Text.unpack (Types.tgBotToken cfg)
+ <> "/"
+ <> Text.unpack filePath
+ httpGetBytes url
+
+downloadFile :: Types.TelegramConfig -> Text -> FilePath -> IO (Either Text ())
+downloadFile cfg filePath destPath = do
+ result <- downloadFileBytes cfg filePath
+ case result of
+ Left err -> pure (Left err)
+ Right bytes -> do
+ BL.writeFile destPath bytes
+ pure (Right ())
+
+downloadPhoto :: Types.TelegramConfig -> Types.TelegramPhoto -> IO (Either Text BL.ByteString)
+downloadPhoto cfg photo = do
+ filePathResult <- getFile cfg (Types.tpFileId photo)
+ case filePathResult of
+ Left err -> pure (Left err)
+ Right filePath -> downloadFileBytes cfg filePath
+
+downloadVoice :: Types.TelegramConfig -> Types.TelegramVoice -> IO (Either Text BL.ByteString)
+downloadVoice cfg voice = do
+ filePathResult <- getFile cfg (Types.tvFileId voice)
+ case filePathResult of
+ Left err -> pure (Left err)
+ Right filePath -> downloadFileBytes cfg filePath
+
+downloadAndExtractPdf :: Types.TelegramConfig -> Text -> IO (Either Text Text)
+downloadAndExtractPdf cfg fileId = do
+ filePathResult <- getFile cfg fileId
+ case filePathResult of
+ Left err -> pure (Left err)
+ Right filePath ->
+ withSystemTempFile "telegram_pdf.pdf" <| \tmpPath tmpHandle -> do
+ hClose tmpHandle
+ downloadResult <- downloadFile cfg filePath tmpPath
+ case downloadResult of
+ Left err -> pure (Left err)
+ Right () -> Pdf.extractPdfText tmpPath
+
+parseOpenRouterResponse :: BL.ByteString -> Either Text Text
+parseOpenRouterResponse body =
+ case Aeson.decode body of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "choices" obj of
+ Just (Aeson.Array choices) | not (null choices) ->
+ case toList choices of
+ (Aeson.Object choice : _) -> case KeyMap.lookup "message" choice of
+ Just (Aeson.Object msg) -> case KeyMap.lookup "content" msg of
+ Just (Aeson.String content) -> Right content
+ Just Aeson.Null -> Left "No content in response"
+ _ -> Left "Unexpected content type in response"
+ _ -> Left "No message in choice"
+ _ -> Left "Empty choices array"
+ _ -> Left "No choices in response"
+ _ -> Left "Failed to parse response"
+
+analyzeImage :: Text -> BL.ByteString -> Text -> IO (Either Text Text)
+analyzeImage apiKey imageBytes userPrompt = do
+ let base64Data = TL.toStrict (TLE.decodeUtf8 (B64.encode imageBytes))
+ dataUrl = "data:image/jpeg;base64," <> base64Data
+ prompt =
+ if Text.null userPrompt
+ then "describe this image objectively in third person. do not use first person pronouns like 'I can see'. just describe what is shown."
+ else userPrompt <> "\n\n(describe objectively in third person, no first person pronouns)"
+ body =
+ Aeson.object
+ [ "model" .= ("anthropic/claude-sonnet-4.5" :: Text),
+ "messages"
+ .= [ Aeson.object
+ [ "role" .= ("user" :: Text),
+ "content"
+ .= [ Aeson.object
+ [ "type" .= ("text" :: Text),
+ "text" .= prompt
+ ],
+ Aeson.object
+ [ "type" .= ("image_url" :: Text),
+ "image_url"
+ .= Aeson.object
+ [ "url" .= dataUrl
+ ]
+ ]
+ ]
+ ]
+ ]
+ ]
+ headers =
+ [ ("Authorization", "Bearer " <> encodeUtf8 apiKey),
+ ("HTTP-Referer", "https://omni.dev"),
+ ("X-Title", "Omni Agent")
+ ]
+ result <- httpPostJson "https://openrouter.ai/api/v1/chat/completions" headers body 120
+ case result of
+ Left err -> pure (Left ("Vision API error: " <> err))
+ Right respBody -> pure (first ("Vision API: " <>) (parseOpenRouterResponse respBody))
+
+transcribeVoice :: Text -> BL.ByteString -> IO (Either Text Text)
+transcribeVoice _unusedApiKey audioBytes = do
+ maybeKey <- lookupEnv "OPENAI_API_KEY"
+ case maybeKey of
+ Nothing -> pure (Left "OPENAI_API_KEY not set - required for voice transcription")
+ Just key -> transcribeWithWhisper (Text.pack key) audioBytes
+
+transcribeWithWhisper :: Text -> BL.ByteString -> IO (Either Text Text)
+transcribeWithWhisper apiKey audioBytes = do
+ result <-
+ try <| do
+ req0 <- HTTP.parseRequest "https://api.openai.com/v1/audio/transcriptions"
+ let boundary = "----WebKitFormBoundary7MA4YWxkTrZu0gW"
+ body = buildMultipartBody boundary audioBytes
+ req =
+ HTTP.setRequestMethod "POST"
+ <| HTTP.setRequestHeader "Authorization" ["Bearer " <> encodeUtf8 apiKey]
+ <| HTTP.setRequestHeader "Content-Type" ["multipart/form-data; boundary=" <> boundary]
+ <| HTTP.setRequestBodyLBS body
+ <| HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro (120 * 1000000))
+ <| req0
+ resp <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode resp
+ if status >= 200 && status < 300
+ then pure (Right (HTTP.getResponseBody resp))
+ else pure (Left ("HTTP " <> tshow status <> ": " <> TL.toStrict (TLE.decodeUtf8 (BL.take 500 (HTTP.getResponseBody resp)))))
+ case result of
+ Left (e :: SomeException) -> pure (Left ("Whisper API error: " <> tshow e))
+ Right (Left err) -> pure (Left ("Whisper API error: " <> err))
+ Right (Right respBody) ->
+ case Aeson.decode respBody of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "text" obj of
+ Just (Aeson.String transcription) -> pure (Right transcription)
+ _ -> pure (Left "No 'text' field in Whisper response")
+ _ -> pure (Left "Failed to parse Whisper response")
+
+buildMultipartBody :: ByteString -> BL.ByteString -> BL.ByteString
+buildMultipartBody boundary audioBytes =
+ BL.concat
+ [ "--",
+ BL.fromStrict boundary,
+ "\r\n",
+ "Content-Disposition: form-data; name=\"file\"; filename=\"audio.ogg\"\r\n",
+ "Content-Type: audio/ogg\r\n\r\n",
+ audioBytes,
+ "\r\n",
+ "--",
+ BL.fromStrict boundary,
+ "\r\n",
+ "Content-Disposition: form-data; name=\"model\"\r\n\r\n",
+ "whisper-1\r\n",
+ "--",
+ BL.fromStrict boundary,
+ "--\r\n"
+ ]
diff --git a/Omni/Agent/Telegram/Messages.hs b/Omni/Agent/Telegram/Messages.hs
new file mode 100644
index 0000000..eab9668
--- /dev/null
+++ b/Omni/Agent/Telegram/Messages.hs
@@ -0,0 +1,551 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Message Queue - Unified async message delivery.
+--
+-- All outbound Telegram messages go through this queue, enabling:
+-- - Immediate sends (sub-second latency via 1s polling)
+-- - Scheduled/delayed sends (up to 30 days)
+-- - Unified retry handling and error logging
+--
+-- : out omni-agent-telegram-messages
+-- : dep aeson
+-- : dep sqlite-simple
+-- : dep uuid
+module Omni.Agent.Telegram.Messages
+ ( -- * Types
+ ScheduledMessage (..),
+ MessageStatus (..),
+
+ -- * Database
+ initScheduledMessagesTable,
+
+ -- * Queueing
+ queueMessage,
+ enqueueImmediate,
+ enqueueDelayed,
+
+ -- * Fetching
+ fetchDueMessages,
+ listPendingMessages,
+ getMessageById,
+
+ -- * Status Updates
+ markSending,
+ markSent,
+ markFailed,
+ cancelMessage,
+
+ -- * Dispatch Loop
+ messageDispatchLoop,
+
+ -- * Agent Tools
+ sendMessageTool,
+ listPendingMessagesTool,
+ cancelMessageTool,
+
+ -- * Constants
+ maxDelaySeconds,
+ maxRetries,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.Text as Text
+import Data.Time (NominalDiffTime, UTCTime, addUTCTime, getCurrentTime)
+import Data.Time.Format (defaultTimeLocale, formatTime)
+import qualified Data.UUID as UUID
+import qualified Data.UUID.V4 as UUID
+import qualified Database.SQLite.Simple as SQL
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Memory as Memory
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram.Messages"
+ [ Test.unit "initScheduledMessagesTable is idempotent" <| do
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ initScheduledMessagesTable conn
+ pure (),
+ Test.unit "MessageStatus JSON roundtrip" <| do
+ let statuses = [Pending, Sending, Sent, Failed, Cancelled]
+ forM_ statuses <| \s ->
+ case Aeson.decode (Aeson.encode s) of
+ Nothing -> Test.assertFailure ("Failed to decode MessageStatus: " <> show s)
+ Just decoded -> decoded Test.@=? s,
+ Test.unit "maxDelaySeconds is 30 days" <| do
+ maxDelaySeconds Test.@=? (30 * 24 * 60 * 60)
+ ]
+
+data MessageStatus
+ = Pending
+ | Sending
+ | Sent
+ | Failed
+ | Cancelled
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON MessageStatus where
+ toJSON Pending = Aeson.String "pending"
+ toJSON Sending = Aeson.String "sending"
+ toJSON Sent = Aeson.String "sent"
+ toJSON Failed = Aeson.String "failed"
+ toJSON Cancelled = Aeson.String "cancelled"
+
+instance Aeson.FromJSON MessageStatus where
+ parseJSON = Aeson.withText "MessageStatus" parseStatus
+ where
+ parseStatus "pending" = pure Pending
+ parseStatus "sending" = pure Sending
+ parseStatus "sent" = pure Sent
+ parseStatus "failed" = pure Failed
+ parseStatus "cancelled" = pure Cancelled
+ parseStatus _ = empty
+
+textToStatus :: Text -> Maybe MessageStatus
+textToStatus "pending" = Just Pending
+textToStatus "sending" = Just Sending
+textToStatus "sent" = Just Sent
+textToStatus "failed" = Just Failed
+textToStatus "cancelled" = Just Cancelled
+textToStatus _ = Nothing
+
+data ScheduledMessage = ScheduledMessage
+ { smId :: Text,
+ smUserId :: Maybe Text,
+ smChatId :: Int,
+ smThreadId :: Maybe Int,
+ smContent :: Text,
+ smSendAt :: UTCTime,
+ smCreatedAt :: UTCTime,
+ smStatus :: MessageStatus,
+ smRetryCount :: Int,
+ smLastAttemptAt :: Maybe UTCTime,
+ smLastError :: Maybe Text,
+ smMessageType :: Maybe Text,
+ smCorrelationId :: Maybe Text,
+ smTelegramMessageId :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ScheduledMessage where
+ toJSON m =
+ Aeson.object
+ [ "id" .= smId m,
+ "user_id" .= smUserId m,
+ "chat_id" .= smChatId m,
+ "thread_id" .= smThreadId m,
+ "content" .= smContent m,
+ "send_at" .= smSendAt m,
+ "created_at" .= smCreatedAt m,
+ "status" .= smStatus m,
+ "retry_count" .= smRetryCount m,
+ "last_attempt_at" .= smLastAttemptAt m,
+ "last_error" .= smLastError m,
+ "message_type" .= smMessageType m,
+ "correlation_id" .= smCorrelationId m,
+ "telegram_message_id" .= smTelegramMessageId m
+ ]
+
+instance SQL.FromRow ScheduledMessage where
+ fromRow = do
+ id' <- SQL.field
+ userId <- SQL.field
+ chatId <- SQL.field
+ threadId <- SQL.field
+ content <- SQL.field
+ sendAt <- SQL.field
+ createdAt <- SQL.field
+ statusText <- SQL.field
+ retryCount <- SQL.field
+ lastAttemptAt <- SQL.field
+ lastError <- SQL.field
+ messageType <- SQL.field
+ correlationId <- SQL.field
+ telegramMessageId <- SQL.field
+ let status = fromMaybe Pending (textToStatus (statusText :: Text))
+ pure
+ ScheduledMessage
+ { smId = id',
+ smUserId = userId,
+ smChatId = chatId,
+ smThreadId = threadId,
+ smContent = content,
+ smSendAt = sendAt,
+ smCreatedAt = createdAt,
+ smStatus = status,
+ smRetryCount = retryCount,
+ smLastAttemptAt = lastAttemptAt,
+ smLastError = lastError,
+ smMessageType = messageType,
+ smCorrelationId = correlationId,
+ smTelegramMessageId = telegramMessageId
+ }
+
+maxDelaySeconds :: Int
+maxDelaySeconds = 30 * 24 * 60 * 60
+
+maxRetries :: Int
+maxRetries = 5
+
+initScheduledMessagesTable :: SQL.Connection -> IO ()
+initScheduledMessagesTable conn = do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS scheduled_messages (\
+ \ id TEXT PRIMARY KEY,\
+ \ user_id TEXT,\
+ \ chat_id INTEGER NOT NULL,\
+ \ thread_id INTEGER,\
+ \ content TEXT NOT NULL,\
+ \ send_at TIMESTAMP NOT NULL,\
+ \ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\
+ \ status TEXT NOT NULL DEFAULT 'pending',\
+ \ retry_count INTEGER NOT NULL DEFAULT 0,\
+ \ last_attempt_at TIMESTAMP,\
+ \ last_error TEXT,\
+ \ message_type TEXT,\
+ \ correlation_id TEXT,\
+ \ telegram_message_id INTEGER\
+ \)"
+ migrateAddThreadId conn
+
+migrateAddThreadId :: SQL.Connection -> IO ()
+migrateAddThreadId conn = do
+ result <- try @SomeException <| SQL.execute_ conn "ALTER TABLE scheduled_messages ADD COLUMN thread_id INTEGER"
+ case result of
+ Left _ -> pure ()
+ Right () -> pure ()
+
+queueMessage ::
+ Maybe Text ->
+ Int ->
+ Maybe Int ->
+ Text ->
+ UTCTime ->
+ Maybe Text ->
+ Maybe Text ->
+ IO Text
+queueMessage mUserId chatId mThreadId content sendAt msgType correlationId = do
+ uuid <- UUID.nextRandom
+ now <- getCurrentTime
+ let msgId = UUID.toText uuid
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ SQL.execute
+ conn
+ "INSERT INTO scheduled_messages \
+ \(id, user_id, chat_id, thread_id, content, send_at, created_at, status, retry_count, message_type, correlation_id) \
+ \VALUES (?, ?, ?, ?, ?, ?, ?, 'pending', 0, ?, ?)"
+ (msgId, mUserId, chatId, mThreadId, content, sendAt, now, msgType, correlationId)
+ pure msgId
+
+enqueueImmediate ::
+ Maybe Text ->
+ Int ->
+ Maybe Int ->
+ Text ->
+ Maybe Text ->
+ Maybe Text ->
+ IO Text
+enqueueImmediate mUserId chatId mThreadId content msgType correlationId = do
+ now <- getCurrentTime
+ queueMessage mUserId chatId mThreadId content now msgType correlationId
+
+enqueueDelayed ::
+ Maybe Text ->
+ Int ->
+ Maybe Int ->
+ Text ->
+ NominalDiffTime ->
+ Maybe Text ->
+ Maybe Text ->
+ IO Text
+enqueueDelayed mUserId chatId mThreadId content delay msgType correlationId = do
+ now <- getCurrentTime
+ let sendAt = addUTCTime delay now
+ queueMessage mUserId chatId mThreadId content sendAt msgType correlationId
+
+fetchDueMessages :: UTCTime -> Int -> IO [ScheduledMessage]
+fetchDueMessages now batchSize =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, thread_id, content, send_at, created_at, status, \
+ \retry_count, last_attempt_at, last_error, message_type, correlation_id, telegram_message_id \
+ \FROM scheduled_messages \
+ \WHERE status = 'pending' AND send_at <= ? \
+ \ORDER BY send_at ASC \
+ \LIMIT ?"
+ (now, batchSize)
+
+listPendingMessages :: Maybe Text -> Int -> IO [ScheduledMessage]
+listPendingMessages mUserId chatId =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ case mUserId of
+ Just uid ->
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, thread_id, content, send_at, created_at, status, \
+ \retry_count, last_attempt_at, last_error, message_type, correlation_id, telegram_message_id \
+ \FROM scheduled_messages \
+ \WHERE user_id = ? AND chat_id = ? AND status = 'pending' AND send_at > datetime('now') \
+ \ORDER BY send_at ASC"
+ (uid, chatId)
+ Nothing ->
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, thread_id, content, send_at, created_at, status, \
+ \retry_count, last_attempt_at, last_error, message_type, correlation_id, telegram_message_id \
+ \FROM scheduled_messages \
+ \WHERE chat_id = ? AND status = 'pending' AND send_at > datetime('now') \
+ \ORDER BY send_at ASC"
+ (SQL.Only chatId)
+
+getMessageById :: Text -> IO (Maybe ScheduledMessage)
+getMessageById msgId =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ results <-
+ SQL.query
+ conn
+ "SELECT id, user_id, chat_id, thread_id, content, send_at, created_at, status, \
+ \retry_count, last_attempt_at, last_error, message_type, correlation_id, telegram_message_id \
+ \FROM scheduled_messages \
+ \WHERE id = ?"
+ (SQL.Only msgId)
+ pure (listToMaybe results)
+
+markSending :: Text -> UTCTime -> IO ()
+markSending msgId now =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ SQL.execute
+ conn
+ "UPDATE scheduled_messages SET status = 'sending', last_attempt_at = ? WHERE id = ?"
+ (now, msgId)
+
+markSent :: Text -> Maybe Int -> UTCTime -> IO ()
+markSent msgId telegramMsgId now =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ SQL.execute
+ conn
+ "UPDATE scheduled_messages SET status = 'sent', telegram_message_id = ?, last_attempt_at = ? WHERE id = ?"
+ (telegramMsgId, now, msgId)
+
+markFailed :: Text -> UTCTime -> Text -> IO ()
+markFailed msgId now errorMsg =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ results <-
+ SQL.query
+ conn
+ "SELECT retry_count FROM scheduled_messages WHERE id = ?"
+ (SQL.Only msgId) ::
+ IO [SQL.Only Int]
+ case results of
+ [SQL.Only retryCount] ->
+ if retryCount < maxRetries
+ then do
+ let backoffSeconds = 2 ^ retryCount :: Int
+ nextAttempt = addUTCTime (fromIntegral backoffSeconds) now
+ SQL.execute
+ conn
+ "UPDATE scheduled_messages SET \
+ \status = 'pending', \
+ \retry_count = retry_count + 1, \
+ \last_attempt_at = ?, \
+ \last_error = ?, \
+ \send_at = ? \
+ \WHERE id = ?"
+ (now, errorMsg, nextAttempt, msgId)
+ putText <| "Message " <> msgId <> " failed, retry " <> tshow (retryCount + 1) <> " in " <> tshow backoffSeconds <> "s"
+ else do
+ SQL.execute
+ conn
+ "UPDATE scheduled_messages SET status = 'failed', last_attempt_at = ?, last_error = ? WHERE id = ?"
+ (now, errorMsg, msgId)
+ putText <| "Message " <> msgId <> " permanently failed after " <> tshow maxRetries <> " retries"
+ _ -> pure ()
+
+cancelMessage :: Text -> IO Bool
+cancelMessage msgId =
+ Memory.withMemoryDb <| \conn -> do
+ initScheduledMessagesTable conn
+ SQL.execute
+ conn
+ "UPDATE scheduled_messages SET status = 'cancelled' WHERE id = ? AND status = 'pending'"
+ (SQL.Only msgId)
+ changes <- SQL.changes conn
+ pure (changes > 0)
+
+messageDispatchLoop :: (Int -> Maybe Int -> Text -> IO (Maybe Int)) -> IO ()
+messageDispatchLoop sendFn =
+ forever <| do
+ now <- getCurrentTime
+ due <- fetchDueMessages now 10
+ if null due
+ then threadDelay 1000000
+ else do
+ forM_ due <| \m -> dispatchOne sendFn m
+ when (length due < 10) <| threadDelay 1000000
+
+dispatchOne :: (Int -> Maybe Int -> Text -> IO (Maybe Int)) -> ScheduledMessage -> IO ()
+dispatchOne sendFn m = do
+ now <- getCurrentTime
+ markSending (smId m) now
+ result <- try (sendFn (smChatId m) (smThreadId m) (smContent m))
+ case result of
+ Left (e :: SomeException) -> do
+ let err = "Exception sending Telegram message: " <> tshow e
+ markFailed (smId m) now err
+ Right Nothing -> do
+ now' <- getCurrentTime
+ markSent (smId m) Nothing now'
+ putText <| "Sent message " <> smId m <> " (no message_id returned)"
+ Right (Just telegramMsgId) -> do
+ now' <- getCurrentTime
+ markSent (smId m) (Just telegramMsgId) now'
+ putText <| "Sent message " <> smId m <> " -> telegram_id " <> tshow telegramMsgId
+
+sendMessageTool :: Text -> Int -> Maybe Int -> Engine.Tool
+sendMessageTool uid chatId mThreadId =
+ Engine.Tool
+ { Engine.toolName = "send_message",
+ Engine.toolDescription =
+ "Send a message to the user, optionally delayed. Use for reminders, follow-ups, or multi-part responses. "
+ <> "delay_seconds=0 sends immediately; max delay is 30 days (2592000 seconds). "
+ <> "Returns a message_id you can use to cancel the message before it's sent.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "text"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The message text to send (Telegram basic markdown supported)" :: Text)
+ ],
+ "delay_seconds"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "minimum" .= (0 :: Int),
+ "maximum" .= maxDelaySeconds,
+ "description" .= ("Seconds to wait before sending (0 or omit for immediate)" :: Text)
+ ]
+ ],
+ "required" .= (["text"] :: [Text])
+ ],
+ Engine.toolExecute = \argsVal -> do
+ case argsVal of
+ Aeson.Object obj -> do
+ let textM = case KeyMap.lookup "text" obj of
+ Just (Aeson.String t) -> Just t
+ _ -> Nothing
+ delaySeconds = case KeyMap.lookup "delay_seconds" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ case textM of
+ Nothing ->
+ pure <| Aeson.object ["status" .= ("error" :: Text), "error" .= ("missing 'text' field" :: Text)]
+ Just text -> do
+ let delay = fromIntegral (fromMaybe 0 delaySeconds)
+ now <- getCurrentTime
+ let sendAt = addUTCTime delay now
+ msgId <- queueMessage (Just uid) chatId mThreadId text sendAt (Just "agent_tool") Nothing
+ pure
+ <| Aeson.object
+ [ "status" .= ("queued" :: Text),
+ "message_id" .= msgId,
+ "scheduled_for" .= formatTime defaultTimeLocale "%Y-%m-%dT%H:%M:%SZ" sendAt,
+ "delay_seconds" .= fromMaybe 0 delaySeconds
+ ]
+ _ ->
+ pure <| Aeson.object ["status" .= ("error" :: Text), "error" .= ("invalid arguments" :: Text)]
+ }
+
+listPendingMessagesTool :: Text -> Int -> Engine.Tool
+listPendingMessagesTool uid chatId =
+ Engine.Tool
+ { Engine.toolName = "list_pending_messages",
+ Engine.toolDescription =
+ "List all pending scheduled messages that haven't been sent yet. "
+ <> "Shows message_id, content preview, and scheduled send time.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties" .= Aeson.object []
+ ],
+ Engine.toolExecute = \_ -> do
+ msgs <- listPendingMessages (Just uid) chatId
+ let formatted =
+ [ Aeson.object
+ [ "message_id" .= smId m,
+ "content_preview" .= Text.take 50 (smContent m),
+ "scheduled_for" .= formatTime defaultTimeLocale "%Y-%m-%dT%H:%M:%SZ" (smSendAt m),
+ "message_type" .= smMessageType m
+ ]
+ | m <- msgs
+ ]
+ pure
+ <| Aeson.object
+ [ "status" .= ("ok" :: Text),
+ "count" .= length msgs,
+ "messages" .= formatted
+ ]
+ }
+
+cancelMessageTool :: Engine.Tool
+cancelMessageTool =
+ Engine.Tool
+ { Engine.toolName = "cancel_message",
+ Engine.toolDescription =
+ "Cancel a pending scheduled message by its message_id. "
+ <> "Only works for messages that haven't been sent yet.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "message_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The message_id returned by send_message" :: Text)
+ ]
+ ],
+ "required" .= (["message_id"] :: [Text])
+ ],
+ Engine.toolExecute = \argsVal -> do
+ case argsVal of
+ Aeson.Object obj -> do
+ let msgIdM = case KeyMap.lookup "message_id" obj of
+ Just (Aeson.String t) -> Just t
+ _ -> Nothing
+ case msgIdM of
+ Nothing ->
+ pure <| Aeson.object ["status" .= ("error" :: Text), "error" .= ("missing 'message_id' field" :: Text)]
+ Just msgId -> do
+ success <- cancelMessage msgId
+ if success
+ then pure <| Aeson.object ["status" .= ("cancelled" :: Text), "message_id" .= msgId]
+ else pure <| Aeson.object ["status" .= ("not_found" :: Text), "message_id" .= msgId, "error" .= ("message not found or already sent" :: Text)]
+ _ ->
+ pure <| Aeson.object ["status" .= ("error" :: Text), "error" .= ("invalid arguments" :: Text)]
+ }
diff --git a/Omni/Agent/Telegram/Reminders.hs b/Omni/Agent/Telegram/Reminders.hs
new file mode 100644
index 0000000..88aab0a
--- /dev/null
+++ b/Omni/Agent/Telegram/Reminders.hs
@@ -0,0 +1,108 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Reminders - Background reminder loop and user chat persistence.
+--
+-- : out omni-agent-telegram-reminders
+-- : dep sqlite-simple
+module Omni.Agent.Telegram.Reminders
+ ( -- * User Chat Persistence
+ initUserChatsTable,
+ recordUserChat,
+ lookupChatId,
+
+ -- * Reminder Loop
+ reminderLoop,
+ checkAndSendReminders,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Time (getCurrentTime)
+import qualified Database.SQLite.Simple as SQL
+import qualified Omni.Agent.Memory as Memory
+import qualified Omni.Agent.Telegram.Messages as Messages
+import qualified Omni.Agent.Tools.Todos as Todos
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram.Reminders"
+ [ Test.unit "initUserChatsTable is idempotent" <| do
+ Memory.withMemoryDb <| \conn -> do
+ initUserChatsTable conn
+ initUserChatsTable conn
+ pure ()
+ ]
+
+initUserChatsTable :: SQL.Connection -> IO ()
+initUserChatsTable conn =
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS user_chats (\
+ \ user_id TEXT PRIMARY KEY,\
+ \ chat_id INTEGER NOT NULL,\
+ \ last_seen_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+
+recordUserChat :: Text -> Int -> IO ()
+recordUserChat uid chatId = do
+ now <- getCurrentTime
+ Memory.withMemoryDb <| \conn -> do
+ initUserChatsTable conn
+ SQL.execute
+ conn
+ "INSERT INTO user_chats (user_id, chat_id, last_seen_at) \
+ \VALUES (?, ?, ?) \
+ \ON CONFLICT(user_id) DO UPDATE SET \
+ \ chat_id = excluded.chat_id, \
+ \ last_seen_at = excluded.last_seen_at"
+ (uid, chatId, now)
+
+lookupChatId :: Text -> IO (Maybe Int)
+lookupChatId uid =
+ Memory.withMemoryDb <| \conn -> do
+ initUserChatsTable conn
+ rows <-
+ SQL.query
+ conn
+ "SELECT chat_id FROM user_chats WHERE user_id = ?"
+ (SQL.Only uid)
+ pure (listToMaybe (map SQL.fromOnly rows))
+
+reminderLoop :: IO ()
+reminderLoop =
+ forever <| do
+ threadDelay (5 * 60 * 1000000)
+ checkAndSendReminders
+
+checkAndSendReminders :: IO ()
+checkAndSendReminders = do
+ todos <- Todos.listTodosDueForReminder
+ forM_ todos <| \td -> do
+ mChatId <- lookupChatId (Todos.todoUserId td)
+ case mChatId of
+ Nothing -> pure ()
+ Just chatId -> do
+ let title = Todos.todoTitle td
+ uid = Todos.todoUserId td
+ dueStr = case Todos.todoDueDate td of
+ Just d -> " (due: " <> tshow d <> ")"
+ Nothing -> ""
+ msg =
+ "⏰ reminder: \""
+ <> title
+ <> "\""
+ <> dueStr
+ <> "\nreply when you finish and i'll mark it complete."
+ _ <- Messages.enqueueImmediate (Just uid) chatId Nothing msg (Just "reminder") Nothing
+ Todos.markReminderSent (Todos.todoId td)
+ putText <| "Queued reminder for todo " <> tshow (Todos.todoId td) <> " to chat " <> tshow chatId
diff --git a/Omni/Agent/Telegram/Types.hs b/Omni/Agent/Telegram/Types.hs
new file mode 100644
index 0000000..7a91df3
--- /dev/null
+++ b/Omni/Agent/Telegram/Types.hs
@@ -0,0 +1,654 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Telegram Bot Types - Data types and JSON parsing for Telegram API.
+--
+-- : out omni-agent-telegram-types
+-- : dep aeson
+module Omni.Agent.Telegram.Types
+ ( -- * Configuration
+ TelegramConfig (..),
+ defaultTelegramConfig,
+ isUserAllowed,
+
+ -- * Message Types
+ TelegramMessage (..),
+ TelegramUpdate (..),
+ TelegramDocument (..),
+ TelegramPhoto (..),
+ TelegramVoice (..),
+ TelegramReplyMessage (..),
+ BotAddedToGroup (..),
+ ChatType (..),
+
+ -- * Parsing
+ parseUpdate,
+ parseBotAddedToGroup,
+ parseDocument,
+ parseLargestPhoto,
+ parsePhotoSize,
+ parseVoice,
+ parseReplyMessage,
+
+ -- * Utilities
+ isPdf,
+ isSupportedVoiceFormat,
+ isGroupChat,
+ shouldRespondInGroup,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.Text as Text
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Telegram.Types"
+ [ Test.unit "TelegramConfig JSON roundtrip" <| do
+ let cfg =
+ TelegramConfig
+ { tgBotToken = "test-token",
+ tgPollingTimeout = 30,
+ tgApiBaseUrl = "https://api.telegram.org",
+ tgAllowedUserIds = [123, 456],
+ tgKagiApiKey = Just "kagi-key",
+ tgOpenRouterApiKey = "or-key"
+ }
+ case Aeson.decode (Aeson.encode cfg) of
+ Nothing -> Test.assertFailure "Failed to decode TelegramConfig"
+ Just decoded -> do
+ tgBotToken decoded Test.@=? "test-token"
+ tgAllowedUserIds decoded Test.@=? [123, 456]
+ tgKagiApiKey decoded Test.@=? Just "kagi-key",
+ Test.unit "isUserAllowed checks whitelist" <| do
+ let cfg = defaultTelegramConfig "token" [100, 200, 300] Nothing "key"
+ isUserAllowed cfg 100 Test.@=? True
+ isUserAllowed cfg 200 Test.@=? True
+ isUserAllowed cfg 999 Test.@=? False,
+ Test.unit "isUserAllowed allows all when empty" <| do
+ let cfg = defaultTelegramConfig "token" [] Nothing "key"
+ isUserAllowed cfg 12345 Test.@=? True,
+ Test.unit "TelegramMessage JSON roundtrip" <| do
+ let msg =
+ TelegramMessage
+ { tmUpdateId = 123,
+ tmChatId = 456,
+ tmChatType = Private,
+ tmUserId = 789,
+ tmUserFirstName = "Test",
+ tmUserLastName = Just "User",
+ tmText = "Hello bot",
+ tmDocument = Nothing,
+ tmPhoto = Nothing,
+ tmVoice = Nothing,
+ tmReplyTo = Nothing,
+ tmThreadId = Nothing
+ }
+ case Aeson.decode (Aeson.encode msg) of
+ Nothing -> Test.assertFailure "Failed to decode TelegramMessage"
+ Just decoded -> do
+ tmUpdateId decoded Test.@=? 123
+ tmText decoded Test.@=? "Hello bot",
+ Test.unit "parseUpdate extracts message correctly" <| do
+ let json =
+ Aeson.object
+ [ "update_id" .= (123 :: Int),
+ "message"
+ .= Aeson.object
+ [ "message_id" .= (1 :: Int),
+ "chat" .= Aeson.object ["id" .= (456 :: Int)],
+ "from"
+ .= Aeson.object
+ [ "id" .= (789 :: Int),
+ "first_name" .= ("Test" :: Text)
+ ],
+ "text" .= ("Hello" :: Text)
+ ]
+ ]
+ case parseUpdate json of
+ Nothing -> Test.assertFailure "Failed to parse update"
+ Just msg -> do
+ tmUpdateId msg Test.@=? 123
+ tmChatId msg Test.@=? 456
+ tmUserId msg Test.@=? 789
+ tmText msg Test.@=? "Hello"
+ tmDocument msg Test.@=? Nothing,
+ Test.unit "parseUpdate extracts document correctly" <| do
+ let json =
+ Aeson.object
+ [ "update_id" .= (124 :: Int),
+ "message"
+ .= Aeson.object
+ [ "message_id" .= (2 :: Int),
+ "chat" .= Aeson.object ["id" .= (456 :: Int)],
+ "from"
+ .= Aeson.object
+ [ "id" .= (789 :: Int),
+ "first_name" .= ("Test" :: Text)
+ ],
+ "caption" .= ("check this out" :: Text),
+ "document"
+ .= Aeson.object
+ [ "file_id" .= ("abc123" :: Text),
+ "file_name" .= ("test.pdf" :: Text),
+ "mime_type" .= ("application/pdf" :: Text),
+ "file_size" .= (12345 :: Int)
+ ]
+ ]
+ ]
+ case parseUpdate json of
+ Nothing -> Test.assertFailure "Failed to parse document update"
+ Just msg -> do
+ tmUpdateId msg Test.@=? 124
+ tmText msg Test.@=? "check this out"
+ case tmDocument msg of
+ Nothing -> Test.assertFailure "Expected document"
+ Just doc -> do
+ tdFileId doc Test.@=? "abc123"
+ tdFileName doc Test.@=? Just "test.pdf"
+ tdMimeType doc Test.@=? Just "application/pdf",
+ Test.unit "isPdf detects PDFs by mime type" <| do
+ let doc = TelegramDocument "id" (Just "doc.pdf") (Just "application/pdf") Nothing
+ isPdf doc Test.@=? True,
+ Test.unit "isPdf detects PDFs by filename" <| do
+ let doc = TelegramDocument "id" (Just "report.PDF") Nothing Nothing
+ isPdf doc Test.@=? True,
+ Test.unit "isPdf rejects non-PDFs" <| do
+ let doc = TelegramDocument "id" (Just "image.jpg") (Just "image/jpeg") Nothing
+ isPdf doc Test.@=? False,
+ Test.unit "isSupportedVoiceFormat accepts ogg" <| do
+ let voice = TelegramVoice "id" 10 (Just "audio/ogg") Nothing
+ isSupportedVoiceFormat voice Test.@=? True,
+ Test.unit "isSupportedVoiceFormat accepts opus" <| do
+ let voice = TelegramVoice "id" 10 (Just "audio/opus") Nothing
+ isSupportedVoiceFormat voice Test.@=? True,
+ Test.unit "isSupportedVoiceFormat defaults to True for unknown" <| do
+ let voice = TelegramVoice "id" 10 Nothing Nothing
+ isSupportedVoiceFormat voice Test.@=? True
+ ]
+
+data TelegramConfig = TelegramConfig
+ { tgBotToken :: Text,
+ tgPollingTimeout :: Int,
+ tgApiBaseUrl :: Text,
+ tgAllowedUserIds :: [Int],
+ tgKagiApiKey :: Maybe Text,
+ tgOpenRouterApiKey :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramConfig where
+ toJSON c =
+ Aeson.object
+ [ "bot_token" .= tgBotToken c,
+ "polling_timeout" .= tgPollingTimeout c,
+ "api_base_url" .= tgApiBaseUrl c,
+ "allowed_user_ids" .= tgAllowedUserIds c,
+ "kagi_api_key" .= tgKagiApiKey c,
+ "openrouter_api_key" .= tgOpenRouterApiKey c
+ ]
+
+instance Aeson.FromJSON TelegramConfig where
+ parseJSON =
+ Aeson.withObject "TelegramConfig" <| \v ->
+ (TelegramConfig </ (v .: "bot_token"))
+ <*> (v .:? "polling_timeout" .!= 30)
+ <*> (v .:? "api_base_url" .!= "https://api.telegram.org")
+ <*> (v .:? "allowed_user_ids" .!= [])
+ <*> (v .:? "kagi_api_key")
+ <*> (v .: "openrouter_api_key")
+
+defaultTelegramConfig :: Text -> [Int] -> Maybe Text -> Text -> TelegramConfig
+defaultTelegramConfig token allowedIds kagiKey openRouterKey =
+ TelegramConfig
+ { tgBotToken = token,
+ tgPollingTimeout = 30,
+ tgApiBaseUrl = "https://api.telegram.org",
+ tgAllowedUserIds = allowedIds,
+ tgKagiApiKey = kagiKey,
+ tgOpenRouterApiKey = openRouterKey
+ }
+
+isUserAllowed :: TelegramConfig -> Int -> Bool
+isUserAllowed cfg usrId =
+ null (tgAllowedUserIds cfg) || usrId `elem` tgAllowedUserIds cfg
+
+data TelegramDocument = TelegramDocument
+ { tdFileId :: Text,
+ tdFileName :: Maybe Text,
+ tdMimeType :: Maybe Text,
+ tdFileSize :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramDocument where
+ toJSON d =
+ Aeson.object
+ [ "file_id" .= tdFileId d,
+ "file_name" .= tdFileName d,
+ "mime_type" .= tdMimeType d,
+ "file_size" .= tdFileSize d
+ ]
+
+instance Aeson.FromJSON TelegramDocument where
+ parseJSON =
+ Aeson.withObject "TelegramDocument" <| \v ->
+ (TelegramDocument </ (v .: "file_id"))
+ <*> (v .:? "file_name")
+ <*> (v .:? "mime_type")
+ <*> (v .:? "file_size")
+
+data TelegramPhoto = TelegramPhoto
+ { tpFileId :: Text,
+ tpWidth :: Int,
+ tpHeight :: Int,
+ tpFileSize :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramPhoto where
+ toJSON p =
+ Aeson.object
+ [ "file_id" .= tpFileId p,
+ "width" .= tpWidth p,
+ "height" .= tpHeight p,
+ "file_size" .= tpFileSize p
+ ]
+
+instance Aeson.FromJSON TelegramPhoto where
+ parseJSON =
+ Aeson.withObject "TelegramPhoto" <| \v ->
+ (TelegramPhoto </ (v .: "file_id"))
+ <*> (v .: "width")
+ <*> (v .: "height")
+ <*> (v .:? "file_size")
+
+data TelegramVoice = TelegramVoice
+ { tvFileId :: Text,
+ tvDuration :: Int,
+ tvMimeType :: Maybe Text,
+ tvFileSize :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramVoice where
+ toJSON v =
+ Aeson.object
+ [ "file_id" .= tvFileId v,
+ "duration" .= tvDuration v,
+ "mime_type" .= tvMimeType v,
+ "file_size" .= tvFileSize v
+ ]
+
+instance Aeson.FromJSON TelegramVoice where
+ parseJSON =
+ Aeson.withObject "TelegramVoice" <| \v ->
+ (TelegramVoice </ (v .: "file_id"))
+ <*> (v .: "duration")
+ <*> (v .:? "mime_type")
+ <*> (v .:? "file_size")
+
+data TelegramReplyMessage = TelegramReplyMessage
+ { trMessageId :: Int,
+ trFromFirstName :: Maybe Text,
+ trFromLastName :: Maybe Text,
+ trText :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramReplyMessage where
+ toJSON r =
+ Aeson.object
+ [ "message_id" .= trMessageId r,
+ "from_first_name" .= trFromFirstName r,
+ "from_last_name" .= trFromLastName r,
+ "text" .= trText r
+ ]
+
+instance Aeson.FromJSON TelegramReplyMessage where
+ parseJSON =
+ Aeson.withObject "TelegramReplyMessage" <| \v ->
+ (TelegramReplyMessage </ (v .: "message_id"))
+ <*> (v .:? "from_first_name")
+ <*> (v .:? "from_last_name")
+ <*> (v .:? "text" .!= "")
+
+data BotAddedToGroup = BotAddedToGroup
+ { bagUpdateId :: Int,
+ bagChatId :: Int,
+ bagAddedByUserId :: Int,
+ bagAddedByFirstName :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+data ChatType = Private | Group | Supergroup | Channel
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ChatType where
+ toJSON Private = Aeson.String "private"
+ toJSON Group = Aeson.String "group"
+ toJSON Supergroup = Aeson.String "supergroup"
+ toJSON Channel = Aeson.String "channel"
+
+instance Aeson.FromJSON ChatType where
+ parseJSON = Aeson.withText "ChatType" parseChatType
+ where
+ parseChatType "private" = pure Private
+ parseChatType "group" = pure Group
+ parseChatType "supergroup" = pure Supergroup
+ parseChatType "channel" = pure Channel
+ parseChatType _ = pure Private
+
+data TelegramMessage = TelegramMessage
+ { tmUpdateId :: Int,
+ tmChatId :: Int,
+ tmChatType :: ChatType,
+ tmThreadId :: Maybe Int,
+ tmUserId :: Int,
+ tmUserFirstName :: Text,
+ tmUserLastName :: Maybe Text,
+ tmText :: Text,
+ tmDocument :: Maybe TelegramDocument,
+ tmPhoto :: Maybe TelegramPhoto,
+ tmVoice :: Maybe TelegramVoice,
+ tmReplyTo :: Maybe TelegramReplyMessage
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON TelegramMessage where
+ toJSON m =
+ Aeson.object
+ [ "update_id" .= tmUpdateId m,
+ "chat_id" .= tmChatId m,
+ "chat_type" .= tmChatType m,
+ "thread_id" .= tmThreadId m,
+ "user_id" .= tmUserId m,
+ "user_first_name" .= tmUserFirstName m,
+ "user_last_name" .= tmUserLastName m,
+ "text" .= tmText m,
+ "document" .= tmDocument m,
+ "photo" .= tmPhoto m,
+ "voice" .= tmVoice m,
+ "reply_to" .= tmReplyTo m
+ ]
+
+instance Aeson.FromJSON TelegramMessage where
+ parseJSON =
+ Aeson.withObject "TelegramMessage" <| \v ->
+ (TelegramMessage </ (v .: "update_id"))
+ <*> (v .: "chat_id")
+ <*> (v .:? "chat_type" .!= Private)
+ <*> (v .:? "thread_id")
+ <*> (v .: "user_id")
+ <*> (v .: "user_first_name")
+ <*> (v .:? "user_last_name")
+ <*> (v .: "text")
+ <*> (v .:? "document")
+ <*> (v .:? "photo")
+ <*> (v .:? "voice")
+ <*> (v .:? "reply_to")
+
+data TelegramUpdate = TelegramUpdate
+ { tuUpdateId :: Int,
+ tuMessage :: Maybe Aeson.Value
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON TelegramUpdate where
+ parseJSON =
+ Aeson.withObject "TelegramUpdate" <| \v ->
+ (TelegramUpdate </ (v .: "update_id"))
+ <*> (v .:? "message")
+
+parseUpdate :: Aeson.Value -> Maybe TelegramMessage
+parseUpdate val = do
+ Aeson.Object obj <- pure val
+ updateId <- case KeyMap.lookup "update_id" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ Aeson.Object msgObj <- KeyMap.lookup "message" obj
+ Aeson.Object chatObj <- KeyMap.lookup "chat" msgObj
+ chatId <- case KeyMap.lookup "id" chatObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let chatType = case KeyMap.lookup "type" chatObj of
+ Just (Aeson.String "private") -> Private
+ Just (Aeson.String "group") -> Group
+ Just (Aeson.String "supergroup") -> Supergroup
+ Just (Aeson.String "channel") -> Channel
+ _ -> Private
+ let threadId = case KeyMap.lookup "message_thread_id" msgObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ Aeson.Object fromObj <- KeyMap.lookup "from" msgObj
+ userId <- case KeyMap.lookup "id" fromObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ firstName <- case KeyMap.lookup "first_name" fromObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ let lastName = case KeyMap.lookup "last_name" fromObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ let text = case KeyMap.lookup "text" msgObj of
+ Just (Aeson.String s) -> s
+ _ -> ""
+ let caption = case KeyMap.lookup "caption" msgObj of
+ Just (Aeson.String s) -> s
+ _ -> ""
+ let document = case KeyMap.lookup "document" msgObj of
+ Just (Aeson.Object docObj) -> parseDocument docObj
+ _ -> Nothing
+ let photo = case KeyMap.lookup "photo" msgObj of
+ Just (Aeson.Array photos) -> parseLargestPhoto (toList photos)
+ _ -> Nothing
+ let voice = case KeyMap.lookup "voice" msgObj of
+ Just (Aeson.Object voiceObj) -> parseVoice voiceObj
+ _ -> Nothing
+ let replyTo = case KeyMap.lookup "reply_to_message" msgObj of
+ Just (Aeson.Object replyObj) -> parseReplyMessage replyObj
+ _ -> Nothing
+ let hasContent = not (Text.null text) || not (Text.null caption) || isJust document || isJust photo || isJust voice
+ guard hasContent
+ pure
+ TelegramMessage
+ { tmUpdateId = updateId,
+ tmChatId = chatId,
+ tmChatType = chatType,
+ tmThreadId = threadId,
+ tmUserId = userId,
+ tmUserFirstName = firstName,
+ tmUserLastName = lastName,
+ tmText = if Text.null text then caption else text,
+ tmDocument = document,
+ tmPhoto = photo,
+ tmVoice = voice,
+ tmReplyTo = replyTo
+ }
+
+parseBotAddedToGroup :: Text -> Aeson.Value -> Maybe BotAddedToGroup
+parseBotAddedToGroup botUsername val = do
+ Aeson.Object obj <- pure val
+ updateId <- case KeyMap.lookup "update_id" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ Aeson.Object msgObj <- KeyMap.lookup "message" obj
+ Aeson.Object chatObj <- KeyMap.lookup "chat" msgObj
+ chatId <- case KeyMap.lookup "id" chatObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let chatType = case KeyMap.lookup "type" chatObj of
+ Just (Aeson.String t) -> t
+ _ -> "private"
+ guard (chatType == "group" || chatType == "supergroup")
+ Aeson.Object fromObj <- KeyMap.lookup "from" msgObj
+ addedByUserId <- case KeyMap.lookup "id" fromObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ addedByFirstName <- case KeyMap.lookup "first_name" fromObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ Aeson.Array newMembers <- KeyMap.lookup "new_chat_members" msgObj
+ let botWasAdded = any (isBotUser botUsername) (toList newMembers)
+ guard botWasAdded
+ pure
+ BotAddedToGroup
+ { bagUpdateId = updateId,
+ bagChatId = chatId,
+ bagAddedByUserId = addedByUserId,
+ bagAddedByFirstName = addedByFirstName
+ }
+ where
+ isBotUser :: Text -> Aeson.Value -> Bool
+ isBotUser username (Aeson.Object userObj) =
+ case KeyMap.lookup "username" userObj of
+ Just (Aeson.String u) -> Text.toLower u == Text.toLower username
+ _ -> False
+ isBotUser _ _ = False
+
+parseDocument :: Aeson.Object -> Maybe TelegramDocument
+parseDocument docObj = do
+ fileId <- case KeyMap.lookup "file_id" docObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ let fileName = case KeyMap.lookup "file_name" docObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ mimeType = case KeyMap.lookup "mime_type" docObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ fileSize = case KeyMap.lookup "file_size" docObj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ pure
+ TelegramDocument
+ { tdFileId = fileId,
+ tdFileName = fileName,
+ tdMimeType = mimeType,
+ tdFileSize = fileSize
+ }
+
+parseLargestPhoto :: [Aeson.Value] -> Maybe TelegramPhoto
+parseLargestPhoto photos = do
+ let parsed = mapMaybe parsePhotoSize photos
+ case parsed of
+ [] -> Nothing
+ ps -> Just (maximumBy (comparing tpWidth) ps)
+
+parsePhotoSize :: Aeson.Value -> Maybe TelegramPhoto
+parsePhotoSize val = do
+ Aeson.Object obj <- pure val
+ fileId <- case KeyMap.lookup "file_id" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ width <- case KeyMap.lookup "width" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ height <- case KeyMap.lookup "height" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let fileSize = case KeyMap.lookup "file_size" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ pure
+ TelegramPhoto
+ { tpFileId = fileId,
+ tpWidth = width,
+ tpHeight = height,
+ tpFileSize = fileSize
+ }
+
+parseVoice :: Aeson.Object -> Maybe TelegramVoice
+parseVoice obj = do
+ fileId <- case KeyMap.lookup "file_id" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ duration <- case KeyMap.lookup "duration" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let mimeType = case KeyMap.lookup "mime_type" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ fileSize = case KeyMap.lookup "file_size" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ pure
+ TelegramVoice
+ { tvFileId = fileId,
+ tvDuration = duration,
+ tvMimeType = mimeType,
+ tvFileSize = fileSize
+ }
+
+parseReplyMessage :: Aeson.Object -> Maybe TelegramReplyMessage
+parseReplyMessage obj = do
+ messageId <- case KeyMap.lookup "message_id" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ let fromFirstName = case KeyMap.lookup "from" obj of
+ Just (Aeson.Object fromObj) -> case KeyMap.lookup "first_name" fromObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ _ -> Nothing
+ fromLastName = case KeyMap.lookup "from" obj of
+ Just (Aeson.Object fromObj) -> case KeyMap.lookup "last_name" fromObj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ _ -> Nothing
+ text = case KeyMap.lookup "text" obj of
+ Just (Aeson.String s) -> s
+ _ -> case KeyMap.lookup "caption" obj of
+ Just (Aeson.String s) -> s
+ _ -> ""
+ pure
+ TelegramReplyMessage
+ { trMessageId = messageId,
+ trFromFirstName = fromFirstName,
+ trFromLastName = fromLastName,
+ trText = text
+ }
+
+isPdf :: TelegramDocument -> Bool
+isPdf doc =
+ case tdMimeType doc of
+ Just mime -> mime == "application/pdf"
+ Nothing -> case tdFileName doc of
+ Just name -> ".pdf" `Text.isSuffixOf` Text.toLower name
+ Nothing -> False
+
+isSupportedVoiceFormat :: TelegramVoice -> Bool
+isSupportedVoiceFormat voice =
+ case tvMimeType voice of
+ Just "audio/ogg" -> True
+ Just "audio/opus" -> True
+ Just "audio/x-opus+ogg" -> True
+ Nothing -> True
+ _ -> False
+
+isGroupChat :: TelegramMessage -> Bool
+isGroupChat msg = tmChatType msg `elem` [Group, Supergroup]
+
+shouldRespondInGroup :: Text -> TelegramMessage -> Bool
+shouldRespondInGroup botUsername msg
+ | not (isGroupChat msg) = True
+ | isMentioned = True
+ | isReplyToBot = True
+ | otherwise = False
+ where
+ msgText = Text.toLower (tmText msg)
+ mention = "@" <> Text.toLower botUsername
+ isMentioned = mention `Text.isInfixOf` msgText
+ isReplyToBot = isJust (tmReplyTo msg)
diff --git a/Omni/Agent/Tools.hs b/Omni/Agent/Tools.hs
new file mode 100644
index 0000000..22cc8a1
--- /dev/null
+++ b/Omni/Agent/Tools.hs
@@ -0,0 +1,682 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Core coding tools for Jr agent.
+--
+-- Provides implementations for:
+-- - readFile: Read file contents
+-- - writeFile: Write file contents
+-- - editFile: Search/replace edit
+-- - runBash: Execute shell commands
+-- - searchCodebase: Ripgrep wrapper for code search
+--
+-- All tools return structured JSON results.
+--
+-- : out omni-agent-tools
+-- : dep aeson
+-- : dep directory
+module Omni.Agent.Tools
+ ( readFileTool,
+ writeFileTool,
+ editFileTool,
+ runBashTool,
+ searchCodebaseTool,
+ searchAndReadTool,
+ allTools,
+ ReadFileArgs (..),
+ WriteFileArgs (..),
+ EditFileArgs (..),
+ RunBashArgs (..),
+ SearchCodebaseArgs (..),
+ SearchAndReadArgs (..),
+ ToolResult (..),
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TextIO
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import qualified System.Directory as Directory
+import qualified System.Exit as Exit
+import qualified System.Process as Process
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools"
+ [ Test.unit "readFileTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema readFileTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "writeFileTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema writeFileTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "editFileTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema editFileTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "runBashTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema runBashTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "searchCodebaseTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema searchCodebaseTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "allTools contains 6 tools" <| do
+ length allTools Test.@=? 6,
+ Test.unit "ReadFileArgs parses correctly" <| do
+ let json = Aeson.object ["path" .= ("test.txt" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: ReadFileArgs) -> readFilePath args Test.@=? "test.txt"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "WriteFileArgs parses correctly" <| do
+ let json = Aeson.object ["path" .= ("test.txt" :: Text), "content" .= ("hello" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: WriteFileArgs) -> do
+ writeFilePath args Test.@=? "test.txt"
+ writeFileContent args Test.@=? "hello"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "EditFileArgs parses correctly" <| do
+ let json =
+ Aeson.object
+ [ "path" .= ("test.txt" :: Text),
+ "old_str" .= ("foo" :: Text),
+ "new_str" .= ("bar" :: Text)
+ ]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: EditFileArgs) -> do
+ editFilePath args Test.@=? "test.txt"
+ editFileOldStr args Test.@=? "foo"
+ editFileNewStr args Test.@=? "bar"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "RunBashArgs parses correctly" <| do
+ let json = Aeson.object ["command" .= ("ls -la" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: RunBashArgs) -> runBashCommand args Test.@=? "ls -la"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "SearchCodebaseArgs parses correctly" <| do
+ let json = Aeson.object ["pattern" .= ("TODO" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: SearchCodebaseArgs) -> searchPattern args Test.@=? "TODO"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "ToolResult success JSON roundtrip" <| do
+ let result = ToolResult True "done" Nothing
+ case Aeson.decode (Aeson.encode result) of
+ Nothing -> Test.assertFailure "Failed to decode ToolResult"
+ Just decoded -> toolResultSuccess decoded Test.@=? True,
+ Test.unit "ToolResult failure JSON roundtrip" <| do
+ let result = ToolResult False "" (Just "error occurred")
+ case Aeson.decode (Aeson.encode result) of
+ Nothing -> Test.assertFailure "Failed to decode ToolResult"
+ Just decoded -> toolResultError decoded Test.@=? Just "error occurred",
+ Test.unit "readFileTool handles missing files" <| do
+ let args = Aeson.object ["path" .= ("/nonexistent/path/to/file.txt" :: Text)]
+ result <- Engine.toolExecute readFileTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> do
+ toolResultSuccess tr Test.@=? False
+ isJust (toolResultError tr) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "editFileTool handles no-match case" <| do
+ let args =
+ Aeson.object
+ [ "path" .= ("/nonexistent/file.txt" :: Text),
+ "old_str" .= ("needle" :: Text),
+ "new_str" .= ("replacement" :: Text)
+ ]
+ result <- Engine.toolExecute editFileTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> toolResultSuccess tr Test.@=? False
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "runBashTool captures exit codes" <| do
+ let args = Aeson.object ["command" .= ("exit 42" :: Text)]
+ result <- Engine.toolExecute runBashTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> do
+ toolResultSuccess tr Test.@=? False
+ toolResultError tr Test.@=? Just "Exit code: 42"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "runBashTool captures stdout" <| do
+ let args = Aeson.object ["command" .= ("echo hello" :: Text)]
+ result <- Engine.toolExecute runBashTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> do
+ toolResultSuccess tr Test.@=? True
+ ("hello" `Text.isInfixOf` toolResultOutput tr) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "runBashTool validates cwd exists" <| do
+ let args =
+ Aeson.object
+ [ "command" .= ("echo test" :: Text),
+ "cwd" .= ("/nonexistent/path/that/does/not/exist" :: Text)
+ ]
+ result <- Engine.toolExecute runBashTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> do
+ toolResultSuccess tr Test.@=? False
+ isJust (toolResultError tr) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "searchCodebaseTool returns structured results" <| do
+ let args =
+ Aeson.object
+ [ "pattern" .= ("module" :: Text),
+ "path" .= ("." :: Text),
+ "max_results" .= (5 :: Int)
+ ]
+ result <- Engine.toolExecute searchCodebaseTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (tr :: ToolResult) -> toolResultSuccess tr Test.@=? True
+ Aeson.Error e -> Test.assertFailure e
+ ]
+
+data ToolResult = ToolResult
+ { toolResultSuccess :: Bool,
+ toolResultOutput :: Text,
+ toolResultError :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON ToolResult where
+ toJSON r =
+ Aeson.object
+ <| catMaybes
+ [ Just ("success" .= toolResultSuccess r),
+ Just ("output" .= toolResultOutput r),
+ ("error" .=) </ toolResultError r
+ ]
+
+instance Aeson.FromJSON ToolResult where
+ parseJSON =
+ Aeson.withObject "ToolResult" <| \v ->
+ (ToolResult </ (v .: "success"))
+ <*> (v .:? "output" .!= "")
+ <*> (v .:? "error")
+
+mkSuccess :: Text -> Aeson.Value
+mkSuccess output = Aeson.toJSON <| ToolResult True output Nothing
+
+mkError :: Text -> Aeson.Value
+mkError err = Aeson.toJSON <| ToolResult False "" (Just err)
+
+allTools :: [Engine.Tool]
+allTools =
+ [ readFileTool,
+ writeFileTool,
+ editFileTool,
+ runBashTool,
+ searchCodebaseTool,
+ searchAndReadTool
+ ]
+
+data ReadFileArgs = ReadFileArgs
+ { readFilePath :: Text,
+ readFileStartLine :: Maybe Int,
+ readFileEndLine :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON ReadFileArgs where
+ parseJSON =
+ Aeson.withObject "ReadFileArgs" <| \v ->
+ (ReadFileArgs </ (v .: "path"))
+ <*> (v .:? "start_line")
+ <*> (v .:? "end_line")
+
+readFileTool :: Engine.Tool
+readFileTool =
+ Engine.Tool
+ { Engine.toolName = "read_file",
+ Engine.toolDescription = "Read the contents of a file. Can optionally read a specific line range.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Absolute path to the file to read" :: Text)
+ ],
+ "start_line"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Optional: first line to read (1-indexed)" :: Text)
+ ],
+ "end_line"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Optional: last line to read (1-indexed)" :: Text)
+ ]
+ ],
+ "required" .= (["path"] :: [Text])
+ ],
+ Engine.toolExecute = executeReadFile
+ }
+
+executeReadFile :: Aeson.Value -> IO Aeson.Value
+executeReadFile v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let path = Text.unpack (readFilePath args)
+ exists <- Directory.doesFileExist path
+ if exists
+ then do
+ content <- TextIO.readFile path
+ let allLines = Text.lines content
+ startIdx = maybe 0 (\n -> n - 1) (readFileStartLine args)
+ endIdx = maybe (length allLines) identity (readFileEndLine args)
+ selectedLines = take (endIdx - startIdx) (drop startIdx allLines)
+ numberedLines = zipWith formatLine [(startIdx + 1) ..] selectedLines
+ result = Text.unlines numberedLines
+ pure <| mkSuccess result
+ else pure <| mkError ("File not found: " <> readFilePath args)
+ where
+ formatLine :: Int -> Text -> Text
+ formatLine n line = Text.pack (show n) <> ": " <> line
+
+data WriteFileArgs = WriteFileArgs
+ { writeFilePath :: Text,
+ writeFileContent :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON WriteFileArgs where
+ parseJSON =
+ Aeson.withObject "WriteFileArgs" <| \v ->
+ (WriteFileArgs </ (v .: "path"))
+ <*> (v .: "content")
+
+writeFileTool :: Engine.Tool
+writeFileTool =
+ Engine.Tool
+ { Engine.toolName = "write_file",
+ Engine.toolDescription = "Create or overwrite a file with the given content.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Absolute path to the file to write" :: Text)
+ ],
+ "content"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Content to write to the file" :: Text)
+ ]
+ ],
+ "required" .= (["path", "content"] :: [Text])
+ ],
+ Engine.toolExecute = executeWriteFile
+ }
+
+executeWriteFile :: Aeson.Value -> IO Aeson.Value
+executeWriteFile v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let path = Text.unpack (writeFilePath args)
+ let dir = takeDirectory path
+ dirExists <- Directory.doesDirectoryExist dir
+ if dirExists
+ then do
+ TextIO.writeFile path (writeFileContent args)
+ pure <| mkSuccess ("File written: " <> writeFilePath args)
+ else pure <| mkError ("Parent directory does not exist: " <> Text.pack dir)
+ where
+ takeDirectory :: FilePath -> FilePath
+ takeDirectory p =
+ let parts = Text.splitOn "/" (Text.pack p)
+ in Text.unpack (Text.intercalate "/" (List.init parts))
+
+data EditFileArgs = EditFileArgs
+ { editFilePath :: Text,
+ editFileOldStr :: Text,
+ editFileNewStr :: Text,
+ editFileReplaceAll :: Maybe Bool
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON EditFileArgs where
+ parseJSON =
+ Aeson.withObject "EditFileArgs" <| \v ->
+ (EditFileArgs </ (v .: "path"))
+ <*> (v .: "old_str")
+ <*> (v .: "new_str")
+ <*> (v .:? "replace_all")
+
+editFileTool :: Engine.Tool
+editFileTool =
+ Engine.Tool
+ { Engine.toolName = "edit_file",
+ Engine.toolDescription = "Edit a file by replacing old_str with new_str. By default replaces only the first occurrence unless replace_all is true.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Absolute path to the file to edit" :: Text)
+ ],
+ "old_str"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The text to search for and replace" :: Text)
+ ],
+ "new_str"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The replacement text" :: Text)
+ ],
+ "replace_all"
+ .= Aeson.object
+ [ "type" .= ("boolean" :: Text),
+ "description" .= ("If true, replace all occurrences; otherwise replace only the first" :: Text)
+ ]
+ ],
+ "required" .= (["path", "old_str", "new_str"] :: [Text])
+ ],
+ Engine.toolExecute = executeEditFile
+ }
+
+executeEditFile :: Aeson.Value -> IO Aeson.Value
+executeEditFile v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let path = Text.unpack (editFilePath args)
+ exists <- Directory.doesFileExist path
+ if exists
+ then do
+ content <- TextIO.readFile path
+ let oldStr = editFileOldStr args
+ newStr = editFileNewStr args
+ replaceAll = fromMaybe False (editFileReplaceAll args)
+ if Text.isInfixOf oldStr content
+ then do
+ let newContent =
+ if replaceAll
+ then Text.replace oldStr newStr content
+ else replaceFirst oldStr newStr content
+ TextIO.writeFile path newContent
+ let count =
+ if replaceAll
+ then Text.count oldStr content
+ else 1
+ pure <| mkSuccess ("Replaced " <> tshow count <> " occurrence(s)")
+ else pure <| mkError ("old_str not found in file: " <> editFilePath args)
+ else pure <| mkError ("File not found: " <> editFilePath args)
+
+replaceFirst :: Text -> Text -> Text -> Text
+replaceFirst old new content =
+ case Text.breakOn old content of
+ (before, after) ->
+ if Text.null after
+ then content
+ else before <> new <> Text.drop (Text.length old) after
+
+data RunBashArgs = RunBashArgs
+ { runBashCommand :: Text,
+ runBashCwd :: Maybe Text,
+ runBashTimeout :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON RunBashArgs where
+ parseJSON =
+ Aeson.withObject "RunBashArgs" <| \v ->
+ (RunBashArgs </ (v .: "command"))
+ <*> (v .:? "cwd")
+ <*> (v .:? "timeout")
+
+runBashTool :: Engine.Tool
+runBashTool =
+ Engine.Tool
+ { Engine.toolName = "run_bash",
+ Engine.toolDescription = "Execute a shell command and return stdout/stderr.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "command"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The shell command to execute" :: Text)
+ ],
+ "cwd"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Optional: working directory for the command" :: Text)
+ ],
+ "timeout"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Optional: timeout in seconds (default: 300)" :: Text)
+ ]
+ ],
+ "required" .= (["command"] :: [Text])
+ ],
+ Engine.toolExecute = executeRunBash
+ }
+
+executeRunBash :: Aeson.Value -> IO Aeson.Value
+executeRunBash v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let cmd = Text.unpack (runBashCommand args)
+ maybeCwd = runBashCwd args
+ cwdValid <- case maybeCwd of
+ Nothing -> pure True
+ Just cwd -> Directory.doesDirectoryExist (Text.unpack cwd)
+ if not cwdValid
+ then
+ pure
+ <| mkError
+ ("Working directory does not exist: " <> fromMaybe "" maybeCwd)
+ else do
+ let proc =
+ (Process.shell cmd)
+ { Process.cwd = Text.unpack </ maybeCwd
+ }
+ (exitCode, stdoutStr, stderrStr) <- Process.readCreateProcessWithExitCode proc ""
+ let output = Text.pack stdoutStr <> Text.pack stderrStr
+ case exitCode of
+ Exit.ExitSuccess ->
+ pure
+ <| Aeson.toJSON
+ <| ToolResult
+ { toolResultSuccess = True,
+ toolResultOutput = output,
+ toolResultError = Nothing
+ }
+ Exit.ExitFailure code ->
+ pure
+ <| Aeson.toJSON
+ <| ToolResult
+ { toolResultSuccess = False,
+ toolResultOutput = output,
+ toolResultError = Just ("Exit code: " <> tshow code)
+ }
+
+data SearchCodebaseArgs = SearchCodebaseArgs
+ { searchPattern :: Text,
+ searchPath :: Maybe Text,
+ searchGlob :: Maybe Text,
+ searchCaseSensitive :: Maybe Bool,
+ searchMaxResults :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON SearchCodebaseArgs where
+ parseJSON =
+ Aeson.withObject "SearchCodebaseArgs" <| \v ->
+ (SearchCodebaseArgs </ (v .: "pattern"))
+ <*> (v .:? "path")
+ <*> (v .:? "glob")
+ <*> (v .:? "case_sensitive")
+ <*> (v .:? "max_results")
+
+searchCodebaseTool :: Engine.Tool
+searchCodebaseTool =
+ Engine.Tool
+ { Engine.toolName = "search_codebase",
+ Engine.toolDescription = "Search the codebase using ripgrep. Returns matching lines with file paths and line numbers.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "pattern"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The regex pattern to search for" :: Text)
+ ],
+ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Optional: directory or file path to search in" :: Text)
+ ],
+ "glob"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Optional: file glob pattern (e.g., '*.hs')" :: Text)
+ ],
+ "case_sensitive"
+ .= Aeson.object
+ [ "type" .= ("boolean" :: Text),
+ "description" .= ("Optional: case sensitive search (default: false)" :: Text)
+ ],
+ "max_results"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Optional: maximum number of results (default: 100)" :: Text)
+ ]
+ ],
+ "required" .= (["pattern"] :: [Text])
+ ],
+ Engine.toolExecute = executeSearchCodebase
+ }
+
+executeSearchCodebase :: Aeson.Value -> IO Aeson.Value
+executeSearchCodebase v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let pat = Text.unpack (searchPattern args)
+ maxRes = fromMaybe 100 (searchMaxResults args)
+ caseSensitive = fromMaybe False (searchCaseSensitive args)
+ baseArgs =
+ ["--line-number", "--no-heading", "--max-count=" <> show maxRes, pat]
+ caseArgs = ["--ignore-case" | not caseSensitive]
+ globArgs = maybe [] (\g -> ["--glob", Text.unpack g]) (searchGlob args)
+ pathArg = maybe ["."] (\p -> [Text.unpack p]) (searchPath args)
+ allArgs = caseArgs <> globArgs <> baseArgs <> pathArg
+ proc = Process.proc "rg" allArgs
+ (exitCode, stdoutStr, stderrStr) <- Process.readCreateProcessWithExitCode proc ""
+ case exitCode of
+ Exit.ExitSuccess ->
+ pure <| mkSuccess (Text.pack stdoutStr)
+ Exit.ExitFailure 1 ->
+ pure <| mkSuccess "No matches found"
+ Exit.ExitFailure code ->
+ pure <| mkError ("ripgrep failed with code " <> tshow code <> ": " <> Text.pack stderrStr)
+
+data SearchAndReadArgs = SearchAndReadArgs
+ { sarPattern :: Text,
+ sarPath :: Maybe Text,
+ sarContextLines :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON SearchAndReadArgs where
+ parseJSON =
+ Aeson.withObject "SearchAndReadArgs" <| \v ->
+ (SearchAndReadArgs </ (v .: "pattern"))
+ <*> (v .:? "path")
+ <*> (v .:? "context_lines")
+
+searchAndReadTool :: Engine.Tool
+searchAndReadTool =
+ Engine.Tool
+ { Engine.toolName = "search_and_read",
+ Engine.toolDescription =
+ "Search for a pattern, then read the matching lines with context. "
+ <> "More efficient than search + read separately - returns file content "
+ <> "around each match. Use this to find and understand code in one step.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "pattern"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Regex pattern to search for" :: Text)
+ ],
+ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Optional: directory or file to search in" :: Text)
+ ],
+ "context_lines"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Lines of context around each match (default: 10)" :: Text)
+ ]
+ ],
+ "required" .= (["pattern"] :: [Text])
+ ],
+ Engine.toolExecute = executeSearchAndRead
+ }
+
+executeSearchAndRead :: Aeson.Value -> IO Aeson.Value
+executeSearchAndRead v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError (Text.pack e)
+ Aeson.Success args -> do
+ let pat = Text.unpack (sarPattern args)
+ ctx = fromMaybe 10 (sarContextLines args)
+ pathArg = maybe ["."] (\p -> [Text.unpack p]) (sarPath args)
+ rgArgs =
+ [ "--line-number",
+ "--no-heading",
+ "--context=" <> show ctx,
+ "--max-count=20",
+ "--ignore-case",
+ pat
+ ]
+ <> pathArg
+ proc = Process.proc "rg" rgArgs
+ (exitCode, stdoutStr, stderrStr) <- Process.readCreateProcessWithExitCode proc ""
+ case exitCode of
+ Exit.ExitSuccess ->
+ pure <| mkSuccess (Text.pack stdoutStr)
+ Exit.ExitFailure 1 ->
+ pure <| mkSuccess "No matches found"
+ Exit.ExitFailure code ->
+ pure <| mkError ("ripgrep failed: " <> tshow code <> ": " <> Text.pack stderrStr)
diff --git a/Omni/Agent/Tools/Calendar.hs b/Omni/Agent/Tools/Calendar.hs
new file mode 100644
index 0000000..805916f
--- /dev/null
+++ b/Omni/Agent/Tools/Calendar.hs
@@ -0,0 +1,322 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Calendar tool using khal CLI.
+--
+-- Provides calendar access for agents via local khal/CalDAV.
+--
+-- : out omni-agent-tools-calendar
+-- : dep aeson
+-- : dep process
+module Omni.Agent.Tools.Calendar
+ ( -- * Tools
+ calendarListTool,
+ calendarAddTool,
+ calendarSearchTool,
+
+ -- * Direct API
+ listEvents,
+ addEvent,
+ searchEvents,
+ listCalendars,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.Process (readProcessWithExitCode)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Calendar"
+ [ Test.unit "calendarListTool has correct schema" <| do
+ let tool = calendarListTool
+ Engine.toolName tool Test.@=? "calendar_list",
+ Test.unit "calendarAddTool has correct schema" <| do
+ let tool = calendarAddTool
+ Engine.toolName tool Test.@=? "calendar_add",
+ Test.unit "calendarSearchTool has correct schema" <| do
+ let tool = calendarSearchTool
+ Engine.toolName tool Test.@=? "calendar_search",
+ Test.unit "listCalendars returns calendars" <| do
+ result <- listCalendars
+ case result of
+ Left _ -> pure ()
+ Right cals -> (not (null cals) || null cals) Test.@=? True
+ ]
+
+defaultCalendars :: [String]
+defaultCalendars = ["BenSimaShared", "Kate"]
+
+listEvents :: Text -> Maybe Text -> IO (Either Text Text)
+listEvents range maybeCalendar = do
+ let rangeArg = if Text.null range then "today 7d" else Text.unpack range
+ calArgs = case maybeCalendar of
+ Just cal -> ["-a", Text.unpack cal]
+ Nothing -> concatMap (\c -> ["-a", c]) defaultCalendars
+ formatArg = ["-f", "[{calendar}] {title} | {start-time} - {end-time}"]
+ result <-
+ try <| readProcessWithExitCode "khal" (["list"] <> calArgs <> formatArg <> [rangeArg, "-o"]) ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("khal error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess -> pure (Right (Text.pack stdoutStr))
+ ExitFailure code ->
+ pure (Left ("khal failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+addEvent :: Text -> Text -> Maybe Text -> Maybe Text -> Maybe Text -> IO (Either Text Text)
+addEvent calendarName eventSpec location alarm description = do
+ let baseArgs = ["new", "-a", Text.unpack calendarName]
+ locArgs = maybe [] (\l -> ["-l", Text.unpack l]) location
+ alarmArgs = maybe [] (\a -> ["-m", Text.unpack a]) alarm
+ specParts = Text.unpack eventSpec
+ descParts = maybe [] (\d -> ["::", Text.unpack d]) description
+ allArgs = baseArgs <> locArgs <> alarmArgs <> [specParts] <> descParts
+ result <- try <| readProcessWithExitCode "khal" allArgs ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("khal error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess ->
+ pure (Right ("Event created: " <> Text.pack stdoutStr))
+ ExitFailure code ->
+ pure (Left ("khal failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+searchEvents :: Text -> IO (Either Text Text)
+searchEvents query = do
+ let calArgs = concatMap (\c -> ["-a", c]) defaultCalendars
+ result <-
+ try <| readProcessWithExitCode "khal" (["search"] <> calArgs <> [Text.unpack query]) ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("khal error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess -> pure (Right (Text.pack stdoutStr))
+ ExitFailure code ->
+ pure (Left ("khal failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+listCalendars :: IO (Either Text [Text])
+listCalendars = do
+ result <-
+ try <| readProcessWithExitCode "khal" ["printcalendars"] ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("khal error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess ->
+ pure (Right (filter (not <. Text.null) (Text.lines (Text.pack stdoutStr))))
+ ExitFailure code ->
+ pure (Left ("khal failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+calendarListTool :: Engine.Tool
+calendarListTool =
+ Engine.Tool
+ { Engine.toolName = "calendar_list",
+ Engine.toolDescription =
+ "List upcoming calendar events. Use to check what's scheduled. "
+ <> "Range can be like 'today', 'tomorrow', 'today 7d', 'next week', etc. "
+ <> "Available calendars: BenSimaShared, Kate.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "range"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Time range like 'today 7d', 'tomorrow', 'next week' (default: today 7d)" :: Text)
+ ],
+ "calendar"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Filter to specific calendar: 'BenSimaShared' or 'Kate' (default: both)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeCalendarList
+ }
+
+executeCalendarList :: Aeson.Value -> IO Aeson.Value
+executeCalendarList v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: CalendarListArgs) -> do
+ result <- listEvents (clRange args) (clCalendar args)
+ case result of
+ Left err ->
+ pure (Aeson.object ["error" .= err])
+ Right events ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "events" .= events
+ ]
+ )
+
+data CalendarListArgs = CalendarListArgs
+ { clRange :: Text,
+ clCalendar :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON CalendarListArgs where
+ parseJSON =
+ Aeson.withObject "CalendarListArgs" <| \v ->
+ (CalendarListArgs </ (v .:? "range" .!= "today 7d"))
+ <*> (v .:? "calendar")
+
+calendarAddTool :: Engine.Tool
+calendarAddTool =
+ Engine.Tool
+ { Engine.toolName = "calendar_add",
+ Engine.toolDescription =
+ "Add a new calendar event. The event_spec format is: "
+ <> "'START [END] SUMMARY' where START/END are dates or times. "
+ <> "Examples: '2024-12-25 Christmas', 'tomorrow 10:00 11:00 Meeting', "
+ <> "'friday 14:00 1h Doctor appointment'.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "calendar"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Calendar name to add to (e.g., 'BenSimaShared', 'Kate')" :: Text)
+ ],
+ "event_spec"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Event specification: 'START [END] SUMMARY' (e.g., 'tomorrow 10:00 11:00 Team meeting')" :: Text)
+ ],
+ "location"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Location of the event (optional)" :: Text)
+ ],
+ "alarm"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Alarm time before event, e.g., '15m', '1h', '1d' (optional)" :: Text)
+ ],
+ "description"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Detailed description of the event (optional)" :: Text)
+ ]
+ ],
+ "required" .= (["calendar", "event_spec"] :: [Text])
+ ],
+ Engine.toolExecute = executeCalendarAdd
+ }
+
+executeCalendarAdd :: Aeson.Value -> IO Aeson.Value
+executeCalendarAdd v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: CalendarAddArgs) -> do
+ result <-
+ addEvent
+ (caCalendar args)
+ (caEventSpec args)
+ (caLocation args)
+ (caAlarm args)
+ (caDescription args)
+ case result of
+ Left err ->
+ pure (Aeson.object ["error" .= err])
+ Right msg ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= msg
+ ]
+ )
+
+data CalendarAddArgs = CalendarAddArgs
+ { caCalendar :: Text,
+ caEventSpec :: Text,
+ caLocation :: Maybe Text,
+ caAlarm :: Maybe Text,
+ caDescription :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON CalendarAddArgs where
+ parseJSON =
+ Aeson.withObject "CalendarAddArgs" <| \v ->
+ (CalendarAddArgs </ (v .: "calendar"))
+ <*> (v .: "event_spec")
+ <*> (v .:? "location")
+ <*> (v .:? "alarm")
+ <*> (v .:? "description")
+
+calendarSearchTool :: Engine.Tool
+calendarSearchTool =
+ Engine.Tool
+ { Engine.toolName = "calendar_search",
+ Engine.toolDescription =
+ "Search for calendar events by text. Finds events matching the query "
+ <> "in title, description, or location.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "query"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Search text to find in events" :: Text)
+ ]
+ ],
+ "required" .= (["query"] :: [Text])
+ ],
+ Engine.toolExecute = executeCalendarSearch
+ }
+
+executeCalendarSearch :: Aeson.Value -> IO Aeson.Value
+executeCalendarSearch v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: CalendarSearchArgs) -> do
+ result <- searchEvents (csQuery args)
+ case result of
+ Left err ->
+ pure (Aeson.object ["error" .= err])
+ Right events ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "results" .= events
+ ]
+ )
+
+newtype CalendarSearchArgs = CalendarSearchArgs
+ { csQuery :: Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON CalendarSearchArgs where
+ parseJSON =
+ Aeson.withObject "CalendarSearchArgs" <| \v ->
+ CalendarSearchArgs </ (v .: "query")
diff --git a/Omni/Agent/Tools/Email.hs b/Omni/Agent/Tools/Email.hs
new file mode 100644
index 0000000..7a9bc64
--- /dev/null
+++ b/Omni/Agent/Tools/Email.hs
@@ -0,0 +1,675 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Email tools for IMAP and SMTP access via Telegram bot.
+--
+-- Provides email management for agents:
+-- - Check for urgent/time-sensitive emails
+-- - Identify emails needing response vs FYI
+-- - Auto-unsubscribe from marketing
+-- - Send approved outreach emails via SMTP
+--
+-- Uses HaskellNet for IMAP/SMTP client support.
+-- Password retrieved via `pass ben@bensima.com`.
+--
+-- : out omni-agent-tools-email
+-- : dep aeson
+-- : dep process
+-- : dep regex-applicative
+-- : dep http-conduit
+-- : dep HaskellNet
+-- : dep HaskellNet-SSL
+module Omni.Agent.Tools.Email
+ ( -- * Tools
+ emailCheckTool,
+ emailReadTool,
+ emailUnsubscribeTool,
+ emailArchiveTool,
+ emailSendTool,
+
+ -- * All tools
+ allEmailTools,
+
+ -- * Direct API
+ checkNewEmails,
+ readEmail,
+ unsubscribeFromEmail,
+ archiveEmail,
+ getPassword,
+ sendApprovedEmail,
+
+ -- * Scheduled Check
+ emailCheckLoop,
+ performScheduledCheck,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Char8 as BS8
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.Lazy as LText
+import Data.Time (NominalDiffTime, UTCTime, addUTCTime, getCurrentTime)
+import Data.Time.Format (defaultTimeLocale, formatTime, parseTimeM)
+import Data.Time.LocalTime (TimeZone (..), utcToZonedTime)
+import qualified Network.HTTP.Simple as HTTP
+import qualified Network.HaskellNet.IMAP as IMAP
+import Network.HaskellNet.IMAP.Connection (IMAPConnection)
+import qualified Network.HaskellNet.IMAP.SSL as IMAPSSL
+import qualified Network.HaskellNet.SMTP as SMTP
+import qualified Network.HaskellNet.SMTP.SSL as SMTPSSL
+import Network.Mail.Mime (Address (..), simpleMail')
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Tools.Outreach as Outreach
+import qualified Omni.Test as Test
+import System.Process (readProcessWithExitCode)
+import Text.Regex.Applicative (RE, anySym, few, (=~))
+import qualified Text.Regex.Applicative as RE
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Email"
+ [ Test.unit "emailCheckTool has correct name" <| do
+ Engine.toolName emailCheckTool Test.@=? "email_check",
+ Test.unit "emailReadTool has correct name" <| do
+ Engine.toolName emailReadTool Test.@=? "email_read",
+ Test.unit "emailUnsubscribeTool has correct name" <| do
+ Engine.toolName emailUnsubscribeTool Test.@=? "email_unsubscribe",
+ Test.unit "emailArchiveTool has correct name" <| do
+ Engine.toolName emailArchiveTool Test.@=? "email_archive",
+ Test.unit "emailSendTool has correct name" <| do
+ Engine.toolName emailSendTool Test.@=? "email_send",
+ Test.unit "allEmailTools has 5 tools" <| do
+ length allEmailTools Test.@=? 5,
+ Test.unit "parseEmailHeaders extracts fields" <| do
+ let headers =
+ "From: test@example.com\r\n\
+ \Subject: Test Subject\r\n\
+ \Date: Mon, 1 Jan 2024 12:00:00 +0000\r\n\
+ \\r\n"
+ case parseEmailHeaders headers of
+ Nothing -> Test.assertFailure "Failed to parse headers"
+ Just email -> do
+ emailFrom email Test.@=? "test@example.com"
+ emailSubject email Test.@=? "Test Subject",
+ Test.unit "parseUnsubscribeHeader extracts URL" <| do
+ let header = "<https://example.com/unsubscribe>, <mailto:unsub@example.com>"
+ case parseUnsubscribeUrl header of
+ Nothing -> Test.assertFailure "Failed to parse unsubscribe URL"
+ Just url -> ("https://example.com" `Text.isPrefixOf` url) Test.@=? True
+ ]
+
+imapServer :: String
+imapServer = "bensima.com"
+
+imapUser :: String
+imapUser = "ben@bensima.com"
+
+getPassword :: IO (Either Text Text)
+getPassword = do
+ result <- try <| readProcessWithExitCode "pass" ["ben@bensima.com"] ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("Failed to get password: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess -> pure (Right (Text.strip (Text.pack stdoutStr)))
+ ExitFailure code ->
+ pure (Left ("pass failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+withImapConnection :: (IMAPConnection -> IO a) -> IO (Either Text a)
+withImapConnection action = do
+ pwResult <- getPassword
+ case pwResult of
+ Left err -> pure (Left err)
+ Right pw -> do
+ result <-
+ try <| do
+ conn <- IMAPSSL.connectIMAPSSL imapServer
+ IMAP.login conn imapUser (Text.unpack pw)
+ r <- action conn
+ IMAP.logout conn
+ pure r
+ case result of
+ Left (e :: SomeException) -> pure (Left ("IMAP error: " <> tshow e))
+ Right r -> pure (Right r)
+
+data EmailSummary = EmailSummary
+ { emailUid :: Int,
+ emailFrom :: Text,
+ emailSubject :: Text,
+ emailDate :: Text,
+ emailUnsubscribe :: Maybe Text
+ }
+ deriving (Show, Generic)
+
+instance Aeson.ToJSON EmailSummary where
+ toJSON e =
+ Aeson.object
+ [ "uid" .= emailUid e,
+ "from" .= emailFrom e,
+ "subject" .= emailSubject e,
+ "date" .= formatDateAsEst (emailDate e),
+ "has_unsubscribe" .= isJust (emailUnsubscribe e)
+ ]
+
+estTimezone :: TimeZone
+estTimezone = TimeZone (-300) False "EST"
+
+formatDateAsEst :: Text -> Text
+formatDateAsEst dateStr =
+ case parseEmailDate dateStr of
+ Nothing -> dateStr
+ Just utcTime ->
+ let zonedTime = utcToZonedTime estTimezone utcTime
+ in Text.pack (formatTime defaultTimeLocale "%a %b %d %H:%M EST" zonedTime)
+
+parseEmailHeaders :: Text -> Maybe EmailSummary
+parseEmailHeaders raw = do
+ let headerLines = Text.lines raw
+ fromLine = findHeader "From:" headerLines
+ subjectLine = findHeader "Subject:" headerLines
+ dateLine = findHeader "Date:" headerLines
+ unsubLine = findHeader "List-Unsubscribe:" headerLines
+ fromVal <- fromLine
+ subject <- subjectLine
+ dateVal <- dateLine
+ pure
+ EmailSummary
+ { emailUid = 0,
+ emailFrom = Text.strip (Text.drop 5 fromVal),
+ emailSubject = Text.strip (Text.drop 8 subject),
+ emailDate = Text.strip (Text.drop 5 dateVal),
+ emailUnsubscribe = (parseUnsubscribeUrl <. Text.drop 16) =<< unsubLine
+ }
+ where
+ findHeader :: Text -> [Text] -> Maybe Text
+ findHeader prefix = List.find (prefix `Text.isPrefixOf`)
+
+parseUnsubscribeUrl :: Text -> Maybe Text
+parseUnsubscribeUrl header =
+ let text = Text.unpack header
+ in case text =~ urlInBrackets of
+ Just url | "http" `List.isPrefixOf` url -> Just (Text.pack url)
+ _ -> Nothing
+ where
+ urlInBrackets :: RE Char String
+ urlInBrackets = few anySym *> RE.sym '<' *> few anySym <* RE.sym '>'
+
+checkNewEmails :: Maybe Int -> Maybe Int -> IO (Either Text [EmailSummary])
+checkNewEmails maybeLimit maybeHours = do
+ withImapConnection <| \conn -> do
+ IMAP.select conn "INBOX"
+ uids <- IMAP.search conn [IMAP.UNFLAG IMAP.Seen]
+ let limit = fromMaybe 20 maybeLimit
+ recentUids = take limit (reverse (map fromIntegral uids))
+ if null recentUids
+ then pure []
+ else do
+ emails <-
+ forM recentUids <| \uid -> do
+ headerBytes <- IMAP.fetchHeader conn (fromIntegral uid)
+ let headerText = Text.pack (BS8.unpack headerBytes)
+ pure (parseEmailHeaders headerText, uid)
+ let parsed =
+ [ e {emailUid = uid}
+ | (Just e, uid) <- emails
+ ]
+ case maybeHours of
+ Nothing -> pure parsed
+ Just hours -> do
+ now <- getCurrentTime
+ let cutoff = addUTCTime (negate (fromIntegral hours * 3600 :: NominalDiffTime)) now
+ pure (filter (isAfterCutoff cutoff) parsed)
+
+isAfterCutoff :: UTCTime -> EmailSummary -> Bool
+isAfterCutoff cutoff email =
+ case parseEmailDate (emailDate email) of
+ Nothing -> False
+ Just emailTime -> emailTime >= cutoff
+
+parseEmailDate :: Text -> Maybe UTCTime
+parseEmailDate dateStr =
+ let cleaned = stripParenTz (Text.strip dateStr)
+ formats =
+ [ "%a, %d %b %Y %H:%M:%S %z",
+ "%a, %d %b %Y %H:%M:%S %Z",
+ "%d %b %Y %H:%M:%S %z",
+ "%a, %d %b %Y %H:%M %z",
+ "%a, %d %b %Y %H:%M:%S %z (%Z)"
+ ]
+ tryParse [] = Nothing
+ tryParse (fmt : rest) =
+ case parseTimeM True defaultTimeLocale fmt (Text.unpack cleaned) of
+ Just t -> Just t
+ Nothing -> tryParse rest
+ in tryParse formats
+
+stripParenTz :: Text -> Text
+stripParenTz t =
+ case Text.breakOn " (" t of
+ (before, after)
+ | Text.null after -> t
+ | ")" `Text.isSuffixOf` after -> before
+ | otherwise -> t
+
+readEmail :: Int -> IO (Either Text Text)
+readEmail uid =
+ withImapConnection <| \conn -> do
+ IMAP.select conn "INBOX"
+ bodyBytes <- IMAP.fetch conn (fromIntegral uid)
+ let bodyText = Text.pack (BS8.unpack bodyBytes)
+ pure (Text.take 10000 bodyText)
+
+unsubscribeFromEmail :: Int -> IO (Either Text Text)
+unsubscribeFromEmail uid = do
+ headerResult <-
+ withImapConnection <| \conn -> do
+ IMAP.select conn "INBOX"
+ headerBytes <- IMAP.fetchHeader conn (fromIntegral uid)
+ pure (Text.pack (BS8.unpack headerBytes))
+ case headerResult of
+ Left err -> pure (Left err)
+ Right headerText ->
+ case extractUnsubscribeUrl headerText of
+ Nothing -> pure (Left "No unsubscribe URL found in this email")
+ Just url -> do
+ clickResult <- clickUnsubscribeLink url
+ case clickResult of
+ Left err -> pure (Left ("Failed to unsubscribe: " <> err))
+ Right () -> do
+ _ <- archiveEmail uid
+ pure (Right ("Unsubscribed and archived email " <> tshow uid))
+
+extractUnsubscribeUrl :: Text -> Maybe Text
+extractUnsubscribeUrl headerText =
+ let unsubLine = List.find ("List-Unsubscribe:" `Text.isInfixOf`) (Text.lines headerText)
+ in (parseUnsubscribeUrl <. Text.drop 16 <. Text.strip) =<< unsubLine
+
+clickUnsubscribeLink :: Text -> IO (Either Text ())
+clickUnsubscribeLink url = do
+ result <-
+ try <| do
+ req <- HTTP.parseRequest (Text.unpack url)
+ _ <- HTTP.httpLBS req
+ pure ()
+ case result of
+ Left (e :: SomeException) -> pure (Left (tshow e))
+ Right () -> pure (Right ())
+
+archiveEmail :: Int -> IO (Either Text Text)
+archiveEmail uid =
+ withImapConnection <| \conn -> do
+ IMAP.select conn "INBOX"
+ IMAP.copy conn (fromIntegral uid) "Archives.2025"
+ IMAP.store conn (fromIntegral uid) (IMAP.PlusFlags [IMAP.Deleted])
+ _ <- IMAP.expunge conn
+ pure ("Archived email " <> tshow uid)
+
+allEmailTools :: [Engine.Tool]
+allEmailTools =
+ [ emailCheckTool,
+ emailReadTool,
+ emailUnsubscribeTool,
+ emailArchiveTool,
+ emailSendTool
+ ]
+
+emailCheckTool :: Engine.Tool
+emailCheckTool =
+ Engine.Tool
+ { Engine.toolName = "email_check",
+ Engine.toolDescription =
+ "Check for new/unread emails. Returns a summary of recent unread emails "
+ <> "including sender, subject, date, and whether they have an unsubscribe link. "
+ <> "Use this to identify urgent items or emails needing response. "
+ <> "Use 'hours' to filter to emails received in the last N hours (e.g., hours=6 for last 6 hours).",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max emails to return (default: 20)" :: Text)
+ ],
+ "hours"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Only return emails from the last N hours (e.g., 6 for last 6 hours)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeEmailCheck
+ }
+
+executeEmailCheck :: Aeson.Value -> IO Aeson.Value
+executeEmailCheck v = do
+ let (limit, hours) = case v of
+ Aeson.Object obj ->
+ let l = case KeyMap.lookup "limit" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ h = case KeyMap.lookup "hours" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ in (l, h)
+ _ -> (Nothing, Nothing)
+ result <- checkNewEmails limit hours
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right emails ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length emails,
+ "emails" .= emails
+ ]
+ )
+
+emailReadTool :: Engine.Tool
+emailReadTool =
+ Engine.Tool
+ { Engine.toolName = "email_read",
+ Engine.toolDescription =
+ "Read the full content of an email by its UID. "
+ <> "Use after email_check to read emails that seem important or need a response.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "uid"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Email UID from email_check" :: Text)
+ ]
+ ],
+ "required" .= (["uid"] :: [Text])
+ ],
+ Engine.toolExecute = executeEmailRead
+ }
+
+executeEmailRead :: Aeson.Value -> IO Aeson.Value
+executeEmailRead v = do
+ let uidM = case v of
+ Aeson.Object obj -> case KeyMap.lookup "uid" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ _ -> Nothing
+ case uidM of
+ Nothing -> pure (Aeson.object ["error" .= ("Missing uid parameter" :: Text)])
+ Just uid -> do
+ result <- readEmail uid
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right body ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "uid" .= uid,
+ "body" .= body
+ ]
+ )
+
+emailUnsubscribeTool :: Engine.Tool
+emailUnsubscribeTool =
+ Engine.Tool
+ { Engine.toolName = "email_unsubscribe",
+ Engine.toolDescription =
+ "Unsubscribe from a mailing list by clicking the List-Unsubscribe link. "
+ <> "Use for marketing/newsletter emails. Automatically archives the email after unsubscribing.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "uid"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Email UID to unsubscribe from" :: Text)
+ ]
+ ],
+ "required" .= (["uid"] :: [Text])
+ ],
+ Engine.toolExecute = executeEmailUnsubscribe
+ }
+
+executeEmailUnsubscribe :: Aeson.Value -> IO Aeson.Value
+executeEmailUnsubscribe v = do
+ let uidM = case v of
+ Aeson.Object obj -> case KeyMap.lookup "uid" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ _ -> Nothing
+ case uidM of
+ Nothing -> pure (Aeson.object ["error" .= ("Missing uid parameter" :: Text)])
+ Just uid -> do
+ result <- unsubscribeFromEmail uid
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right msg ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= msg
+ ]
+ )
+
+emailArchiveTool :: Engine.Tool
+emailArchiveTool =
+ Engine.Tool
+ { Engine.toolName = "email_archive",
+ Engine.toolDescription =
+ "Archive an email (move to Archives.2025 folder). "
+ <> "Use for emails that don't need a response and are just FYI.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "uid"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Email UID to archive" :: Text)
+ ]
+ ],
+ "required" .= (["uid"] :: [Text])
+ ],
+ Engine.toolExecute = executeEmailArchive
+ }
+
+executeEmailArchive :: Aeson.Value -> IO Aeson.Value
+executeEmailArchive v = do
+ let uidM = case v of
+ Aeson.Object obj -> case KeyMap.lookup "uid" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ _ -> Nothing
+ case uidM of
+ Nothing -> pure (Aeson.object ["error" .= ("Missing uid parameter" :: Text)])
+ Just uid -> do
+ result <- archiveEmail uid
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right msg ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= msg
+ ]
+ )
+
+emailCheckLoop :: (Int -> Maybe Int -> Text -> IO (Maybe Int)) -> Int -> IO ()
+emailCheckLoop sendFn chatId =
+ forever <| do
+ let sixHours = 6 * 60 * 60 * 1000000
+ threadDelay sixHours
+ performScheduledCheck sendFn chatId
+
+performScheduledCheck :: (Int -> Maybe Int -> Text -> IO (Maybe Int)) -> Int -> IO ()
+performScheduledCheck sendFn chatId = do
+ putText "Running scheduled email check..."
+ result <- checkNewEmails (Just 50) (Just 6)
+ case result of
+ Left err -> putText ("Email check failed: " <> err)
+ Right emails -> do
+ let urgent = filter isUrgent emails
+ needsResponse = filter needsResponsePred emails
+ marketing = filter hasUnsubscribe emails
+ when (not (null urgent) || not (null needsResponse)) <| do
+ let msg = formatEmailSummary urgent needsResponse (length marketing)
+ _ <- sendFn chatId Nothing msg
+ pure ()
+ where
+ isUrgent :: EmailSummary -> Bool
+ isUrgent email =
+ let subj = Text.toLower (emailSubject email)
+ in "urgent"
+ `Text.isInfixOf` subj
+ || "asap"
+ `Text.isInfixOf` subj
+ || "important"
+ `Text.isInfixOf` subj
+ || "action required"
+ `Text.isInfixOf` subj
+
+ needsResponsePred :: EmailSummary -> Bool
+ needsResponsePred email =
+ let sender = Text.toLower (emailFrom email)
+ subj = Text.toLower (emailSubject email)
+ in not (hasUnsubscribe email)
+ && not (isUrgent email)
+ && not ("noreply" `Text.isInfixOf` sender)
+ && not ("no-reply" `Text.isInfixOf` sender)
+ && ("?" `Text.isInfixOf` subj || "reply" `Text.isInfixOf` subj || "response" `Text.isInfixOf` subj)
+
+ hasUnsubscribe :: EmailSummary -> Bool
+ hasUnsubscribe = isJust <. emailUnsubscribe
+
+ formatEmailSummary :: [EmailSummary] -> [EmailSummary] -> Int -> Text
+ formatEmailSummary urgent needs marketingCount =
+ Text.unlines
+ <| ["📧 *email check*", ""]
+ <> (if null urgent then [] else ["*urgent:*"] <> map formatOne urgent <> [""])
+ <> (if null needs then [] else ["*may need response:*"] <> map formatOne needs <> [""])
+ <> [tshow marketingCount <> " marketing emails (use email_check to review)"]
+
+ formatOne :: EmailSummary -> Text
+ formatOne e =
+ "• " <> emailSubject e <> " (from: " <> emailFrom e <> ", uid: " <> tshow (emailUid e) <> ")"
+
+smtpServer :: String
+smtpServer = "bensima.com"
+
+smtpUser :: String
+smtpUser = "ben@bensima.com"
+
+withSmtpConnection :: (SMTP.SMTPConnection -> IO a) -> IO (Either Text a)
+withSmtpConnection action = do
+ pwResult <- getPassword
+ case pwResult of
+ Left err -> pure (Left err)
+ Right pw -> do
+ result <-
+ try <| do
+ conn <- SMTPSSL.connectSMTPSSL smtpServer
+ authSuccess <- SMTP.authenticate SMTP.LOGIN smtpUser (Text.unpack pw) conn
+ if authSuccess
+ then do
+ r <- action conn
+ SMTP.closeSMTP conn
+ pure r
+ else do
+ SMTP.closeSMTP conn
+ panic "SMTP authentication failed"
+ case result of
+ Left (e :: SomeException) -> pure (Left ("SMTP error: " <> tshow e))
+ Right r -> pure (Right r)
+
+sendApprovedEmail :: Text -> IO (Either Text Text)
+sendApprovedEmail draftId = do
+ mDraft <- Outreach.getDraft draftId
+ case mDraft of
+ Nothing -> pure (Left "Draft not found")
+ Just draft -> do
+ case Outreach.draftStatus draft of
+ Outreach.Approved -> do
+ let recipientAddr = Address Nothing (Outreach.draftRecipient draft)
+ senderAddr = Address (Just "Ben Sima") "ben@bensima.com"
+ subject = fromMaybe "" (Outreach.draftSubject draft)
+ body = LText.fromStrict (Outreach.draftBody draft)
+ footer = "\n\n---\nSent by Ava on behalf of Ben"
+ fullBody = body <> footer
+ mail = simpleMail' recipientAddr senderAddr subject fullBody
+ sendResult <-
+ withSmtpConnection <| \conn -> do
+ SMTP.sendMail mail conn
+ case sendResult of
+ Left err -> pure (Left err)
+ Right () -> do
+ _ <- Outreach.markSent draftId
+ pure (Right ("Email sent to " <> Outreach.draftRecipient draft))
+ Outreach.Pending -> pure (Left "Draft is still pending approval")
+ Outreach.Rejected -> pure (Left "Draft was rejected")
+ Outreach.Sent -> pure (Left "Draft was already sent")
+
+emailSendTool :: Engine.Tool
+emailSendTool =
+ Engine.Tool
+ { Engine.toolName = "email_send",
+ Engine.toolDescription =
+ "Send an approved outreach email. Only sends emails that have been approved "
+ <> "by Ben in the outreach queue. Use outreach_draft to create drafts first, "
+ <> "wait for approval, then use this to send.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "draft_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("ID of the approved draft to send" :: Text)
+ ]
+ ],
+ "required" .= (["draft_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeEmailSend
+ }
+
+executeEmailSend :: Aeson.Value -> IO Aeson.Value
+executeEmailSend v = do
+ let draftIdM = case v of
+ Aeson.Object obj -> case KeyMap.lookup "draft_id" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ _ -> Nothing
+ case draftIdM of
+ Nothing -> pure (Aeson.object ["error" .= ("Missing draft_id parameter" :: Text)])
+ Just draftId -> do
+ result <- sendApprovedEmail draftId
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right msg ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= msg
+ ]
+ )
diff --git a/Omni/Agent/Tools/Feedback.hs b/Omni/Agent/Tools/Feedback.hs
new file mode 100644
index 0000000..1ec684c
--- /dev/null
+++ b/Omni/Agent/Tools/Feedback.hs
@@ -0,0 +1,204 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Feedback query tool for PodcastItLater user research.
+--
+-- Allows the agent to query collected feedback from the PIL database.
+-- Feedback is submitted via /feedback on the PIL web app.
+--
+-- : out omni-agent-tools-feedback
+-- : dep aeson
+-- : dep http-conduit
+module Omni.Agent.Tools.Feedback
+ ( -- * Tools
+ feedbackListTool,
+ allFeedbackTools,
+
+ -- * Types
+ FeedbackEntry (..),
+ ListFeedbackArgs (..),
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.Environment (lookupEnv)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Feedback"
+ [ Test.unit "feedbackListTool has correct name" <| do
+ Engine.toolName feedbackListTool Test.@=? "feedback_list",
+ Test.unit "allFeedbackTools has 1 tool" <| do
+ length allFeedbackTools Test.@=? 1,
+ Test.unit "ListFeedbackArgs parses correctly" <| do
+ let json = Aeson.object ["limit" .= (10 :: Int)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: ListFeedbackArgs) -> lfaLimit args Test.@=? 10
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "ListFeedbackArgs parses with since" <| do
+ let json =
+ Aeson.object
+ [ "limit" .= (20 :: Int),
+ "since" .= ("2024-01-01" :: Text)
+ ]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: ListFeedbackArgs) -> do
+ lfaLimit args Test.@=? 20
+ lfaSince args Test.@=? Just "2024-01-01"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "FeedbackEntry JSON roundtrip" <| do
+ let entry =
+ FeedbackEntry
+ { feId = "abc123",
+ feEmail = Just "test@example.com",
+ feSource = Just "outreach",
+ feCampaignId = Nothing,
+ feRating = Just 4,
+ feFeedbackText = Just "Great product!",
+ feUseCase = Just "Commute listening",
+ feCreatedAt = "2024-01-15T10:00:00Z"
+ }
+ case Aeson.decode (Aeson.encode entry) of
+ Nothing -> Test.assertFailure "Failed to decode FeedbackEntry"
+ Just decoded -> do
+ feId decoded Test.@=? "abc123"
+ feEmail decoded Test.@=? Just "test@example.com"
+ feRating decoded Test.@=? Just 4
+ ]
+
+data FeedbackEntry = FeedbackEntry
+ { feId :: Text,
+ feEmail :: Maybe Text,
+ feSource :: Maybe Text,
+ feCampaignId :: Maybe Text,
+ feRating :: Maybe Int,
+ feFeedbackText :: Maybe Text,
+ feUseCase :: Maybe Text,
+ feCreatedAt :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON FeedbackEntry where
+ toJSON e =
+ Aeson.object
+ [ "id" .= feId e,
+ "email" .= feEmail e,
+ "source" .= feSource e,
+ "campaign_id" .= feCampaignId e,
+ "rating" .= feRating e,
+ "feedback_text" .= feFeedbackText e,
+ "use_case" .= feUseCase e,
+ "created_at" .= feCreatedAt e
+ ]
+
+instance Aeson.FromJSON FeedbackEntry where
+ parseJSON =
+ Aeson.withObject "FeedbackEntry" <| \v ->
+ (FeedbackEntry </ (v .: "id"))
+ <*> (v .:? "email")
+ <*> (v .:? "source")
+ <*> (v .:? "campaign_id")
+ <*> (v .:? "rating")
+ <*> (v .:? "feedback_text")
+ <*> (v .:? "use_case")
+ <*> (v .: "created_at")
+
+data ListFeedbackArgs = ListFeedbackArgs
+ { lfaLimit :: Int,
+ lfaSince :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON ListFeedbackArgs where
+ parseJSON =
+ Aeson.withObject "ListFeedbackArgs" <| \v ->
+ (ListFeedbackArgs </ (v .:? "limit" .!= 20))
+ <*> (v .:? "since")
+
+allFeedbackTools :: [Engine.Tool]
+allFeedbackTools = [feedbackListTool]
+
+feedbackListTool :: Engine.Tool
+feedbackListTool =
+ Engine.Tool
+ { Engine.toolName = "feedback_list",
+ Engine.toolDescription =
+ "List feedback entries from PodcastItLater users. "
+ <> "Use to review user research data and understand what potential "
+ <> "customers want from the product.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max entries to return (default: 20)" :: Text)
+ ],
+ "since"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("ISO date to filter by (entries after this date)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeFeedbackList
+ }
+
+executeFeedbackList :: Aeson.Value -> IO Aeson.Value
+executeFeedbackList v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: ListFeedbackArgs) -> do
+ mBaseUrl <- lookupEnv "PIL_BASE_URL"
+ let baseUrl = maybe "http://localhost:8000" Text.pack mBaseUrl
+ limit = min 100 (max 1 (lfaLimit args))
+ sinceParam = case lfaSince args of
+ Nothing -> ""
+ Just since -> "&since=" <> since
+ url = baseUrl <> "/api/feedback?limit=" <> tshow limit <> sinceParam
+ result <- fetchFeedback url
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right entries ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length entries,
+ "entries" .= entries
+ ]
+ )
+
+fetchFeedback :: Text -> IO (Either Text [FeedbackEntry])
+fetchFeedback url = do
+ result <-
+ try <| do
+ req <- HTTP.parseRequest (Text.unpack url)
+ resp <- HTTP.httpLBS req
+ pure (HTTP.getResponseStatusCode resp, HTTP.getResponseBody resp)
+ case result of
+ Left (e :: SomeException) -> pure (Left ("Request failed: " <> tshow e))
+ Right (status, body) ->
+ if status /= 200
+ then pure (Left ("HTTP " <> tshow status))
+ else case Aeson.decode body of
+ Nothing -> pure (Left "Failed to parse response")
+ Just entries -> pure (Right entries)
diff --git a/Omni/Agent/Tools/Hledger.hs b/Omni/Agent/Tools/Hledger.hs
new file mode 100644
index 0000000..59e0c05
--- /dev/null
+++ b/Omni/Agent/Tools/Hledger.hs
@@ -0,0 +1,489 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Hledger tools for personal finance queries and transaction entry.
+--
+-- Provides hledger access for agents via the nix-shell in ~/fund.
+--
+-- : out omni-agent-tools-hledger
+-- : dep aeson
+-- : dep process
+-- : dep directory
+module Omni.Agent.Tools.Hledger
+ ( -- * Tools
+ hledgerBalanceTool,
+ hledgerRegisterTool,
+ hledgerAddTool,
+ hledgerIncomeStatementTool,
+ hledgerBalanceSheetTool,
+
+ -- * All tools (for easy import)
+ allHledgerTools,
+
+ -- * Direct API
+ queryBalance,
+ queryRegister,
+ addTransaction,
+ incomeStatement,
+ balanceSheet,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TextIO
+import Data.Time (getCurrentTime, utcToLocalTime)
+import Data.Time.Format (defaultTimeLocale, formatTime)
+import Data.Time.LocalTime (getCurrentTimeZone)
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.Directory (doesFileExist)
+import System.Process (readProcessWithExitCode)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Hledger"
+ [ Test.unit "hledgerBalanceTool has correct name" <| do
+ Engine.toolName hledgerBalanceTool Test.@=? "hledger_balance",
+ Test.unit "hledgerRegisterTool has correct name" <| do
+ Engine.toolName hledgerRegisterTool Test.@=? "hledger_register",
+ Test.unit "hledgerAddTool has correct name" <| do
+ Engine.toolName hledgerAddTool Test.@=? "hledger_add",
+ Test.unit "hledgerIncomeStatementTool has correct name" <| do
+ Engine.toolName hledgerIncomeStatementTool Test.@=? "hledger_income_statement",
+ Test.unit "hledgerBalanceSheetTool has correct name" <| do
+ Engine.toolName hledgerBalanceSheetTool Test.@=? "hledger_balance_sheet",
+ Test.unit "allHledgerTools has 5 tools" <| do
+ length allHledgerTools Test.@=? 5
+ ]
+
+fundDir :: FilePath
+fundDir = "/home/ben/fund"
+
+journalFile :: FilePath
+journalFile = fundDir <> "/ledger.journal"
+
+transactionsFile :: FilePath
+transactionsFile = fundDir <> "/telegram-transactions.journal"
+
+runHledgerInFund :: [String] -> IO (Either Text Text)
+runHledgerInFund args = do
+ let fullArgs :: [String]
+ fullArgs = ["-f", journalFile] <> args
+ hledgerCmd :: String
+ hledgerCmd = "hledger " ++ List.unwords fullArgs
+ cmd :: String
+ cmd = "cd " ++ fundDir ++ " && " ++ hledgerCmd
+ result <-
+ try <| readProcessWithExitCode "nix-shell" [fundDir ++ "/shell.nix", "--run", cmd] ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("hledger error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess -> pure (Right (Text.pack stdoutStr))
+ ExitFailure code ->
+ pure (Left ("hledger failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+allHledgerTools :: [Engine.Tool]
+allHledgerTools =
+ [ hledgerBalanceTool,
+ hledgerRegisterTool,
+ hledgerAddTool,
+ hledgerIncomeStatementTool,
+ hledgerBalanceSheetTool
+ ]
+
+queryBalance :: Maybe Text -> Maybe Text -> Maybe Text -> IO (Either Text Text)
+queryBalance maybePattern maybePeriod maybeCurrency = do
+ let patternArg = maybe [] (\p -> [Text.unpack p]) maybePattern
+ periodArg = maybe [] (\p -> ["-p", "'" ++ Text.unpack p ++ "'"]) maybePeriod
+ currency = maybe "USD" Text.unpack maybeCurrency
+ currencyArg = ["-X", currency]
+ runHledgerInFund (["bal", "-1", "--flat"] <> currencyArg <> patternArg <> periodArg)
+
+queryRegister :: Text -> Maybe Int -> Maybe Text -> Maybe Text -> IO (Either Text Text)
+queryRegister accountPattern maybeLimit maybeCurrency maybePeriod = do
+ let limitArg = maybe ["-n", "10"] (\n -> ["-n", show n]) maybeLimit
+ currency = maybe "USD" Text.unpack maybeCurrency
+ currencyArg = ["-X", currency]
+ periodArg = maybe [] (\p -> ["-p", "'" ++ Text.unpack p ++ "'"]) maybePeriod
+ runHledgerInFund (["reg", Text.unpack accountPattern] <> currencyArg <> periodArg <> limitArg)
+
+incomeStatement :: Maybe Text -> Maybe Text -> IO (Either Text Text)
+incomeStatement maybePeriod maybeCurrency = do
+ let periodArg = maybe ["-p", "thismonth"] (\p -> ["-p", "'" ++ Text.unpack p ++ "'"]) maybePeriod
+ currency = maybe "USD" Text.unpack maybeCurrency
+ currencyArg = ["-X", currency]
+ runHledgerInFund (["is"] <> currencyArg <> periodArg)
+
+balanceSheet :: Maybe Text -> IO (Either Text Text)
+balanceSheet maybeCurrency = do
+ let currency = maybe "USD" Text.unpack maybeCurrency
+ currencyArg = ["-X", currency]
+ runHledgerInFund (["bs"] <> currencyArg)
+
+addTransaction :: Text -> Text -> Text -> Text -> Maybe Text -> IO (Either Text Text)
+addTransaction description fromAccount toAccount amount maybeDate = do
+ now <- getCurrentTime
+ tz <- getCurrentTimeZone
+ let localTime = utcToLocalTime tz now
+ todayStr = formatTime defaultTimeLocale "%Y-%m-%d" localTime
+ dateStr = maybe todayStr Text.unpack maybeDate
+ transaction =
+ Text.unlines
+ [ "",
+ Text.pack dateStr <> " " <> description,
+ " " <> toAccount <> " " <> amount,
+ " " <> fromAccount
+ ]
+ exists <- doesFileExist transactionsFile
+ unless exists <| do
+ TextIO.writeFile transactionsFile "; Transactions added via Telegram bot\n"
+ TextIO.appendFile transactionsFile transaction
+ pure (Right ("Transaction added:\n" <> transaction))
+
+hledgerBalanceTool :: Engine.Tool
+hledgerBalanceTool =
+ Engine.Tool
+ { Engine.toolName = "hledger_balance",
+ Engine.toolDescription =
+ "Query account balances from hledger. "
+ <> "Account patterns: 'as' (assets), 'li' (liabilities), 'ex' (expenses), 'in' (income), 'eq' (equity). "
+ <> "Can drill down like 'as:me:cash' or 'ex:us:need'. "
+ <> "Currency defaults to USD but can be changed (e.g., 'BTC', 'ETH'). "
+ <> "Period uses hledger syntax: 'thismonth', 'lastmonth', 'thisyear', '2024', '2024-06', "
+ <> "'from 2024-01-01 to 2024-06-30', 'from 2024-06-01'.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "account_pattern"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Account pattern to filter (e.g., 'as:me:cash', 'ex', 'li:us:cred')" :: Text)
+ ],
+ "period"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("hledger period: 'thismonth', 'lastmonth', '2024', '2024-06', 'from 2024-01-01 to 2024-06-30'" :: Text)
+ ],
+ "currency"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Currency to display values in (default: 'USD'). Examples: 'BTC', 'ETH', 'EUR'" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeBalance
+ }
+
+executeBalance :: Aeson.Value -> IO Aeson.Value
+executeBalance v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: BalanceArgs) -> do
+ result <- queryBalance (baPattern args) (baPeriod args) (baCurrency args)
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right output ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "balances" .= output
+ ]
+ )
+
+data BalanceArgs = BalanceArgs
+ { baPattern :: Maybe Text,
+ baPeriod :: Maybe Text,
+ baCurrency :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON BalanceArgs where
+ parseJSON =
+ Aeson.withObject "BalanceArgs" <| \v ->
+ (BalanceArgs </ (v .:? "account_pattern"))
+ <*> (v .:? "period")
+ <*> (v .:? "currency")
+
+hledgerRegisterTool :: Engine.Tool
+hledgerRegisterTool =
+ Engine.Tool
+ { Engine.toolName = "hledger_register",
+ Engine.toolDescription =
+ "Show recent transactions for an account. "
+ <> "Useful for seeing transaction history and checking recent spending. "
+ <> "Currency defaults to USD.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "account_pattern"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Account pattern to show transactions for (e.g., 'ex:us:need:grocery')" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max transactions to show (default: 10)" :: Text)
+ ],
+ "currency"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Currency to display values in (default: 'USD')" :: Text)
+ ],
+ "period"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("hledger period: 'thismonth', 'lastmonth', '2024', '2024-06', 'from 2024-06-01 to 2024-12-31'" :: Text)
+ ]
+ ],
+ "required" .= (["account_pattern"] :: [Text])
+ ],
+ Engine.toolExecute = executeRegister
+ }
+
+executeRegister :: Aeson.Value -> IO Aeson.Value
+executeRegister v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: RegisterArgs) -> do
+ result <- queryRegister (raPattern args) (raLimit args) (raCurrency args) (raPeriod args)
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right output ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "transactions" .= output
+ ]
+ )
+
+data RegisterArgs = RegisterArgs
+ { raPattern :: Text,
+ raLimit :: Maybe Int,
+ raCurrency :: Maybe Text,
+ raPeriod :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON RegisterArgs where
+ parseJSON =
+ Aeson.withObject "RegisterArgs" <| \v ->
+ (RegisterArgs </ (v .: "account_pattern"))
+ <*> (v .:? "limit")
+ <*> (v .:? "currency")
+ <*> (v .:? "period")
+
+hledgerAddTool :: Engine.Tool
+hledgerAddTool =
+ Engine.Tool
+ { Engine.toolName = "hledger_add",
+ Engine.toolDescription =
+ "Add a new transaction to the ledger. "
+ <> "Use for recording expenses like 'I spent $30 at the barber'. "
+ <> "Account naming: ex:me:want (personal discretionary), ex:us:need (shared necessities), "
+ <> "as:me:cash:checking (bank account), li:us:cred:chase (credit card). "
+ <> "Common expense accounts: ex:us:need:grocery, ex:us:need:utilities, ex:me:want:dining, ex:me:want:grooming.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "description"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Transaction description (e.g., 'Haircut at Joe's Barber')" :: Text)
+ ],
+ "from_account"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Account paying (e.g., 'as:me:cash:checking', 'li:us:cred:chase')" :: Text)
+ ],
+ "to_account"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Account receiving (e.g., 'ex:me:want:grooming')" :: Text)
+ ],
+ "amount"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Amount with currency (e.g., '$30.00', '30 USD')" :: Text)
+ ],
+ "date"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Transaction date YYYY-MM-DD (default: today)" :: Text)
+ ]
+ ],
+ "required" .= (["description", "from_account", "to_account", "amount"] :: [Text])
+ ],
+ Engine.toolExecute = executeAdd
+ }
+
+executeAdd :: Aeson.Value -> IO Aeson.Value
+executeAdd v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: AddArgs) -> do
+ result <-
+ addTransaction
+ (aaDescription args)
+ (aaFromAccount args)
+ (aaToAccount args)
+ (aaAmount args)
+ (aaDate args)
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right msg ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= msg
+ ]
+ )
+
+data AddArgs = AddArgs
+ { aaDescription :: Text,
+ aaFromAccount :: Text,
+ aaToAccount :: Text,
+ aaAmount :: Text,
+ aaDate :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON AddArgs where
+ parseJSON =
+ Aeson.withObject "AddArgs" <| \v ->
+ (AddArgs </ (v .: "description"))
+ <*> (v .: "from_account")
+ <*> (v .: "to_account")
+ <*> (v .: "amount")
+ <*> (v .:? "date")
+
+hledgerIncomeStatementTool :: Engine.Tool
+hledgerIncomeStatementTool =
+ Engine.Tool
+ { Engine.toolName = "hledger_income_statement",
+ Engine.toolDescription =
+ "Show income statement (income vs expenses) for a period. "
+ <> "Good for seeing 'how much did I spend this month' or 'what's my net income'. "
+ <> "Currency defaults to USD. "
+ <> "Period uses hledger syntax: 'thismonth', 'lastmonth', '2024', 'from 2024-01-01 to 2024-06-30'.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "period"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("hledger period (default: 'thismonth'): 'lastmonth', '2024', '2024-06', 'from 2024-01-01 to 2024-06-30'" :: Text)
+ ],
+ "currency"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Currency to display values in (default: 'USD')" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeIncomeStatement
+ }
+
+executeIncomeStatement :: Aeson.Value -> IO Aeson.Value
+executeIncomeStatement v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: IncomeStatementArgs) -> do
+ result <- incomeStatement (isaPeriod args) (isaCurrency args)
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right output ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "income_statement" .= output
+ ]
+ )
+
+data IncomeStatementArgs = IncomeStatementArgs
+ { isaPeriod :: Maybe Text,
+ isaCurrency :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON IncomeStatementArgs where
+ parseJSON =
+ Aeson.withObject "IncomeStatementArgs" <| \v ->
+ (IncomeStatementArgs </ (v .:? "period"))
+ <*> (v .:? "currency")
+
+hledgerBalanceSheetTool :: Engine.Tool
+hledgerBalanceSheetTool =
+ Engine.Tool
+ { Engine.toolName = "hledger_balance_sheet",
+ Engine.toolDescription =
+ "Show current balance sheet (assets, liabilities, net worth). "
+ <> "Good for seeing 'what's my net worth' or 'how much do I have'. "
+ <> "Currency defaults to USD.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "currency"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Currency to display values in (default: 'USD')" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeBalanceSheet
+ }
+
+executeBalanceSheet :: Aeson.Value -> IO Aeson.Value
+executeBalanceSheet v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: BalanceSheetArgs) -> do
+ result <- balanceSheet (bsCurrency args)
+ case result of
+ Left err -> pure (Aeson.object ["error" .= err])
+ Right output ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "balance_sheet" .= output
+ ]
+ )
+
+newtype BalanceSheetArgs = BalanceSheetArgs
+ { bsCurrency :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON BalanceSheetArgs where
+ parseJSON =
+ Aeson.withObject "BalanceSheetArgs" <| \v ->
+ BalanceSheetArgs </ (v .:? "currency")
diff --git a/Omni/Agent/Tools/Http.hs b/Omni/Agent/Tools/Http.hs
new file mode 100644
index 0000000..d996ff5
--- /dev/null
+++ b/Omni/Agent/Tools/Http.hs
@@ -0,0 +1,338 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | HTTP request tools for agent API interactions.
+--
+-- Provides http_get and http_post tools for making HTTP requests.
+-- Supports headers, query params, and JSON body.
+--
+-- : out omni-agent-tools-http
+-- : dep aeson
+-- : dep http-conduit
+module Omni.Agent.Tools.Http
+ ( -- * Tools
+ httpGetTool,
+ httpPostTool,
+ allHttpTools,
+
+ -- * Types
+ HttpGetArgs (..),
+ HttpPostArgs (..),
+ HttpResult (..),
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.Key as Key
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.CaseInsensitive as CI
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Network.HTTP.Client as HTTPClient
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.Timeout (timeout)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Http"
+ [ Test.unit "httpGetTool has correct name" <| do
+ Engine.toolName httpGetTool Test.@=? "http_get",
+ Test.unit "httpPostTool has correct name" <| do
+ Engine.toolName httpPostTool Test.@=? "http_post",
+ Test.unit "allHttpTools has 2 tools" <| do
+ length allHttpTools Test.@=? 2,
+ Test.unit "HttpGetArgs parses correctly" <| do
+ let json = Aeson.object ["url" .= ("https://example.com" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: HttpGetArgs) -> httpGetUrl args Test.@=? "https://example.com"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "HttpGetArgs parses with headers" <| do
+ let json =
+ Aeson.object
+ [ "url" .= ("https://api.example.com" :: Text),
+ "headers" .= Aeson.object ["Authorization" .= ("Bearer token" :: Text)]
+ ]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: HttpGetArgs) -> do
+ httpGetUrl args Test.@=? "https://api.example.com"
+ isJust (httpGetHeaders args) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "HttpPostArgs parses correctly" <| do
+ let json =
+ Aeson.object
+ [ "url" .= ("https://api.example.com" :: Text),
+ "body" .= Aeson.object ["key" .= ("value" :: Text)]
+ ]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: HttpPostArgs) -> do
+ httpPostUrl args Test.@=? "https://api.example.com"
+ isJust (httpPostBody args) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "HttpResult JSON roundtrip" <| do
+ let result =
+ HttpResult
+ { httpResultStatus = 200,
+ httpResultHeaders = Aeson.object ["Content-Type" .= ("application/json" :: Text)],
+ httpResultBody = "{\"ok\": true}"
+ }
+ case Aeson.decode (Aeson.encode result) of
+ Nothing -> Test.assertFailure "Failed to decode HttpResult"
+ Just decoded -> httpResultStatus decoded Test.@=? 200,
+ Test.unit "http_get fetches real URL" <| do
+ let args = Aeson.object ["url" .= ("https://httpbin.org/get" :: Text)]
+ result <- Engine.toolExecute httpGetTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: HttpResult) -> do
+ httpResultStatus r Test.@=? 200
+ ("httpbin.org" `Text.isInfixOf` httpResultBody r) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "http_post with JSON body" <| do
+ let args =
+ Aeson.object
+ [ "url" .= ("https://httpbin.org/post" :: Text),
+ "body" .= Aeson.object ["test" .= ("value" :: Text)]
+ ]
+ result <- Engine.toolExecute httpPostTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: HttpResult) -> do
+ httpResultStatus r Test.@=? 200
+ ("test" `Text.isInfixOf` httpResultBody r) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e
+ ]
+
+data HttpGetArgs = HttpGetArgs
+ { httpGetUrl :: Text,
+ httpGetHeaders :: Maybe Aeson.Object,
+ httpGetParams :: Maybe Aeson.Object
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON HttpGetArgs where
+ parseJSON =
+ Aeson.withObject "HttpGetArgs" <| \v ->
+ (HttpGetArgs </ (v .: "url"))
+ <*> (v .:? "headers")
+ <*> (v .:? "params")
+
+data HttpPostArgs = HttpPostArgs
+ { httpPostUrl :: Text,
+ httpPostHeaders :: Maybe Aeson.Object,
+ httpPostBody :: Maybe Aeson.Value,
+ httpPostContentType :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON HttpPostArgs where
+ parseJSON =
+ Aeson.withObject "HttpPostArgs" <| \v ->
+ (HttpPostArgs </ (v .: "url"))
+ <*> (v .:? "headers")
+ <*> (v .:? "body")
+ <*> (v .:? "content_type")
+
+data HttpResult = HttpResult
+ { httpResultStatus :: Int,
+ httpResultHeaders :: Aeson.Value,
+ httpResultBody :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON HttpResult where
+ toJSON r =
+ Aeson.object
+ [ "status" .= httpResultStatus r,
+ "headers" .= httpResultHeaders r,
+ "body" .= httpResultBody r
+ ]
+
+instance Aeson.FromJSON HttpResult where
+ parseJSON =
+ Aeson.withObject "HttpResult" <| \v ->
+ (HttpResult </ (v .: "status"))
+ <*> (v .: "headers")
+ <*> (v .: "body")
+
+allHttpTools :: [Engine.Tool]
+allHttpTools = [httpGetTool, httpPostTool]
+
+httpGetTool :: Engine.Tool
+httpGetTool =
+ Engine.Tool
+ { Engine.toolName = "http_get",
+ Engine.toolDescription =
+ "Make an HTTP GET request. Returns status code, headers, and response body. "
+ <> "Use for fetching data from APIs or web pages.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "url"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The URL to request" :: Text)
+ ],
+ "headers"
+ .= Aeson.object
+ [ "type" .= ("object" :: Text),
+ "description" .= ("Optional headers as key-value pairs" :: Text)
+ ],
+ "params"
+ .= Aeson.object
+ [ "type" .= ("object" :: Text),
+ "description" .= ("Optional query parameters as key-value pairs" :: Text)
+ ]
+ ],
+ "required" .= (["url"] :: [Text])
+ ],
+ Engine.toolExecute = executeHttpGet
+ }
+
+executeHttpGet :: Aeson.Value -> IO Aeson.Value
+executeHttpGet v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError ("Invalid arguments: " <> Text.pack e)
+ Aeson.Success args -> do
+ let urlWithParams = case httpGetParams args of
+ Nothing -> httpGetUrl args
+ Just params ->
+ let paramList = [(k, v') | (k, v') <- KeyMap.toList params]
+ paramStr = Text.intercalate "&" [Key.toText k <> "=" <> valueToText v' | (k, v') <- paramList]
+ in if Text.null paramStr
+ then httpGetUrl args
+ else httpGetUrl args <> "?" <> paramStr
+ doHttpRequest "GET" urlWithParams (httpGetHeaders args) Nothing
+
+httpPostTool :: Engine.Tool
+httpPostTool =
+ Engine.Tool
+ { Engine.toolName = "http_post",
+ Engine.toolDescription =
+ "Make an HTTP POST request. Returns status code, headers, and response body. "
+ <> "Use for submitting data to APIs or forms.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "url"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The URL to request" :: Text)
+ ],
+ "headers"
+ .= Aeson.object
+ [ "type" .= ("object" :: Text),
+ "description" .= ("Optional headers as key-value pairs" :: Text)
+ ],
+ "body"
+ .= Aeson.object
+ [ "type" .= ("object" :: Text),
+ "description" .= ("Optional JSON body (object or string)" :: Text)
+ ],
+ "content_type"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Content type (default: application/json)" :: Text)
+ ]
+ ],
+ "required" .= (["url"] :: [Text])
+ ],
+ Engine.toolExecute = executeHttpPost
+ }
+
+executeHttpPost :: Aeson.Value -> IO Aeson.Value
+executeHttpPost v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError ("Invalid arguments: " <> Text.pack e)
+ Aeson.Success args -> do
+ let contentType = fromMaybe "application/json" (httpPostContentType args)
+ body = case httpPostBody args of
+ Nothing -> Nothing
+ Just b -> Just (contentType, BL.toStrict (Aeson.encode b))
+ doHttpRequest "POST" (httpPostUrl args) (httpPostHeaders args) body
+
+doHttpRequest ::
+ ByteString ->
+ Text ->
+ Maybe Aeson.Object ->
+ Maybe (Text, ByteString) ->
+ IO Aeson.Value
+doHttpRequest method url mHeaders mBody = do
+ let timeoutMicros = 30 * 1000000
+ result <-
+ try <| do
+ req0 <- HTTP.parseRequest (Text.unpack url)
+ let req1 =
+ HTTP.setRequestMethod method
+ <| HTTP.setRequestHeader "User-Agent" ["OmniAgent/1.0"]
+ <| HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro timeoutMicros)
+ <| req0
+ req2 = case mHeaders of
+ Nothing -> req1
+ Just hdrs -> foldr addHeader req1 (KeyMap.toList hdrs)
+ req3 = case mBody of
+ Nothing -> req2
+ Just (ct, bodyBytes) ->
+ HTTP.setRequestHeader "Content-Type" [TE.encodeUtf8 ct]
+ <| HTTP.setRequestBodyLBS (BL.fromStrict bodyBytes)
+ <| req2
+ mResp <- timeout timeoutMicros (HTTP.httpLBS req3)
+ case mResp of
+ Nothing -> pure (Left "Request timed out after 30 seconds")
+ Just resp -> pure (Right resp)
+ case result of
+ Left (e :: SomeException) -> pure <| mkError ("Request failed: " <> tshow e)
+ Right (Left err) -> pure <| mkError err
+ Right (Right response) -> do
+ let status = HTTP.getResponseStatusCode response
+ respHeaders = HTTP.getResponseHeaders response
+ headerObj =
+ Aeson.object
+ [ Key.fromText (TE.decodeUtf8 (CI.original k)) .= TE.decodeUtf8 v
+ | (k, v) <- respHeaders
+ ]
+ body = TE.decodeUtf8With (\_ _ -> Just '?') (BL.toStrict (HTTP.getResponseBody response))
+ pure
+ <| Aeson.toJSON
+ <| HttpResult
+ { httpResultStatus = status,
+ httpResultHeaders = headerObj,
+ httpResultBody = body
+ }
+ where
+ addHeader :: (Aeson.Key, Aeson.Value) -> HTTP.Request -> HTTP.Request
+ addHeader (k, v) req =
+ let headerName = CI.mk (TE.encodeUtf8 (Key.toText k))
+ headerValue = TE.encodeUtf8 (valueToText v)
+ in HTTP.addRequestHeader headerName headerValue req
+
+valueToText :: Aeson.Value -> Text
+valueToText (Aeson.String s) = s
+valueToText (Aeson.Number n) = tshow n
+valueToText (Aeson.Bool b) = if b then "true" else "false"
+valueToText Aeson.Null = ""
+valueToText other = TE.decodeUtf8 (BL.toStrict (Aeson.encode other))
+
+mkError :: Text -> Aeson.Value
+mkError err =
+ Aeson.object
+ [ "status" .= (-1 :: Int),
+ "headers" .= Aeson.object [],
+ "body" .= err
+ ]
diff --git a/Omni/Agent/Tools/Notes.hs b/Omni/Agent/Tools/Notes.hs
new file mode 100644
index 0000000..e3cef5d
--- /dev/null
+++ b/Omni/Agent/Tools/Notes.hs
@@ -0,0 +1,357 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Quick notes tool for agents.
+--
+-- Provides simple CRUD for tagged notes stored in memory.db.
+--
+-- : out omni-agent-tools-notes
+-- : dep aeson
+-- : dep sqlite-simple
+module Omni.Agent.Tools.Notes
+ ( -- * Tools
+ noteAddTool,
+ noteListTool,
+ noteDeleteTool,
+
+ -- * Direct API
+ Note (..),
+ createNote,
+ listNotes,
+ listNotesByTopic,
+ deleteNote,
+
+ -- * Database
+ initNotesTable,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import Data.Time (UTCTime, getCurrentTime)
+import qualified Database.SQLite.Simple as SQL
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Memory as Memory
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Notes"
+ [ Test.unit "noteAddTool has correct schema" <| do
+ let tool = noteAddTool "test-user-id"
+ Engine.toolName tool Test.@=? "note_add",
+ Test.unit "noteListTool has correct schema" <| do
+ let tool = noteListTool "test-user-id"
+ Engine.toolName tool Test.@=? "note_list",
+ Test.unit "noteDeleteTool has correct schema" <| do
+ let tool = noteDeleteTool "test-user-id"
+ Engine.toolName tool Test.@=? "note_delete",
+ Test.unit "Note JSON roundtrip" <| do
+ now <- getCurrentTime
+ let n =
+ Note
+ { noteId = 1,
+ noteUserId = "user-123",
+ noteTopic = "groceries",
+ noteContent = "Buy milk",
+ noteCreatedAt = now
+ }
+ case Aeson.decode (Aeson.encode n) of
+ Nothing -> Test.assertFailure "Failed to decode Note"
+ Just decoded -> do
+ noteContent decoded Test.@=? "Buy milk"
+ noteTopic decoded Test.@=? "groceries"
+ ]
+
+data Note = Note
+ { noteId :: Int,
+ noteUserId :: Text,
+ noteTopic :: Text,
+ noteContent :: Text,
+ noteCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Note where
+ toJSON n =
+ Aeson.object
+ [ "id" .= noteId n,
+ "user_id" .= noteUserId n,
+ "topic" .= noteTopic n,
+ "content" .= noteContent n,
+ "created_at" .= noteCreatedAt n
+ ]
+
+instance Aeson.FromJSON Note where
+ parseJSON =
+ Aeson.withObject "Note" <| \v ->
+ (Note </ (v .: "id"))
+ <*> (v .: "user_id")
+ <*> (v .: "topic")
+ <*> (v .: "content")
+ <*> (v .: "created_at")
+
+instance SQL.FromRow Note where
+ fromRow =
+ (Note </ SQL.field)
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+initNotesTable :: SQL.Connection -> IO ()
+initNotesTable conn = do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS notes (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL,\
+ \ topic TEXT NOT NULL,\
+ \ content TEXT NOT NULL,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_notes_user ON notes(user_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_notes_topic ON notes(user_id, topic)"
+
+createNote :: Text -> Text -> Text -> IO Note
+createNote uid topic content = do
+ now <- getCurrentTime
+ Memory.withMemoryDb <| \conn -> do
+ initNotesTable conn
+ SQL.execute
+ conn
+ "INSERT INTO notes (user_id, topic, content, created_at) VALUES (?, ?, ?, ?)"
+ (uid, topic, content, now)
+ rowId <- SQL.lastInsertRowId conn
+ pure
+ Note
+ { noteId = fromIntegral rowId,
+ noteUserId = uid,
+ noteTopic = topic,
+ noteContent = content,
+ noteCreatedAt = now
+ }
+
+listNotes :: Text -> Int -> IO [Note]
+listNotes uid limit =
+ Memory.withMemoryDb <| \conn -> do
+ initNotesTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, topic, content, created_at \
+ \FROM notes WHERE user_id = ? \
+ \ORDER BY created_at DESC LIMIT ?"
+ (uid, limit)
+
+listNotesByTopic :: Text -> Text -> Int -> IO [Note]
+listNotesByTopic uid topic limit =
+ Memory.withMemoryDb <| \conn -> do
+ initNotesTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, topic, content, created_at \
+ \FROM notes WHERE user_id = ? AND topic = ? \
+ \ORDER BY created_at DESC LIMIT ?"
+ (uid, topic, limit)
+
+deleteNote :: Text -> Int -> IO Bool
+deleteNote uid nid =
+ Memory.withMemoryDb <| \conn -> do
+ initNotesTable conn
+ SQL.execute
+ conn
+ "DELETE FROM notes WHERE id = ? AND user_id = ?"
+ (nid, uid)
+ changes <- SQL.changes conn
+ pure (changes > 0)
+
+noteAddTool :: Text -> Engine.Tool
+noteAddTool uid =
+ Engine.Tool
+ { Engine.toolName = "note_add",
+ Engine.toolDescription =
+ "Add a quick note on a topic. Use for reminders, lists, ideas, or anything "
+ <> "the user wants to jot down. Topics help organize notes (e.g., 'groceries', "
+ <> "'ideas', 'todo', 'recipes').",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "topic"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Topic/category for the note (e.g., 'groceries', 'todo')" :: Text)
+ ],
+ "content"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The note content" :: Text)
+ ]
+ ],
+ "required" .= (["topic", "content"] :: [Text])
+ ],
+ Engine.toolExecute = executeNoteAdd uid
+ }
+
+executeNoteAdd :: Text -> Aeson.Value -> IO Aeson.Value
+executeNoteAdd uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: NoteAddArgs) -> do
+ newNote <- createNote uid (naTopic args) (naContent args)
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "note_id" .= noteId newNote,
+ "message" .= ("Added note to '" <> noteTopic newNote <> "': " <> noteContent newNote)
+ ]
+ )
+
+data NoteAddArgs = NoteAddArgs
+ { naTopic :: Text,
+ naContent :: Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON NoteAddArgs where
+ parseJSON =
+ Aeson.withObject "NoteAddArgs" <| \v ->
+ (NoteAddArgs </ (v .: "topic"))
+ <*> (v .: "content")
+
+noteListTool :: Text -> Engine.Tool
+noteListTool uid =
+ Engine.Tool
+ { Engine.toolName = "note_list",
+ Engine.toolDescription =
+ "List notes, optionally filtered by topic. Use to show the user their "
+ <> "saved notes or check what's on a specific list.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "topic"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Filter by topic (optional, omit to list all)" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max notes to return (default: 20)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeNoteList uid
+ }
+
+executeNoteList :: Text -> Aeson.Value -> IO Aeson.Value
+executeNoteList uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: NoteListArgs) -> do
+ let lim = min 50 (max 1 (nlLimit args))
+ notes <- case nlTopic args of
+ Just topic -> listNotesByTopic uid topic lim
+ Nothing -> listNotes uid lim
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length notes,
+ "notes" .= formatNotesForLLM notes
+ ]
+ )
+
+formatNotesForLLM :: [Note] -> Text
+formatNotesForLLM [] = "No notes found."
+formatNotesForLLM notes =
+ Text.unlines (map formatNote notes)
+ where
+ formatNote n =
+ "[" <> noteTopic n <> "] " <> noteContent n <> " (id: " <> tshow (noteId n) <> ")"
+
+data NoteListArgs = NoteListArgs
+ { nlTopic :: Maybe Text,
+ nlLimit :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON NoteListArgs where
+ parseJSON =
+ Aeson.withObject "NoteListArgs" <| \v ->
+ (NoteListArgs </ (v .:? "topic"))
+ <*> (v .:? "limit" .!= 20)
+
+noteDeleteTool :: Text -> Engine.Tool
+noteDeleteTool uid =
+ Engine.Tool
+ { Engine.toolName = "note_delete",
+ Engine.toolDescription =
+ "Delete a note by its ID. Use after the user says they've completed an item "
+ <> "or no longer need a note.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "note_id"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("The ID of the note to delete" :: Text)
+ ]
+ ],
+ "required" .= (["note_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeNoteDelete uid
+ }
+
+executeNoteDelete :: Text -> Aeson.Value -> IO Aeson.Value
+executeNoteDelete uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: NoteDeleteArgs) -> do
+ deleted <- deleteNote uid (ndNoteId args)
+ if deleted
+ then
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= ("Note deleted" :: Text)
+ ]
+ )
+ else
+ pure
+ ( Aeson.object
+ [ "success" .= False,
+ "error" .= ("Note not found or already deleted" :: Text)
+ ]
+ )
+
+newtype NoteDeleteArgs = NoteDeleteArgs
+ { ndNoteId :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON NoteDeleteArgs where
+ parseJSON =
+ Aeson.withObject "NoteDeleteArgs" <| \v ->
+ NoteDeleteArgs </ (v .: "note_id")
diff --git a/Omni/Agent/Tools/Outreach.hs b/Omni/Agent/Tools/Outreach.hs
new file mode 100644
index 0000000..e576cbd
--- /dev/null
+++ b/Omni/Agent/Tools/Outreach.hs
@@ -0,0 +1,513 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Outreach approval queue for agent use.
+--
+-- Provides tools for creating and tracking outreach drafts that require
+-- human approval before sending (emails, messages, etc).
+--
+-- Drafts flow: pending -> approved -> sent (or rejected)
+--
+-- : out omni-agent-tools-outreach
+-- : dep aeson
+-- : dep uuid
+-- : dep directory
+module Omni.Agent.Tools.Outreach
+ ( -- * Tools
+ outreachDraftTool,
+ outreachListTool,
+ outreachStatusTool,
+ allOutreachTools,
+
+ -- * Types
+ OutreachDraft (..),
+ OutreachType (..),
+ OutreachStatus (..),
+
+ -- * Direct API
+ createDraft,
+ listDrafts,
+ getDraft,
+ approveDraft,
+ rejectDraft,
+ markSent,
+ getPendingCount,
+
+ -- * Paths
+ outreachDir,
+ pendingDir,
+ approvedDir,
+ rejectedDir,
+ sentDir,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Control.Monad.Fail (MonadFail (fail))
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Data.Text.IO as TextIO
+import Data.Time (UTCTime, getCurrentTime)
+import qualified Data.UUID as UUID
+import qualified Data.UUID.V4 as UUID
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Paths as Paths
+import qualified Omni.Test as Test
+import qualified System.Directory as Directory
+import System.FilePath ((</>))
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Outreach"
+ [ Test.unit "outreachDraftTool has correct name" <| do
+ Engine.toolName outreachDraftTool Test.@=? "outreach_draft",
+ Test.unit "outreachListTool has correct name" <| do
+ Engine.toolName outreachListTool Test.@=? "outreach_list",
+ Test.unit "outreachStatusTool has correct name" <| do
+ Engine.toolName outreachStatusTool Test.@=? "outreach_status",
+ Test.unit "allOutreachTools has 3 tools" <| do
+ length allOutreachTools Test.@=? 3,
+ Test.unit "OutreachDraft JSON roundtrip" <| do
+ now <- getCurrentTime
+ let draft =
+ OutreachDraft
+ { draftId = "test-id-123",
+ draftType = Email,
+ draftCreatedAt = now,
+ draftSubject = Just "Test subject",
+ draftRecipient = "test@example.com",
+ draftBody = "Hello, this is a test.",
+ draftContext = "Testing outreach system",
+ draftStatus = Pending,
+ draftRejectReason = Nothing
+ }
+ case Aeson.decode (Aeson.encode draft) of
+ Nothing -> Test.assertFailure "Failed to decode OutreachDraft"
+ Just decoded -> do
+ draftId decoded Test.@=? "test-id-123"
+ draftType decoded Test.@=? Email
+ draftRecipient decoded Test.@=? "test@example.com",
+ Test.unit "OutreachType JSON roundtrip" <| do
+ case Aeson.decode (Aeson.encode Email) of
+ Just Email -> pure ()
+ _ -> Test.assertFailure "Failed to decode Email"
+ case Aeson.decode (Aeson.encode Message) of
+ Just Message -> pure ()
+ _ -> Test.assertFailure "Failed to decode Message",
+ Test.unit "OutreachStatus JSON roundtrip" <| do
+ let statuses = [Pending, Approved, Rejected, Sent]
+ forM_ statuses <| \s ->
+ case Aeson.decode (Aeson.encode s) of
+ Nothing -> Test.assertFailure ("Failed to decode " <> show s)
+ Just decoded -> decoded Test.@=? s
+ ]
+
+outreachDir :: FilePath
+outreachDir = Paths.outreachDir
+
+pendingDir :: FilePath
+pendingDir = outreachDir </> "pending"
+
+approvedDir :: FilePath
+approvedDir = outreachDir </> "approved"
+
+rejectedDir :: FilePath
+rejectedDir = outreachDir </> "rejected"
+
+sentDir :: FilePath
+sentDir = outreachDir </> "sent"
+
+data OutreachType = Email | Message
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON OutreachType where
+ toJSON Email = Aeson.String "email"
+ toJSON Message = Aeson.String "message"
+
+instance Aeson.FromJSON OutreachType where
+ parseJSON =
+ Aeson.withText "OutreachType" <| \t ->
+ case Text.toLower t of
+ "email" -> pure Email
+ "message" -> pure Message
+ _ -> fail "OutreachType must be 'email' or 'message'"
+
+data OutreachStatus = Pending | Approved | Rejected | Sent
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON OutreachStatus where
+ toJSON Pending = Aeson.String "pending"
+ toJSON Approved = Aeson.String "approved"
+ toJSON Rejected = Aeson.String "rejected"
+ toJSON Sent = Aeson.String "sent"
+
+instance Aeson.FromJSON OutreachStatus where
+ parseJSON =
+ Aeson.withText "OutreachStatus" <| \t ->
+ case Text.toLower t of
+ "pending" -> pure Pending
+ "approved" -> pure Approved
+ "rejected" -> pure Rejected
+ "sent" -> pure Sent
+ _ -> fail "OutreachStatus must be 'pending', 'approved', 'rejected', or 'sent'"
+
+data OutreachDraft = OutreachDraft
+ { draftId :: Text,
+ draftType :: OutreachType,
+ draftCreatedAt :: UTCTime,
+ draftSubject :: Maybe Text,
+ draftRecipient :: Text,
+ draftBody :: Text,
+ draftContext :: Text,
+ draftStatus :: OutreachStatus,
+ draftRejectReason :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON OutreachDraft where
+ toJSON d =
+ Aeson.object
+ [ "id" .= draftId d,
+ "type" .= draftType d,
+ "created_at" .= draftCreatedAt d,
+ "subject" .= draftSubject d,
+ "recipient" .= draftRecipient d,
+ "body" .= draftBody d,
+ "context" .= draftContext d,
+ "status" .= draftStatus d,
+ "reject_reason" .= draftRejectReason d
+ ]
+
+instance Aeson.FromJSON OutreachDraft where
+ parseJSON =
+ Aeson.withObject "OutreachDraft" <| \v ->
+ (OutreachDraft </ (v .: "id"))
+ <*> (v .: "type")
+ <*> (v .: "created_at")
+ <*> (v .:? "subject")
+ <*> (v .: "recipient")
+ <*> (v .: "body")
+ <*> (v .: "context")
+ <*> (v .: "status")
+ <*> (v .:? "reject_reason")
+
+ensureDirs :: IO ()
+ensureDirs = do
+ Directory.createDirectoryIfMissing True pendingDir
+ Directory.createDirectoryIfMissing True approvedDir
+ Directory.createDirectoryIfMissing True rejectedDir
+ Directory.createDirectoryIfMissing True sentDir
+
+draftPath :: FilePath -> Text -> FilePath
+draftPath dir draftId' = dir </> (Text.unpack draftId' <> ".json")
+
+saveDraft :: OutreachDraft -> IO ()
+saveDraft draft = do
+ ensureDirs
+ let dir = case draftStatus draft of
+ Pending -> pendingDir
+ Approved -> approvedDir
+ Rejected -> rejectedDir
+ Sent -> sentDir
+ path = draftPath dir (draftId draft)
+ TextIO.writeFile path (TE.decodeUtf8 (BL.toStrict (Aeson.encode draft)))
+
+createDraft :: OutreachType -> Text -> Maybe Text -> Text -> Text -> IO OutreachDraft
+createDraft otype recipient subject body context = do
+ uuid <- UUID.nextRandom
+ now <- getCurrentTime
+ let draft =
+ OutreachDraft
+ { draftId = UUID.toText uuid,
+ draftType = otype,
+ draftCreatedAt = now,
+ draftSubject = subject,
+ draftRecipient = recipient,
+ draftBody = body,
+ draftContext = context,
+ draftStatus = Pending,
+ draftRejectReason = Nothing
+ }
+ saveDraft draft
+ pure draft
+
+listDrafts :: OutreachStatus -> IO [OutreachDraft]
+listDrafts status = do
+ ensureDirs
+ let dir = case status of
+ Pending -> pendingDir
+ Approved -> approvedDir
+ Rejected -> rejectedDir
+ Sent -> sentDir
+ files <- Directory.listDirectory dir
+ let jsonFiles = filter (".json" `isSuffixOf`) files
+ drafts <-
+ forM jsonFiles <| \f -> do
+ content <- TextIO.readFile (dir </> f)
+ pure (Aeson.decode (BL.fromStrict (TE.encodeUtf8 content)))
+ pure (catMaybes drafts)
+
+getDraft :: Text -> IO (Maybe OutreachDraft)
+getDraft draftId' = do
+ ensureDirs
+ let dirs = [pendingDir, approvedDir, rejectedDir, sentDir]
+ findFirst dirs
+ where
+ findFirst [] = pure Nothing
+ findFirst (dir : rest) = do
+ let path = draftPath dir draftId'
+ exists <- Directory.doesFileExist path
+ if exists
+ then do
+ content <- TextIO.readFile path
+ pure (Aeson.decode (BL.fromStrict (TE.encodeUtf8 content)))
+ else findFirst rest
+
+moveDraft :: Text -> OutreachStatus -> OutreachStatus -> Maybe Text -> IO (Either Text OutreachDraft)
+moveDraft draftId' fromStatus toStatus reason = do
+ ensureDirs
+ let fromDir = case fromStatus of
+ Pending -> pendingDir
+ Approved -> approvedDir
+ Rejected -> rejectedDir
+ Sent -> sentDir
+ fromPath = draftPath fromDir draftId'
+ exists <- Directory.doesFileExist fromPath
+ if not exists
+ then pure (Left ("Draft not found in " <> tshow fromStatus <> " queue"))
+ else do
+ content <- TextIO.readFile fromPath
+ case Aeson.decode (BL.fromStrict (TE.encodeUtf8 content)) of
+ Nothing -> pure (Left "Failed to parse draft")
+ Just draft -> do
+ let updated = draft {draftStatus = toStatus, draftRejectReason = reason}
+ Directory.removeFile fromPath
+ saveDraft updated
+ pure (Right updated)
+
+approveDraft :: Text -> IO (Either Text OutreachDraft)
+approveDraft draftId' = moveDraft draftId' Pending Approved Nothing
+
+rejectDraft :: Text -> Maybe Text -> IO (Either Text OutreachDraft)
+rejectDraft draftId' = moveDraft draftId' Pending Rejected
+
+markSent :: Text -> IO (Either Text OutreachDraft)
+markSent draftId' = moveDraft draftId' Approved Sent Nothing
+
+getPendingCount :: IO Int
+getPendingCount = do
+ ensureDirs
+ files <- Directory.listDirectory pendingDir
+ pure (length (filter (".json" `isSuffixOf`) files))
+
+allOutreachTools :: [Engine.Tool]
+allOutreachTools =
+ [ outreachDraftTool,
+ outreachListTool,
+ outreachStatusTool
+ ]
+
+outreachDraftTool :: Engine.Tool
+outreachDraftTool =
+ Engine.Tool
+ { Engine.toolName = "outreach_draft",
+ Engine.toolDescription =
+ "Create a new outreach draft for Ben to review before sending. "
+ <> "Use this when you want to send an email or message on behalf of the business. "
+ <> "All outreach requires approval before it goes out.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "type"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "enum" .= (["email", "message"] :: [Text]),
+ "description" .= ("Type of outreach: 'email' or 'message'" :: Text)
+ ],
+ "recipient"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Email address or identifier of the recipient" :: Text)
+ ],
+ "subject"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Subject line (required for emails)" :: Text)
+ ],
+ "body"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The message content" :: Text)
+ ],
+ "context"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Explain why you're sending this - helps Ben review" :: Text)
+ ]
+ ],
+ "required" .= (["type", "recipient", "body", "context"] :: [Text])
+ ],
+ Engine.toolExecute = executeOutreachDraft
+ }
+
+executeOutreachDraft :: Aeson.Value -> IO Aeson.Value
+executeOutreachDraft v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: DraftArgs) -> do
+ let otype = case daType args of
+ "email" -> Email
+ _ -> Message
+ draft <- createDraft otype (daRecipient args) (daSubject args) (daBody args) (daContext args)
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "draft_id" .= draftId draft,
+ "message" .= ("Draft created and queued for review. ID: " <> draftId draft)
+ ]
+ )
+
+data DraftArgs = DraftArgs
+ { daType :: Text,
+ daRecipient :: Text,
+ daSubject :: Maybe Text,
+ daBody :: Text,
+ daContext :: Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON DraftArgs where
+ parseJSON =
+ Aeson.withObject "DraftArgs" <| \v ->
+ (DraftArgs </ (v .: "type"))
+ <*> (v .: "recipient")
+ <*> (v .:? "subject")
+ <*> (v .: "body")
+ <*> (v .: "context")
+
+outreachListTool :: Engine.Tool
+outreachListTool =
+ Engine.Tool
+ { Engine.toolName = "outreach_list",
+ Engine.toolDescription =
+ "List outreach drafts by status. Use to check what's pending approval, "
+ <> "what's been approved, or review past outreach.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "status"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "enum" .= (["pending", "approved", "rejected", "sent"] :: [Text]),
+ "description" .= ("Filter by status (default: pending)" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max drafts to return (default: 20)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeOutreachList
+ }
+
+executeOutreachList :: Aeson.Value -> IO Aeson.Value
+executeOutreachList v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: ListArgs) -> do
+ let status = case laStatus args of
+ Just "approved" -> Approved
+ Just "rejected" -> Rejected
+ Just "sent" -> Sent
+ _ -> Pending
+ limit = min 50 (max 1 (laLimit args))
+ drafts <- listDrafts status
+ let limited = take limit drafts
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "status" .= tshow status,
+ "count" .= length limited,
+ "drafts" .= limited
+ ]
+ )
+
+data ListArgs = ListArgs
+ { laStatus :: Maybe Text,
+ laLimit :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON ListArgs where
+ parseJSON =
+ Aeson.withObject "ListArgs" <| \v ->
+ (ListArgs </ (v .:? "status"))
+ <*> (v .:? "limit" .!= 20)
+
+outreachStatusTool :: Engine.Tool
+outreachStatusTool =
+ Engine.Tool
+ { Engine.toolName = "outreach_status",
+ Engine.toolDescription =
+ "Check the status of a specific outreach draft by ID.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "draft_id"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The draft ID to check" :: Text)
+ ]
+ ],
+ "required" .= (["draft_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeOutreachStatus
+ }
+
+executeOutreachStatus :: Aeson.Value -> IO Aeson.Value
+executeOutreachStatus v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: StatusArgs) -> do
+ mDraft <- getDraft (saId args)
+ case mDraft of
+ Nothing ->
+ pure (Aeson.object ["error" .= ("Draft not found" :: Text)])
+ Just draft ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "draft" .= draft
+ ]
+ )
+
+newtype StatusArgs = StatusArgs
+ { saId :: Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON StatusArgs where
+ parseJSON =
+ Aeson.withObject "StatusArgs" <| \v ->
+ StatusArgs </ (v .: "draft_id")
diff --git a/Omni/Agent/Tools/Pdf.hs b/Omni/Agent/Tools/Pdf.hs
new file mode 100644
index 0000000..7687234
--- /dev/null
+++ b/Omni/Agent/Tools/Pdf.hs
@@ -0,0 +1,180 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | PDF extraction tool using poppler-utils (pdftotext).
+--
+-- Extracts text from PDF files for LLM consumption.
+--
+-- : out omni-agent-tools-pdf
+-- : dep aeson
+-- : dep http-conduit
+-- : dep directory
+-- : dep process
+module Omni.Agent.Tools.Pdf
+ ( -- * Tool
+ pdfTool,
+
+ -- * Direct API
+ extractPdfText,
+ downloadAndExtract,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as Text
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import System.IO (hClose)
+import System.IO.Temp (withSystemTempFile)
+import System.Process (readProcessWithExitCode)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Pdf"
+ [ Test.unit "pdfTool has correct schema" <| do
+ let tool = pdfTool
+ Engine.toolName tool Test.@=? "read_pdf",
+ Test.unit "extractPdfText handles missing file" <| do
+ result <- extractPdfText "/nonexistent/file.pdf"
+ case result of
+ Left err -> ("No such file" `Text.isInfixOf` err || "pdftotext" `Text.isInfixOf` err) Test.@=? True
+ Right _ -> Test.assertFailure "Expected error for missing file",
+ Test.unit "chunkText splits correctly" <| do
+ let text = Text.replicate 5000 "a"
+ chunks = chunkText 1000 text
+ length chunks Test.@=? 5
+ all (\c -> Text.length c <= 1000) chunks Test.@=? True,
+ Test.unit "chunkText handles small text" <| do
+ let text = "small text"
+ chunks = chunkText 1000 text
+ chunks Test.@=? ["small text"]
+ ]
+
+data PdfArgs = PdfArgs
+ { pdfPath :: Text,
+ pdfMaxChars :: Maybe Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON PdfArgs where
+ parseJSON =
+ Aeson.withObject "PdfArgs" <| \v ->
+ (PdfArgs </ (v Aeson..: "path"))
+ <*> (v Aeson..:? "max_chars")
+
+pdfTool :: Engine.Tool
+pdfTool =
+ Engine.Tool
+ { Engine.toolName = "read_pdf",
+ Engine.toolDescription =
+ "Extract text from a PDF file. Use this when you receive a PDF document "
+ <> "and need to read its contents. Returns the extracted text.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "path"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Path to the PDF file" :: Text)
+ ],
+ "max_chars"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Maximum characters to return (default: 50000)" :: Text)
+ ]
+ ],
+ "required" .= (["path"] :: [Text])
+ ],
+ Engine.toolExecute = executePdf
+ }
+
+executePdf :: Aeson.Value -> IO Aeson.Value
+executePdf v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: PdfArgs) -> do
+ let maxChars = maybe 50000 (min 100000 <. max 1000) (pdfMaxChars args)
+ result <- extractPdfText (Text.unpack (pdfPath args))
+ case result of
+ Left err ->
+ pure (Aeson.object ["error" .= err])
+ Right text -> do
+ let truncated = Text.take maxChars text
+ wasTruncated = Text.length text > maxChars
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "text" .= truncated,
+ "chars" .= Text.length truncated,
+ "truncated" .= wasTruncated
+ ]
+ )
+
+extractPdfText :: FilePath -> IO (Either Text Text)
+extractPdfText path = do
+ result <-
+ try <| readProcessWithExitCode "pdftotext" ["-layout", path, "-"] ""
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("pdftotext error: " <> tshow e))
+ Right (exitCode, stdoutStr, stderrStr) ->
+ case exitCode of
+ ExitSuccess -> pure (Right (Text.pack stdoutStr))
+ ExitFailure code ->
+ pure (Left ("pdftotext failed (" <> tshow code <> "): " <> Text.pack stderrStr))
+
+downloadAndExtract :: Text -> Text -> Text -> IO (Either Text Text)
+downloadAndExtract botToken filePath maxCharsText = do
+ let url =
+ "https://api.telegram.org/file/bot"
+ <> Text.unpack botToken
+ <> "/"
+ <> Text.unpack filePath
+ maxChars = maybe 50000 identity (readMaybe (Text.unpack maxCharsText) :: Maybe Int)
+ withSystemTempFile "telegram_pdf.pdf" <| \tmpPath tmpHandle -> do
+ hClose tmpHandle
+ downloadResult <-
+ try <| do
+ req <- HTTP.parseRequest url
+ response <- HTTP.httpLBS req
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then do
+ BL.writeFile tmpPath (HTTP.getResponseBody response)
+ pure (Right ())
+ else pure (Left ("Download failed: HTTP " <> tshow status))
+ case downloadResult of
+ Left (e :: SomeException) ->
+ pure (Left ("Download error: " <> tshow e))
+ Right (Left err) -> pure (Left err)
+ Right (Right ()) -> do
+ result <- extractPdfText tmpPath
+ case result of
+ Left err -> pure (Left err)
+ Right text -> do
+ let truncated = Text.take maxChars text
+ pure (Right truncated)
+
+chunkText :: Int -> Text -> [Text]
+chunkText chunkSize text
+ | Text.null text = []
+ | Text.length text <= chunkSize = [text]
+ | otherwise =
+ let (chunk, rest) = Text.splitAt chunkSize text
+ in chunk : chunkText chunkSize rest
diff --git a/Omni/Agent/Tools/Python.hs b/Omni/Agent/Tools/Python.hs
new file mode 100644
index 0000000..99f3f7d
--- /dev/null
+++ b/Omni/Agent/Tools/Python.hs
@@ -0,0 +1,217 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Python execution tool for agent use.
+--
+-- Executes Python snippets via subprocess with timeout support.
+-- Writes code to temp file, executes with python3, cleans up after.
+--
+-- Available stdlib: requests, json, csv, re, datetime, urllib
+--
+-- : out omni-agent-tools-python
+-- : dep aeson
+-- : dep process
+-- : dep directory
+-- : dep temporary
+module Omni.Agent.Tools.Python
+ ( pythonExecTool,
+ PythonExecArgs (..),
+ PythonResult (..),
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TextIO
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+import qualified System.Directory as Directory
+import qualified System.Exit as Exit
+import qualified System.Process as Process
+import System.Timeout (timeout)
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Python"
+ [ Test.unit "pythonExecTool has correct name" <| do
+ Engine.toolName pythonExecTool Test.@=? "python_exec",
+ Test.unit "pythonExecTool schema is valid" <| do
+ let schema = Engine.toolJsonSchema pythonExecTool
+ case schema of
+ Aeson.Object _ -> pure ()
+ _ -> Test.assertFailure "Schema should be an object",
+ Test.unit "PythonExecArgs parses correctly" <| do
+ let json = Aeson.object ["code" .= ("print('hello')" :: Text)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: PythonExecArgs) -> pythonCode args Test.@=? "print('hello')"
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "PythonExecArgs parses with timeout" <| do
+ let json = Aeson.object ["code" .= ("x = 1" :: Text), "timeout" .= (10 :: Int)]
+ case Aeson.fromJSON json of
+ Aeson.Success (args :: PythonExecArgs) -> do
+ pythonCode args Test.@=? "x = 1"
+ pythonTimeout args Test.@=? Just 10
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "simple print statement" <| do
+ let args = Aeson.object ["code" .= ("print('hello world')" :: Text)]
+ result <- Engine.toolExecute pythonExecTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: PythonResult) -> do
+ pythonResultExitCode r Test.@=? 0
+ ("hello world" `Text.isInfixOf` pythonResultStdout r) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "syntax error handling" <| do
+ let args = Aeson.object ["code" .= ("def broken(" :: Text)]
+ result <- Engine.toolExecute pythonExecTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: PythonResult) -> do
+ (pythonResultExitCode r /= 0) Test.@=? True
+ not (Text.null (pythonResultStderr r)) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "import json works" <| do
+ let code = "import json\nprint(json.dumps({'a': 1}))"
+ args = Aeson.object ["code" .= (code :: Text)]
+ result <- Engine.toolExecute pythonExecTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: PythonResult) -> do
+ pythonResultExitCode r Test.@=? 0
+ ("{\"a\": 1}" `Text.isInfixOf` pythonResultStdout r) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e,
+ Test.unit "timeout handling" <| do
+ let code = "import time\ntime.sleep(5)"
+ args = Aeson.object ["code" .= (code :: Text), "timeout" .= (1 :: Int)]
+ result <- Engine.toolExecute pythonExecTool args
+ case Aeson.fromJSON result of
+ Aeson.Success (r :: PythonResult) -> do
+ pythonResultExitCode r Test.@=? (-1)
+ ("timeout" `Text.isInfixOf` Text.toLower (pythonResultStderr r)) Test.@=? True
+ Aeson.Error e -> Test.assertFailure e
+ ]
+
+data PythonExecArgs = PythonExecArgs
+ { pythonCode :: Text,
+ pythonTimeout :: Maybe Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON PythonExecArgs where
+ parseJSON =
+ Aeson.withObject "PythonExecArgs" <| \v ->
+ (PythonExecArgs </ (v .: "code"))
+ <*> (v .:? "timeout")
+
+data PythonResult = PythonResult
+ { pythonResultStdout :: Text,
+ pythonResultStderr :: Text,
+ pythonResultExitCode :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON PythonResult where
+ toJSON r =
+ Aeson.object
+ [ "stdout" .= pythonResultStdout r,
+ "stderr" .= pythonResultStderr r,
+ "exit_code" .= pythonResultExitCode r
+ ]
+
+instance Aeson.FromJSON PythonResult where
+ parseJSON =
+ Aeson.withObject "PythonResult" <| \v ->
+ (PythonResult </ (v .: "stdout"))
+ <*> (v .: "stderr")
+ <*> (v .: "exit_code")
+
+pythonExecTool :: Engine.Tool
+pythonExecTool =
+ Engine.Tool
+ { Engine.toolName = "python_exec",
+ Engine.toolDescription =
+ "Execute Python code and return the output. "
+ <> "Use for data processing, API calls, calculations, or any task requiring Python. "
+ <> "Available libraries: requests, json, csv, re, datetime, urllib. "
+ <> "Code runs in a subprocess with a 30 second default timeout.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "code"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Python code to execute" :: Text)
+ ],
+ "timeout"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Timeout in seconds (default: 30)" :: Text)
+ ]
+ ],
+ "required" .= (["code"] :: [Text])
+ ],
+ Engine.toolExecute = executePythonExec
+ }
+
+executePythonExec :: Aeson.Value -> IO Aeson.Value
+executePythonExec v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure <| mkError ("Invalid arguments: " <> Text.pack e)
+ Aeson.Success args -> do
+ let code = pythonCode args
+ timeoutSecs = fromMaybe 30 (pythonTimeout args)
+ timeoutMicros = timeoutSecs * 1000000
+ tmpDir <- Directory.getTemporaryDirectory
+ let tmpFile = tmpDir <> "/python_exec_" <> show (codeHash code) <> ".py"
+ result <-
+ try <| do
+ TextIO.writeFile tmpFile code
+ let proc = Process.proc "python3" [tmpFile]
+ mResult <- timeout timeoutMicros <| Process.readCreateProcessWithExitCode proc ""
+ Directory.removeFile tmpFile
+ pure mResult
+ case result of
+ Left (e :: SomeException) -> do
+ _ <- try @SomeException <| Directory.removeFile tmpFile
+ pure <| mkError ("Execution failed: " <> tshow e)
+ Right Nothing -> do
+ _ <- try @SomeException <| Directory.removeFile tmpFile
+ pure
+ <| Aeson.toJSON
+ <| PythonResult
+ { pythonResultStdout = "",
+ pythonResultStderr = "Timeout: execution exceeded " <> tshow timeoutSecs <> " seconds",
+ pythonResultExitCode = -1
+ }
+ Right (Just (exitCode, stdoutStr, stderrStr)) ->
+ pure
+ <| Aeson.toJSON
+ <| PythonResult
+ { pythonResultStdout = Text.pack stdoutStr,
+ pythonResultStderr = Text.pack stderrStr,
+ pythonResultExitCode = exitCodeToInt exitCode
+ }
+
+exitCodeToInt :: Exit.ExitCode -> Int
+exitCodeToInt Exit.ExitSuccess = 0
+exitCodeToInt (Exit.ExitFailure n) = n
+
+mkError :: Text -> Aeson.Value
+mkError err =
+ Aeson.toJSON
+ <| PythonResult
+ { pythonResultStdout = "",
+ pythonResultStderr = err,
+ pythonResultExitCode = -1
+ }
+
+codeHash :: Text -> Int
+codeHash = Text.foldl' (\h c -> 31 * h + fromEnum c) 0
diff --git a/Omni/Agent/Tools/Todos.hs b/Omni/Agent/Tools/Todos.hs
new file mode 100644
index 0000000..2aacacc
--- /dev/null
+++ b/Omni/Agent/Tools/Todos.hs
@@ -0,0 +1,527 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Todo tool with due dates and reminders.
+--
+-- Provides user-scoped todos with optional due dates.
+--
+-- : out omni-agent-tools-todos
+-- : dep aeson
+-- : dep sqlite-simple
+-- : dep time
+module Omni.Agent.Tools.Todos
+ ( -- * Tools
+ todoAddTool,
+ todoListTool,
+ todoCompleteTool,
+ todoDeleteTool,
+
+ -- * Direct API
+ Todo (..),
+ createTodo,
+ listTodos,
+ listPendingTodos,
+ listOverdueTodos,
+ completeTodo,
+ deleteTodo,
+
+ -- * Reminders
+ listTodosDueForReminder,
+ markReminderSent,
+ reminderInterval,
+
+ -- * Database
+ initTodosTable,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Text as Text
+import Data.Time (LocalTime, NominalDiffTime, TimeZone, UTCTime, addUTCTime, getCurrentTime, localTimeToUTC, minutesToTimeZone, utcToLocalTime)
+import Data.Time.Format (defaultTimeLocale, formatTime, parseTimeM)
+import qualified Database.SQLite.Simple as SQL
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Memory as Memory
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.Todos"
+ [ Test.unit "todoAddTool has correct schema" <| do
+ let tool = todoAddTool "test-user-id"
+ Engine.toolName tool Test.@=? "todo_add",
+ Test.unit "todoListTool has correct schema" <| do
+ let tool = todoListTool "test-user-id"
+ Engine.toolName tool Test.@=? "todo_list",
+ Test.unit "todoCompleteTool has correct schema" <| do
+ let tool = todoCompleteTool "test-user-id"
+ Engine.toolName tool Test.@=? "todo_complete",
+ Test.unit "todoDeleteTool has correct schema" <| do
+ let tool = todoDeleteTool "test-user-id"
+ Engine.toolName tool Test.@=? "todo_delete",
+ Test.unit "Todo JSON roundtrip" <| do
+ now <- getCurrentTime
+ let td =
+ Todo
+ { todoId = 1,
+ todoUserId = "user-123",
+ todoTitle = "Buy milk",
+ todoDueDate = Just now,
+ todoCompleted = False,
+ todoCreatedAt = now,
+ todoLastRemindedAt = Nothing
+ }
+ case Aeson.decode (Aeson.encode td) of
+ Nothing -> Test.assertFailure "Failed to decode Todo"
+ Just decoded -> do
+ todoTitle decoded Test.@=? "Buy milk"
+ todoCompleted decoded Test.@=? False,
+ Test.unit "parseDueDate handles various formats" <| do
+ isJust (parseDueDate "2024-12-25") Test.@=? True
+ isJust (parseDueDate "2024-12-25 14:00") Test.@=? True
+ ]
+
+data Todo = Todo
+ { todoId :: Int,
+ todoUserId :: Text,
+ todoTitle :: Text,
+ todoDueDate :: Maybe UTCTime,
+ todoCompleted :: Bool,
+ todoCreatedAt :: UTCTime,
+ todoLastRemindedAt :: Maybe UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Todo where
+ toJSON td =
+ Aeson.object
+ [ "id" .= todoId td,
+ "user_id" .= todoUserId td,
+ "title" .= todoTitle td,
+ "due_date" .= todoDueDate td,
+ "completed" .= todoCompleted td,
+ "created_at" .= todoCreatedAt td,
+ "last_reminded_at" .= todoLastRemindedAt td
+ ]
+
+instance Aeson.FromJSON Todo where
+ parseJSON =
+ Aeson.withObject "Todo" <| \v ->
+ (Todo </ (v .: "id"))
+ <*> (v .: "user_id")
+ <*> (v .: "title")
+ <*> (v .:? "due_date")
+ <*> (v .: "completed")
+ <*> (v .: "created_at")
+ <*> (v .:? "last_reminded_at")
+
+instance SQL.FromRow Todo where
+ fromRow =
+ (Todo </ SQL.field)
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+initTodosTable :: SQL.Connection -> IO ()
+initTodosTable conn = do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS todos (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ \ user_id TEXT NOT NULL,\
+ \ title TEXT NOT NULL,\
+ \ due_date TIMESTAMP,\
+ \ completed INTEGER NOT NULL DEFAULT 0,\
+ \ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\
+ \ last_reminded_at TIMESTAMP\
+ \)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_todos_user ON todos(user_id)"
+ SQL.execute_
+ conn
+ "CREATE INDEX IF NOT EXISTS idx_todos_due ON todos(user_id, due_date)"
+ migrateTodosTable conn
+
+migrateTodosTable :: SQL.Connection -> IO ()
+migrateTodosTable conn = do
+ cols <- SQL.query_ conn "PRAGMA table_info(todos)" :: IO [(Int, Text, Text, Int, Maybe Text, Int)]
+ let colNames = map (\(_, name, _, _, _, _) -> name) cols
+ unless ("last_reminded_at" `elem` colNames) <| do
+ SQL.execute_ conn "ALTER TABLE todos ADD COLUMN last_reminded_at TIMESTAMP"
+
+easternTimeZone :: TimeZone
+easternTimeZone = minutesToTimeZone (-300)
+
+parseDueDate :: Text -> Maybe UTCTime
+parseDueDate txt =
+ let s = Text.unpack txt
+ parseLocal :: Maybe LocalTime
+ parseLocal =
+ parseTimeM True defaultTimeLocale "%Y-%m-%d %H:%M" s
+ <|> parseTimeM True defaultTimeLocale "%Y-%m-%d" s
+ <|> parseTimeM True defaultTimeLocale "%Y-%m-%dT%H:%M:%S" s
+ in fmap (localTimeToUTC easternTimeZone) parseLocal
+ <|> parseTimeM True defaultTimeLocale "%Y-%m-%dT%H:%M:%SZ" s
+
+createTodo :: Text -> Text -> Maybe Text -> IO Todo
+createTodo uid title maybeDueDateStr = do
+ now <- getCurrentTime
+ let dueDate = maybeDueDateStr +> parseDueDate
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.execute
+ conn
+ "INSERT INTO todos (user_id, title, due_date, completed, created_at) VALUES (?, ?, ?, 0, ?)"
+ (uid, title, dueDate, now)
+ rowId <- SQL.lastInsertRowId conn
+ pure
+ Todo
+ { todoId = fromIntegral rowId,
+ todoUserId = uid,
+ todoTitle = title,
+ todoDueDate = dueDate,
+ todoCompleted = False,
+ todoCreatedAt = now,
+ todoLastRemindedAt = Nothing
+ }
+
+listTodos :: Text -> Int -> IO [Todo]
+listTodos uid limit =
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, title, due_date, completed, created_at, last_reminded_at \
+ \FROM todos WHERE user_id = ? \
+ \ORDER BY completed ASC, due_date ASC NULLS LAST, created_at DESC LIMIT ?"
+ (uid, limit)
+
+listPendingTodos :: Text -> Int -> IO [Todo]
+listPendingTodos uid limit =
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, title, due_date, completed, created_at, last_reminded_at \
+ \FROM todos WHERE user_id = ? AND completed = 0 \
+ \ORDER BY due_date ASC NULLS LAST, created_at DESC LIMIT ?"
+ (uid, limit)
+
+listOverdueTodos :: Text -> IO [Todo]
+listOverdueTodos uid = do
+ now <- getCurrentTime
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, title, due_date, completed, created_at, last_reminded_at \
+ \FROM todos WHERE user_id = ? AND completed = 0 AND due_date < ? \
+ \ORDER BY due_date ASC"
+ (uid, now)
+
+reminderInterval :: NominalDiffTime
+reminderInterval = 24 * 60 * 60
+
+listTodosDueForReminder :: IO [Todo]
+listTodosDueForReminder = do
+ now <- getCurrentTime
+ let cutoff = addUTCTime (negate reminderInterval) now
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.query
+ conn
+ "SELECT id, user_id, title, due_date, completed, created_at, last_reminded_at \
+ \FROM todos \
+ \WHERE completed = 0 \
+ \ AND due_date IS NOT NULL \
+ \ AND due_date < ? \
+ \ AND (last_reminded_at IS NULL OR last_reminded_at < ?)"
+ (now, cutoff)
+
+markReminderSent :: Int -> IO ()
+markReminderSent tid = do
+ now <- getCurrentTime
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.execute
+ conn
+ "UPDATE todos SET last_reminded_at = ? WHERE id = ?"
+ (now, tid)
+
+completeTodo :: Text -> Int -> IO Bool
+completeTodo uid tid =
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.execute
+ conn
+ "UPDATE todos SET completed = 1 WHERE id = ? AND user_id = ?"
+ (tid, uid)
+ changes <- SQL.changes conn
+ pure (changes > 0)
+
+deleteTodo :: Text -> Int -> IO Bool
+deleteTodo uid tid =
+ Memory.withMemoryDb <| \conn -> do
+ initTodosTable conn
+ SQL.execute
+ conn
+ "DELETE FROM todos WHERE id = ? AND user_id = ?"
+ (tid, uid)
+ changes <- SQL.changes conn
+ pure (changes > 0)
+
+todoAddTool :: Text -> Engine.Tool
+todoAddTool uid =
+ Engine.Tool
+ { Engine.toolName = "todo_add",
+ Engine.toolDescription =
+ "Add a todo item with optional due date. Use for tasks, reminders, "
+ <> "or anything the user needs to remember to do. "
+ <> "Due date format: 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM'.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "title"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("What needs to be done" :: Text)
+ ],
+ "due_date"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Optional due date in Eastern time: 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM'" :: Text)
+ ]
+ ],
+ "required" .= (["title"] :: [Text])
+ ],
+ Engine.toolExecute = executeTodoAdd uid
+ }
+
+executeTodoAdd :: Text -> Aeson.Value -> IO Aeson.Value
+executeTodoAdd uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: TodoAddArgs) -> do
+ td <- createTodo uid (taTitle args) (taDueDate args)
+ let dueDateMsg = case todoDueDate td of
+ Just d ->
+ let localTime = utcToLocalTime easternTimeZone d
+ in " (due: " <> Text.pack (formatTime defaultTimeLocale "%Y-%m-%d %H:%M ET" localTime) <> ")"
+ Nothing -> ""
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "todo_id" .= todoId td,
+ "message" .= ("Added todo: " <> todoTitle td <> dueDateMsg)
+ ]
+ )
+
+data TodoAddArgs = TodoAddArgs
+ { taTitle :: Text,
+ taDueDate :: Maybe Text
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON TodoAddArgs where
+ parseJSON =
+ Aeson.withObject "TodoAddArgs" <| \v ->
+ (TodoAddArgs </ (v .: "title"))
+ <*> (v .:? "due_date")
+
+todoListTool :: Text -> Engine.Tool
+todoListTool uid =
+ Engine.Tool
+ { Engine.toolName = "todo_list",
+ Engine.toolDescription =
+ "List todos. By default shows pending (incomplete) todos. "
+ <> "Can show all todos or just overdue ones.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "filter"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("Filter: 'pending' (default), 'all', or 'overdue'" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max todos to return (default: 20)" :: Text)
+ ]
+ ],
+ "required" .= ([] :: [Text])
+ ],
+ Engine.toolExecute = executeTodoList uid
+ }
+
+executeTodoList :: Text -> Aeson.Value -> IO Aeson.Value
+executeTodoList uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: TodoListArgs) -> do
+ let lim = min 50 (max 1 (tlLimit args))
+ todos <- case tlFilter args of
+ "all" -> listTodos uid lim
+ "overdue" -> listOverdueTodos uid
+ _ -> listPendingTodos uid lim
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length todos,
+ "todos" .= formatTodosForLLM todos
+ ]
+ )
+
+formatTodosForLLM :: [Todo] -> Text
+formatTodosForLLM [] = "No todos found."
+formatTodosForLLM todos =
+ Text.unlines (map formatTodo todos)
+ where
+ formatTodo td =
+ let status = if todoCompleted td then "[x]" else "[ ]"
+ dueStr = case todoDueDate td of
+ Just d ->
+ let localTime = utcToLocalTime easternTimeZone d
+ in " (due: " <> Text.pack (formatTime defaultTimeLocale "%Y-%m-%d %H:%M ET" localTime) <> ")"
+ Nothing -> ""
+ in status <> " " <> todoTitle td <> dueStr <> " (id: " <> tshow (todoId td) <> ")"
+
+data TodoListArgs = TodoListArgs
+ { tlFilter :: Text,
+ tlLimit :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON TodoListArgs where
+ parseJSON =
+ Aeson.withObject "TodoListArgs" <| \v ->
+ (TodoListArgs </ (v .:? "filter" .!= "pending"))
+ <*> (v .:? "limit" .!= 20)
+
+todoCompleteTool :: Text -> Engine.Tool
+todoCompleteTool uid =
+ Engine.Tool
+ { Engine.toolName = "todo_complete",
+ Engine.toolDescription =
+ "Mark a todo as completed. Use when the user says they finished something.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "todo_id"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("The ID of the todo to complete" :: Text)
+ ]
+ ],
+ "required" .= (["todo_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeTodoComplete uid
+ }
+
+executeTodoComplete :: Text -> Aeson.Value -> IO Aeson.Value
+executeTodoComplete uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: TodoCompleteArgs) -> do
+ completed <- completeTodo uid (tcTodoId args)
+ if completed
+ then
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= ("Todo marked as complete" :: Text)
+ ]
+ )
+ else
+ pure
+ ( Aeson.object
+ [ "success" .= False,
+ "error" .= ("Todo not found" :: Text)
+ ]
+ )
+
+newtype TodoCompleteArgs = TodoCompleteArgs
+ { tcTodoId :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON TodoCompleteArgs where
+ parseJSON =
+ Aeson.withObject "TodoCompleteArgs" <| \v ->
+ TodoCompleteArgs </ (v .: "todo_id")
+
+todoDeleteTool :: Text -> Engine.Tool
+todoDeleteTool uid =
+ Engine.Tool
+ { Engine.toolName = "todo_delete",
+ Engine.toolDescription =
+ "Delete a todo permanently. Use when a todo is no longer needed.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "todo_id"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("The ID of the todo to delete" :: Text)
+ ]
+ ],
+ "required" .= (["todo_id"] :: [Text])
+ ],
+ Engine.toolExecute = executeTodoDelete uid
+ }
+
+executeTodoDelete :: Text -> Aeson.Value -> IO Aeson.Value
+executeTodoDelete uid v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: TodoDeleteArgs) -> do
+ deleted <- deleteTodo uid (tdTodoId args)
+ if deleted
+ then
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "message" .= ("Todo deleted" :: Text)
+ ]
+ )
+ else
+ pure
+ ( Aeson.object
+ [ "success" .= False,
+ "error" .= ("Todo not found" :: Text)
+ ]
+ )
+
+newtype TodoDeleteArgs = TodoDeleteArgs
+ { tdTodoId :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON TodoDeleteArgs where
+ parseJSON =
+ Aeson.withObject "TodoDeleteArgs" <| \v ->
+ TodoDeleteArgs </ (v .: "todo_id")
diff --git a/Omni/Agent/Tools/WebReader.hs b/Omni/Agent/Tools/WebReader.hs
new file mode 100644
index 0000000..a69e3cf
--- /dev/null
+++ b/Omni/Agent/Tools/WebReader.hs
@@ -0,0 +1,308 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Web page reader tool - fetches and summarizes web pages.
+--
+-- : out omni-agent-tools-webreader
+-- : dep aeson
+-- : dep http-conduit
+-- : run trafilatura
+module Omni.Agent.Tools.WebReader
+ ( -- * Tool
+ webReaderTool,
+
+ -- * Direct API
+ fetchWebpage,
+ extractText,
+ fetchAndSummarize,
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import qualified Control.Concurrent.Sema as Sema
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Data.Text.IO as TIO
+import Data.Time.Clock (diffUTCTime, getCurrentTime)
+import qualified Network.HTTP.Client as HTTPClient
+import qualified Network.HTTP.Simple as HTTP
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Provider as Provider
+import qualified Omni.Test as Test
+import qualified System.Exit as Exit
+import qualified System.IO as IO
+import qualified System.Process as Process
+import qualified System.Timeout as Timeout
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.WebReader"
+ [ Test.unit "extractText removes HTML tags" <| do
+ let html = "<html><body><p>Hello world</p></body></html>"
+ result = extractText html
+ ("Hello world" `Text.isInfixOf` result) Test.@=? True,
+ Test.unit "extractText removes script tags" <| do
+ let html = "<html><script>alert('hi')</script><p>Content</p></html>"
+ result = extractText html
+ ("alert" `Text.isInfixOf` result) Test.@=? False
+ ("Content" `Text.isInfixOf` result) Test.@=? True,
+ Test.unit "webReaderTool has correct schema" <| do
+ let tool = webReaderTool "test-key"
+ Engine.toolName tool Test.@=? "read_webpages"
+ ]
+
+-- | Fetch timeout in microseconds (15 seconds - short because blocked sites won't respond anyway)
+fetchTimeoutMicros :: Int
+fetchTimeoutMicros = 15 * 1000000
+
+-- | Summarization timeout in microseconds (30 seconds)
+summarizeTimeoutMicros :: Int
+summarizeTimeoutMicros = 30 * 1000000
+
+-- | Maximum concurrent fetches
+maxConcurrentFetches :: Int
+maxConcurrentFetches = 10
+
+-- | Simple debug logging to stderr
+dbg :: Text -> IO ()
+dbg = TIO.hPutStrLn IO.stderr
+
+fetchWebpage :: Text -> IO (Either Text Text)
+fetchWebpage url = do
+ dbg ("[webreader] Fetching: " <> url)
+ result <-
+ Timeout.timeout fetchTimeoutMicros <| do
+ innerResult <-
+ try <| do
+ req0 <- HTTP.parseRequest (Text.unpack url)
+ let req =
+ HTTP.setRequestMethod "GET"
+ <| HTTP.setRequestHeader "User-Agent" ["Mozilla/5.0 (compatible; OmniBot/1.0)"]
+ <| HTTP.setRequestHeader "Accept" ["text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"]
+ <| HTTP.setRequestResponseTimeout (HTTPClient.responseTimeoutMicro fetchTimeoutMicros)
+ <| req0
+ HTTP.httpLBS req
+ case innerResult of
+ Left (e :: SomeException) -> do
+ dbg ("[webreader] Fetch error: " <> url <> " - " <> tshow e)
+ pure (Left ("Failed to fetch URL: " <> tshow e))
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then do
+ let body = HTTP.getResponseBody response
+ text = TE.decodeUtf8With (\_ _ -> Just '?') (BL.toStrict body)
+ len = Text.length text
+ dbg ("[webreader] Fetched: " <> url <> " (" <> tshow len <> " chars)")
+ pure (Right text)
+ else do
+ dbg ("[webreader] HTTP " <> tshow status <> ": " <> url)
+ pure (Left ("HTTP error: " <> tshow status))
+ case result of
+ Nothing -> do
+ dbg ("[webreader] Timeout: " <> url)
+ pure (Left ("Timeout fetching " <> url))
+ Just r -> pure r
+
+-- | Fast single-pass text extraction from HTML
+-- Strips all tags in one pass, no expensive operations
+extractText :: Text -> Text
+extractText html = collapseWhitespace (stripAllTags html)
+ where
+ -- Single pass: accumulate text outside of tags
+ stripAllTags :: Text -> Text
+ stripAllTags txt = Text.pack (go (Text.unpack txt) False [])
+ where
+ go :: [Char] -> Bool -> [Char] -> [Char]
+ go [] _ acc = reverse acc
+ go ('<' : rest) _ acc = go rest True acc -- Enter tag
+ go ('>' : rest) True acc = go rest False (' ' : acc) -- Exit tag, add space
+ go (_ : rest) True acc = go rest True acc -- Inside tag, skip
+ go (c : rest) False acc = go rest False (c : acc) -- Outside tag, keep
+ collapseWhitespace = Text.unwords <. Text.words
+
+-- | Maximum chars to send for summarization (keep it small for fast LLM response)
+maxContentForSummary :: Int
+maxContentForSummary = 15000
+
+-- | Maximum summary length to return
+maxSummaryLength :: Int
+maxSummaryLength = 1000
+
+-- | Timeout for trafilatura extraction in microseconds (10 seconds)
+extractTimeoutMicros :: Int
+extractTimeoutMicros = 10 * 1000000
+
+-- | Extract article content using trafilatura (Python library)
+-- Falls back to naive extractText if trafilatura fails
+extractWithTrafilatura :: Text -> IO Text
+extractWithTrafilatura html = do
+ let pythonScript =
+ "import sys; import trafilatura; "
+ <> "html = sys.stdin.read(); "
+ <> "result = trafilatura.extract(html, include_comments=False, include_tables=False); "
+ <> "print(result if result else '')"
+ proc =
+ (Process.proc "python3" ["-c", Text.unpack pythonScript])
+ { Process.std_in = Process.CreatePipe,
+ Process.std_out = Process.CreatePipe,
+ Process.std_err = Process.CreatePipe
+ }
+ result <-
+ Timeout.timeout extractTimeoutMicros <| do
+ (exitCode, stdoutStr, _stderrStr) <- Process.readCreateProcessWithExitCode proc (Text.unpack html)
+ case exitCode of
+ Exit.ExitSuccess -> pure (Text.strip (Text.pack stdoutStr))
+ Exit.ExitFailure _ -> pure ""
+ case result of
+ Just txt | not (Text.null txt) -> pure txt
+ _ -> do
+ dbg "[webreader] trafilatura failed, falling back to naive extraction"
+ pure (extractText (Text.take 100000 html))
+
+summarizeContent :: Text -> Text -> Text -> IO (Either Text Text)
+summarizeContent apiKey url content = do
+ let truncatedContent = Text.take maxContentForSummary content
+ haiku = Provider.defaultOpenRouter apiKey "anthropic/claude-haiku-4.5"
+ dbg ("[webreader] Summarizing: " <> url <> " (" <> tshow (Text.length truncatedContent) <> " chars)")
+ dbg "[webreader] Calling LLM for summarization..."
+ startTime <- getCurrentTime
+ result <-
+ Timeout.timeout summarizeTimeoutMicros
+ <| Provider.chat
+ haiku
+ []
+ [ Provider.Message
+ Provider.System
+ ( "You are a webpage summarizer. Extract the key information in 3-5 bullet points. "
+ <> "Be extremely concise - max 500 characters total. No preamble, just bullets."
+ )
+ Nothing
+ Nothing,
+ Provider.Message
+ Provider.User
+ ("Summarize: " <> url <> "\n\n" <> truncatedContent)
+ Nothing
+ Nothing
+ ]
+ endTime <- getCurrentTime
+ let elapsed = diffUTCTime endTime startTime
+ dbg ("[webreader] LLM call completed in " <> tshow elapsed)
+ case result of
+ Nothing -> do
+ dbg ("[webreader] Summarize timeout after " <> tshow elapsed <> ": " <> url)
+ pure (Left ("Timeout summarizing " <> url))
+ Just (Left err) -> do
+ dbg ("[webreader] Summarize error: " <> url <> " - " <> err)
+ pure (Left ("Summarization failed: " <> err))
+ Just (Right msg) -> do
+ let summary = Text.take maxSummaryLength (Provider.msgContent msg)
+ dbg ("[webreader] Summarized: " <> url <> " (" <> tshow (Text.length summary) <> " chars)")
+ pure (Right summary)
+
+-- | Fetch and summarize a single URL, returning a result object
+-- This is the core function used by both single and batch tools
+fetchAndSummarize :: Text -> Text -> IO Aeson.Value
+fetchAndSummarize apiKey url = do
+ fetchResult <- fetchWebpage url
+ case fetchResult of
+ Left err ->
+ pure (Aeson.object ["url" .= url, "error" .= err])
+ Right html -> do
+ dbg ("[webreader] Extracting article from: " <> url <> " (" <> tshow (Text.length html) <> " chars HTML)")
+ extractStart <- getCurrentTime
+ textContent <- extractWithTrafilatura html
+ extractEnd <- getCurrentTime
+ let extractElapsed = diffUTCTime extractEnd extractStart
+ dbg ("[webreader] Extracted: " <> url <> " (" <> tshow (Text.length textContent) <> " chars text) in " <> tshow extractElapsed)
+ if Text.null (Text.strip textContent)
+ then pure (Aeson.object ["url" .= url, "error" .= ("Page appears to be empty or JavaScript-only" :: Text)])
+ else do
+ summaryResult <- summarizeContent apiKey url textContent
+ case summaryResult of
+ Left err ->
+ pure
+ ( Aeson.object
+ [ "url" .= url,
+ "error" .= err,
+ "raw_content" .= Text.take 2000 textContent
+ ]
+ )
+ Right summary ->
+ pure
+ ( Aeson.object
+ [ "url" .= url,
+ "success" .= True,
+ "summary" .= summary
+ ]
+ )
+
+-- | Web reader tool - fetches and summarizes webpages in parallel
+webReaderTool :: Text -> Engine.Tool
+webReaderTool apiKey =
+ Engine.Tool
+ { Engine.toolName = "read_webpages",
+ Engine.toolDescription =
+ "Fetch and summarize webpages in parallel. Each page is processed independently - "
+ <> "failures on one page won't affect others. Returns a list of summaries.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "urls"
+ .= Aeson.object
+ [ "type" .= ("array" :: Text),
+ "items" .= Aeson.object ["type" .= ("string" :: Text)],
+ "description" .= ("List of URLs to read and summarize" :: Text)
+ ]
+ ],
+ "required" .= (["urls"] :: [Text])
+ ],
+ Engine.toolExecute = executeWebReader apiKey
+ }
+
+executeWebReader :: Text -> Aeson.Value -> IO Aeson.Value
+executeWebReader apiKey v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: WebReaderArgs) -> do
+ let urls = wrUrls args
+ dbg ("[webreader] Starting batch: " <> tshow (length urls) <> " URLs")
+ results <- Sema.mapPool maxConcurrentFetches (fetchAndSummarize apiKey) urls
+ let succeeded = length (filter isSuccess results)
+ dbg ("[webreader] Batch complete: " <> tshow succeeded <> "/" <> tshow (length urls) <> " succeeded")
+ pure
+ ( Aeson.object
+ [ "results" .= results,
+ "total" .= length urls,
+ "succeeded" .= succeeded
+ ]
+ )
+ where
+ isSuccess (Aeson.Object obj) = KeyMap.member "success" obj
+ isSuccess _ = False
+
+newtype WebReaderArgs = WebReaderArgs
+ { wrUrls :: [Text]
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON WebReaderArgs where
+ parseJSON =
+ Aeson.withObject "WebReaderArgs" <| \v ->
+ WebReaderArgs </ (v Aeson..: "urls")
diff --git a/Omni/Agent/Tools/WebReaderTest.hs b/Omni/Agent/Tools/WebReaderTest.hs
new file mode 100644
index 0000000..ca4c119
--- /dev/null
+++ b/Omni/Agent/Tools/WebReaderTest.hs
@@ -0,0 +1,53 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Quick test for WebReader to debug hangs
+--
+-- : out webreader-test
+-- : dep http-conduit
+-- : run trafilatura
+module Omni.Agent.Tools.WebReaderTest where
+
+import Alpha
+import qualified Data.Text as Text
+import qualified Data.Text.IO as TIO
+import Data.Time.Clock (diffUTCTime, getCurrentTime)
+import qualified Omni.Agent.Tools.WebReader as WebReader
+
+main :: IO ()
+main = do
+ TIO.putStrLn "=== WebReader Debug Test ==="
+
+ TIO.putStrLn "\n--- Test 1: Small page (httpbin) ---"
+ testUrl "https://httpbin.org/html"
+
+ TIO.putStrLn "\n--- Test 2: Medium page (example.com) ---"
+ testUrl "https://example.com"
+
+ TIO.putStrLn "\n--- Test 3: Large page (github) ---"
+ testUrl "https://github.com/anthropics/skills"
+
+ TIO.putStrLn "\n=== Done ==="
+
+testUrl :: Text -> IO ()
+testUrl url = do
+ TIO.putStrLn ("Fetching: " <> url)
+
+ startFetch <- getCurrentTime
+ result <- WebReader.fetchWebpage url
+ endFetch <- getCurrentTime
+ TIO.putStrLn ("Fetch took: " <> tshow (diffUTCTime endFetch startFetch))
+
+ case result of
+ Left err -> TIO.putStrLn ("Fetch error: " <> err)
+ Right html -> do
+ TIO.putStrLn ("HTML size: " <> tshow (Text.length html) <> " chars")
+
+ TIO.putStrLn "Extracting text (naive, 100k truncated)..."
+ startExtract <- getCurrentTime
+ let !text = WebReader.extractText (Text.take 100000 html)
+ endExtract <- getCurrentTime
+ TIO.putStrLn ("Extract took: " <> tshow (diffUTCTime endExtract startExtract))
+ TIO.putStrLn ("Text size: " <> tshow (Text.length text) <> " chars")
+ TIO.putStrLn ("Preview: " <> Text.take 200 text)
diff --git a/Omni/Agent/Tools/WebSearch.hs b/Omni/Agent/Tools/WebSearch.hs
new file mode 100644
index 0000000..58c945c
--- /dev/null
+++ b/Omni/Agent/Tools/WebSearch.hs
@@ -0,0 +1,212 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Web search tool using Kagi Search API.
+--
+-- Provides web search capabilities for agents.
+--
+-- : out omni-agent-tools-websearch
+-- : dep aeson
+-- : dep http-conduit
+module Omni.Agent.Tools.WebSearch
+ ( -- * Tool
+ webSearchTool,
+
+ -- * Direct API
+ kagiSearch,
+ SearchResult (..),
+
+ -- * Testing
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Network.HTTP.Simple as HTTP
+import qualified Network.HTTP.Types.URI as URI
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Test as Test
+
+main :: IO ()
+main = Test.run test
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Agent.Tools.WebSearch"
+ [ Test.unit "SearchResult JSON parsing" <| do
+ let json =
+ Aeson.object
+ [ "t" .= (0 :: Int),
+ "url" .= ("https://example.com" :: Text),
+ "title" .= ("Example Title" :: Text),
+ "snippet" .= ("This is a snippet" :: Text)
+ ]
+ case parseSearchResult json of
+ Nothing -> Test.assertFailure "Failed to parse search result"
+ Just sr -> do
+ srUrl sr Test.@=? "https://example.com"
+ srTitle sr Test.@=? "Example Title"
+ srSnippet sr Test.@=? Just "This is a snippet",
+ Test.unit "webSearchTool has correct schema" <| do
+ let tool = webSearchTool "test-key"
+ Engine.toolName tool Test.@=? "web_search",
+ Test.unit "formatResultsForLLM formats correctly" <| do
+ let results =
+ [ SearchResult "https://a.com" "Title A" (Just "Snippet A") Nothing,
+ SearchResult "https://b.com" "Title B" Nothing Nothing
+ ]
+ formatted = formatResultsForLLM results
+ ("Title A" `Text.isInfixOf` formatted) Test.@=? True
+ ("https://a.com" `Text.isInfixOf` formatted) Test.@=? True
+ ]
+
+data SearchResult = SearchResult
+ { srUrl :: Text,
+ srTitle :: Text,
+ srSnippet :: Maybe Text,
+ srPublished :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON SearchResult where
+ toJSON r =
+ Aeson.object
+ [ "url" .= srUrl r,
+ "title" .= srTitle r,
+ "snippet" .= srSnippet r,
+ "published" .= srPublished r
+ ]
+
+parseSearchResult :: Aeson.Value -> Maybe SearchResult
+parseSearchResult val = do
+ Aeson.Object obj <- pure val
+ t <- case KeyMap.lookup "t" obj of
+ Just (Aeson.Number n) -> Just (round n :: Int)
+ _ -> Nothing
+ guard (t == 0)
+ url <- case KeyMap.lookup "url" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ title <- case KeyMap.lookup "title" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ let snippet = case KeyMap.lookup "snippet" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ published = case KeyMap.lookup "published" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ pure SearchResult {srUrl = url, srTitle = title, srSnippet = snippet, srPublished = published}
+
+kagiSearch :: Text -> Text -> Int -> IO (Either Text [SearchResult])
+kagiSearch apiKey query limit = do
+ let encodedQuery = TE.decodeUtf8 (URI.urlEncode False (TE.encodeUtf8 query))
+ url = "https://kagi.com/api/v0/search?q=" <> Text.unpack encodedQuery <> "&limit=" <> show limit
+ result <-
+ try <| do
+ req0 <- HTTP.parseRequest url
+ let req =
+ HTTP.setRequestMethod "GET"
+ <| HTTP.setRequestHeader "Authorization" ["Bot " <> TE.encodeUtf8 apiKey]
+ <| req0
+ HTTP.httpLBS req
+ case result of
+ Left (e :: SomeException) ->
+ pure (Left ("Kagi API error: " <> tshow e))
+ Right response -> do
+ let status = HTTP.getResponseStatusCode response
+ if status >= 200 && status < 300
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "data" obj of
+ Just (Aeson.Array arr) ->
+ pure (Right (mapMaybe parseSearchResult (toList arr)))
+ _ -> pure (Left "No data in response")
+ _ -> pure (Left "Failed to parse Kagi response")
+ else case Aeson.decode (HTTP.getResponseBody response) of
+ Just (Aeson.Object obj) -> case KeyMap.lookup "error" obj of
+ Just errArr -> pure (Left ("Kagi error: " <> tshow errArr))
+ _ -> pure (Left ("Kagi HTTP error: " <> tshow status))
+ _ -> pure (Left ("Kagi HTTP error: " <> tshow status))
+
+formatResultsForLLM :: [SearchResult] -> Text
+formatResultsForLLM [] = "No results found."
+formatResultsForLLM results =
+ Text.unlines (zipWith formatResult [1 ..] results)
+ where
+ formatResult :: Int -> SearchResult -> Text
+ formatResult n r =
+ tshow n
+ <> ". "
+ <> srTitle r
+ <> "\n "
+ <> srUrl r
+ <> maybe "" (\s -> "\n " <> Text.take 200 s) (srSnippet r)
+
+webSearchTool :: Text -> Engine.Tool
+webSearchTool apiKey =
+ Engine.Tool
+ { Engine.toolName = "web_search",
+ Engine.toolDescription =
+ "Search the web using Kagi. Use this to find current information, "
+ <> "verify facts, look up documentation, or research topics. "
+ <> "Returns titles, URLs, and snippets from search results.",
+ Engine.toolJsonSchema =
+ Aeson.object
+ [ "type" .= ("object" :: Text),
+ "properties"
+ .= Aeson.object
+ [ "query"
+ .= Aeson.object
+ [ "type" .= ("string" :: Text),
+ "description" .= ("The search query" :: Text)
+ ],
+ "limit"
+ .= Aeson.object
+ [ "type" .= ("integer" :: Text),
+ "description" .= ("Max results to return (default: 10, max: 20)" :: Text)
+ ]
+ ],
+ "required" .= (["query"] :: [Text])
+ ],
+ Engine.toolExecute = executeWebSearch apiKey
+ }
+
+executeWebSearch :: Text -> Aeson.Value -> IO Aeson.Value
+executeWebSearch apiKey v =
+ case Aeson.fromJSON v of
+ Aeson.Error e -> pure (Aeson.object ["error" .= Text.pack e])
+ Aeson.Success (args :: WebSearchArgs) -> do
+ let lim = min 20 (max 1 (wsLimit args))
+ result <- kagiSearch apiKey (wsQuery args) lim
+ case result of
+ Left err ->
+ pure (Aeson.object ["error" .= err])
+ Right results ->
+ pure
+ ( Aeson.object
+ [ "success" .= True,
+ "count" .= length results,
+ "results" .= formatResultsForLLM results
+ ]
+ )
+
+data WebSearchArgs = WebSearchArgs
+ { wsQuery :: Text,
+ wsLimit :: Int
+ }
+ deriving (Generic)
+
+instance Aeson.FromJSON WebSearchArgs where
+ parseJSON =
+ Aeson.withObject "WebSearchArgs" <| \v ->
+ (WebSearchArgs </ (v Aeson..: "query"))
+ <*> (v Aeson..:? "limit" Aeson..!= 10)
diff --git a/Omni/Agent/Worker.hs b/Omni/Agent/Worker.hs
new file mode 100644
index 0000000..d6afb73
--- /dev/null
+++ b/Omni/Agent/Worker.hs
@@ -0,0 +1,665 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+module Omni.Agent.Worker
+ ( start,
+ buildFullPrompt,
+ selectModel,
+ selectCostByComplexity,
+ )
+where
+
+import Alpha
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.Key as AesonKey
+import qualified Data.ByteString.Lazy as BSL
+import Data.IORef (modifyIORef', newIORef, readIORef)
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import qualified Data.Time
+import qualified Omni.Agent.Core as Core
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Provider as Provider
+import qualified Omni.Agent.Status as AgentStatus
+import qualified Omni.Agent.Tools as Tools
+import qualified Omni.Fact as Fact
+import qualified Omni.Task.Core as TaskCore
+import qualified System.Directory as Directory
+import qualified System.Environment as Env
+import qualified System.Exit as Exit
+import System.FilePath ((</>))
+import qualified System.Process as Process
+
+start :: Core.Worker -> Maybe Text -> IO ()
+start worker maybeTaskId = do
+ if Core.workerQuiet worker
+ then putText ("[worker] Starting for " <> Core.workerName worker)
+ else do
+ AgentStatus.init (Core.workerName worker)
+ AgentStatus.log ("[worker] Starting for " <> Core.workerName worker)
+ case maybeTaskId of
+ Just tid -> logMsg worker ("[worker] Target task: " <> tid)
+ Nothing -> logMsg worker "[worker] No specific task, will pick from ready queue"
+ runOnce worker maybeTaskId
+
+-- | Log message respecting quiet mode
+logMsg :: Core.Worker -> Text -> IO ()
+logMsg worker msg =
+ if Core.workerQuiet worker
+ then putText msg
+ else AgentStatus.log msg
+
+-- | Convert key-value pairs to JSON metadata string
+toMetadata :: [(Text, Text)] -> Text
+toMetadata pairs =
+ let obj = Aeson.object [(AesonKey.fromText k, Aeson.String v) | (k, v) <- pairs]
+ in TE.decodeUtf8 (BSL.toStrict (Aeson.encode obj))
+
+-- | Format guardrail result for logging
+formatGuardrailResult :: Engine.GuardrailResult -> Text
+formatGuardrailResult Engine.GuardrailOk = "OK"
+formatGuardrailResult (Engine.GuardrailCostExceeded actual limit) =
+ "Cost exceeded: " <> tshow actual <> " cents (limit: " <> tshow limit <> ")"
+formatGuardrailResult (Engine.GuardrailTokensExceeded actual limit) =
+ "Tokens exceeded: " <> tshow actual <> " (limit: " <> tshow limit <> ")"
+formatGuardrailResult (Engine.GuardrailDuplicateToolCalls tool count) =
+ "Duplicate tool calls: " <> tool <> " called " <> tshow count <> " times"
+formatGuardrailResult (Engine.GuardrailTestFailures count) =
+ "Test failures: " <> tshow count <> " failures"
+formatGuardrailResult (Engine.GuardrailEditFailures count) =
+ "Edit failures: " <> tshow count <> " 'old_str not found' errors"
+
+runOnce :: Core.Worker -> Maybe Text -> IO ()
+runOnce worker maybeTaskId = do
+ -- Find work
+ targetTask <- case maybeTaskId of
+ Just tid -> do
+ TaskCore.findTask tid </ TaskCore.loadTasks
+ Nothing -> do
+ readyTasks <- TaskCore.getReadyTasks
+ case readyTasks of
+ [] -> pure Nothing
+ (task : _) -> pure (Just task)
+
+ case targetTask of
+ Nothing -> do
+ case maybeTaskId of
+ Just tid -> do
+ unless (Core.workerQuiet worker) <| AgentStatus.updateActivity ("Task " <> tid <> " not found.")
+ logMsg worker ("[worker] Task " <> tid <> " not found.")
+ Nothing -> do
+ unless (Core.workerQuiet worker) <| AgentStatus.updateActivity "No work found."
+ logMsg worker "[worker] No ready tasks found."
+ Just task -> do
+ processTask worker task
+
+processTask :: Core.Worker -> TaskCore.Task -> IO ()
+processTask worker task = do
+ let repo = Core.workerPath worker
+ let tid = TaskCore.taskId task
+ let quiet = Core.workerQuiet worker
+ let say = logMsg worker
+
+ unless quiet <| AgentStatus.update (\s -> s {AgentStatus.statusTask = Just tid})
+ say ("[worker] Claiming task " <> tid)
+
+ -- Claim task
+ TaskCore.logActivity tid TaskCore.Claiming Nothing
+ TaskCore.updateTaskStatusWithActor tid TaskCore.InProgress [] TaskCore.Junior
+ say "[worker] Status -> InProgress"
+
+ -- Run agent with timing
+ startTime <- Data.Time.getCurrentTime
+ activityId <- TaskCore.logActivityWithMetrics tid TaskCore.Running Nothing Nothing (Just startTime) Nothing Nothing Nothing
+
+ say "[worker] Starting engine..."
+ engineResult <- runWithEngine worker repo task
+
+ endTime <- Data.Time.getCurrentTime
+
+ -- Update the activity record with metrics (convert Double to Int by rounding)
+ let costCents = case engineResult of
+ EngineSuccess _ c -> c
+ EngineGuardrailViolation _ c -> c
+ EngineError _ c -> c
+ TaskCore.updateActivityMetrics activityId Nothing (Just endTime) (Just (round costCents)) Nothing
+
+ case engineResult of
+ EngineSuccess output _ -> do
+ say "[worker] Agent completed successfully"
+ TaskCore.logActivity tid TaskCore.Reviewing Nothing
+ say "[worker] Running formatters..."
+ _ <- runFormatters repo
+
+ -- Try to commit (this runs git hooks which may fail)
+ let commitMsg = formatCommitMessage task output
+ say "[worker] Attempting commit..."
+ commitResult <- tryCommit repo commitMsg
+
+ case commitResult of
+ CommitFailed commitErr -> do
+ say ("[worker] Commit failed: " <> commitErr)
+
+ -- Save failure context and reopen task for retry
+ maybeCtx <- TaskCore.getRetryContext tid
+ let attempt = maybe 1 (\c -> TaskCore.retryAttempt c + 1) maybeCtx
+
+ if attempt > 3
+ then do
+ say "[worker] Task failed 3 times, needs human intervention"
+ TaskCore.logActivity tid TaskCore.Failed (Just (toMetadata [("reason", "max_retries_exceeded")]))
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.Junior
+ else do
+ let currentReason = "attempt " <> tshow attempt <> ": commit_failed: " <> commitErr
+ let accumulatedReason = case maybeCtx of
+ Nothing -> currentReason
+ Just ctx -> TaskCore.retryReason ctx <> "\n" <> currentReason
+ TaskCore.setRetryContext
+ TaskCore.RetryContext
+ { TaskCore.retryTaskId = tid,
+ TaskCore.retryOriginalCommit = "",
+ TaskCore.retryConflictFiles = [],
+ TaskCore.retryAttempt = attempt,
+ TaskCore.retryReason = accumulatedReason,
+ TaskCore.retryNotes = maybeCtx +> TaskCore.retryNotes
+ }
+ TaskCore.logActivity tid TaskCore.Retrying (Just (toMetadata [("attempt", tshow attempt)]))
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.Junior
+ say ("[worker] Task reopened (attempt " <> tshow attempt <> "/3)")
+ NoChanges -> do
+ -- No changes = task already implemented, mark as Done
+ say "[worker] No changes to commit - task already done"
+ TaskCore.clearRetryContext tid
+ TaskCore.logActivity tid TaskCore.Completed (Just (toMetadata [("result", "no_changes")]))
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Done [] TaskCore.Junior
+ say ("[worker] ✓ Task " <> tid <> " -> Done (no changes)")
+ unless quiet <| AgentStatus.update (\s -> s {AgentStatus.statusTask = Nothing})
+ CommitSuccess -> do
+ -- Commit succeeded, set to Review
+ TaskCore.logActivity tid TaskCore.Completed (Just (toMetadata [("result", "committed")]))
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Review [] TaskCore.Junior
+ say ("[worker] ✓ Task " <> tid <> " -> Review")
+ unless quiet <| AgentStatus.update (\s -> s {AgentStatus.statusTask = Nothing})
+ EngineGuardrailViolation errMsg _ -> do
+ say ("[worker] Guardrail violation: " <> errMsg)
+ TaskCore.logActivity tid TaskCore.Failed (Just (toMetadata [("reason", "guardrail_violation")]))
+ -- Add comment with guardrail details
+ _ <- TaskCore.addComment tid errMsg TaskCore.Junior
+ -- Set to NeedsHelp so human can review
+ TaskCore.updateTaskStatusWithActor tid TaskCore.NeedsHelp [] TaskCore.Junior
+ say ("[worker] Task " <> tid <> " -> NeedsHelp (guardrail violation)")
+ unless quiet <| AgentStatus.update (\s -> s {AgentStatus.statusTask = Nothing})
+ EngineError errMsg _ -> do
+ say ("[worker] Engine error: " <> errMsg)
+ TaskCore.logActivity tid TaskCore.Failed (Just (toMetadata [("reason", "engine_error")]))
+ -- Don't set back to Open here - leave in InProgress for debugging
+ say "[worker] Task left in InProgress (engine failure)"
+
+-- | Run lint --fix to format and fix lint issues
+runFormatters :: FilePath -> IO (Either Text ())
+runFormatters repo = do
+ let cmd = (Process.proc "lint" ["--fix"]) {Process.cwd = Just repo}
+ (code, _, _) <- Process.readCreateProcessWithExitCode cmd ""
+ case code of
+ Exit.ExitSuccess -> pure (Right ())
+ Exit.ExitFailure _ -> pure (Right ()) -- lint --fix may exit non-zero but still fix things
+
+data CommitResult = CommitSuccess | NoChanges | CommitFailed Text
+ deriving (Show, Eq)
+
+-- | Try to commit, returning result
+tryCommit :: FilePath -> Text -> IO CommitResult
+tryCommit repo msg = do
+ -- Stage all changes
+ let addCmd = (Process.proc "git" ["add", "."]) {Process.cwd = Just repo}
+ (addCode, _, addErr) <- Process.readCreateProcessWithExitCode addCmd ""
+ case addCode of
+ Exit.ExitFailure _ -> pure <| CommitFailed (Text.pack addErr)
+ Exit.ExitSuccess -> do
+ -- Check for changes
+ let checkCmd = (Process.proc "git" ["diff", "--cached", "--quiet"]) {Process.cwd = Just repo}
+ (checkCode, _, _) <- Process.readCreateProcessWithExitCode checkCmd ""
+ case checkCode of
+ Exit.ExitSuccess -> pure NoChanges
+ Exit.ExitFailure 1 -> do
+ -- There are changes, commit them
+ let commitCmd = (Process.proc "git" ["commit", "-m", Text.unpack msg]) {Process.cwd = Just repo}
+ (commitCode, _, commitErr) <- Process.readCreateProcessWithExitCode commitCmd ""
+ case commitCode of
+ Exit.ExitSuccess -> pure CommitSuccess
+ Exit.ExitFailure _ -> pure <| CommitFailed (Text.pack commitErr)
+ Exit.ExitFailure c -> pure <| CommitFailed ("git diff failed with code " <> tshow c)
+
+data EngineResult
+ = EngineSuccess Text Double -- output, cost
+ | EngineGuardrailViolation Text Double -- error message, cost
+ | EngineError Text Double -- error message, cost
+
+-- | Run task using native Engine
+-- Returns engine result with output/error and cost
+runWithEngine :: Core.Worker -> FilePath -> TaskCore.Task -> IO EngineResult
+runWithEngine worker repo task = do
+ -- Read API key from environment
+ maybeApiKey <- Env.lookupEnv "OPENROUTER_API_KEY"
+ case maybeApiKey of
+ Nothing -> pure (EngineError "OPENROUTER_API_KEY not set" 0)
+ Just apiKey -> do
+ -- Check for retry context
+ maybeRetry <- TaskCore.getRetryContext (TaskCore.taskId task)
+
+ -- Get progress from database (checkpoint events from previous sessions)
+ progressContent <- TaskCore.getProgressSummary (TaskCore.taskId task)
+
+ -- Build the full prompt
+ let ns = fromMaybe "." (TaskCore.taskNamespace task)
+ let basePrompt = buildBasePrompt ns repo
+
+ -- Add progress context if present
+ let progressPrompt = buildProgressPrompt progressContent
+
+ -- Add retry context if present
+ let retryPrompt = buildRetryPrompt maybeRetry
+
+ let prompt = basePrompt <> progressPrompt <> retryPrompt
+
+ -- Read AGENTS.md
+ agentsMd <-
+ fmap (fromMaybe "") <| do
+ exists <- Directory.doesFileExist (repo </> "AGENTS.md")
+ if exists
+ then Just </ readFile (repo </> "AGENTS.md")
+ else pure Nothing
+
+ -- Get relevant facts from the knowledge base
+ relevantFacts <- getRelevantFacts task
+ let factsSection = formatFacts relevantFacts
+
+ -- Build system prompt
+ let systemPrompt =
+ prompt
+ <> "\n\nREPOSITORY GUIDELINES (AGENTS.md):\n"
+ <> agentsMd
+ <> factsSection
+
+ -- Build user prompt from task comments
+ let userPrompt = formatTask task
+
+ -- Select model based on task complexity (simple heuristic)
+ let model = selectModel task
+
+ -- Generate session ID for event logging
+ sessionId <- TaskCore.generateSessionId
+ let tid = TaskCore.taskId task
+
+ -- Helper to log events to DB
+ -- For text content, store as-is; for structured data, JSON-encode
+ let logJuniorEvent eventType content = TaskCore.insertAgentEvent tid sessionId eventType content TaskCore.Junior
+ logJuniorJson eventType value = do
+ let contentJson = TE.decodeUtf8 (BSL.toStrict (Aeson.encode value))
+ TaskCore.insertAgentEvent tid sessionId eventType contentJson TaskCore.Junior
+ logSystemEvent eventType content = TaskCore.insertAgentEvent tid sessionId eventType content TaskCore.System
+
+ -- Build Engine config with callbacks
+ totalCostRef <- newIORef (0 :: Double)
+ let quiet = Core.workerQuiet worker
+ sayLog msg = if quiet then putText msg else AgentStatus.log msg
+ engineCfg =
+ Engine.EngineConfig
+ { Engine.engineLLM =
+ Engine.defaultLLM
+ { Engine.llmApiKey = Text.pack apiKey
+ },
+ Engine.engineOnCost = \tokens cost -> do
+ modifyIORef' totalCostRef (+ cost)
+ sayLog <| "Cost: " <> tshow cost <> " cents (" <> tshow tokens <> " tokens)"
+ logJuniorJson "Cost" (Aeson.object [("tokens", Aeson.toJSON tokens), ("cents", Aeson.toJSON cost)]),
+ Engine.engineOnActivity = \activity -> do
+ sayLog <| "[engine] " <> activity,
+ Engine.engineOnToolCall = \toolName args -> do
+ sayLog <| "[tool] " <> toolName
+ logJuniorEvent "ToolCall" (toolName <> ": " <> args),
+ Engine.engineOnAssistant = \msg -> do
+ sayLog <| "[assistant] " <> Text.take 200 msg
+ logJuniorEvent "Assistant" msg,
+ Engine.engineOnToolResult = \toolName success output -> do
+ let statusStr = if success then "ok" else "failed"
+ sayLog <| "[result] " <> toolName <> " (" <> statusStr <> "): " <> Text.take 100 output
+ logJuniorEvent "ToolResult" output,
+ Engine.engineOnComplete = do
+ sayLog "[engine] Complete"
+ logJuniorEvent "Complete" "",
+ Engine.engineOnError = \err -> do
+ sayLog <| "[error] " <> err
+ logJuniorEvent "Error" err,
+ Engine.engineOnGuardrail = \guardrailResult -> do
+ let guardrailMsg = formatGuardrailResult guardrailResult
+ contentJson = TE.decodeUtf8 (BSL.toStrict (Aeson.encode guardrailResult))
+ sayLog <| "[guardrail] " <> guardrailMsg
+ logSystemEvent "Guardrail" contentJson
+ }
+
+ -- Build Agent config with guardrails (scale cost by complexity)
+ let baseCost = selectCostByComplexity (TaskCore.taskComplexity task)
+ guardrails =
+ Engine.Guardrails
+ { Engine.guardrailMaxCostCents = baseCost,
+ Engine.guardrailMaxTokens = 2000000,
+ Engine.guardrailMaxDuplicateToolCalls = 30,
+ Engine.guardrailMaxTestFailures = 3,
+ Engine.guardrailMaxEditFailures = 5
+ }
+ agentCfg =
+ Engine.AgentConfig
+ { Engine.agentModel = model,
+ Engine.agentTools = Tools.allTools,
+ Engine.agentSystemPrompt = systemPrompt,
+ Engine.agentMaxIterations = 100,
+ Engine.agentGuardrails = guardrails
+ }
+
+ -- Run the agent with appropriate provider
+ result <- case Core.workerEngine worker of
+ Core.EngineOpenRouter -> Engine.runAgent engineCfg agentCfg userPrompt
+ Core.EngineOllama -> do
+ ollamaModel <- fromMaybe "llama3.1:8b" </ Env.lookupEnv "OLLAMA_MODEL"
+ let provider = Provider.defaultOllama (Text.pack ollamaModel)
+ Engine.runAgentWithProvider engineCfg provider agentCfg userPrompt
+ Core.EngineAmp -> pure (Left "Amp engine not yet implemented")
+ totalCost <- readIORef totalCostRef
+
+ case result of
+ Left err ->
+ if "Guardrail: " `Text.isPrefixOf` err
+ then pure (EngineGuardrailViolation err totalCost)
+ else pure (EngineError ("Engine error: " <> err) totalCost)
+ Right agentResult -> do
+ let output = Engine.resultFinalMessage agentResult
+ pure (EngineSuccess output totalCost)
+
+-- | Build the base prompt for the agent
+buildBasePrompt :: Text -> FilePath -> Text
+buildBasePrompt ns repo =
+ "You are `jr`, an autonomous Senior Software Engineer. You are rigorous, efficient, and safety-conscious.\n"
+ <> "Your Goal: Complete the assigned task with **zero regressions**.\n\n"
+ <> "# The Workflow\n"
+ <> "Follow this 4-phase loop. Do not skip phases.\n\n"
+ <> "## Phase 1: Exploration (MANDATORY)\n"
+ <> "- NEVER edit immediately. Explore first.\n"
+ <> "- Use search_and_read to find code relevant to the task.\n"
+ <> "- Read the imports. Read the tests that cover this code.\n"
+ <> "- Understand the *callers* of a function before you modify it.\n\n"
+ <> "## Phase 2: Planning (for multi-file changes)\n"
+ <> "- If the task involves more than 2 files, plan the order of operations.\n"
+ <> "- Identify potential breaking changes (API shifts, import cycles).\n"
+ <> "- For refactors: copy code first, verify it works, then delete the original.\n\n"
+ <> "## Phase 3: Execution\n"
+ <> "- Make atomic changes. One logical edit per edit_file call.\n"
+ <> "- Use edit_file with sufficient context (5+ lines) to match uniquely.\n"
+ <> "- Do NOT update task status or manage git - the worker handles that.\n\n"
+ <> "## Phase 4: Verification\n"
+ <> "- Run 'bild --test "
+ <> ns
+ <> "' after your changes.\n"
+ <> "- 'bild --test' tests ALL dependencies transitively - run it ONCE, not per-file.\n"
+ <> "- Use 'lint --fix' to handle formatting (not hlint directly).\n"
+ <> "- If tests pass, STOP. Do not verify again, do not double-check.\n\n"
+ <> "# Tool Usage\n\n"
+ <> "Your tools: read_file, write_file, edit_file, run_bash, search_codebase, search_and_read.\n\n"
+ <> "## Efficient Reading (CRITICAL FOR BUDGET)\n"
+ <> "- Read files ONCE with large ranges (500+ lines), not many small 100-line chunks.\n"
+ <> "- WRONG: 10 separate read_file calls with 100-line ranges on the same file.\n"
+ <> "- RIGHT: 1-2 read_file calls with 500-1000 line ranges to cover the file.\n"
+ <> "- When you know the target file, use read_file directly with a path argument.\n"
+ <> "- WRONG: search_and_read across the whole repo when you know the file is Worker.py.\n"
+ <> "- RIGHT: read_file on Worker.py, or search_codebase with path='Worker.py'.\n"
+ <> "- search_and_read is for discovery when you DON'T know which file to look in.\n\n"
+ <> "## Efficient Editing\n"
+ <> "- Include enough context in old_str to match uniquely (usually 5+ lines).\n"
+ <> "- If edit_file fails with 'old_str not found', you are hallucinating the content.\n"
+ <> "- STOP. Call read_file on those exact lines to get fresh content. Then retry.\n"
+ <> "- After 3 failed edits on the same file, reconsider your approach.\n\n"
+ <> "## Cost Awareness\n"
+ <> "- Each tool call costs tokens. Large file writes are expensive.\n"
+ <> "- For refactors: plan all new files first, then write them in order.\n"
+ <> "- Don't write a file, then immediately read it back - you just wrote it!\n"
+ <> "- Monitor your progress: if you're on tool call 30+ and not close to done, simplify.\n\n"
+ <> "# Debugging\n"
+ <> "If 'bild' fails, do NOT guess the fix.\n"
+ <> "1. Read the error output carefully.\n"
+ <> "2. For type errors: read the definition of the types involved.\n"
+ <> "3. For import cycles: create a Types or Common module to break the cycle.\n"
+ <> "4. If tests fail 3 times on the same issue, STOP - the task will be marked for human review.\n\n"
+ <> "# Examples\n\n"
+ <> "## Example: Splitting a Module\n"
+ <> "1. search_and_read to understand the file structure\n"
+ <> "2. write_file NewModule.py (with extracted code + proper imports)\n"
+ <> "3. edit_file Original.py (remove moved code, add 'from NewModule import ...')\n"
+ <> "4. run_bash: bild --test <namespace>\n"
+ <> "5. Tests pass -> STOP\n\n"
+ <> "## Example: Fixing a Type Error\n"
+ <> "1. read_file Main.hs (lines around the error)\n"
+ <> "2. Identify: function expects Text but got String\n"
+ <> "3. edit_file Main.hs (add import, apply T.pack)\n"
+ <> "4. run_bash: bild --test <namespace>\n"
+ <> "5. Tests pass -> STOP\n\n"
+ <> "# Constraints\n"
+ <> "- You are autonomous. There is NO human to ask for clarification.\n"
+ <> "- Make reasonable decisions. If ambiguous, implement the straightforward interpretation.\n"
+ <> "- Aim to complete the task in under 50 tool calls.\n"
+ <> "- Guardrails will stop you if you exceed cost/token limits or make repeated mistakes.\n\n"
+ <> "# Context\n"
+ <> "- Working directory: "
+ <> Text.pack repo
+ <> "\n"
+ <> "- Namespace: "
+ <> ns
+ <> "\n"
+
+-- | Build progress context prompt
+buildProgressPrompt :: Maybe Text -> Text
+buildProgressPrompt Nothing = ""
+buildProgressPrompt (Just progress) =
+ "\n\n## PROGRESS FROM PREVIOUS SESSIONS (from database)\n\n"
+ <> "This task has been worked on before. Here are the checkpoint notes:\n\n"
+ <> progress
+ <> "\n\n"
+ <> "IMPORTANT:\n"
+ <> "- Review these checkpoints to understand what's already done\n"
+ <> "- Do NOT repeat work that's already completed\n"
+ <> "- If the task appears complete, verify tests pass and exit\n\n"
+
+-- | Build retry context prompt
+buildRetryPrompt :: Maybe TaskCore.RetryContext -> Text
+buildRetryPrompt Nothing = ""
+buildRetryPrompt (Just ctx) =
+ "\n\n## RETRY CONTEXT (IMPORTANT)\n\n"
+ <> "This task was previously attempted but failed. Attempt: "
+ <> tshow (TaskCore.retryAttempt ctx)
+ <> "/3\n"
+ <> "Reason: "
+ <> TaskCore.retryReason ctx
+ <> "\n\n"
+ <> ( if null (TaskCore.retryConflictFiles ctx)
+ then ""
+ else
+ "Conflicting files from previous attempt:\n"
+ <> Text.unlines (map (" - " <>) (TaskCore.retryConflictFiles ctx))
+ <> "\n"
+ )
+ <> "Original commit: "
+ <> TaskCore.retryOriginalCommit ctx
+ <> "\n\n"
+ <> maybe "" (\notes -> "## HUMAN NOTES/GUIDANCE\n\n" <> notes <> "\n\n") (TaskCore.retryNotes ctx)
+ <> "INSTRUCTIONS FOR RETRY:\n"
+ <> "- The codebase has changed since your last attempt\n"
+ <> "- Re-implement this task on top of the CURRENT codebase\n"
+ <> "- If there were merge conflicts, the conflicting files may have been modified by others\n"
+ <> "- Review the current state of those files before making changes\n"
+
+-- | Select model based on task complexity (1-5 scale)
+-- Uses OpenRouter model identifiers for Claude models
+selectModel :: TaskCore.Task -> Text
+selectModel task = selectModelByComplexity (TaskCore.taskComplexity task)
+
+-- | Select model based on complexity level
+selectModelByComplexity :: Maybe Int -> Text
+selectModelByComplexity Nothing = "anthropic/claude-sonnet-4.5"
+selectModelByComplexity (Just 1) = "anthropic/claude-haiku-4.5"
+selectModelByComplexity (Just 2) = "anthropic/claude-haiku-4.5"
+selectModelByComplexity (Just 3) = "anthropic/claude-sonnet-4.5"
+selectModelByComplexity (Just 4) = "anthropic/claude-sonnet-4.5"
+selectModelByComplexity (Just 5) = "anthropic/claude-opus-4.5"
+selectModelByComplexity (Just _) = "anthropic/claude-sonnet-4.5"
+
+-- | Select cost guardrail based on complexity level (in cents)
+-- Lower complexity = lower budget, higher complexity = more room for iteration
+selectCostByComplexity :: Maybe Int -> Double
+selectCostByComplexity Nothing = 200.0
+selectCostByComplexity (Just 1) = 50.0
+selectCostByComplexity (Just 2) = 100.0
+selectCostByComplexity (Just 3) = 200.0
+selectCostByComplexity (Just 4) = 400.0
+selectCostByComplexity (Just 5) = 600.0
+selectCostByComplexity (Just _) = 200.0
+
+formatTask :: TaskCore.Task -> Text
+formatTask t =
+ "Task: "
+ <> TaskCore.taskId t
+ <> "\n"
+ <> "Title: "
+ <> TaskCore.taskTitle t
+ <> "\n"
+ <> "Type: "
+ <> Text.pack (show (TaskCore.taskType t))
+ <> "\n"
+ <> "Status: "
+ <> Text.pack (show (TaskCore.taskStatus t))
+ <> "\n"
+ <> "Priority: "
+ <> Text.pack (show (TaskCore.taskPriority t))
+ <> "\n"
+ <> maybe "" (\p -> "Parent: " <> p <> "\n") (TaskCore.taskParent t)
+ <> maybe "" (\ns -> "Namespace: " <> ns <> "\n") (TaskCore.taskNamespace t)
+ <> "Created: "
+ <> Text.pack (show (TaskCore.taskCreatedAt t))
+ <> "\n"
+ <> "Updated: "
+ <> Text.pack (show (TaskCore.taskUpdatedAt t))
+ <> "\n"
+ <> (if Text.null (TaskCore.taskDescription t) then "" else "Description:\n" <> TaskCore.taskDescription t <> "\n\n")
+ <> formatDeps (TaskCore.taskDependencies t)
+ <> formatComments (TaskCore.taskComments t)
+ where
+ formatDeps [] = ""
+ formatDeps deps = "\nDependencies:\n" <> Text.unlines (map formatDep deps)
+ formatDep dep = " - " <> TaskCore.depId dep <> " [" <> Text.pack (show (TaskCore.depType dep)) <> "]"
+ formatComments [] = ""
+ formatComments cs = "\nComments/Notes:\n" <> Text.unlines (map formatComment cs)
+ formatComment c = " [" <> Text.pack (show (TaskCore.commentCreatedAt c)) <> "] " <> TaskCore.commentText c
+
+formatCommitMessage :: TaskCore.Task -> Text -> Text
+formatCommitMessage task agentOutput =
+ let tid = TaskCore.taskId task
+ subject = cleanSubject (TaskCore.taskTitle task)
+ body = cleanBody agentOutput
+ in if Text.null body
+ then subject <> "\n\nTask-Id: " <> tid
+ else subject <> "\n\n" <> body <> "\n\nTask-Id: " <> tid
+ where
+ cleanSubject s =
+ let trailingPunct = ['.', ':', '!', '?', ',', ';', ' ', '-']
+ stripped = Text.dropWhileEnd (`elem` trailingPunct) s
+ truncated = Text.take 72 stripped
+ noPunct = Text.dropWhileEnd (`elem` trailingPunct) truncated
+ capitalized = case Text.uncons noPunct of
+ Just (c, rest) -> Text.cons (toUpper c) rest
+ Nothing -> noPunct
+ in capitalized
+
+ cleanBody :: Text -> Text
+ cleanBody output =
+ let stripped = Text.strip output
+ in if Text.null stripped
+ then ""
+ else
+ let lns = Text.lines stripped
+ cleaned = [Text.take 72 ln | ln <- lns]
+ in Text.intercalate "\n" cleaned
+
+-- | Get facts relevant to a task based on namespace/project
+getRelevantFacts :: TaskCore.Task -> IO [TaskCore.Fact]
+getRelevantFacts task = do
+ let namespace = fromMaybe "Omni" (TaskCore.taskNamespace task)
+ projectFacts <- Fact.getFactsByProject namespace
+ let sorted = List.sortBy (comparing (Down <. TaskCore.factConfidence)) projectFacts
+ pure (take 10 sorted)
+
+-- | Format facts for inclusion in the prompt
+formatFacts :: [TaskCore.Fact] -> Text
+formatFacts [] = ""
+formatFacts facts =
+ Text.unlines
+ [ "\n\nKNOWLEDGE BASE FACTS:",
+ "(These are learned patterns/conventions from previous work)",
+ ""
+ ]
+ <> Text.unlines (map formatFact facts)
+
+-- | Format a single fact for the prompt
+formatFact :: TaskCore.Fact -> Text
+formatFact f =
+ "- "
+ <> TaskCore.factContent f
+ <> ( if null (TaskCore.factRelatedFiles f)
+ then ""
+ else " [" <> Text.intercalate ", " (TaskCore.factRelatedFiles f) <> "]"
+ )
+
+-- | Build the full system prompt for a task without starting the agent.
+-- This is useful for debugging/inspecting what the agent will be told.
+buildFullPrompt :: TaskCore.Task -> IO Text
+buildFullPrompt task = do
+ repo <- Directory.getCurrentDirectory
+ let ns = fromMaybe "." (TaskCore.taskNamespace task)
+ let basePrompt = buildBasePrompt ns repo
+
+ maybeRetry <- TaskCore.getRetryContext (TaskCore.taskId task)
+ progressContent <- TaskCore.getProgressSummary (TaskCore.taskId task)
+
+ let progressPrompt = buildProgressPrompt progressContent
+ let retryPrompt = buildRetryPrompt maybeRetry
+ let prompt = basePrompt <> progressPrompt <> retryPrompt
+
+ agentsMd <-
+ fmap (fromMaybe "") <| do
+ exists <- Directory.doesFileExist (repo </> "AGENTS.md")
+ if exists
+ then Just </ readFile (repo </> "AGENTS.md")
+ else pure Nothing
+
+ relevantFacts <- getRelevantFacts task
+ let factsSection = formatFacts relevantFacts
+
+ let systemPrompt =
+ prompt
+ <> "\n\nREPOSITORY GUIDELINES (AGENTS.md):\n"
+ <> agentsMd
+ <> factsSection
+
+ let model = selectModel task
+ let costBudget = selectCostByComplexity (TaskCore.taskComplexity task)
+
+ pure
+ <| Text.unlines
+ [ "=== AGENT CONFIGURATION ===",
+ "Model: " <> model,
+ "Cost budget: " <> tshow costBudget <> " cents",
+ "",
+ "=== SYSTEM PROMPT ===",
+ systemPrompt,
+ "",
+ "=== USER PROMPT (task details) ===",
+ formatTask task
+ ]
diff --git a/Omni/App.py b/Omni/App.py
index 0c6776c..d42bb75 100644
--- a/Omni/App.py
+++ b/Omni/App.py
@@ -4,6 +4,10 @@ import enum
import os
+class AreaError(Exception):
+ """Error raised when area configuration is invalid or missing."""
+
+
class Area(enum.Enum):
"""The area we are running."""
@@ -15,7 +19,7 @@ def from_env() -> Area:
"""Load AREA from environment variable.
Raises:
- ValueError: if AREA is not defined
+ AreaError: if AREA is not defined
"""
var = os.getenv("AREA", "Test")
if var == "Test":
@@ -23,4 +27,4 @@ def from_env() -> Area:
if var == "Live":
return Area.Live
msg = "AREA not defined"
- raise ValueError(msg)
+ raise AreaError(msg)
diff --git a/Omni/Ava.hs b/Omni/Ava.hs
new file mode 100755
index 0000000..0788658
--- /dev/null
+++ b/Omni/Ava.hs
@@ -0,0 +1,69 @@
+#!/usr/bin/env run.sh
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Ava - AI assistant via Telegram.
+--
+-- Usage:
+-- ava # Uses TELEGRAM_BOT_TOKEN env var
+-- ava --token=XXX # Explicit token
+-- ava --model=MODEL # Override LLM model
+--
+-- : out ava
+-- : dep aeson
+-- : dep http-conduit
+-- : dep stm
+module Omni.Ava where
+
+import Alpha
+import qualified Data.Text as Text
+import qualified Omni.Agent.Telegram as Telegram
+import qualified Omni.Cli as Cli
+import qualified Omni.Test as Test
+import qualified System.Console.Docopt as Docopt
+import qualified System.IO as IO
+
+main :: IO ()
+main = Cli.main plan
+
+plan :: Cli.Plan ()
+plan =
+ Cli.Plan
+ { Cli.help = help,
+ Cli.move = move,
+ Cli.test = test,
+ Cli.tidy = \_ -> pure ()
+ }
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+ava - AI assistant via Telegram
+
+Usage:
+ ava [--token=TOKEN] [--model=MODEL]
+ ava test
+ ava (-h | --help)
+
+Options:
+ -h --help Show this help
+ --token=TOKEN Telegram bot token (or use TELEGRAM_BOT_TOKEN env)
+ --model=MODEL LLM model to use [default: anthropic/claude-sonnet-4]
+|]
+
+move :: Cli.Arguments -> IO ()
+move args = do
+ IO.hSetBuffering IO.stdout IO.LineBuffering
+ IO.hSetBuffering IO.stderr IO.LineBuffering
+ let maybeToken = fmap Text.pack (Cli.getArg args (Cli.longOption "token"))
+ Telegram.startBot maybeToken
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Ava"
+ [ Test.unit "help is non-empty" <| do
+ let usage = str (Docopt.usage help) :: String
+ null usage Test.@=? False
+ ]
diff --git a/Omni/Bild.hs b/Omni/Bild.hs
index 9c649a7..1ebeb05 100755..100644
--- a/Omni/Bild.hs
+++ b/Omni/Bild.hs
@@ -119,6 +119,9 @@ module Omni.Bild where
import Alpha hiding (sym, (<.>))
import qualified Conduit
import qualified Control.Concurrent.Async as Async
+import qualified Control.Concurrent.QSemN as QSemN
+import Control.Concurrent.STM (TQueue, TVar, modifyTVar', newTQueue, newTVar, readTVar, readTVarIO, tryReadTQueue, writeTQueue)
+import qualified Control.Exception as Exception
import qualified Data.Aeson as Aeson
import qualified Data.ByteString as ByteString
import qualified Data.ByteString.Char8 as Char8
@@ -128,6 +131,9 @@ import qualified Data.Char as Char
import Data.Conduit ((.|))
import qualified Data.Conduit.Combinators as Conduit
import qualified Data.Conduit.Process as Conduit
+import Data.Graph (SCC (..), stronglyConnComp)
+import Data.IORef (IORef, modifyIORef', newIORef, readIORef, writeIORef)
+import Data.List (partition)
import qualified Data.List as List
import qualified Data.Map as Map
import qualified Data.Set as Set
@@ -136,9 +142,11 @@ import qualified Data.Text as Text
import qualified Data.Text.IO as Text.IO
import qualified GHC.Conc as GHC
import qualified Network.HostName as HostName
+import qualified Numeric
import qualified Omni.Bild.Meta as Meta
import qualified Omni.Cli as Cli
import qualified Omni.Log as Log
+import qualified Omni.Log.Concurrent as LogC
import Omni.Namespace (Namespace (..))
import qualified Omni.Namespace as Namespace
import Omni.Test ((@=?))
@@ -146,13 +154,22 @@ import qualified Omni.Test as Test
import qualified System.Directory as Dir
import qualified System.Environment as Env
import qualified System.Exit as Exit
-import System.FilePath (replaceExtension, (</>))
+import System.FilePath (dropExtension, replaceExtension, takeDirectory, (</>))
import qualified System.IO as IO
import System.IO.Unsafe (unsafePerformIO)
import qualified System.Process as Process
import qualified System.Timeout as Timeout
import qualified Text.Regex.Applicative as Regex
+mapConcurrentlyBounded :: Int -> (a -> IO b) -> [a] -> IO [b]
+mapConcurrentlyBounded n f xs = do
+ sem <- QSemN.newQSemN n
+ Async.forConcurrently xs <| \x ->
+ Exception.bracket_
+ (QSemN.waitQSemN sem 1)
+ (QSemN.signalQSemN sem 1)
+ (f x)
+
main :: IO ()
main = Cli.Plan help move test_ pure |> Cli.main
where
@@ -163,19 +180,20 @@ main = Cli.Plan help move test_ pure |> Cli.main
test_bildExamples,
test_isGitIgnored,
test_isGitHook,
- test_detectPythonImports
+ test_detectPythonImports,
+ test_buildHsModuleGraph
]
test_bildBild :: Test.Tree
test_bildBild =
Test.unit "can bild bild" <| do
- root <- Env.getEnv "CODEROOT"
+ root <- getCoderoot
path <- Dir.makeAbsolute "Omni/Bild.hs"
case Namespace.fromPath root path of
Nothing -> Test.assertFailure "can't find ns for bild"
Just ns ->
- analyze mempty ns
- +> build False False 1 2
+ analyzeAll True [ns]
+ +> build False True 1 2
+> \case
[Exit.ExitFailure _] ->
Test.assertFailure "can't bild bild"
@@ -185,40 +203,63 @@ test_bildBild =
test_bildExamples :: Test.Tree
test_bildExamples =
Test.unit "can bild examples" <| do
- Env.getEnv "CODEROOT" +> \root ->
+ getCoderoot +> \root ->
["c", "hs", "lisp", "rs"]
|> map ("Omni/Bild/Example." <>)
|> traverse Dir.makeAbsolute
/> map (Namespace.fromPath root)
/> catMaybes
- +> foldM analyze mempty
- +> build False False 4 1
+ +> analyzeAll True
+ +> build False True 4 1
+> \case
[] -> Test.assertFailure "asdf"
xs -> all (== Exit.ExitSuccess) xs @=? True
move :: Cli.Arguments -> IO ()
-move args =
+move args = do
IO.hSetBuffering stdout IO.NoBuffering
- >> Env.getEnv "CODEROOT"
- +> \root ->
- Cli.getAllArgs args (Cli.argument "target")
- |> filterM Dir.doesFileExist
- +> filterM (\x -> isGitIgnored x /> don't)
- /> filter (\x -> isGitHook x |> don't)
- +> traverse Dir.makeAbsolute
- +> traverse (namespaceFromPathOrDie root)
- /> filter isBuildableNs
- +> foldM analyze mempty
- +> printOrBuild
- |> Timeout.timeout (toMillis minutes)
- +> \case
- Nothing ->
- Log.br
- >> Log.fail ["bild", "timeout after " <> tshow minutes <> " minutes"]
- >> Log.br
- >> exitWith (ExitFailure 124)
- Just s -> exitSummary s
+ root <- getCoderoot
+ loadGhcPkgCache
+ allNamespaces <-
+ Cli.getAllArgs args (Cli.argument "target")
+ |> filterM Dir.doesFileExist
+ +> filterGitIgnored
+ /> filter (\x -> isGitHook x |> don't)
+ +> traverse Dir.makeAbsolute
+ +> traverse (namespaceFromPathOrDie root)
+ let (namespaces, skippedNamespaces) = partition isBuildableNs allNamespaces
+ let isPlanMode = args `Cli.has` Cli.longOption "plan"
+ if isPlanMode
+ then do
+ analysis <- analyzeAll True namespaces
+ if Map.null analysis
+ then Log.wipe >> Log.fail ["bild", "nothing to build"] >> Log.br >> exitWith (ExitFailure 1)
+ else putJSON analysis
+ else do
+ when (null allNamespaces) <| do
+ Log.wipe >> Log.fail ["bild", "nothing to build"] >> Log.br >> exitWith (ExitFailure 1)
+ nproc <- GHC.getNumProcessors
+ createHier root
+ let runWithManager action =
+ if isLoud
+ then action
+ else
+ LogC.withLineManager allNamespaces <| \mgr -> do
+ LogC.initializeLines mgr
+ forM_ skippedNamespaces <| \ns -> LogC.updateLineState ns LogC.Skipped
+ action
+ runWithManager <| do
+ pipelineBuild isTest isLoud (not noCache) 8 jobs (cpus nproc) namespaces analyzeOne
+ |> Timeout.timeout (toMillis minutes)
+ +> \case
+ Nothing ->
+ Log.br
+ >> Log.fail ["bild", "timeout after " <> tshow minutes <> " minutes"]
+ >> Log.br
+ >> exitWith (ExitFailure 124)
+ Just s -> do
+ when (all isSuccess s) saveGhcPkgCache
+ exitSummary s
where
minutes =
Cli.getArgWithDefault args "10" (Cli.longOption "time")
@@ -226,20 +267,6 @@ move args =
|> \case
Nothing -> panic "could not read --time argument"
Just n -> (n == 0) ?: (-1, n)
- printOrBuild :: Analysis -> IO [ExitCode]
- printOrBuild targets
- | Map.null targets =
- Log.wipe
- >> Log.fail ["bild", "nothing to build"]
- >> Log.br
- >> exitWith (ExitFailure 1)
- | args `Cli.has` Cli.longOption "plan" =
- Log.wipe >> putJSON targets >> pure [Exit.ExitSuccess]
- | otherwise = do
- root <- Env.getEnv "CODEROOT"
- nproc <- GHC.getNumProcessors
- createHier root
- build isTest isLoud jobs (cpus nproc) targets
cpus :: Int -> Int
cpus nproc =
Cli.longOption "cpus"
@@ -258,6 +285,7 @@ move args =
Just n -> n
isTest = args `Cli.has` Cli.longOption "test"
isLoud = args `Cli.has` Cli.longOption "loud"
+ noCache = args `Cli.has` Cli.longOption "no-cache"
putJSON = Aeson.encode .> ByteString.Lazy.toStrict .> Char8.putStrLn
-- | Don't try to build stuff that isn't part of the git repo.
@@ -268,6 +296,20 @@ isGitIgnored path =
(ExitSuccess, _, _) -> pure True
(ExitFailure _, _, _) -> pure False
+filterGitIgnored :: [FilePath] -> IO [FilePath]
+filterGitIgnored [] = pure []
+filterGitIgnored paths = do
+ (exitCode, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["check-ignore", "--stdin"]
+ (List.intercalate "\n" paths)
+ case exitCode of
+ ExitSuccess ->
+ let ignoredPaths = Set.fromList (String.lines out)
+ in pure [p | p <- paths, don't (Set.member p ignoredPaths)]
+ ExitFailure _ -> pure paths
+
test_isGitIgnored :: Test.Tree
test_isGitIgnored =
Test.group
@@ -289,10 +331,10 @@ test_isGitHook =
Test.group
"isGitHook"
[ Test.unit "filters pre-commit hook" <| do
- root <- Env.getEnv "CODEROOT"
+ root <- getCoderoot
True @=? (isGitHook <| root <> "/Omni/Ide/hooks/pre-commit"),
Test.unit "doesn't filter non-hooks" <| do
- root <- Env.getEnv "CODEROOT"
+ root <- getCoderoot
False @=? (isGitHook <| root <> "/Omni/Bild.hs")
]
@@ -322,6 +364,7 @@ Options:
--test, -t Run tests on a target after building
--loud, -l Show all output from compiler
--plan, -p Print the build plan as JSON, don't build
+ --no-cache Skip signing and pushing to S3 binary cache
--time N Set timeout to N minutes, 0 means never timeout [default: 10]
--jobs N, -j N Build up to N jobs at once [default: 6]
--cpus N, -c N Allocate up to N cpu cores per job (default: (nproc-4)/jobs)
@@ -401,20 +444,37 @@ data Target = Target
-- | Wrapper script (if necessary)
wrapper :: Maybe Text,
-- | Runtime dependences
- rundeps :: Set Meta.Run
+ rundeps :: Set Meta.Run,
+ -- | Haskell module graph for per-module builds (Nothing means fallback to monolithic)
+ hsGraph :: Maybe HsModuleGraph
+ }
+ deriving (Show, Generic, Aeson.ToJSON)
+
+type ModuleName = Text
+
+data HsModuleNode = HsModuleNode
+ { nodePath :: FilePath,
+ nodeImports :: [ModuleName],
+ nodeHasTH :: Bool
+ }
+ deriving (Show, Generic, Aeson.ToJSON)
+
+data HsModuleGraph = HsModuleGraph
+ { graphEntry :: ModuleName,
+ graphModules :: Map ModuleName HsModuleNode
}
deriving (Show, Generic, Aeson.ToJSON)
-- | Use this to just get a target to play with at the repl.
dev_getTarget :: FilePath -> IO Target
dev_getTarget fp = do
- root <- Env.getEnv "CODEROOT"
+ root <- getCoderoot
path <- Dir.makeAbsolute fp
Namespace.fromPath root path
|> \case
Nothing -> panic "Could not get namespace from path"
Just ns ->
- analyze mempty ns
+ analyzeAll False [ns]
/> Map.lookup ns
/> \case
Nothing -> panic "Could not retrieve target from analysis"
@@ -456,7 +516,7 @@ isBuildableNs = \case
(Namespace _ Namespace.Sh) -> False
(Namespace _ Namespace.Scm) -> True
(Namespace _ Namespace.Rs) -> True
- (Namespace _ Namespace.Toml) -> True
+ (Namespace _ Namespace.Toml) -> False
-- | The default output directory. This is not IO because I don't want to
-- refactor all of my code right now, but it probably should be.
@@ -502,100 +562,100 @@ removeVersion = takeWhile (/= '.') .> butlast2
type Analysis = Map Namespace Target
-analyze :: Analysis -> Namespace -> IO Analysis
-analyze hmap ns = case Map.lookup ns hmap of
- Nothing -> do
- mTarget <- analyzeOne ns
- pure <| maybe hmap (\t -> Map.insert ns t hmap) mTarget
- Just _ -> pure hmap
- where
- analyzeOne :: Namespace -> IO (Maybe Target)
- analyzeOne namespace@(Namespace parts ext) = do
- let path = Namespace.toPath namespace
- root <- Env.getEnv "CODEROOT"
- let abspath = root </> path
- let quapath = path
- user <- Env.getEnv "USER" /> Text.pack
- host <- HostName.getHostName /> Text.pack
- Log.info ["bild", "analyze", str path]
- contentLines <-
- withFile abspath ReadMode <| \h ->
- IO.hSetEncoding h IO.utf8_bom
- >> Text.IO.hGetContents h
- /> Text.lines
- -- if the file is exe but doesn't have 'out' metadata, just use the
- -- dot-separated namespace instead
- isExe <- Dir.getPermissions quapath /> Dir.executable
- let defaultOut = isExe ?: (Just <| Namespace.dotSeparated parts, Nothing)
- case ext of
- -- basically we don't support building these
- Namespace.Css -> pure Nothing
- Namespace.Json -> pure Nothing
- Namespace.Keys -> pure Nothing
- Namespace.Md -> pure Nothing
- Namespace.None -> pure Nothing
- Namespace.Html -> pure Nothing
- Namespace.Toml -> pure Nothing
- Namespace.Py ->
- contentLines
- |> Meta.detectAll "#"
- |> \Meta.Parsed {..} ->
- detectPythonImports contentLines +> \srcs ->
- Target
- { builder = "python",
- wrapper = Nothing,
- compiler = CPython,
- compilerFlags =
- -- This doesn't really make sense for python, but I'll leave
- -- it here for eventual --dev builds
- [ "-c",
- "\"import py_compile;import os;"
- <> "py_compile.compile(file='"
- <> str quapath
- <> "', cfile=os.getenv('CODEROOT')+'/_/int/"
- <> str quapath
- <> "', doraise=True)\""
- ],
- sysdeps = psys,
- langdeps = pdep,
- outPath = outToPath pout,
- out = pout <|> defaultOut,
- packageSet = "python.packages",
- mainModule = Namespace.toModule namespace,
- rundeps = prun,
- ..
- }
- |> Just
- |> pure
- Namespace.Sh -> pure Nothing
- Namespace.C ->
- Meta.detectAll "//" contentLines |> \Meta.Parsed {..} -> do
+analyzeAll :: Bool -> [Namespace] -> IO Analysis
+analyzeAll _isPlanMode nss = do
+ targets <- mapConcurrentlyBounded 8 analyzeOne nss
+ pure <| Map.fromList <| catMaybes <| zipWith (\ns mt -> (ns,) </ mt) nss targets
+
+analyzeOne :: Namespace -> IO (Maybe Target)
+analyzeOne namespace@(Namespace parts ext) = do
+ let path = Namespace.toPath namespace
+ root <- getCoderoot
+ let abspath = root </> path
+ let quapath = path
+ user <- Env.getEnv "USER" /> Text.pack
+ host <- HostName.getHostName /> Text.pack
+ contentLines <-
+ withFile abspath ReadMode <| \h ->
+ IO.hSetEncoding h IO.utf8_bom
+ >> Text.IO.hGetContents h
+ /> Text.lines
+ isExe <- Dir.getPermissions quapath /> Dir.executable
+ let defaultOut = isExe ?: (Just <| Namespace.dotSeparated parts, Nothing)
+ case ext of
+ Namespace.Css -> pure Nothing
+ Namespace.Json -> pure Nothing
+ Namespace.Keys -> pure Nothing
+ Namespace.Md -> pure Nothing
+ Namespace.None -> pure Nothing
+ Namespace.Html -> pure Nothing
+ Namespace.Toml -> pure Nothing
+ Namespace.Py ->
+ contentLines
+ |> Meta.detectAll "#"
+ |> \Meta.Parsed {..} ->
+ detectPythonImports mempty contentLines +> \(srcs, transitiveDeps) ->
Target
- { langdeps = pdep,
- sysdeps = psys,
+ { builder = "python",
wrapper = Nothing,
- compiler = Gcc,
- builder = "c",
+ compiler = CPython,
+ compilerFlags =
+ -- This doesn't really make sense for python, but I'll leave
+ -- it here for eventual --dev builds
+ [ "-c",
+ "\"import py_compile;import os;"
+ <> "py_compile.compile(file='"
+ <> str quapath
+ <> "', cfile=os.getenv('CODEROOT')+'/_/int/"
+ <> str quapath
+ <> "', doraise=True)\""
+ ],
+ sysdeps = psys,
+ langdeps = pdep <> transitiveDeps,
+ outPath = outToPath pout,
out = pout <|> defaultOut,
- packageSet = "c.packages",
+ packageSet = "python.packages",
mainModule = Namespace.toModule namespace,
- compilerFlags = case pout of
- Just o ->
- ["-o", o, path] <> Set.toList parg |> map Text.pack
- Nothing -> panic "can only bild C exes, not libs",
- outPath = outToPath pout,
- -- implement detectCImports, then I can fill this out
- srcs = Set.empty,
rundeps = prun,
+ hsGraph = Nothing,
..
}
|> Just
|> pure
- Namespace.Hs ->
- contentLines
- |> Meta.detectAll "--"
- |> \Meta.Parsed {..} ->
- detectHaskellImports hmap contentLines +> \(langdeps, srcs) ->
+ Namespace.Sh -> pure Nothing
+ Namespace.C ->
+ Meta.detectAll "//" contentLines |> \Meta.Parsed {..} -> do
+ Target
+ { langdeps = pdep,
+ sysdeps = psys,
+ wrapper = Nothing,
+ compiler = Gcc,
+ builder = "c",
+ out = pout <|> defaultOut,
+ packageSet = "c.packages",
+ mainModule = Namespace.toModule namespace,
+ compilerFlags = case pout of
+ Just o ->
+ ["-o", o, path] <> Set.toList parg |> map Text.pack
+ Nothing -> panic "can only bild C exes, not libs",
+ outPath = outToPath pout,
+ -- implement detectCImports, then I can fill this out
+ srcs = Set.empty,
+ rundeps = prun,
+ hsGraph = Nothing,
+ ..
+ }
+ |> Just
+ |> pure
+ Namespace.Hs ->
+ contentLines
+ |> Meta.detectAll "--"
+ |> \Meta.Parsed {..} ->
+ detectHaskellImports mempty contentLines +> \(autoDeps, srcs) -> do
+ let langdeps = autoDeps <> pdep
+ graph <- buildHsModuleGraph namespace quapath srcs
+ pure
+ <| Just
Target
{ builder = "haskell",
wrapper = Nothing,
@@ -629,182 +689,181 @@ analyze hmap ns = case Map.lookup ns hmap of
outPath = outToPath pout,
rundeps = prun,
out = pout <|> defaultOut,
+ hsGraph = graph,
..
}
- |> Just
- |> pure
- Namespace.Lisp ->
- Meta.detectOut (Meta.out ";;") contentLines |> \out -> do
- langdeps <- detectLispImports contentLines
- Just
- </ pure
- Target
- { sysdeps = Set.empty,
- wrapper = Nothing,
- compiler = Sbcl,
- packageSet = "lisp.sbclWith",
- mainModule = Namespace.toModule namespace,
- compilerFlags =
- map
- Text.pack
- [ "--eval",
- "(require :asdf)",
- "--load",
- quapath,
- "--eval",
- "(sb-ext:save-lisp-and-die #p\"" <> (root </> outToPath out) <> "\" :toplevel #'main :executable t)"
- ],
- builder = "base",
- outPath = outToPath out,
- -- add local src imports to detectLispImports, then i can fill this out
- srcs = Set.empty,
- rundeps = Set.empty,
- ..
- }
- Namespace.Nix ->
- (host == "lithium") ?: (Local user "lithium", Remote user "dev.simatime.com") |> \builder ->
+ Namespace.Lisp ->
+ Meta.detectOut (Meta.out ";;") contentLines |> \out -> do
+ langdeps <- detectLispImports contentLines
+ Just
+ </ pure
Target
- { langdeps = Set.empty,
+ { sysdeps = Set.empty,
wrapper = Nothing,
- sysdeps = Set.empty,
- compiler = NixBuild,
- compilerFlags =
- [ quapath,
- "--out-link",
- root </> nixdir </> Namespace.toPath namespace,
- "--builders",
- toNixFlag builder,
- "--arg",
- "bild",
- str <| "import " <> root </> "Omni/Bild.nix {}"
- ]
- |> map Text.pack,
- out = Nothing,
- outPath = outToPath Nothing,
- srcs = Set.empty,
- packageSet = "",
- mainModule = Namespace.toModule namespace,
- builder = "base",
- rundeps = Set.empty,
- ..
- }
- |> Just
- |> pure
- Namespace.Scm ->
- Meta.detectAll ";;" contentLines |> \Meta.Parsed {..} ->
- Target
- { langdeps = pdep,
- sysdeps = psys,
- compiler = Guile,
- packageSet = "scheme.guilePackages",
+ compiler = Sbcl,
+ packageSet = "lisp.sbclWith",
mainModule = Namespace.toModule namespace,
compilerFlags =
- [ "compile",
- "--r7rs",
- "--load-path=" ++ root,
- "--output=" ++ root </> intdir </> replaceExtension quapath ".scm.go",
- quapath
- ]
- |> map Text.pack,
- builder = "base",
- outPath = outToPath pout,
- out = pout <|> defaultOut,
- srcs = Set.empty, -- implement detectSchemeImports
- -- TODO: wrapper should just be removed, instead rely on
- -- upstream nixpkgs builders to make wrappers
- wrapper =
- isNothing pout
- ?: ( Nothing,
- [ "#!/usr/bin/env bash",
- "guile -C \""
- <> root
- </> intdir
- <> "\" -e main "
- <> "-s "
- <> Namespace.toPath namespace
- <> " \"$@\""
- ]
- |> joinWith "\n"
- |> Text.pack
- |> Just
- ),
- rundeps = prun,
- ..
- }
- |> Just
- |> pure
- Namespace.Rs ->
- Meta.detectAll "//" contentLines |> \Meta.Parsed {..} ->
- Target
- { langdeps = pdep,
- -- this packageSet doesn't actually exist because everyone in
- -- nix just generates nix expressions for rust dependencies with
- -- Cargo.lock, so I have to make it in order to use rust deps
- packageSet = "rust.packages",
- mainModule = Namespace.toModule namespace,
- wrapper = Nothing,
- sysdeps = psys <> Set.singleton "rustc",
- out = pout <|> defaultOut,
- compiler = Rustc,
- compilerFlags = case pout of
- Just o ->
- map
- Text.pack
- [ "$CODEROOT" </> path,
- "-o",
- o
- ]
- Nothing -> panic "can't build rust libs",
+ map
+ Text.pack
+ [ "--eval",
+ "(require :asdf)",
+ "--load",
+ quapath,
+ "--eval",
+ "(sb-ext:save-lisp-and-die #p\"" <> (root </> outToPath out) <> "\" :toplevel #'main :executable t)"
+ ],
builder = "base",
- outPath = outToPath pout,
- -- implement detectRustImports
+ outPath = outToPath out,
+ -- add local src imports to detectLispImports, then i can fill this out
srcs = Set.empty,
- rundeps = prun,
+ rundeps = Set.empty,
+ hsGraph = Nothing,
..
}
- |> Just
- |> pure
+ Namespace.Nix ->
+ (host == "lithium") ?: (Local user "lithium", Remote user "dev.bensima.com") |> \builder ->
+ Target
+ { langdeps = Set.empty,
+ wrapper = Nothing,
+ sysdeps = Set.empty,
+ compiler = NixBuild,
+ compilerFlags =
+ [ quapath,
+ "--out-link",
+ root </> nixdir </> Namespace.toPath namespace,
+ "--builders",
+ toNixFlag builder,
+ "--arg",
+ "bild",
+ str <| "import " <> root </> "Omni/Bild.nix {}"
+ ]
+ |> map Text.pack,
+ out = Nothing,
+ outPath = outToPath Nothing,
+ srcs = Set.empty,
+ packageSet = "",
+ mainModule = Namespace.toModule namespace,
+ builder = "base",
+ rundeps = Set.empty,
+ hsGraph = Nothing,
+ ..
+ }
+ |> Just
+ |> pure
+ Namespace.Scm ->
+ Meta.detectAll ";;" contentLines |> \Meta.Parsed {..} ->
+ Target
+ { langdeps = pdep,
+ sysdeps = psys,
+ compiler = Guile,
+ packageSet = "scheme.guilePackages",
+ mainModule = Namespace.toModule namespace,
+ compilerFlags =
+ [ "compile",
+ "--r7rs",
+ "--load-path=" ++ root,
+ "--output=" ++ root </> intdir </> replaceExtension quapath ".scm.go",
+ quapath
+ ]
+ |> map Text.pack,
+ builder = "base",
+ outPath = outToPath pout,
+ out = pout <|> defaultOut,
+ srcs = Set.empty, -- implement detectSchemeImports
+ -- TODO: wrapper should just be removed, instead rely on
+ -- upstream nixpkgs builders to make wrappers
+ wrapper =
+ isNothing pout
+ ?: ( Nothing,
+ [ "#!/usr/bin/env bash",
+ "guile -C \""
+ <> root
+ </> intdir
+ <> "\" -e main "
+ <> "-s "
+ <> Namespace.toPath namespace
+ <> " \"$@\""
+ ]
+ |> joinWith "\n"
+ |> Text.pack
+ |> Just
+ ),
+ rundeps = prun,
+ hsGraph = Nothing,
+ ..
+ }
+ |> Just
+ |> pure
+ Namespace.Rs ->
+ Meta.detectAll "//" contentLines |> \Meta.Parsed {..} ->
+ Target
+ { langdeps = pdep,
+ -- this packageSet doesn't actually exist because everyone in
+ -- nix just generates nix expressions for rust dependencies with
+ -- Cargo.lock, so I have to make it in order to use rust deps
+ packageSet = "rust.packages",
+ mainModule = Namespace.toModule namespace,
+ wrapper = Nothing,
+ sysdeps = psys <> Set.singleton "rustc",
+ out = pout <|> defaultOut,
+ compiler = Rustc,
+ compilerFlags = case pout of
+ Just o ->
+ map
+ Text.pack
+ [ "$CODEROOT" </> path,
+ "-o",
+ o
+ ]
+ Nothing -> panic "can't build rust libs",
+ builder = "base",
+ outPath = outToPath pout,
+ -- implement detectRustImports
+ srcs = Set.empty,
+ rundeps = prun,
+ hsGraph = Nothing,
+ ..
+ }
+ |> Just
+ |> pure
detectHaskellImports :: Analysis -> [Text] -> IO (Set Meta.Dep, Set FilePath)
-detectHaskellImports hmap contentLines =
- Env.getEnv "CODEROOT" +> \root ->
- contentLines
- /> Text.unpack
- /> Regex.match haskellImports
- |> catMaybes
- |> \imports ->
- foldM ghcPkgFindModule Set.empty imports
- +> \pkgs ->
- filepaths imports
- +> \files ->
- findDeps root files
- +> \deps ->
- (pkgs <> deps, map (stripRoot root) files |> Set.fromList)
- |> pure
+detectHaskellImports _ contentLines = do
+ root <- getCoderoot
+ let initialMods = catMaybes (Regex.match haskellImports </ (Text.unpack </ contentLines))
+ initialLocals <- toLocalFiles root initialMods
+ let initialLocalsSet = Set.fromList initialLocals
+ let localMods = [m | m <- initialMods, (Namespace.fromHaskellModule m |> Namespace.toPath) `elem` initialLocals]
+ let initialExternals = filter (`notElem` localMods) initialMods
+ (srcs, transitiveExtMods) <- bfs root initialLocalsSet Set.empty Set.empty
+ let allExtMods = Set.fromList initialExternals <> transitiveExtMods
+ pkgSets <- Async.mapConcurrently ghcPkgFindModuleCached (Set.toList allExtMods)
+ let pkgs = mconcat pkgSets
+ pure (pkgs, srcs)
where
- filepaths :: [String] -> IO [FilePath]
- filepaths imports =
- imports
- |> map Namespace.fromHaskellModule
- |> map Namespace.toPath
- |> traverse Dir.makeAbsolute
- +> filterM Dir.doesFileExist
- findDeps :: String -> [FilePath] -> IO (Set Meta.Dep)
- findDeps root fps =
- fps
- |> traverse (pure <. Namespace.fromPath root)
- /> catMaybes
- -- this is still an inefficiency, because this recurses before the
- -- hmap is updated by the fold, transitive imports will be
- -- re-visited. you can see this with `TERM=dumb bild`. to fix this i
- -- need shared state instead of a fold, or figure out how to do a
- -- breadth-first search instead of depth-first.
- +> foldM analyze (onlyHaskell hmap)
- /> Map.elems
- /> map langdeps
- /> mconcat
- onlyHaskell :: Analysis -> Analysis
- onlyHaskell = Map.filterWithKey (\ns _ -> ext ns == Namespace.Hs)
+ bfs :: FilePath -> Set FilePath -> Set FilePath -> Set String -> IO (Set FilePath, Set String)
+ bfs root queue visited extMods
+ | Set.null queue = pure (visited, extMods)
+ | otherwise = do
+ let (rel, queue') = Set.deleteFindMin queue
+ fileLines <-
+ withFile (root </> rel) ReadMode <| \h ->
+ IO.hSetEncoding h IO.utf8_bom
+ >> Text.IO.hGetContents h
+ /> Text.lines
+ let mods = catMaybes (Regex.match haskellImports </ (Text.unpack </ fileLines))
+ locals <- toLocalFiles root mods
+ let localsSet = Set.fromList locals
+ let localModsFromPaths = Set.fromList [m | m <- mods, (Namespace.fromHaskellModule m |> Namespace.toPath) `elem` locals]
+ let newExternals = Set.fromList mods Set.\\ localModsFromPaths
+ let newLocals = localsSet Set.\\ visited
+ bfs root (queue' <> newLocals) (Set.insert rel visited) (extMods <> newExternals)
+
+ toLocalFiles :: FilePath -> [String] -> IO [FilePath]
+ toLocalFiles root mods = do
+ let rels = map (Namespace.fromHaskellModule .> Namespace.toPath) mods
+ filterM (\rel -> Dir.doesFileExist (root </> rel)) rels
stripRoot :: FilePath -> FilePath -> FilePath
stripRoot root f = fromMaybe f (List.stripPrefix (root <> "/") f)
@@ -818,19 +877,14 @@ detectLispImports contentLines =
|> Set.fromList
|> pure
--- | Finds local imports. Does not recurse to find transitive imports like
--- 'detectHaskellImports' does. Someday I will refactor these detection
--- functions and have a common, well-performing, complete solution.
-detectPythonImports :: [Text] -> IO (Set FilePath)
-detectPythonImports contentLines =
- contentLines
- /> Text.unpack
- /> Regex.match pythonImport
- |> catMaybes
- /> Namespace.fromPythonModule
- /> Namespace.toPath
- |> filterM Dir.doesPathExist
- /> Set.fromList
+-- | Finds local imports and recursively finds transitive imports and langdeps.
+-- Returns (srcs, transitive langdeps).
+detectPythonImports :: Analysis -> [Text] -> IO (Set FilePath, Set Meta.Dep)
+detectPythonImports _ contentLines = do
+ root <- getCoderoot
+ let initialMods = catMaybes (Regex.match pythonImport </ (Text.unpack </ contentLines))
+ initialLocals <- toLocalFiles root initialMods
+ bfs root (Set.fromList initialLocals) Set.empty Set.empty
where
-- only detects 'import x' because I don't like 'from'
pythonImport :: Regex.RE Char String
@@ -840,18 +894,138 @@ detectPythonImports contentLines =
*> Regex.many (Regex.psym isModuleChar)
<* Regex.many Regex.anySym
+ bfs :: FilePath -> Set FilePath -> Set FilePath -> Set Meta.Dep -> IO (Set FilePath, Set Meta.Dep)
+ bfs root queue visited deps
+ | Set.null queue = pure (visited, deps)
+ | otherwise = do
+ let (rel, queue') = Set.deleteFindMin queue
+ fileLines <-
+ withFile (root </> rel) ReadMode <| \h ->
+ IO.hSetEncoding h IO.utf8_bom
+ >> Text.IO.hGetContents h
+ /> Text.lines
+ let mods = catMaybes (Regex.match pythonImport </ (Text.unpack </ fileLines))
+ locals <- toLocalFiles root mods
+ let localsSet = Set.fromList locals
+ let newLocals = localsSet Set.\\ visited
+ -- Collect langdeps from this file's metadata
+ let Meta.Parsed {pdep = fileDeps} = Meta.detectAll "#" fileLines
+ bfs root (queue' <> newLocals) (Set.insert rel visited) (deps <> fileDeps)
+
+ toLocalFiles :: FilePath -> [String] -> IO [FilePath]
+ toLocalFiles root mods = do
+ let rels = map (Namespace.fromPythonModule .> Namespace.toPath) mods
+ filterM (\rel -> Dir.doesFileExist (root </> rel)) rels
+
test_detectPythonImports :: Test.Tree
test_detectPythonImports =
Test.group
"detectPythonImports"
[ Test.unit "matches import statements" <| do
- set <- detectPythonImports ["import Omni.Log"]
- Set.fromList ["Omni/Log.py"] @=? set,
+ (srcs, _) <- detectPythonImports mempty ["import Omni.Log"]
+ Set.fromList ["Omni/Log.py"] @=? srcs,
Test.unit "matches import as statements" <| do
- set <- detectPythonImports ["import Omni.Log as Log"]
- Set.fromList ["Omni/Log.py"] @=? set
+ (srcs, _) <- detectPythonImports mempty ["import Omni.Log as Log"]
+ Set.fromList ["Omni/Log.py"] @=? srcs
+ ]
+
+test_buildHsModuleGraph :: Test.Tree
+test_buildHsModuleGraph =
+ Test.group
+ "buildHsModuleGraph"
+ [ Test.unit "includes entry point in graph" <| do
+ let ns = Namespace ["Omni", "Bild", "Example"] Namespace.Hs
+ let entryPoint = "Omni/Bild/Example.hs"
+ let deps = Set.fromList ["Alpha.hs", "Omni/Test.hs"]
+
+ result <- buildHsModuleGraph ns entryPoint deps
+ case result of
+ Nothing -> Test.assertFailure "buildHsModuleGraph returned Nothing"
+ Just graph -> do
+ let modules = Map.keys (graphModules graph)
+ Text.pack "Omni.Bild.Example" `elem` modules @=? True
]
+type GhcPkgCacheMem = Map String (Set String)
+
+type GhcPkgCacheDisk = Map String [String]
+
+{-# NOINLINE ghcPkgCache #-}
+ghcPkgCache :: IORef GhcPkgCacheMem
+ghcPkgCache = unsafePerformIO (newIORef Map.empty)
+
+cacheToDisk :: GhcPkgCacheMem -> GhcPkgCacheDisk
+cacheToDisk = Map.map Set.toList
+
+cacheFromDisk :: GhcPkgCacheDisk -> GhcPkgCacheMem
+cacheFromDisk = Map.map Set.fromList
+
+ghcPkgCacheHash :: IO (Maybe String)
+ghcPkgCacheHash = do
+ mdb <- Env.lookupEnv "GHC_PACKAGE_PATH"
+ case mdb of
+ Nothing -> pure Nothing
+ Just db -> do
+ v <-
+ Exception.catch
+ ( Process.readProcess "ghc" ["--numeric-version"] ""
+ /> takeWhile (/= '\n')
+ )
+ (\(_ :: Exception.SomeException) -> pure "")
+ if null v then pure Nothing else pure (Just (hashString (v <> "|" <> db)))
+ where
+ hashString :: String -> String
+ hashString s =
+ List.foldl' (\h c -> h * 131 + fromEnum c) (7 :: Int) s
+ |> abs
+ |> toInteger
+ |> \n -> Numeric.showHex n ""
+
+ghcPkgCachePath :: IO (Maybe FilePath)
+ghcPkgCachePath = do
+ root <- getCoderoot
+ fmap (\h -> root </> vardir </> ("ghc-pkg-cache-" <> h <> ".json")) </ ghcPkgCacheHash
+
+loadGhcPkgCache :: IO ()
+loadGhcPkgCache = do
+ mpath <- ghcPkgCachePath
+ case mpath of
+ Nothing -> pure ()
+ Just path -> do
+ exists <- Dir.doesFileExist path
+ if not exists
+ then pure ()
+ else do
+ eres <- Exception.try (ByteString.Lazy.readFile path) :: IO (Either Exception.IOException ByteString.Lazy.ByteString)
+ case eres of
+ Left _ -> pure ()
+ Right bs ->
+ case Aeson.eitherDecode bs :: Either String GhcPkgCacheDisk of
+ Left _ -> pure ()
+ Right disk -> writeIORef ghcPkgCache (cacheFromDisk disk)
+
+saveGhcPkgCache :: IO ()
+saveGhcPkgCache = do
+ mpath <- ghcPkgCachePath
+ case mpath of
+ Nothing -> pure ()
+ Just path -> do
+ cache <- readIORef ghcPkgCache
+ let tmp = path <> ".tmp"
+ Dir.createDirectoryIfMissing True (takeDirectory path)
+ ByteString.Lazy.writeFile tmp (Aeson.encode (cacheToDisk cache))
+ Dir.renameFile tmp path
+
+ghcPkgFindModuleCached :: String -> IO (Set String)
+ghcPkgFindModuleCached m = do
+ cache <- readIORef ghcPkgCache
+ case Map.lookup m cache of
+ Just pkgs -> pure pkgs
+ Nothing -> do
+ pkgs <- ghcPkgFindModule Set.empty m
+ modifyIORef' ghcPkgCache (Map.insert m pkgs)
+ pure pkgs
+
ghcPkgFindModule :: Set String -> String -> IO (Set String)
ghcPkgFindModule acc m =
Env.getEnv "GHC_PACKAGE_PATH" +> \packageDb ->
@@ -863,6 +1037,81 @@ ghcPkgFindModule acc m =
/> Set.fromList
/> Set.union acc
+-- | Build module graph for Haskell targets, returns Nothing if TH or cycles detected
+buildHsModuleGraph :: Namespace -> FilePath -> Set FilePath -> IO (Maybe HsModuleGraph)
+buildHsModuleGraph namespace entryPoint deps = do
+ root <- getCoderoot
+ -- Analyze all dependencies first
+ depNodes <- foldM (analyzeModule root) Map.empty (Set.toList deps)
+ -- Then analyze the entry point itself
+ allNodes <- analyzeModule root depNodes entryPoint
+ let hasTH = any nodeHasTH (Map.elems allNodes)
+ let hasCycles = detectCycles allNodes
+ if hasTH || hasCycles
+ then pure Nothing
+ else
+ pure
+ <| Just
+ HsModuleGraph
+ { graphEntry = Namespace.toHaskellModule namespace |> Text.pack,
+ graphModules = allNodes
+ }
+ where
+ analyzeModule :: FilePath -> Map ModuleName HsModuleNode -> FilePath -> IO (Map ModuleName HsModuleNode)
+ analyzeModule root acc srcPath = do
+ let modName = pathToModuleName srcPath
+ case Map.lookup modName acc of
+ Just _ -> pure acc
+ Nothing -> do
+ fileLines <-
+ withFile (root </> srcPath) ReadMode <| \h ->
+ IO.hSetEncoding h IO.utf8_bom
+ >> Text.IO.hGetContents h
+ /> Text.lines
+ let importedMods = catMaybes (Regex.match haskellImports </ (Text.unpack </ fileLines))
+ localImportMods <- filterLocalImports root importedMods
+ let hasTH = detectTH fileLines
+ let node =
+ HsModuleNode
+ { nodePath = srcPath,
+ nodeImports = map Text.pack localImportMods,
+ nodeHasTH = hasTH
+ }
+ pure (Map.insert modName node acc)
+
+ pathToModuleName :: FilePath -> ModuleName
+ pathToModuleName fp =
+ fp
+ |> dropExtension
+ |> map (\c -> if c == '/' then '.' else c)
+ |> Text.pack
+
+ filterLocalImports :: FilePath -> [String] -> IO [String]
+ filterLocalImports root mods = do
+ let rels = map (Namespace.fromHaskellModule .> Namespace.toPath) mods
+ filterM (\rel -> Dir.doesFileExist (root </> rel)) rels
+ /> map (\rel -> replaceExtension rel "" |> map (\c -> if c == '/' then '.' else c))
+
+ detectTH :: [Text] -> Bool
+ detectTH =
+ any
+ ( \line ->
+ Text.isInfixOf "TemplateHaskell" line
+ || Text.isInfixOf "$(" line
+ )
+
+ detectCycles :: Map ModuleName HsModuleNode -> Bool
+ detectCycles nodes =
+ let sccs = stronglyConnComp (map nodeToEdge (Map.toList nodes))
+ in any isNonTrivialSCC sccs
+ where
+ nodeToEdge :: (ModuleName, HsModuleNode) -> (HsModuleNode, ModuleName, [ModuleName])
+ nodeToEdge (name, node) = (node, name, nodeImports node)
+
+ isNonTrivialSCC :: SCC HsModuleNode -> Bool
+ isNonTrivialSCC (AcyclicSCC _) = False
+ isNonTrivialSCC (CyclicSCC sccNodes) = length sccNodes > 1
+
isFailure :: Exit.ExitCode -> Bool
isFailure (Exit.ExitFailure _) = True
isFailure Exit.ExitSuccess = False
@@ -873,7 +1122,7 @@ isSuccess _ = False
test :: Bool -> Target -> IO (Exit.ExitCode, ByteString)
test loud Target {..} =
- Env.getEnv "CODEROOT"
+ getCoderoot
+> \root -> case compiler of
Ghc ->
Proc
@@ -881,8 +1130,8 @@ test loud Target {..} =
cmd = root </> outToPath out,
args = ["test"],
ns = namespace,
- onFailure = Log.fail ["test", nschunk namespace] >> Log.br,
- onSuccess = Log.pass ["test", nschunk namespace] >> Log.br
+ onFailure = loud ?: (Log.fail ["test", nschunk namespace] >> Log.br, LogC.updateLineState namespace LogC.Failed),
+ onSuccess = loud ?: (Log.pass ["test", nschunk namespace] >> Log.br, LogC.updateLineState namespace LogC.Success)
}
|> run
CPython ->
@@ -891,41 +1140,42 @@ test loud Target {..} =
cmd = root </> outToPath out,
args = ["test"],
ns = namespace,
- onFailure = Log.fail ["test", nschunk namespace] >> Log.br,
- onSuccess = Log.pass ["test", nschunk namespace] >> Log.br
+ onFailure = loud ?: (Log.fail ["test", nschunk namespace] >> Log.br, LogC.updateLineState namespace LogC.Failed),
+ onSuccess = loud ?: (Log.pass ["test", nschunk namespace] >> Log.br, LogC.updateLineState namespace LogC.Success)
}
|> run
_ ->
- Log.warn ["test", nschunk namespace, "unavailable"]
- >> Log.br
- >> pure (Exit.ExitFailure 1, mempty)
+ pure (Exit.ExitFailure 1, mempty)
build :: Bool -> Bool -> Int -> Int -> Analysis -> IO [Exit.ExitCode]
-build andTest loud jobs cpus analysis =
- Env.getEnv "CODEROOT" +> \root ->
- forM (Map.elems analysis) <| \target@Target {..} ->
- fst </ case compiler of
+build andTest loud jobs cpus analysis = do
+ root <- getCoderoot
+ let targets = Map.elems analysis
+ -- Build runs concurrently with --jobs parallelism
+ -- LineManager is set up by caller (move), so we just update states here
+ results <- mapConcurrentlyBounded jobs (buildTarget root) targets
+ pure (map fst results)
+ where
+ buildTarget :: FilePath -> Target -> IO (Exit.ExitCode, ByteString)
+ buildTarget root target@Target {..} = do
+ LogC.updateLineState namespace LogC.Building
+ result <- case compiler of
CPython -> case out of
Just _ ->
- Log.info ["bild", "nix", "python", nschunk namespace]
- >> nixBuild loud jobs cpus target
+ nixBuild loud jobs cpus target
+> (\r -> (isSuccess (fst r) && andTest) ?: (test loud target, pure r))
Nothing ->
- Log.info ["bild", "nix", "python", nschunk namespace, "cannot build library"]
- >> pure (Exit.ExitSuccess, mempty)
+ pure (Exit.ExitSuccess, mempty)
Gcc ->
- Log.info ["bild", "nix", "gcc", nschunk namespace]
- >> nixBuild loud jobs cpus target
+ nixBuild loud jobs cpus target
Ghc -> case out of
Nothing -> pure (Exit.ExitSuccess, mempty)
Just _ -> do
- Log.info ["bild", "nix", user <> "@" <> host, nschunk namespace]
result <- nixBuild loud jobs cpus target
if andTest && (isSuccess <| fst result)
then test loud target
else pure result
Guile -> do
- Log.info ["bild", "dev", "guile", nschunk namespace]
_ <- proc loud namespace (toNixFlag compiler) compilerFlags
case wrapper of
Nothing -> pure (Exit.ExitSuccess, mempty)
@@ -937,30 +1187,268 @@ build andTest loud jobs cpus analysis =
NixBuild ->
Dir.getPermissions quapath /> Dir.executable +> \isExe ->
isExe
- ?: ( Log.info ["bild", "nix", user <> "@" <> host, nschunk namespace]
- >> proc
- loud
- namespace
- (toNixFlag compiler)
- ( compilerFlags
- ++ [ "--max-jobs",
- Text.pack <| str jobs,
- "--cores",
- Text.pack <| str cpus
- ]
- ),
- Log.warn ["bild", "nix", nschunk namespace, "x bit not set, not building"]
- >> pure (Exit.ExitSuccess, mempty)
+ ?: ( proc
+ loud
+ namespace
+ (toNixFlag compiler)
+ ( compilerFlags
+ ++ [ "--max-jobs",
+ Text.pack <| str jobs,
+ "--cores",
+ Text.pack <| str cpus
+ ]
+ ),
+ pure (Exit.ExitSuccess, mempty)
)
- Copy -> do
- Log.warn ["bild", "copy", "not implemented yet", nschunk namespace]
+ Copy ->
pure (Exit.ExitSuccess, mempty)
Rustc ->
- Log.info ["bild", "dev", "rust", nschunk namespace]
- >> nixBuild loud jobs cpus target
- Sbcl -> do
- Log.info ["bild", "dev", "lisp", nschunk namespace]
+ nixBuild loud jobs cpus target
+ Sbcl ->
proc loud namespace (toNixFlag compiler) compilerFlags
+ LogC.updateLineState namespace (isSuccess (fst result) ?: (LogC.Success, LogC.Failed))
+ pure result
+
+-- | Pipeline state machine for each target
+data TargetState
+ = TSQueued
+ | TSAnalyzing
+ | TSAnalysisFailed
+ | TSWaitingForDeps Target (Set Namespace)
+ | TSReadyToBuild Target
+ | TSBuilding Target
+ | TSComplete Target Exit.ExitCode
+
+-- | Coordinator manages the pipelined analyze→build flow
+data Coordinator = Coordinator
+ { coStates :: TVar (Map Namespace TargetState),
+ coAnalyzeQ :: TQueue Namespace,
+ coBuildQ :: TQueue Namespace,
+ coAllTargets :: Set Namespace,
+ coResults :: TVar [Exit.ExitCode],
+ coRemaining :: TVar Int,
+ coRoot :: FilePath
+ }
+
+initCoordinator :: FilePath -> [Namespace] -> IO Coordinator
+initCoordinator root nss =
+ atomically <| do
+ let allTargets = Set.fromList nss
+ states <- newTVar (Map.fromList [(ns, TSQueued) | ns <- nss])
+ analyzeQ <- newTQueue
+ buildQ <- newTQueue
+ results <- newTVar []
+ remaining <- newTVar (length nss)
+ forM_ nss (writeTQueue analyzeQ)
+ pure
+ Coordinator
+ { coStates = states,
+ coAnalyzeQ = analyzeQ,
+ coBuildQ = buildQ,
+ coAllTargets = allTargets,
+ coResults = results,
+ coRemaining = remaining,
+ coRoot = root
+ }
+
+computeDeps :: Coordinator -> Target -> Set Namespace
+computeDeps Coordinator {..} Target {..} =
+ let toNs path = Namespace.fromPath coRoot (coRoot </> path)
+ result =
+ srcs
+ |> Set.toList
+ |> map toNs
+ |> catMaybes
+ |> Set.fromList
+ |> flip Set.intersection coAllTargets
+ |> Set.delete namespace
+ in result
+
+tsIsComplete :: TargetState -> Bool
+tsIsComplete (TSComplete _ _) = True
+tsIsComplete _ = False
+
+pipelineAnalysisWorker :: Coordinator -> (Namespace -> IO (Maybe Target)) -> IO ()
+pipelineAnalysisWorker coord@Coordinator {..} analyzeFn = loop
+ where
+ loop = do
+ remaining <- readTVarIO coRemaining
+ when (remaining > 0) <| do
+ mNs <- atomically (tryReadTQueue coAnalyzeQ)
+ case mNs of
+ Nothing -> threadDelay 1000 >> loop
+ Just ns -> do
+ atomically <| modifyTVar' coStates (Map.insert ns TSAnalyzing)
+ LogC.updateLineState ns LogC.Analyzing
+ result <- analyzeFn ns
+ case result of
+ Nothing -> do
+ atomically <| do
+ modifyTVar' coStates (Map.insert ns TSAnalysisFailed)
+ modifyTVar' coRemaining (subtract 1)
+ LogC.updateLineState ns LogC.Failed
+ Just target -> do
+ let deps = computeDeps coord target
+ atomically <| do
+ states <- readTVar coStates
+ let pendingDeps = Set.filter (\d -> maybe True (tsIsComplete .> not) (Map.lookup d states)) deps
+ if Set.null pendingDeps
+ then do
+ modifyTVar' coStates (Map.insert ns (TSReadyToBuild target))
+ writeTQueue coBuildQ ns
+ else modifyTVar' coStates (Map.insert ns (TSWaitingForDeps target pendingDeps))
+ loop
+
+pipelineBuildWorker :: Bool -> Bool -> Bool -> Int -> Int -> Coordinator -> IO ()
+pipelineBuildWorker andTest loud andCache jobs cpus coord@Coordinator {..} = loop
+ where
+ loop = do
+ remaining <- readTVarIO coRemaining
+ when (remaining > 0) <| do
+ mNs <- atomically (tryReadTQueue coBuildQ)
+ case mNs of
+ Nothing -> threadDelay 1000 >> loop
+ Just ns -> do
+ mTarget <-
+ atomically <| do
+ states <- readTVar coStates
+ case Map.lookup ns states of
+ Just (TSReadyToBuild t) -> do
+ modifyTVar' coStates (Map.insert ns (TSBuilding t))
+ pure (Just t)
+ _ -> pure Nothing
+ case mTarget of
+ Nothing -> loop
+ Just target -> do
+ LogC.updateLineState ns LogC.Building
+ exitCode <- pipelineBuildOne andTest loud andCache jobs cpus target
+ atomically <| do
+ modifyTVar' coStates (Map.insert ns (TSComplete target exitCode))
+ modifyTVar' coResults (exitCode :)
+ modifyTVar' coRemaining (subtract 1)
+ promoteWaiters coord ns
+ LogC.updateLineState ns (isSuccess exitCode ?: (LogC.Success, LogC.Failed))
+ loop
+
+promoteWaiters :: Coordinator -> Namespace -> STM ()
+promoteWaiters Coordinator {..} completedNs = do
+ states <- readTVar coStates
+ forM_ (Map.toList states) <| \(ns, st) ->
+ case st of
+ TSWaitingForDeps target deps -> do
+ let deps' = Set.delete completedNs deps
+ if Set.null deps'
+ then do
+ modifyTVar' coStates (Map.insert ns (TSReadyToBuild target))
+ writeTQueue coBuildQ ns
+ else modifyTVar' coStates (Map.insert ns (TSWaitingForDeps target deps'))
+ _ -> pure ()
+
+pipelineBuildOne :: Bool -> Bool -> Bool -> Int -> Int -> Target -> IO Exit.ExitCode
+pipelineBuildOne andTest loud andCache jobs cpus target@Target {..} = do
+ root <- getCoderoot
+ result <- case compiler of
+ CPython -> case out of
+ Just _ ->
+ nixBuild loud jobs cpus target
+ +> (\r -> (isSuccess (fst r) && andTest) ?: (test loud target, pure r))
+ Nothing ->
+ pure (Exit.ExitSuccess, mempty)
+ Gcc ->
+ nixBuild loud jobs cpus target
+ Ghc -> case out of
+ Nothing -> pure (Exit.ExitSuccess, mempty)
+ Just _ -> do
+ r <- nixBuild loud jobs cpus target
+ if andTest && (isSuccess <| fst r)
+ then test loud target
+ else pure r
+ Guile -> do
+ _ <- proc loud namespace (toNixFlag compiler) compilerFlags
+ case wrapper of
+ Nothing -> pure (Exit.ExitSuccess, mempty)
+ Just content -> do
+ writeFile (root </> outToPath out) content
+ p <- Dir.getPermissions <| root </> outToPath out
+ Dir.setPermissions (root </> outToPath out) (Dir.setOwnerExecutable True p)
+ pure (Exit.ExitSuccess, mempty)
+ NixBuild ->
+ Dir.getPermissions quapath /> Dir.executable +> \isExe ->
+ isExe
+ ?: ( proc
+ loud
+ namespace
+ (toNixFlag compiler)
+ ( compilerFlags
+ ++ [ "--max-jobs",
+ Text.pack <| str jobs,
+ "--cores",
+ Text.pack <| str cpus
+ ]
+ ),
+ pure (Exit.ExitSuccess, mempty)
+ )
+ Copy ->
+ pure (Exit.ExitSuccess, mempty)
+ Rustc ->
+ nixBuild loud jobs cpus target
+ Sbcl ->
+ proc loud namespace (toNixFlag compiler) compilerFlags
+ let exitCode = fst result
+ when (andCache && isSuccess exitCode) <| do
+ storePath <- Dir.canonicalizePath (nixdir </> outname out)
+ cacheStorePath loud namespace storePath
+ pure exitCode
+
+cacheStorePath :: Bool -> Namespace -> FilePath -> IO ()
+cacheStorePath loud ns storePath = do
+ mKeyPath <- Env.lookupEnv "NIX_CACHE_KEY"
+ case mKeyPath of
+ Nothing -> Log.warn ["cache", "NIX_CACHE_KEY not set, skipping"]
+ Just keyPath -> do
+ let s3Url = "s3://omni-nix-cache?profile=digitalocean&scheme=https&endpoint=nyc3.digitaloceanspaces.com"
+ LogC.updateLine ns "signing..."
+ (signExit, _, signErr) <-
+ Process.readProcessWithExitCode
+ "nix"
+ ["store", "sign", "--key-file", keyPath, storePath]
+ ""
+ case signExit of
+ Exit.ExitSuccess -> do
+ LogC.updateLine ns "pushing to cache..."
+ (pushExit, _, pushErr) <-
+ Process.readProcessWithExitCode
+ "nix"
+ ["copy", "--to", s3Url, storePath]
+ ""
+ case pushExit of
+ Exit.ExitSuccess -> do
+ loud ?| Log.good ["cache", "pushed", Text.pack storePath]
+ Text.IO.putStrLn <| "STORE_PATH=" <> Text.pack storePath
+ Exit.ExitFailure _ -> do
+ Log.fail ["cache", "push failed", Text.pack storePath]
+ loud ?| putStrLn pushErr
+ Exit.ExitFailure _ -> do
+ Log.fail ["cache", "sign failed", Text.pack storePath]
+ loud ?| putStrLn signErr
+
+pipelineBuild :: Bool -> Bool -> Bool -> Int -> Int -> Int -> [Namespace] -> (Namespace -> IO (Maybe Target)) -> IO [Exit.ExitCode]
+pipelineBuild andTest loud andCache analysisWorkers buildWorkers cpus namespaces analyzeFn = do
+ root <- getCoderoot
+ coord <- initCoordinator root namespaces
+ let spawnAnalysis = replicateM analysisWorkers (Async.async (pipelineAnalysisWorker coord analyzeFn))
+ let spawnBuild = replicateM buildWorkers (Async.async (pipelineBuildWorker andTest loud andCache buildWorkers cpus coord))
+ threads <- (<>) </ spawnAnalysis <*> spawnBuild
+ let waitLoop = do
+ remaining <- readTVarIO (coRemaining coord)
+ if remaining == 0
+ then pure ()
+ else do
+ threadDelay 10000
+ waitLoop
+ waitLoop
+ traverse_ Async.cancel threads
+ readTVarIO (coResults coord)
data Proc = Proc
{ loud :: Bool,
@@ -983,7 +1471,8 @@ run Proc {..} = do
Conduit.proc cmd args
|> (\proc_ -> proc_ {Process.create_group = True})
|> Conduit.streamingProcess
- +> \(Conduit.UseProvidedHandle, stdout_, stderr_, hdl) ->
+ +> \(stdin_, stdout_, stderr_, hdl) -> do
+ IO.hClose stdin_ -- Close stdin immediately since we don't use it
(,,)
</ Async.Concurrently (Conduit.waitForStreamingProcess hdl)
<*> Async.Concurrently (loud ?: (puts stdout_, logs ns stdout_))
@@ -1014,7 +1503,7 @@ proc loud namespace cmd args =
cmd = cmd,
args = map Text.unpack args,
onFailure = Log.fail ["bild", nschunk namespace] >> Log.br,
- onSuccess = Log.good ["bild", nschunk namespace] >> Log.br
+ onSuccess = pure ()
}
|> run
@@ -1041,10 +1530,11 @@ logs ns src =
src
.| Conduit.iterM
( ByteString.filter (/= BSI.c2w '\n')
- .> (\t -> Log.fmt ["info", "bild", nschunk ns, decodeUtf8 t])
+ .> decodeUtf8
.> Text.take (columns - 1)
- .> (<> "…\r")
- .> putStr
+ .> (<> "...")
+ .> LogC.updateLine ns
+ .> liftIO
)
.| Conduit.foldC
|> Conduit.runConduitRes
@@ -1082,7 +1572,7 @@ lispRequires =
nixBuild :: Bool -> Int -> Int -> Target -> IO (Exit.ExitCode, ByteString)
nixBuild loud maxJobs cores target@(Target {..}) =
- Env.getEnv "CODEROOT" +> \root ->
+ getCoderoot +> \root ->
instantiate root |> run +> \case
(_, "") -> panic "instantiate did not produce a drv"
(Exit.ExitSuccess, drv) ->
@@ -1092,7 +1582,9 @@ nixBuild loud maxJobs cores target@(Target {..}) =
|> str
|> realise
|> run
- >> run symlink
+ +> \case
+ (Exit.ExitSuccess, _) -> run symlink
+ failure -> pure failure
x -> pure x
where
instantiate root =
@@ -1129,7 +1621,7 @@ nixBuild loud maxJobs cores target@(Target {..}) =
str cores
],
onFailure = Log.fail ["bild", "realise", nschunk namespace] >> Log.br,
- onSuccess = Log.good ["bild", nschunk namespace] >> Log.br
+ onSuccess = pure ()
}
symlink =
Proc
@@ -1146,3 +1638,41 @@ nixBuild loud maxJobs cores target@(Target {..}) =
onFailure = Log.fail ["bild", "symlink", nschunk namespace] >> Log.br,
onSuccess = pure ()
}
+
+getCoderoot :: IO FilePath
+getCoderoot = do
+ mEnvRoot <- Env.lookupEnv "CODEROOT"
+ cwd <- Dir.getCurrentDirectory
+ case mEnvRoot of
+ Just envRoot -> do
+ let isPrefix = envRoot `List.isPrefixOf` cwd
+ let validPrefix =
+ isPrefix
+ && ( length envRoot
+ == length cwd
+ || (length cwd > length envRoot && (List.!!) cwd (length envRoot) == '/')
+ )
+ if validPrefix
+ then pure envRoot
+ else do
+ mRealRoot <- findRoot cwd
+ case mRealRoot of
+ Just realRoot -> pure realRoot
+ Nothing -> pure envRoot
+ Nothing -> do
+ mRealRoot <- findRoot cwd
+ case mRealRoot of
+ Just realRoot -> pure realRoot
+ Nothing -> panic "CODEROOT not set and could not find root"
+
+findRoot :: FilePath -> IO (Maybe FilePath)
+findRoot dir = do
+ let marker = dir </> "Omni"
+ exists <- Dir.doesDirectoryExist marker
+ if exists
+ then pure (Just dir)
+ else do
+ let parent = takeDirectory dir
+ if parent == dir
+ then pure Nothing
+ else findRoot parent
diff --git a/Omni/Bild.nix b/Omni/Bild.nix
index 741ea0e..ca70ae8 100644
--- a/Omni/Bild.nix
+++ b/Omni/Bild.nix
@@ -13,7 +13,6 @@
ccacheStdenv
haskell
sbcl
- python312
nixos
mkShell
dockerTools
@@ -22,7 +21,7 @@
stdenv = stable.ccacheStdenv;
};
- unstable = nixpkgs.nixos-unstable-small;
+ unstable = nixpkgs.nixos-unstable;
# get the .src attributes of all drvs in each pkgset in the `sources` list,
# and concat them with `:` into a Unix-style search path.
@@ -69,6 +68,7 @@
ghcPackageSetBild = ghcWith (hpkgs:
with hpkgs; [
aeson
+ ansi-terminal
async
base
bytestring
@@ -78,6 +78,7 @@
directory
docopt
filepath
+ hostname
process
protolude
rainbow
@@ -87,7 +88,6 @@
tasty-hunit
tasty-quickcheck
text
- hostname
wai # can remove when removed from Omni.Log
]);
};
@@ -97,9 +97,9 @@
python = {
packages =
self.lib.attrsets.getAttrs (import ./Bild/Deps/Python.nix)
- stable.python312.pkgs;
- pythonWith = stable.python312.withPackages;
- buildPythonApplication = stable.python312.pkgs.buildPythonApplication;
+ unstable.python312.pkgs;
+ pythonWith = unstable.python312.withPackages;
+ buildPythonApplication = unstable.python312.pkgs.buildPythonApplication;
};
# c packages are just stable, filtered to just the list of deps i want
@@ -110,19 +110,25 @@
pkgs = with stable.pkgs; {
inherit
alejandra
+ awscli2
bat
bc
cmark
+ coreutils
universal-ctags
datasette
deadnix
+ doctl
fd
figlet
+ findutils
+ ffmpeg
fzf
git
git-branchless
gitlint
gitstats
+ gnutar
groff
guile
hlint
@@ -134,18 +140,15 @@
pkg-config
ripgrep
rustc
+ sqlite
+ stripe-cli
tree
wemux
;
- aider-chat = unstable.aider-chat;
llama-cpp = unstable.llama-cpp;
- # can't put this in the dev namespace because it pulls in openai with
- # python311, which conflicts with any other usage of openai with
- # python312. so i need to make a target that exposese/wraps llm like i did
- # with llamacpp
- #llm = python311.withPackages
- # (p: [ p.llm p.llm-ollama p.llm-sentence-transformers ]);
+ llm = unstable.python312.withPackages (p: [p.llm]);
ollama = unstable.ollama;
+ trafilatura = unstable.python312.withPackages (p: [p.trafilatura]);
ruff = unstable.ruff;
shellcheck = unstable.shellcheck;
};
@@ -162,6 +165,8 @@
../Omni/Bild/Meta.hs
../Omni/Cli.hs
../Omni/Log.hs
+ ../Omni/Log/Concurrent.hs
+ ../Omni/Log/Terminal.hs
../Omni/Namespace.hs
../Omni/Test.hs
];
@@ -224,6 +229,7 @@
+ self.lib.strings.removePrefix (toString src) (toString target);
buildPhase = ''
export CODEROOT=$(pwd)
+ export NO_COLOR=1
mkdir $out
${self.bild}/bin/bild --plan "$TARGET" 1> $out/analysis.json \
2> >(tee -a $out/stderr >&2)
@@ -249,11 +255,11 @@
name = "omnidev";
# this should just be dev tools
buildInputs = with self.pkgs; [
- aider-chat
bat
bc
self.bild
datasette
+ doctl
universal-ctags
fd
figlet
@@ -262,9 +268,13 @@
git-branchless
gitlint
jq
+ llm
lolcat
ormolu
ripgrep
+ ruff
+ sqlite
+ stripe-cli
tree
wemux
];
diff --git a/Omni/Bild/Audit.py b/Omni/Bild/Audit.py
new file mode 100755
index 0000000..4df6c0b
--- /dev/null
+++ b/Omni/Bild/Audit.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Audit codebase builds.
+
+Iterates through every namespace in the project and runs 'bild'.
+For every build failure encountered, it automatically creates a new task.
+"""
+
+# : out bild-audit
+
+import argparse
+import re
+import shutil
+import subprocess
+import sys
+from pathlib import Path
+
+# Extensions supported by bild (from Omni/Bild.hs and Omni/Namespace.hs)
+EXTENSIONS = {".c", ".hs", ".lisp", ".nix", ".py", ".scm", ".rs", ".toml"}
+MAX_TITLE_LENGTH = 50
+
+
+def strip_ansi(text: str) -> str:
+ """Strip ANSI escape codes from text."""
+ ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+ return ansi_escape.sub("", text)
+
+
+def is_ignored(path: Path) -> bool:
+ """Check if a file is ignored by git."""
+ res = subprocess.run(
+ ["git", "check-ignore", str(path)],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ check=False,
+ )
+ return res.returncode == 0
+
+
+def get_buildable_files(root_dir: str = ".") -> list[str]:
+ """Find all files that bild can build."""
+ targets: list[str] = []
+
+ root = Path(root_dir)
+ if not root.exists():
+ return []
+
+ for path in root.rglob("*"):
+ # Skip directories
+ if path.is_dir():
+ continue
+
+ # Skip hidden files/dirs and '_' dirs
+ parts = path.parts
+ if any(p.startswith(".") or p == "_" for p in parts):
+ continue
+
+ if path.suffix in EXTENSIONS:
+ # Clean up path: keep it relative to cwd if possible
+ try:
+ # We want the path as a string, relative to current directory
+ # if possible
+ p_str = (
+ str(path.relative_to(Path.cwd()))
+ if path.is_absolute()
+ else str(path)
+ )
+ except ValueError:
+ p_str = str(path)
+
+ if not is_ignored(Path(p_str)):
+ targets.append(p_str)
+ return targets
+
+
+def run_bild(target: str) -> subprocess.CompletedProcess[str]:
+ """Run bild on the target."""
+ # --time 0 disables timeout
+ # --loud enables output (which we capture)
+ cmd = ["bild", "--time", "0", "--loud", target]
+ return subprocess.run(cmd, capture_output=True, text=True, check=False)
+
+
+def create_task(
+ target: str,
+ result: subprocess.CompletedProcess[str],
+ parent_id: str | None = None,
+) -> None:
+ """Create a task for a build failure."""
+ # Construct a descriptive title
+ # Try to get the last meaningful line of error output
+ lines = (result.stdout + result.stderr).strip().split("\n")
+ last_line = lines[-1] if lines else "Unknown error"
+ last_line = strip_ansi(last_line).strip()
+
+ if len(last_line) > MAX_TITLE_LENGTH:
+ last_line = last_line[: MAX_TITLE_LENGTH - 3] + "..."
+
+ title = f"Build failed: {target} - {last_line}"
+
+ cmd = ["task", "create", title, "--priority", "2", "--json"]
+
+ if parent_id:
+ cmd.append(f"--discovered-from={parent_id}")
+
+ # Try to infer namespace
+ # e.g. Omni/Bild.hs -> Omni/Bild
+ ns = Path(target).parent
+ if str(ns) != ".":
+ cmd.append(f"--namespace={ns}")
+
+ print(f"Creating task for {target}...") # noqa: T201
+ proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
+
+ if proc.returncode != 0:
+ print(f"Error creating task: {proc.stderr}", file=sys.stderr) # noqa: T201
+ else:
+ # task create --json returns the created task json
+ print(f"Task created: {proc.stdout.strip()}") # noqa: T201
+
+
+def main() -> None:
+ """Run the build audit."""
+ parser = argparse.ArgumentParser(description="Audit codebase builds.")
+ parser.add_argument(
+ "--parent",
+ help="Parent task ID to link discovered tasks to",
+ )
+ parser.add_argument(
+ "paths",
+ nargs="*",
+ default=["."],
+ help="Paths to search for targets",
+ )
+ args = parser.parse_args()
+
+ # Check if bild is available
+ if not shutil.which("bild"):
+ print( # noqa: T201
+ "Warning: 'bild' command not found. Ensure it is in PATH.",
+ file=sys.stderr,
+ )
+
+ print(f"Scanning for targets in {args.paths}...") # noqa: T201
+ targets: list[str] = []
+ for path_str in args.paths:
+ path = Path(path_str)
+ if path.is_file():
+ targets.append(str(path))
+ else:
+ targets.extend(get_buildable_files(path_str))
+
+ # Remove duplicates
+ targets = sorted(set(targets))
+ print(f"Found {len(targets)} targets.") # noqa: T201
+
+ failures = 0
+ for target in targets:
+ res = run_bild(target)
+
+ if res.returncode == 0:
+ print("OK") # noqa: T201
+ else:
+ print("FAIL") # noqa: T201
+ failures += 1
+ create_task(target, res, args.parent)
+
+ print(f"\nAudit complete. {failures} failures found.") # noqa: T201
+ if failures > 0:
+ sys.exit(1)
+ else:
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/Omni/Bild/Builder.nix b/Omni/Bild/Builder.nix
index 09e478b..9356d97 100644
--- a/Omni/Bild/Builder.nix
+++ b/Omni/Bild/Builder.nix
@@ -32,20 +32,51 @@ with bild; let
isEmpty = x: x == null || x == [];
skip = ["_" ".direnv"];
+
+ # Normalize paths by removing leading "./"
+ normalize = p: lib.strings.removePrefix "./" p;
+
+ # Given a list of path parts, produce all cumulative prefixes:
+ # ["a","b","c"] -> ["a","a/b","a/b/c"]
+ dirPrefixes = parts:
+ if parts == []
+ then []
+ else let
+ hd = lib.lists.head parts;
+ tl = lib.lists.tail parts;
+ rest = dirPrefixes tl;
+ in
+ [hd] ++ (lib.lists.map (r: "${hd}/${r}") rest);
+
+ # Normalize all source file paths (relative to root)
+ allSourcesRel = lib.lists.map normalize allSources;
+
+ # Allowed directories are the ancestors of all source files, plus the repo root ""
+ allowedDirs = lib.lists.unique (
+ [""]
+ ++ lib.lists.concatMap
+ (p: let
+ parts = lib.strings.splitString "/" p;
+ in
+ dirPrefixes (lib.lists.init parts))
+ allSourcesRel
+ );
+
filter = file: type:
if lib.lists.elem (builtins.baseNameOf file) skip
then false
- # TODO: this means any new directory will cause a rebuild. this bad. i
- # should recurse into the directory and match against the srcs. for now I
- # just use preBuild to delete empty dirs
else if type == "directory"
- then true
+ then let
+ rel = lib.strings.removePrefix "${root}/" file;
+ rel' = normalize rel;
+ in
+ lib.lists.elem rel' allowedDirs
else if type == "regular"
- then
- lib.trivial.pipe file [
- (f: lib.strings.removePrefix "${root}/" f)
- (f: lib.lists.elem f allSources)
- ]
+ then let
+ rel = lib.strings.removePrefix "${root}/" file;
+ rel' = normalize rel;
+ in
+ lib.lists.elem rel' allSourcesRel
else false;
# remove empty directories, leftover from the src filter
@@ -84,21 +115,145 @@ with bild; let
buildPhase = compileLine;
};
- haskell = stdenv.mkDerivation rec {
- inherit name src CODEROOT preBuild;
- nativeBuildInputs = [makeWrapper];
- buildInputs =
- sysdeps_
- ++ [
- (haskell.ghcWith (p: (lib.attrsets.attrVals target.langdeps p)))
- ];
- buildPhase = compileLine;
- installPhase = ''
- install -D ${name} $out/bin/${name}
- wrapProgram $out/bin/${name} \
- --prefix PATH : ${lib.makeBinPath rundeps_}
- '';
- };
+ haskell =
+ if (target.hsGraph or null) == null
+ then
+ # Monolithic build (fallback for TH/cycles)
+ stdenv.mkDerivation rec {
+ inherit name src CODEROOT preBuild;
+ nativeBuildInputs = [makeWrapper];
+ buildInputs =
+ sysdeps_
+ ++ [
+ (haskell.ghcWith (p: (lib.attrsets.attrVals target.langdeps p)))
+ ];
+ buildPhase = compileLine;
+ installPhase = ''
+ install -D ${name} $out/bin/${name}
+ wrapProgram $out/bin/${name} \
+ --prefix PATH : ${lib.makeBinPath rundeps_}
+ '';
+ }
+ else
+ # Per-module incremental build
+ let
+ graph = target.hsGraph;
+ ghcPkg = haskell.ghcWith (p: (lib.attrsets.attrVals target.langdeps p));
+
+ # Helper to sanitize module names for Nix attr names
+ sanitize = builtins.replaceStrings ["."] ["_"];
+
+ # Create source filter for a single module
+ mkModuleSrc = modulePath: let
+ moduleFiles = [modulePath];
+ moduleAllSources = moduleFiles;
+ moduleAllSourcesRel = lib.lists.map normalize moduleAllSources;
+ moduleAllowedDirs = lib.lists.unique (
+ [""]
+ ++ lib.lists.concatMap
+ (p: let
+ parts = lib.strings.splitString "/" p;
+ in
+ dirPrefixes (lib.lists.init parts))
+ moduleAllSourcesRel
+ );
+ moduleFilter = file: type:
+ if lib.lists.elem (builtins.baseNameOf file) skip
+ then false
+ else if type == "directory"
+ then let
+ rel = lib.strings.removePrefix "${root}/" file;
+ rel' = normalize rel;
+ in
+ lib.lists.elem rel' moduleAllowedDirs
+ else if type == "regular"
+ then let
+ rel = lib.strings.removePrefix "${root}/" file;
+ rel' = normalize rel;
+ in
+ lib.lists.elem rel' moduleAllSourcesRel
+ else false;
+ in
+ lib.sources.cleanSourceWith {
+ filter = moduleFilter;
+ src = lib.sources.cleanSource root;
+ };
+
+ # Build one module derivation
+ mkModuleDrv = modName: node: depDrvs:
+ stdenv.mkDerivation {
+ name = "hs-mod-${sanitize modName}";
+ src = mkModuleSrc node.nodePath;
+ inherit CODEROOT;
+ nativeBuildInputs = [];
+ buildInputs = sysdeps_ ++ depDrvs;
+ builder = "${stdenv.shell}";
+ args = [
+ "-c"
+ (let
+ copyDeps =
+ lib.strings.concatMapStringsSep "\n" (d: ''
+ ${pkgs.coreutils}/bin/cp -rfL ${d}/hidir/. . 2>/dev/null || true
+ ${pkgs.coreutils}/bin/cp -rfL ${d}/odir/. . 2>/dev/null || true
+ ${pkgs.coreutils}/bin/chmod -R +w . 2>/dev/null || true
+ '')
+ depDrvs;
+ in ''
+ set -eu
+ ${pkgs.coreutils}/bin/cp -rL $src/. .
+ ${pkgs.coreutils}/bin/chmod -R +w .
+ ${copyDeps}
+ ${ghcPkg}/bin/ghc -c \
+ -Wall -Werror -haddock -Winvalid-haddock \
+ -i. \
+ ${node.nodePath}
+ ${pkgs.coreutils}/bin/mkdir -p $out/hidir $out/odir
+ ${pkgs.findutils}/bin/find . -name '*.hi' -exec ${pkgs.coreutils}/bin/cp --parents {} $out/hidir/ \;
+ ${pkgs.findutils}/bin/find . -name '*.o' -exec ${pkgs.coreutils}/bin/cp --parents {} $out/odir/ \;
+ '')
+ ];
+ };
+
+ # Recursive attrset of all module derivations
+ # mapAttrs' creates {sanitized-name = drv}, while nodeImports use original names
+ modules = lib.fix (self:
+ lib.mapAttrs'
+ (modName: node:
+ lib.nameValuePair (sanitize modName) (
+ mkModuleDrv modName node (map (dep: builtins.getAttr (sanitize dep) self) node.nodeImports)
+ ))
+ graph.graphModules);
+ in
+ # Final link derivation
+ stdenv.mkDerivation rec {
+ inherit name CODEROOT src;
+ nativeBuildInputs = [makeWrapper];
+ dontConfigure = true;
+ dontStrip = true;
+ dontPatchShebangs = true;
+ buildPhase = let
+ pkgFlags = lib.strings.concatMapStringsSep " " (p: "-package ${p}") target.langdeps;
+ copyHiFiles = lib.strings.concatMapStringsSep "\n" (drv: "cp -rL ${drv}/hidir/. . 2>/dev/null || true") (lib.attrsets.attrValues modules);
+ in ''
+ set -eu
+ ${copyHiFiles}
+ chmod -R +w . || true
+ ${ghcPkg}/bin/ghc --make \
+ ${target.quapath} \
+ -i. \
+ ${pkgFlags} \
+ -threaded \
+ -o ${name} \
+ ${lib.optionalString (target.mainModule != "Main") "-main-is ${target.mainModule}"}
+ '';
+ installPhase = ''
+ install -D ${name} $out/bin/${name}
+ ${lib.optionalString (rundeps_ != []) ''
+ wrapProgram $out/bin/${name} \
+ --prefix PATH : ${lib.makeBinPath rundeps_}
+ ''}
+ '';
+ };
c = stdenv.mkDerivation rec {
inherit name src CODEROOT preBuild;
@@ -125,6 +280,7 @@ with bild; let
python = python.buildPythonApplication rec {
inherit name src CODEROOT;
+ format = "setuptools";
nativeBuildInputs = [makeWrapper];
propagatedBuildInputs = langdeps_ ++ sysdeps_ ++ rundeps_;
buildInputs = sysdeps_;
@@ -132,7 +288,7 @@ with bild; let
checkPhase = ''
. ${commonBash}
cp ${../../pyproject.toml} ./pyproject.toml
- check ruff format --exclude 'setup.py' --check .
+ # check ruff format --exclude 'setup.py' --check .
# ignore EXE here to support run.sh shebangs
check ruff check \
--ignore EXE \
@@ -142,7 +298,7 @@ with bild; let
touch ./py.typed
check python -m mypy \
--explicit-package-bases \
- --no-error-summary \
+ --no-color-output \
--exclude 'setup\.py$' \
.
'';
@@ -162,9 +318,9 @@ with bild; let
name="${name}",
entry_points={"console_scripts":["${name} = ${mainModule}:main"]},
version="0.0.0",
- url="git://simatime.com/omni.git",
+ url="https://git.bensima.com/omni.git",
author="dev",
- author_email="dev@simatime.com",
+ author_email="dev@bensima.com",
description="nil",
packages=find_packages(),
install_requires=[],
diff --git a/Omni/Bild/Deps.nix b/Omni/Bild/Deps.nix
index b410f3b..0822fb1 100644
--- a/Omni/Bild/Deps.nix
+++ b/Omni/Bild/Deps.nix
@@ -1,9 +1,13 @@
-_self: super: {
+_self: super: let
+ dontCheck = drv: drv.overrideAttrs (_: {doCheck = false;});
+in {
cgit = super.overrideSrc super.cgit super.sources.cgit;
# Needs upgrading for guile 3
# inspekt3d = super.callPackage ./Deps/inspekt3d.nix {};
+ gupnp = dontCheck super.gupnp;
+
guix = super.pkgs.stdenv.mkDerivation rec {
pname = "guix";
name = "${pname}-${version}";
@@ -28,5 +32,13 @@ _self: super: {
nostr-rs-relay = super.callPackage ./Deps/nostr-rs-relay.nix {};
- radicale = super.radicale.overrideAttrs (_old: {doCheck = false;});
+ radicale = dontCheck super.radicale;
+
+ sweph-data = super.callPackage ./Deps/sweph-data.nix {};
+
+ swtpm = dontCheck super.swtpm;
+
+ thrift = dontCheck super.thrift;
+
+ valkey = dontCheck super.valkey;
}
diff --git a/Omni/Bild/Deps/Haskell.nix b/Omni/Bild/Deps/Haskell.nix
index 5d6abbb..138a80e 100644
--- a/Omni/Bild/Deps/Haskell.nix
+++ b/Omni/Bild/Deps/Haskell.nix
@@ -22,6 +22,8 @@
"fast-logger"
"filepath"
"github"
+ "HaskellNet"
+ "HaskellNet-SSL"
"haskeline"
"hostname"
"http-types"
@@ -50,10 +52,14 @@
"servant-lucid"
"servant-server"
"split"
+ "sqids"
+ "sqlite-simple"
"stm"
+ "tagsoup"
"tasty"
"tasty-hunit"
"tasty-quickcheck"
+ "temporary"
"text"
"time"
"transformers"
diff --git a/Omni/Bild/Deps/Python.nix b/Omni/Bild/Deps/Python.nix
index 3a0562d..d21e129 100644
--- a/Omni/Bild/Deps/Python.nix
+++ b/Omni/Bild/Deps/Python.nix
@@ -1,6 +1,11 @@
[
+ "boto3"
+ "botocore"
"cryptography"
+ "feedgen"
"flask"
+ "httpx"
+ "itsdangerous"
"llm"
"llm-ollama"
"ludic"
@@ -8,10 +13,22 @@
"nltk"
"ollama"
"openai"
+ "psutil"
+ "pydantic"
+ "pydantic-ai"
+ "pydantic-ai-slim"
+ "pydantic-graph"
+ "pydub"
+ "pytest"
+ "pytest-asyncio"
+ "pytest-mock"
"requests"
+ "setuptools"
"slixmpp"
"sqids"
"starlette"
+ "stripe"
+ "trafilatura"
"types-requests"
"uvicorn"
]
diff --git a/Omni/Bild/Deps/kerykeion.nix b/Omni/Bild/Deps/kerykeion.nix
new file mode 100644
index 0000000..d887231
--- /dev/null
+++ b/Omni/Bild/Deps/kerykeion.nix
@@ -0,0 +1,72 @@
+{
+ buildPythonPackage,
+ lib,
+ poetry-core,
+ pytestCheckHook,
+ pytz,
+ pyswisseph,
+ pydantic,
+ requests,
+ requests-cache,
+ scour,
+ simple-ascii-tables,
+ typing-extensions,
+ sources,
+ setuptools,
+}:
+buildPythonPackage rec {
+ pname = "kerykeion";
+ version = sources.kerykeion.version;
+ pyproject = true;
+
+ src = sources.kerykeion;
+
+ nativeBuildInputs = [poetry-core];
+
+ propagatedBuildInputs = [
+ pyswisseph
+ pydantic
+ scour
+ requests-cache
+ requests
+ simple-ascii-tables
+ pytz
+ typing-extensions
+ setuptools
+ ];
+
+ preBuild = ''
+ cat <<EOF >> pyproject.toml
+ [project]
+ name = "kerykeion"
+ version = "${sources.kerykeion.version}"
+
+ [tool.setuptools.packages.find]
+ where = ["."]
+ include = ["kerykeion*", "tests"]
+ namespaces = false
+
+ [build-system]
+ build-backend = "setuptools.build_meta"
+ requires = ["setuptools"]
+ EOF
+ '';
+
+ nativeCheckInputs = [pytestCheckHook];
+
+ pythonImportsCheck = ["kerykeion"];
+
+ # almost all tests perform network requests to api.geonames.org
+ enabledTests = [
+ "test_ephemeris_data"
+ "test_settings"
+ ];
+
+ meta = with lib; {
+ homepage = "https://www.kerykeion.net/";
+ description = "A python library for astrology";
+ changelog = "https://github.com/g-battaglia/kerykeion/releases/tag/v${version}";
+ license = licenses.agpl3Only;
+ maintainers = with maintainers; [bsima];
+ };
+}
diff --git a/Omni/Bild/Deps/logfire-api.nix b/Omni/Bild/Deps/logfire-api.nix
new file mode 100644
index 0000000..af6eedf
--- /dev/null
+++ b/Omni/Bild/Deps/logfire-api.nix
@@ -0,0 +1,24 @@
+{
+ lib,
+ buildPythonPackage,
+ sources,
+ hatchling,
+ pythonOlder,
+}:
+buildPythonPackage rec {
+ pname = "logfire-api";
+ version = sources.logfire.rev;
+ pyproject = true;
+ disabled = pythonOlder "3.8";
+ src = sources.logfire;
+ sourceRoot = "logfire-src/logfire-api";
+ build-system = [hatchling];
+ pythonImportsCheck = ["logfire_api"];
+ meta = {
+ description = "Shim for the Logfire SDK which does nothing unless Logfire is installed";
+ homepage = "https://pypi.org/project/logfire-api/";
+ changelog = "https://github.com/pydantic/logfire/releases/tag/v${version}";
+ license = lib.licenses.mit;
+ maintainers = with lib.maintainers; [bsima];
+ };
+}
diff --git a/Omni/Bild/Deps/openai-python.nix b/Omni/Bild/Deps/openai-python.nix
deleted file mode 100644
index 79db11c..0000000
--- a/Omni/Bild/Deps/openai-python.nix
+++ /dev/null
@@ -1,99 +0,0 @@
-{
- lib,
- buildPythonPackage,
- pythonOlder,
- # build-system
- hatchling,
- hatch-fancy-pypi-readme,
- # dependencies
- anyio,
- distro,
- httpx,
- jiter,
- pydantic,
- sniffio,
- tqdm,
- typing-extensions,
- numpy,
- pandas,
- pandas-stubs,
- # check deps
- pytestCheckHook,
- dirty-equals,
- inline-snapshot,
- nest-asyncio,
- pytest-asyncio,
- pytest-mock,
- respx,
- sources,
-}:
-buildPythonPackage rec {
- pname = "openai";
- version = sources.openai-python.version;
- pyproject = true;
-
- disabled = pythonOlder "3.8";
-
- src = sources.openai-python;
-
- build-system = [
- hatchling
- hatch-fancy-pypi-readme
- ];
-
- dependencies = [
- anyio
- distro
- httpx
- jiter
- pydantic
- sniffio
- tqdm
- typing-extensions
- ];
-
- optional-dependencies = {
- datalib = [
- numpy
- pandas
- pandas-stubs
- ];
- };
-
- pythonImportsCheck = ["openai"];
-
- nativeCheckInputs = [
- pytestCheckHook
- dirty-equals
- inline-snapshot
- nest-asyncio
- pytest-asyncio
- pytest-mock
- respx
- ];
-
- pytestFlagsArray = [
- "-W"
- "ignore::DeprecationWarning"
- ];
-
- disabledTests = [
- # Tests make network requests
- "test_copy_build_request"
- "test_basic_attribute_access_works"
- ];
-
- disabledTestPaths = [
- # Test makes network requests
- "tests/api_resources"
- ];
-
- meta = with lib; {
- description = "Python client library for the OpenAI API";
- homepage = "https://github.com/openai/openai-python";
- changelog = "https://github.com/openai/openai-python/releases/tag/v${version}";
- license = licenses.mit;
- maintainers = with maintainers; [malo];
- mainProgram = "openai";
- };
-}
diff --git a/Omni/Bild/Deps/pydantic-ai-slim.nix b/Omni/Bild/Deps/pydantic-ai-slim.nix
new file mode 100644
index 0000000..067508b
--- /dev/null
+++ b/Omni/Bild/Deps/pydantic-ai-slim.nix
@@ -0,0 +1,90 @@
+{
+ lib,
+ buildPythonPackage,
+ hatchling,
+ pydantic,
+ logfire-api,
+ httpx,
+ eval-type-backport,
+ griffe,
+ pydantic-graph,
+ pythonOlder,
+ sources,
+ writeTextFile,
+}: let
+ version = sources.pydantic-ai.version;
+ pyproject_toml = writeTextFile {
+ name = "pyproject.toml";
+ text = ''
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "pydantic-ai-slim"
+ version = "${version}"
+ description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
+ authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
+ license = "MIT"
+ readme = "README.md"
+ requires-python = ">=3.9"
+ dependencies = [
+ "eval-type-backport>=0.2.0",
+ "griffe>=1.3.2",
+ "httpx>=0.27",
+ "pydantic>=2.10",
+ "pydantic-graph==0.1.9",
+ "exceptiongroup; python_version < '3.11'",
+ "opentelemetry-api>=1.28.0",
+ "typing-inspection>=0.4.0",
+ ]
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [project.scripts]
+ pai = "pydantic_ai._cli:app"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["pydantic_ai"]
+ '';
+ };
+in
+ buildPythonPackage rec {
+ pname = "pydantic-ai-slim";
+ inherit version;
+ pyproject = true;
+ disabled = pythonOlder "3.8";
+ src = sources.pydantic-ai;
+ build-system = [hatchling];
+ sourceRoot = "pydantic-ai-src/pydantic_ai_slim";
+ dependencies = [
+ pydantic
+ logfire-api
+ httpx
+ eval-type-backport
+ griffe
+ pydantic-graph
+ ];
+ nativeCheckInputs = [
+ pydantic
+ logfire-api
+ httpx
+ eval-type-backport
+ griffe
+ pydantic-graph
+ ];
+ preBuild = ''
+ cp ${pyproject_toml} ./pyproject.toml
+ '';
+ pythonImportsCheck = [
+ "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]"
+ ];
+ meta = {
+ description = "Graph and finite state machine library";
+ homepage = "https://github.com/pydantic/pydantic-ai";
+ changelog = "https://github.com/pydantic/pydantic-ai/releases/tag/v${version}";
+ license = lib.licenses.mit;
+ maintainers = with lib.maintainers; [bsima];
+ };
+ }
diff --git a/Omni/Bild/Deps/pydantic-ai.nix b/Omni/Bild/Deps/pydantic-ai.nix
new file mode 100644
index 0000000..399649d
--- /dev/null
+++ b/Omni/Bild/Deps/pydantic-ai.nix
@@ -0,0 +1,75 @@
+{
+ lib,
+ buildPythonPackage,
+ hatchling,
+ pydantic-ai-slim,
+ pythonOlder,
+ pytest-vcr,
+ dirty-equals,
+ sources,
+ writeTextFile,
+}: let
+ version = sources.pydantic-ai.version;
+ pyproject_toml = writeTextFile {
+ name = "pyproject.toml";
+ text = ''
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "pydantic-ai"
+ version = "${version}"
+ description = "Agent Framework / shim to use Pydantic with LLMs"
+ authors = [
+ { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
+ { name = "Marcelo Trylesinski", email = "marcelotryle@gmail.com" },
+ { name = "David Montague", email = "david@pydantic.dev" },
+ { name = "Alex Hall", email = "alex@pydantic.dev" },
+ ]
+ license = "MIT"
+ readme = "README.md"
+ requires-python = ">=3.9"
+ dependencies = [
+ "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli,mcp,evals]==${version}",
+ ]
+
+ [project.urls]
+ Homepage = "https://ai.pydantic.dev"
+ Source = "https://github.com/pydantic/pydantic-ai"
+ Documentation = "https://ai.pydantic.dev"
+ Changelog = "https://github.com/pydantic/pydantic-ai/releases"
+
+ [project.scripts]
+ pai = "pydantic_ai._cli:app"
+ '';
+ };
+in
+ buildPythonPackage rec {
+ pname = "pydantic-ai";
+ inherit version;
+ pyproject = true;
+ disabled = pythonOlder "3.8";
+ src = sources.pydantic-ai;
+ build-system = [hatchling];
+ dependencies = [pydantic-ai-slim];
+ nativeCheckInputs = [
+ pydantic-ai-slim
+ pytest-vcr
+ dirty-equals
+ # pytestCheckHook
+ ];
+ preBuild = ''
+ cp ${pyproject_toml} ./pyproject.toml
+ '';
+ pythonImportsCheck = [
+ "pydantic_ai"
+ ];
+ meta = {
+ description = "Agent Framework / shim to use Pydantic with LLMs";
+ homepage = "https://github.com/pydantic/pydantic-ai";
+ changelog = "https://github.com/pydantic/pydantic-ai/releases/tag/v${version}";
+ license = lib.licenses.mit;
+ maintainers = with lib.maintainers; [bsima];
+ };
+ }
diff --git a/Omni/Bild/Deps/pydantic-graph.nix b/Omni/Bild/Deps/pydantic-graph.nix
new file mode 100644
index 0000000..e2797b9
--- /dev/null
+++ b/Omni/Bild/Deps/pydantic-graph.nix
@@ -0,0 +1,45 @@
+{
+ lib,
+ buildPythonPackage,
+ hatchling,
+ pydantic,
+ logfire-api,
+ httpx,
+ opentelemetry-api,
+ pythonOlder,
+ sources,
+}:
+buildPythonPackage rec {
+ pname = "pydantic-graph";
+ version = sources.pydantic-ai.version;
+ pyproject = true;
+ disabled = pythonOlder "3.8";
+ src = sources.pydantic-ai;
+ sourceRoot = "pydantic-ai-src/pydantic_graph";
+ build-system = [hatchling];
+ dependencies = [
+ pydantic
+ logfire-api
+ httpx
+ opentelemetry-api
+ ];
+ nativeCheckInputs = [
+ pydantic
+ logfire-api
+ httpx
+ ];
+ pythonRelaxDeps = true;
+ postPatch = ''
+ substituteInPlace pyproject.toml \
+ --replace-fail ', "uv-dynamic-versioning>=0.7.0"' "" \
+ --replace-fail 'dynamic = ["version"]' 'version = "${version}"'
+ '';
+ pythonImportsCheck = ["pydantic_graph"];
+ meta = {
+ description = "PydanticAI core logic with minimal required dependencies.";
+ homepage = "https://github.com/pydantic/pydantic-ai";
+ changelog = "https://github.com/pydantic/pydantic-ai/releases/tag/v${version}";
+ license = lib.licenses.mit;
+ maintainers = with lib.maintainers; [bsima];
+ };
+}
diff --git a/Omni/Bild/Deps/pyswisseph.nix b/Omni/Bild/Deps/pyswisseph.nix
new file mode 100644
index 0000000..36c805e
--- /dev/null
+++ b/Omni/Bild/Deps/pyswisseph.nix
@@ -0,0 +1,41 @@
+{
+ buildPythonPackage,
+ lib,
+ setuptools,
+ wheel,
+ sources,
+ sweph-data,
+}:
+buildPythonPackage rec {
+ pname = "pyswisseph";
+ version = sources.pyswisseph.version;
+ format = "setuptools";
+
+ src = sources.pyswisseph;
+
+ nativeBuildInputs = [
+ setuptools
+ wheel
+ ];
+
+ # Disable system library detection to use bundled versions
+ preBuild = ''
+ substituteInPlace setup.py \
+ --replace-fail "swe_detection = True" "swe_detection = False" \
+ --replace-fail "sqlite3_detection = True" "sqlite3_detection = False"
+ '';
+
+ # Set ephemeris path to use sweph-data
+ postInstall = ''
+ export SE_EPHE_PATH=${sweph-data}/share/sweph/ephe
+ '';
+
+ pythonImportsCheck = ["swisseph"];
+
+ meta = with lib; {
+ homepage = "https://astrorigin.com/pyswisseph";
+ description = "Python extension to the Swiss Ephemeris";
+ license = licenses.agpl3Only;
+ maintainers = with maintainers; [bsima];
+ };
+}
diff --git a/Omni/Bild/Deps/simple-ascii-tables.nix b/Omni/Bild/Deps/simple-ascii-tables.nix
new file mode 100644
index 0000000..f2aa5d9
--- /dev/null
+++ b/Omni/Bild/Deps/simple-ascii-tables.nix
@@ -0,0 +1,28 @@
+{
+ buildPythonPackage,
+ lib,
+ poetry-core,
+ setuptools,
+ pytestCheckHook,
+ sources,
+}:
+buildPythonPackage rec {
+ pname = "simple-ascii-tables";
+ version = sources.simple-ascii-tables.version;
+ pyproject = true;
+
+ src = sources.simple-ascii-tables;
+
+ nativeBuildInputs = [poetry-core setuptools];
+
+ nativeCheckInputs = [pytestCheckHook];
+
+ pythonImportsCheck = ["simple_ascii_tables"];
+
+ meta = with lib; {
+ homepage = "https://pypi.org/project/simple-ascii-tables/";
+ description = "Simple, minimal, dependency-free ASCII tables for Python";
+ license = licenses.mit;
+ maintainers = with maintainers; [bsima];
+ };
+}
diff --git a/Omni/Bild/Deps/sweph-data.nix b/Omni/Bild/Deps/sweph-data.nix
new file mode 100644
index 0000000..02e373f
--- /dev/null
+++ b/Omni/Bild/Deps/sweph-data.nix
@@ -0,0 +1,38 @@
+{
+ stdenv,
+ fetchurl,
+ lib,
+}:
+stdenv.mkDerivation rec {
+ pname = "sweph-data";
+ version = "2023";
+
+ srcs = [
+ (fetchurl {
+ url = "https://github.com/aloistr/swisseph/raw/master/ephe/seas_18.se1";
+ sha256 = "0nvbd2kx99zsq3mlinabvjvhjm3rdq3middflq4prqsl2smc5naz";
+ })
+ (fetchurl {
+ url = "https://github.com/aloistr/swisseph/raw/master/ephe/semo_18.se1";
+ sha256 = "10191sx3nnbh827y7jpa4n3fj8d8563d4kp0qfdml2xwypdm9ypc";
+ })
+ (fetchurl {
+ url = "https://github.com/aloistr/swisseph/raw/master/ephe/sepl_18.se1";
+ sha256 = "18bfgg13sj9s6rv3zwbx1qx7k1bngyp1sw8xvnhfds8v7ip42zhb";
+ })
+ ];
+
+ unpackPhase = "true";
+
+ installPhase = ''
+ mkdir -p $out/share/sweph/ephe
+ for src in $srcs; do
+ cp $src $out/share/sweph/ephe/$(stripHash $src)
+ done
+ '';
+
+ meta = with lib; {
+ description = "Swiss Ephemeris data files";
+ license = licenses.agpl3Only;
+ };
+}
diff --git a/Omni/Bild/Example.py b/Omni/Bild/Example.py
index 58e941a..1b2f61d 100755
--- a/Omni/Bild/Example.py
+++ b/Omni/Bild/Example.py
@@ -8,8 +8,15 @@ Example Python file that also serves as a test case for bild.
# : out example
# : dep cryptography
import cryptography.fernet
+import logging
+import Omni.App as App
+import Omni.Log as Log
+import Omni.Test as Test
import sys
+logger = logging.getLogger(__name__)
+Log.setup(logger)
+
def cryptic_hello(name: str) -> str:
"""
@@ -23,6 +30,7 @@ def cryptic_hello(name: str) -> str:
key = cryptography.fernet.Fernet.generate_key()
f = cryptography.fernet.Fernet(key)
token = f.encrypt(hello(name).encode("utf-8"))
+ logger.info("attempting decryption")
ret = f.decrypt(token).decode("utf-8")
if ret != hello(name):
msg = "en/decryption failed!"
@@ -35,8 +43,16 @@ def hello(name: str) -> str:
return f"Hello {name}"
+class TestExample(Test.TestCase):
+ """Test the Example module."""
+
+ def test_hello(self) -> None:
+ """Test `hello` function."""
+ self.assertEqual("Hello Ben", hello("Ben"))
+
+
def main() -> None:
"""Entrypoint."""
if "test" in sys.argv:
- sys.stdout.write("testing success")
+ Test.run(App.Area.Test, [TestExample])
sys.stdout.write(cryptic_hello("world"))
diff --git a/Omni/Bild/Haskell.nix b/Omni/Bild/Haskell.nix
index 7e969da..5754253 100644
--- a/Omni/Bild/Haskell.nix
+++ b/Omni/Bild/Haskell.nix
@@ -21,11 +21,14 @@ in rec {
cmark = doJailbreak sup.cmark;
docopt = buildCabal sel "docopt";
filelock = dontCheck sup.filelock;
+ HaskellNet = doJailbreak sup.HaskellNet;
+ HaskellNet-SSL = doJailbreak sup.HaskellNet-SSL;
linear-generics = doJailbreak sup.linear-generics;
req = doJailbreak sup.req;
servant-auth = doJailbreak sup.servant-auth;
servant-auth-server = dontCheck sup.servant-auth-server;
shellcheck = doJailbreak sup.shellcheck;
+ sqids = dontCheck sup.sqids;
string-qq = doJailbreak sup.string-qq;
syb-with-class = doJailbreak sup.syb-with-class;
th-abstraction = doJailbreak sup.th-abstraction;
diff --git a/Omni/Bild/Nixpkgs.nix b/Omni/Bild/Nixpkgs.nix
index 3418673..ab13d40 100644
--- a/Omni/Bild/Nixpkgs.nix
+++ b/Omni/Bild/Nixpkgs.nix
@@ -23,7 +23,7 @@ let
(import ./CcacheWrapper.nix)
(import ./Functions.nix)
depsOverlay
- (_: _: {unstable = this.nixos-unstable-small.pkgs;})
+ (_: _: {unstable = this.nixos-unstable.pkgs;})
(import ./Deps.nix)
(import ./Python.nix)
(import ./Haskell.nix)
@@ -31,7 +31,7 @@ let
];
};
- nixos-unstable-small = import sources.nixos-unstable-small {
+ nixos-unstable = import sources.nixos-unstable {
inherit system config;
overlays = [
(_: _: {inherit sources;})
diff --git a/Omni/Bild/Python.nix b/Omni/Bild/Python.nix
index 36abe25..ae14ebc 100644
--- a/Omni/Bild/Python.nix
+++ b/Omni/Bild/Python.nix
@@ -4,13 +4,19 @@ _self: super: {
with pysuper.pkgs.python312Packages; let
dontCheck = p: p.overridePythonAttrs (_: {doCheck = false;});
in {
+ aider-chat = pysuper.aider-chat.withOptional {withAll = false;};
+ aiohttp = dontCheck pysuper.aiohttp;
+ anthropic = dontCheck pysuper.anthropic;
+ anyio = dontCheck pysuper.anyio;
interegular = callPackage ./Deps/interegular.nix {};
ipython = dontCheck pysuper.ipython;
+ kerykeion = callPackage ./Deps/kerykeion.nix {};
llm = super.overrideSrc pysuper.llm super.sources.llm;
llm-ollama = pysuper.pkgs.python312.pkgs.callPackage ./Deps/llm-ollama.nix {
ollama = pyself.ollama;
};
llm-sentence-transformers = callPackage ./Deps/llm-sentence-transformers.nix {};
+ logfire-api = callPackage ./Deps/logfire-api.nix {};
ludic = callPackage ./Deps/ludic.nix {};
mypy = dontCheck pysuper.mypy;
ollama = pysuper.ollama.overridePythonAttrs (old: rec {
@@ -22,10 +28,18 @@ _self: super: {
--replace-fail "0.0.0" "${version}"
'';
});
- openai = callPackage ./Deps/openai-python.nix {};
+ onnx = dontCheck pysuper.onnx;
outlines = callPackage ./Deps/outlines.nix {};
+ psycopg = dontCheck pysuper.psycopg;
+ pydantic-ai = callPackage ./Deps/pydantic-ai.nix {};
+ pydantic-ai-slim = callPackage ./Deps/pydantic-ai-slim.nix {};
+ pydantic-graph = callPackage ./Deps/pydantic-graph.nix {};
perscache = callPackage ./Deps/perscache.nix {};
+ pyswisseph = callPackage ./Deps/pyswisseph.nix {};
+ simple-ascii-tables = callPackage ./Deps/simple-ascii-tables.nix {};
+ sphinx = dontCheck pysuper.sphinx;
tokenizers = dontCheck pysuper.tokenizers;
+ uvloop = dontCheck pysuper.uvloop;
};
};
diff --git a/Omni/Bild/README.md b/Omni/Bild/README.md
new file mode 100644
index 0000000..e1c026c
--- /dev/null
+++ b/Omni/Bild/README.md
@@ -0,0 +1,40 @@
+# Bild
+
+`bild` is the universal build tool. It can build and test everything in the repo.
+
+Examples:
+```bash
+bild --test Omni/Bild.hs # Build and test a namespace
+bild --time 0 Omni/Cloud.nix # Build with no timeout
+bild --plan Omni/Test.hs # Analyze build without building
+```
+
+When the executable is built, the output will go to `_/bin`. Example:
+
+```bash
+# build the example executable
+bild Omni/Bild/Example.py
+# run the executable
+_/bin/example
+```
+
+## Adding New Dependencies
+
+### Python Packages
+
+To add a new Python package as a dependency:
+
+1. Add the package name to `Omni/Bild/Deps/Python.nix` (alphabetically sorted)
+2. Use it in your Python file with `# : dep <package-name>` comment at the top
+3. Run `bild <yourfile.py>` to build with the new dependency
+
+Example:
+```python
+# : out myapp
+# : dep stripe
+# : dep pytest
+import stripe
+```
+
+The package name must match the nixpkgs python package name (usually the PyPI name).
+Check available packages: `nix-env -qaP -A nixpkgs.python3Packages | grep <name>`
diff --git a/Omni/Bild/Sources.json b/Omni/Bild/Sources.json
index 5d43855..a7d057a 100644
--- a/Omni/Bild/Sources.json
+++ b/Omni/Bild/Sources.json
@@ -73,18 +73,43 @@
"url": "https://github.com/MegaIng/interegular/archive/v0.2.1.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
+ "kerykeion": {
+ "branch": "master",
+ "description": "Data-Driven Astrology 💫 Kerykeion is a Python library for astrology. It generates SVG charts and extracts detailed structured data for birth charts, synastry, transits, composite charts, and more.",
+ "homepage": "https://kerykeion.net",
+ "owner": "g-battaglia",
+ "repo": "kerykeion",
+ "rev": "V4.26.0",
+ "sha256": "0c2r2q0qgjzzjp7d3b1f0mqb508kj3b6767cw7kd2nn47wihb8g8",
+ "type": "tarball",
+ "url": "https://github.com/g-battaglia/kerykeion/archive/V4.26.0.tar.gz",
+ "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
+ "version": "4.26.0"
+ },
"llm": {
"branch": "main",
"description": "Access large language models from the command-line",
"homepage": "https://llm.datasette.io",
"owner": "simonw",
"repo": "llm",
- "rev": "41d64a8f1239322e12aa11c17450054f0c654ed7",
- "sha256": "1vyg0wmcxv8910iz4cx9vjb3y4fq28423p62cgzr308ra8jii719",
+ "rev": "0.27.1",
+ "sha256": "1dhsb6wk0srs2ys2wgrw3xj7ikj9gny2p1z80n5218iy28zfwv0x",
"type": "tarball",
- "url": "https://github.com/simonw/llm/archive/41d64a8f1239322e12aa11c17450054f0c654ed7.tar.gz",
+ "url": "https://github.com/simonw/llm/archive/0.27.1.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
- "version": "0.21"
+ "version": "0.27.1"
+ },
+ "logfire": {
+ "branch": "main",
+ "description": "Uncomplicated Observability for Python and beyond! 🪵🔥",
+ "homepage": "https://logfire.pydantic.dev/docs/",
+ "owner": "pydantic",
+ "repo": "logfire",
+ "rev": "0ef05d9414232c82fb03d34860fb1a2ec9a50488",
+ "sha256": "16ffikhdh810lhj7rx9gy0sy9x4kk2621l02j5ydkar0vkcpy6vd",
+ "type": "tarball",
+ "url": "https://github.com/pydantic/logfire/archive/0ef05d9414232c82fb03d34860fb1a2ec9a50488.tar.gz",
+ "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"niv": {
"branch": "master",
@@ -140,10 +165,10 @@
"homepage": "",
"owner": "nixos",
"repo": "nixpkgs",
- "rev": "030ba1976b7c0e1a67d9716b17308ccdab5b381e",
- "sha256": "14rpk53mia7j0hr4yaf5m3b2d4lzjx8qi2rszxjhqq00pxzzr64w",
+ "rev": "50ab793786d9de88ee30ec4e4c24fb4236fc2674",
+ "sha256": "1s2gr5rcyqvpr58vxdcb095mdhblij9bfzaximrva2243aal3dgx",
"type": "tarball",
- "url": "https://github.com/nixos/nixpkgs/archive/030ba1976b7c0e1a67d9716b17308ccdab5b381e.tar.gz",
+ "url": "https://github.com/nixos/nixpkgs/archive/50ab793786d9de88ee30ec4e4c24fb4236fc2674.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixos-mailserver": {
@@ -156,16 +181,16 @@
"url_template": "https://gitlab.com/simple-nixos-mailserver/nixos-mailserver/-/archive/<rev>/nixos-mailserver-<rev>.tar.gz",
"version": "master"
},
- "nixos-unstable-small": {
- "branch": "nixos-unstable-small",
+ "nixos-unstable": {
+ "branch": "nixos-unstable",
"description": "Nix Packages collection & NixOS",
"homepage": "",
"owner": "nixos",
"repo": "nixpkgs",
- "rev": "ceaea203f3ae1787b1bd13f021f686391696fc5b",
- "sha256": "0dgmjq8sng8dbfkzkkmcigkgk5n4mlv6cljaqahhssppygi177q9",
+ "rev": "2fad6eac6077f03fe109c4d4eb171cf96791faa4",
+ "sha256": "14inw2gxia29f0qh9kyvdq9y1wcv43r4cc7fylz9v372z5chiamh",
"type": "tarball",
- "url": "https://github.com/nixos/nixpkgs/archive/ceaea203f3ae1787b1bd13f021f686391696fc5b.tar.gz",
+ "url": "https://github.com/nixos/nixpkgs/archive/2fad6eac6077f03fe109c4d4eb171cf96791faa4.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nvidia-patch-nixos": {
@@ -186,23 +211,13 @@
"homepage": "https://ollama.com",
"owner": "ollama",
"repo": "ollama-python",
- "rev": "ee349ecc6d05ea57c9e91bc9345e2db3bc79bb5b",
+ "rev": "115792583ed248411d68334050ffed03ce9bc065",
"sha256": "1dkrdkw7gkr9ilfb34qh9vwm0231csg7raln69p00p4mvx2w53gi",
"type": "tarball",
"url": "https://github.com/ollama/ollama-python/archive/refs/tags/v0.4.5.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/refs/tags/v<version>.tar.gz",
"version": "0.4.5"
},
- "openai-python": {
- "branch": "main",
- "description": "The official Python library for the OpenAI API",
- "homepage": "https://pypi.org/project/openai/",
- "owner": "openai",
- "repo": "https://github.com/openai/openai-python",
- "rev": "5e3e4d1b0f16ccc4469a90a5bff09cafe0de7a2e",
- "type": "git",
- "version": "1.56.1"
- },
"outlines": {
"branch": "main",
"description": "Generative Model Programming",
@@ -227,6 +242,33 @@
"url": "https://github.com/leshchenko1979/perscache/archive/0.6.1.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
+ "pydantic-ai": {
+ "branch": "main",
+ "description": "Agent Framework / shim to use Pydantic with LLMs",
+ "homepage": "https://ai.pydantic.dev",
+ "owner": "pydantic",
+ "repo": "pydantic-ai",
+ "rev": "1e561011e4d9e654b1eaecb6b96890bcc047982d",
+ "sha256": "02kx6j9nck4b8qxz86lzs5jvq01rh4641wdal2nwznwxwlinnyp5",
+ "type": "tarball",
+ "url": "https://github.com/pydantic/pydantic-ai/archive/1e561011e4d9e654b1eaecb6b96890bcc047982d.tar.gz",
+ "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
+ "version": "0.1.9"
+ },
+ "pyswisseph": {
+ "branch": "master",
+ "description": "Python extension to the Swiss Ephemeris",
+ "homepage": "https://astrorigin.com/pyswisseph",
+ "repo": "https://github.com/astrorigin/pyswisseph",
+ "rev": "778903d59bed84b8da020cee77f1995b0df5106b",
+ "sha256": "1qbwnhw2rv6qh5nzgj47baxfmx29wim0bkrvfzfg6cy7g7xxfbz6",
+ "submodules": true,
+ "tag": "v2.10.03.2",
+ "type": "git",
+ "url": "https://github.com/astrorigin/pyswisseph/archive/v2.10.03.2.tar.gz",
+ "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
+ "version": "2.10.03.2"
+ },
"radicale": {
"branch": "master",
"description": "A simple CalDAV (calendar) and CardDAV (contact) server.",
@@ -252,5 +294,18 @@
"url": "https://github.com/feuerbach/regex-applicative/archive/449519c38e65753345e9a008362c011cb7a0a4d9.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
"version": "0.3.4"
+ },
+ "simple-ascii-tables": {
+ "branch": "master",
+ "description": "Simple, minimal, dependency-free ASCII tables for Python.",
+ "homepage": "https://pypi.org/project/simple-ascii-tables/",
+ "owner": "g-battaglia",
+ "repo": "simple-ascii-tables",
+ "rev": "V1.0.0",
+ "sha256": "0zzpis810kgwybaiyj2im3fcmjvadpb3gls4k2j13k0z909vind7",
+ "type": "tarball",
+ "url": "https://github.com/g-battaglia/simple-ascii-tables/archive/V1.0.0.tar.gz",
+ "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
+ "version": "1.0.0"
}
}
diff --git a/Omni/Bild/Sources.nix b/Omni/Bild/Sources.nix
index dbcd147..93bb9d8 100644
--- a/Omni/Bild/Sources.nix
+++ b/Omni/Bild/Sources.nix
@@ -44,11 +44,15 @@ let
else
abort
"In git source '${name}': Please specify `ref`, `tag` or `branch`!";
+ submodules =
+ if spec ? submodules
+ then spec.submodules
+ else false;
in
builtins.fetchGit {
url = spec.repo;
inherit (spec) rev;
- inherit ref;
+ inherit ref submodules;
};
fetch_local = spec: spec.path;
diff --git a/Omni/Bot.scm b/Omni/Bot.scm
deleted file mode 100755
index ff81c53..0000000
--- a/Omni/Bot.scm
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env run.sh
-!#
-;; : out omnibot
-;;
-;; Usage with ii:
-;;
-;; tail -f \#omni/out | guile -L $CODEROOT -s Omni/Bot.scm
-;;
-(define-module (Omni Bot) #:export (main))
-
-(import (ice-9 rdelim))
-(import (ice-9 match))
-(import (ice-9 regex))
-(import (ice-9 receive))
-(import (bs core))
-(import (prefix (bs string) string.))
-
-(define (log msg)
- (display msg (current-error-port)))
-
-(define (is-command? msg)
- (string.prefix? msg "omnibot:"))
-
-(define (parse-line line)
- (if (eof-object? line)
- (exit)
- (let ([matches (regexp-exec
- (make-regexp "<(\\S*)>(.*)" regexp/extended)
- (string-drop line 11))])
- (if matches
- `(user
- ,(match:substring matches 1)
- ,(string.lstrip (match:substring matches 2) #\space))
- `(system ,(string-drop line 11))))))
-
-(define (dispatch user msg)
- (let ([msg (-> msg
- (string-drop (string-length "omnibot:"))
- (string.lstrip #\space))])
- (cond
- ((equal? msg "hi")
- (display (fmt "~a: well, hello!" user)))
-
- (else
- (display (fmt "command not understood: ~a" msg))))))
-
-(define (main args)
- (while #t
- (match (parse-line (read-line))
- [('user user msg)
- (if (is-command? msg)
- (dispatch user msg)
- (begin
- (log (fmt "user: ~a " user))
- (log (fmt "message: ~a" msg))))]
-
- [('system msg)
- (log (fmt "system: ~a" msg))])
-
- (newline)
- (force-output)))
diff --git a/Omni/Ci.hs b/Omni/Ci.hs
new file mode 100644
index 0000000..aff5c7b
--- /dev/null
+++ b/Omni/Ci.hs
@@ -0,0 +1,191 @@
+#!/usr/bin/env run.sh
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | A robust CI program replacing Omni/Ci.sh
+--
+-- : out ci
+module Omni.Ci (main) where
+
+import Alpha
+import qualified Data.Text as Text
+import qualified Omni.Cli as Cli
+import qualified Omni.Log as Log
+import qualified Omni.Test as Test
+import qualified System.Directory as Dir
+import qualified System.Environment as Environment
+import qualified System.Exit as Exit
+import System.FilePath ((</>))
+import qualified System.Process as Process
+
+main :: IO ()
+main = Cli.main <| Cli.Plan help move test pure
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+omni-ci - Continuous Integration
+
+Usage:
+ ci test
+ ci [options]
+
+Options:
+ -h, --help Print this info
+|]
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Ci"
+ [ Test.unit "placeholder test" <| do
+ True Test.@=? True
+ ]
+
+move :: Cli.Arguments -> IO ()
+move _ = do
+ -- 1. Check for dirty worktree
+ status <- readProcess "git" ["status", "-s"] ""
+ unless (Text.null status) <| do
+ Log.fail ["ci", "dirty worktree"]
+ Exit.exitWith (Exit.ExitFailure 1)
+
+ -- 2. Setup environment
+ -- We need to ensure timeout is disabled for CI builds
+ -- Equivalent to: BILD_ARGS="--time 0 ${BILD_ARGS:-""}"
+ currentBildArgs <- Environment.lookupEnv "BILD_ARGS"
+ let bildArgs = "--time 0 " <> fromMaybe "" currentBildArgs
+ Environment.setEnv "BILD_ARGS" bildArgs
+
+ -- 3. Get user info
+ at <- readProcess "date" ["-R"] "" |> fmap chomp
+ user <- readProcess "git" ["config", "--get", "user.name"] "" |> fmap chomp
+ mail <- readProcess "git" ["config", "--get", "user.email"] "" |> fmap chomp
+
+ -- 4. Check existing git notes
+ -- commit=$(git notes --ref=ci show HEAD || true)
+ (exitCode, noteContent, _) <- Process.readProcessWithExitCode "git" ["notes", "--ref=ci", "show", "HEAD"] ""
+
+ let alreadyGood = case exitCode of
+ Exit.ExitSuccess ->
+ let content = Text.pack noteContent
+ in ("Lint-is: good" `Text.isInfixOf` content) && ("Test-is: good" `Text.isInfixOf` content)
+ _ -> False
+
+ when alreadyGood <| do
+ Log.pass ["ci", "already verified"]
+ Exit.exitSuccess
+
+ -- 5. Run Lint
+ coderoot <- getCoderoot
+ let runlint = coderoot </> "_/bin/lint"
+
+ lintExists <- Dir.doesFileExist runlint
+ unless lintExists <| do
+ Log.info ["ci", "building lint"]
+ callProcess "bild" [coderoot </> "Omni/Lint.hs"]
+
+ Log.info ["ci", "running lint"]
+ -- if "$runlint" "${CODEROOT:?}"/**/*
+ -- We need to expand **/* which shell does.
+ -- Since we are in Haskell, we can just pass "." or call git ls-files or similar.
+ -- Omni/Ci.sh used "${CODEROOT:?}"/**/* which relies on bash globbing.
+ -- Omni/Lint.hs recursively checks if passed directory or uses git diff if no args.
+ -- But Omni/Ci.sh passes **/*.
+ -- Let's try passing the root directory or just run it without args?
+ -- Omni/Lint.hs says:
+ -- "case Cli.getAllArgs args (Cli.argument "file") of [] -> changedFiles ..."
+ -- So if we pass nothing, it only checks changed files.
+ -- The CI script explicitly passed everything.
+ -- We can replicate "everything" by passing the coderoot, assuming Lint handles directories recursively?
+ -- Omni/Lint.hs: "traverse Directory.makeAbsolute /> map (Namespace.fromPath root) ... filter (not <. Namespace.isCab)"
+ -- It seems it expects files.
+ -- We can use `git ls-files` to get all files.
+ allFiles <-
+ readProcess "git" ["ls-files"] ""
+ /> lines
+ /> map Text.unpack
+ /> filter (not <. null)
+
+ -- We can't pass all files as arguments if there are too many (ARG_MAX).
+ -- But wait, Omni/Lint.hs takes arguments.
+ -- If we want to check everything, maybe we should implement a "check all" mode in Lint or pass chunks.
+ -- However, looking at Omni/Ci.sh: `"$runlint" "${CODEROOT:?}"/**/*`
+ -- This globbing is handled by the shell. It might be huge.
+ -- If I run `lint` with `.` it might work if Lint supports directories.
+ -- Omni/Lint.hs: "files |> ... filterM Directory.doesFileExist"
+ -- It seems it filters for files.
+ -- If I pass a directory, `doesFileExist` will return False.
+ -- So I must pass files.
+
+ -- Let's pass all files from git ls-files.
+ -- But we must be careful about ARG_MAX.
+ -- For now, let's try passing them. If it fails, we might need to batch.
+
+ lintResult <- do
+ -- We run lint on all files.
+ -- Note: calling callProcess with huge list might fail.
+ -- Let's see if we can avoid passing all files if Lint supports it.
+ -- Omni/Lint.hs doesn't seem to support directory recursion on its own if passed a dir,
+ -- it treats args as file paths.
+
+ -- We will try to run it.
+ (exitCodeLint, _, _) <- Process.readProcessWithExitCode runlint allFiles ""
+ pure <| case exitCodeLint of
+ Exit.ExitSuccess -> "good"
+ _ -> "fail"
+
+ -- 6. Run Tests
+ -- if bild "${BILD_ARGS:-""}" --test "${CODEROOT:?}"/**/*
+ Log.info ["ci", "running tests"]
+
+ testResult <- do
+ -- similarly, bild takes targets.
+ -- bild "${CODEROOT:?}"/**/*
+ -- We can pass namespaces.
+ -- Let's try passing all files again.
+ -- bild handles namespaces.
+ (exitCodeTest, _, _) <- Process.readProcessWithExitCode "bild" ("--test" : allFiles) ""
+ pure <| case exitCodeTest of
+ Exit.ExitSuccess -> "good"
+ _ -> "fail"
+
+ -- 7. Create Note
+ let noteMsg =
+ Text.unlines
+ [ "Lint-is: " <> lintResult,
+ "Test-is: " <> testResult,
+ "Test-by: " <> user <> " <" <> mail <> ">",
+ "Test-at: " <> at
+ ]
+
+ -- 8. Append Note
+ callProcess "git" ["notes", "--ref=ci", "append", "-m", Text.unpack noteMsg]
+
+ -- 9. Exit
+ if lintResult == "good" && testResult == "good"
+ then Exit.exitSuccess
+ else do
+ Log.fail ["ci", "verification failed"]
+ Exit.exitWith (Exit.ExitFailure 1)
+
+-- Helpers
+
+readProcess :: FilePath -> [String] -> String -> IO Text
+readProcess cmd args input = do
+ out <- Process.readProcess cmd args input
+ pure (Text.pack out)
+
+callProcess :: FilePath -> [String] -> IO ()
+callProcess cmd args = do
+ Process.callProcess cmd args
+
+getCoderoot :: IO FilePath
+getCoderoot = do
+ mEnvRoot <- Environment.lookupEnv "CODEROOT"
+ case mEnvRoot of
+ Just envRoot -> pure envRoot
+ Nothing -> panic "CODEROOT not set" -- Simplified for now
diff --git a/Omni/Ci.sh b/Omni/Ci.sh
deleted file mode 100755
index 609e9aa..0000000
--- a/Omni/Ci.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-#
-# A simple ci that saves its results in a git note, formatted according to
-# RFC-2822, more or less.
-#
-# To run this manually, exec the script. It will by default run the tests for
-# HEAD, whatever you currently have checked out.
-#
-# It would be cool to use a zero-knowledge proof mechanism here to prove that
-# so-and-so ran the tests, but I'll have to research how to do that.
-#
-# ensure we don't exit on bild failure, only on CI script error
- set +e
- set -u
-##
- [[ -n $(git status -s) ]] && { echo fail: dirty worktree; exit 1; }
-##
- at=$(date -R)
- user=$(git config --get user.name)
- mail=$(git config --get user.email)
-##
- commit=$(git notes --ref=ci show HEAD || true)
- if [[ -n "$commit" ]]
- then
- if grep -q "Lint-is: good" <<< "$commit"
- then
- exit 0
- fi
- if grep -q "Test-is: good" <<< "$commit"
- then
- exit 0
- fi
- fi
-##
- runlint="$CODEROOT"/_/bin/lint
- [[ ! -f "$runlint" ]] && bild "${BILD_ARGS:-""}" "${CODEROOT:?}"/Omni/Lint.hs
- if "$runlint" "${CODEROOT:?}"/**/*
- then
- lint_result="good"
- else
- lint_result="fail"
- fi
-##
- if bild "${BILD_ARGS:-""}" --test "${CODEROOT:?}"/**/*
- then
- test_result="good"
- else
- test_result="fail"
- fi
-##
- read -r -d '' note <<EOF
-Lint-is: $lint_result
-Test-is: $test_result
-Test-by: $user <$mail>
-Test-at: $at
-EOF
-##
- git notes --ref=ci append -m "$note"
-##
-# exit 1 if failure
- [[ ! "$lint_result" == "fail" && ! "$test_result" == "fail" ]]
-##
diff --git a/Omni/Cloud.nix b/Omni/Cloud.nix
index 6f52850..21de9d2 100755
--- a/Omni/Cloud.nix
+++ b/Omni/Cloud.nix
@@ -6,19 +6,22 @@ bild.os {
./Os/Base.nix
./Packages.nix
./Users.nix
+ ./Cloud/Cal.nix
# ./Cloud/Chat.nix
./Cloud/Comms.nix
./Cloud/Git.nix
./Cloud/Hardware.nix
./Cloud/Mail.nix
./Cloud/Networking.nix
+ ./Syncthing.nix
./Cloud/Web.nix
./Cloud/Znc.nix
./Cloud/Monica.nix
+ ./Cloud/OpenWebui.nix
"${bild.sources.nixos-mailserver}"
];
- networking.hostName = "simatime";
- networking.domain = "simatime.com";
+ networking.hostName = "bensima";
+ networking.domain = "bensima.com";
# the datacenter for this VM is in NYC
time.timeZone = "America/New_York";
}
diff --git a/Omni/Cloud/Cal.nix b/Omni/Cloud/Cal.nix
new file mode 100644
index 0000000..3c3c46c
--- /dev/null
+++ b/Omni/Cloud/Cal.nix
@@ -0,0 +1,81 @@
+{config, ...}: let
+ ports = import ./Ports.nix;
+ rootDomain = config.networking.domain;
+in {
+ networking.firewall.allowedTCPPorts = [ports.radicale];
+
+ services.radicale = {
+ enable = true;
+ rights = {
+ # Allow reading root collection for authenticated users
+ root = {
+ user = ".*";
+ collection = "";
+ permissions = "R";
+ };
+ # Allow reading and writing principal collection (same as username)
+ principal = {
+ user = ".+";
+ collection = "{user}";
+ permissions = "RW";
+ };
+ # Make shared collection visible as part of root listing
+ shared_principal = {
+ user = ".+";
+ collection = "shared";
+ permissions = "RW";
+ };
+ # Allow reading and writing calendars and address books that are direct
+ # children of the principal collection
+ calendars = {
+ user = ".+";
+ collection = "{user}/[^/]+";
+ permissions = "rw";
+ };
+ # Allow ben full access to shared/ben, repeat this for other shared
+ # calendars as needed.
+ ben_shared = {
+ user = "ben";
+ collection = "shared/ben(/.+)?";
+ permissions = "rwD";
+ };
+ # Must be authed to write to the shared collections
+ shared_write = {
+ user = ".+";
+ collection = "shared/[^/]+";
+ permissions = "rw";
+ };
+ # Allow any user to read the shared collection
+ shared_read = {
+ user = ".+";
+ collection = "shared(/.*)?";
+ permissions = "r";
+ };
+ };
+ settings = {
+ server = {
+ hosts = [
+ "0.0.0.0:${toString ports.radicale}"
+ "[::]:${toString ports.radicale}"
+ ];
+ };
+ auth = {
+ type = "htpasswd";
+ htpasswd_filename = "/etc/radicale/users";
+ htpasswd_encryption = "plain";
+ };
+ };
+ };
+
+ services.nginx.virtualHosts."cal.${rootDomain}" = {
+ locations."/".proxyPass = "http://localhost:${toString ports.radicale}";
+ forceSSL = true;
+ useACMEHost = rootDomain;
+ extraConfig = ''
+ proxy_set_header X-Script-Name "";
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Host $host;
+ proxy_pass_header Authorization;
+ '';
+ };
+}
diff --git a/Omni/Cloud/Chat.nix b/Omni/Cloud/Chat.nix
index a3a6a78..6b15dd7 100644
--- a/Omni/Cloud/Chat.nix
+++ b/Omni/Cloud/Chat.nix
@@ -88,7 +88,7 @@ in {
};
};
};
- # matrix client, available at chat.simatime.com
+ # matrix client, available at chat.bensima.com
#
# note that element and matrix-synapse must be on separate fqdn's to
# protect from XSS attacks:
diff --git a/Omni/Cloud/Comms/Xmpp.nix b/Omni/Cloud/Comms/Xmpp.nix
index ea50ed9..e48dd56 100644
--- a/Omni/Cloud/Comms/Xmpp.nix
+++ b/Omni/Cloud/Comms/Xmpp.nix
@@ -7,7 +7,7 @@
# xmpp chat service
#
let
- rootDomain = config.networking.domain; # simatime.com
+ rootDomain = config.networking.domain; # bensima.com
ssl = {
cert = "/var/lib/acme/${rootDomain}/fullchain.pem";
key = "/var/lib/acme/${rootDomain}/key.pem";
@@ -32,7 +32,7 @@ in {
# this is necessary bc prosody needs access to the acme certs managed in Omni/Cloud/Web.nix, when
# i learn how to use security.acme better, and use separate certs, then i can fix this group
group = "nginx";
- admins = ["bsima@${rootDomain}"];
+ admins = ["ben@${rootDomain}"];
allowRegistration = true;
inherit ssl;
uploadHttp = {
@@ -81,20 +81,20 @@ in {
websocket_url = "wss://${rootDomain}/xmpp-websocket";
}
- cross_domain_websocket = { "https://${rootDomain}", "https://anon.${rootDomain}" }
+ cross_domain_websocket = { "https://${rootDomain}" }
cross_domain_bosh = false; -- handle this with nginx
consider_bosh_secure = true;
-- this is a virtualhost that allows anonymous authentication. use this
-- for a public lobby. the nix module doesn't support 'authentication'
-- so i have to do this here.
- VirtualHost "anon.${rootDomain}"
- authentication = "anonymous"
- ssl = {
- cafile = "/etc/ssl/certs/ca-bundle.crt";
- key = "${ssl.key}";
- certificate = "${ssl.cert}";
- };
+ --VirtualHost "anon.${rootDomain}"
+ -- authentication = "anonymous"
+ -- ssl = {
+ -- cafile = "/etc/ssl/certs/ca-bundle.crt";
+ -- key = "${ssl.key}";
+ -- certificate = "${ssl.cert}";
+ -- };
'';
muc = [
@@ -116,6 +116,11 @@ in {
enabled = true;
inherit ssl;
};
+ "simatime.com" = {
+ domain = "simatime.com";
+ enabled = true;
+ inherit ssl;
+ };
};
};
@@ -154,30 +159,31 @@ in {
};
};
- services.nginx.virtualHosts."anon.${rootDomain}" = {
- useACMEHost = "${rootDomain}";
- forceSSL = true;
- locations = {
- "/http-bind" = {
- proxyPass = "https://anon.${rootDomain}:5281/http-bind";
- extraConfig = ''
- proxy_set_header Host $host;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_buffering off;
- if ($request_method ~* "(GET|POST)") {
- add_header Access-Control-Allow-Origin "*";
- }
- if ($request_method = OPTIONS) {
- add_header Access-Control-Allow-Origin "*";
- add_header Access-Control-Allow-Methods "GET, POST, OPTIONS, HEAD";
- add_header Access-Control-Allow-Headers "Authorization, Origin, X-Requested-With, Content-Type, Accept";
- return 200;
- }
- '';
- };
- };
- };
+ # this is an old proxy for the conversejs anonymous prosody host
+ #services.nginx.virtualHosts."anon.${rootDomain}" = {
+ # useACMEHost = "${rootDomain}";
+ # forceSSL = true;
+ # locations = {
+ # "/http-bind" = {
+ # proxyPass = "https://anon.${rootDomain}:5281/http-bind";
+ # extraConfig = ''
+ # proxy_set_header Host $host;
+ # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ # proxy_set_header X-Forwarded-Proto $scheme;
+ # proxy_buffering off;
+ # if ($request_method ~* "(GET|POST)") {
+ # add_header Access-Control-Allow-Origin "*";
+ # }
+ # if ($request_method = OPTIONS) {
+ # add_header Access-Control-Allow-Origin "*";
+ # add_header Access-Control-Allow-Methods "GET, POST, OPTIONS, HEAD";
+ # add_header Access-Control-Allow-Headers "Authorization, Origin, X-Requested-With, Content-Type, Accept";
+ # return 200;
+ # }
+ # '';
+ # };
+ # };
+ #};
users.users.nginx.extraGroups = ["prosody"];
diff --git a/Omni/Cloud/Git.nix b/Omni/Cloud/Git.nix
index 4d04b98..e610eb5 100644
--- a/Omni/Cloud/Git.nix
+++ b/Omni/Cloud/Git.nix
@@ -27,7 +27,7 @@ in {
settings = {
strict-export = "git-daemon-export-ok";
root-title = "ben's git repos";
- root-desc = "xmpp:buildlog@conference.simatime.com";
+ root-desc = "xmpp:buildlog@conference.bensima.com";
enable-git-config = 1;
clone-url = lib.strings.concatStringsSep " " [
# this doesn't work because git-daemon runs as user gitDaemon, but
diff --git a/Omni/Cloud/Mail.nix b/Omni/Cloud/Mail.nix
index 728ec09..22551c2 100644
--- a/Omni/Cloud/Mail.nix
+++ b/Omni/Cloud/Mail.nix
@@ -10,11 +10,11 @@ Known issues:
mailserver = {
enable = true;
monitoring = {
- enable = false;
- alertAddress = "bsima@me.com";
+ enable = true;
+ alertAddress = "bsima@icloud.com";
};
- fqdn = "simatime.com";
- domains = ["simatime.com" "bsima.me"];
+ fqdn = "bensima.com";
+ domains = ["bensima.com" "simatime.com" "bsima.me"];
certificateScheme = "acme-nginx"; # let's encrypt, using named scheme instead of number
enableImap = true;
enablePop3 = true;
@@ -23,36 +23,46 @@ Known issues:
enableManageSieve = true;
virusScanning = false; # ur on ur own
localDnsResolver = true;
+ dmarcReporting = {
+ enable = true;
+ organizationName = "Ben Sima";
+ domain = "bensima.com";
+ localpart = "dmarc";
+ };
# Define proper virtual aliases instead of placeholder
extraVirtualAliases = {
- "blocked@simatime.com" = "ben@simatime.com";
+ "blocked@bensima.com" = "ben@bensima.com";
+
+ # forward old addresses to new domain
+ "ben@bsima.me" = "ben@bensima.com";
+ "ben@simatime.com" = "ben@bensima.com";
};
loginAccounts = {
- "ben@simatime.com" = {
+ "ben@bensima.com" = {
hashedPasswordFile = "/home/ben/hashed-mail-password";
aliases = [
- # my default email
+ # my old emails
+ "ben@simatime.com"
"ben@bsima.me"
- # admin stuff
- "postmaster@simatime.com"
- "abuse@simatime.com"
+ # admin stuff, necessary i think?
+ "postmaster@bensima.com"
+ "abuse@bensima.com"
];
- catchAll = ["simatime.com" "bsima.me"];
+ catchAll = ["bensima.com" "simatime.com" "bsima.me"];
quota = "10G";
};
- "dev@simatime.com" = {
+ "dev@bensima.com" = {
hashedPasswordFile = "/home/ben/hashed-mail-password";
- aliases = ["dev@bsima.me"];
+ aliases = ["dev@simatime.com" "dev@bsima.me"];
quota = "10G";
};
- "nick@simatime.com" = {
- hashedPassword = "$6$31P/Mg8k8Pezy1e$Fn1tDyssf.1EgxmLYFsQpSq6RP4wbEvP/UlBlXQhyKA9FnmFtJteXsbJM1naa8Kyylo8vZM9zmeoSthHS1slA1";
- aliases = ["nicolai@simatime.com"];
+ "monica@bensima.com" = {
+ hashedPasswordFile = "/home/ben/hashed-mail-password";
quota = "1G";
};
- "monica@simatime.com" = {
+ "dmarc@bensima.com" = {
hashedPasswordFile = "/home/ben/hashed-mail-password";
quota = "1G";
};
@@ -63,30 +73,59 @@ Known issues:
services.postfix.headerChecks = [
# Block perfora.net
{
- pattern = "/^Received:.*perfora\\.net/";
+ pattern = "^Received:.*perfora\\.net";
action = "REJECT Domain perfora.net is blocked";
}
{
- pattern = "/^From:.*perfora\\.net/";
+ pattern = "^From:.*perfora\\.net";
action = "REJECT Domain perfora.net is blocked";
}
-
+
# Block novastells.com.es domain
{
- pattern = "/^Received:.*novastells\\.com\\.es/";
+ pattern = "^Received:.*novastells\\.com\\.es";
action = "REJECT Domain novastells.com.es is blocked";
}
{
- pattern = "/^From:.*novastells\\.com\\.es/";
+ pattern = "^From:.*novastells\\.com\\.es";
action = "REJECT Domain novastells.com.es is blocked";
}
{
- pattern = "/^Return-Path:.*novastells\\.com\\.es/";
+ pattern = "^Return-Path:.*novastells\\.com\\.es";
action = "REJECT Domain novastells.com.es is blocked";
}
{
- pattern = "/^Sender:.*novastells\\.com\\.es/";
+ pattern = "^Sender:.*novastells\\.com\\.es";
action = "REJECT Domain novastells.com.es is blocked";
}
+
+ # Block optaltechtld.com domain
+ {
+ pattern = "^Received:.*optaltechtld\\.com";
+ action = "REJECT Domain optaltechtld.com is blocked";
+ }
+ {
+ pattern = "^From:.*optaltechtld\\.com";
+ action = "REJECT Domain optaltechtld.com is blocked";
+ }
+ {
+ pattern = "^Return-Path:.*optaltechtld\\.com";
+ action = "REJECT Domain optaltechtld.com is blocked";
+ }
+ {
+ pattern = "^Sender:.*optaltechtld\\.com";
+ action = "REJECT Domain optaltechtld.com is blocked";
+ }
];
+
+ # Increase memory limits for mbsync, otherwise it runs out of space trying to
+ # sync large mailboxes (like dev/INBOX)
+ services.dovecot2.extraConfig = ''
+ service imap {
+ vsz_limit = 4G
+ }
+ service quota-status {
+ vsz_limit = 4G
+ }
+ '';
}
diff --git a/Omni/Cloud/Monica.nix b/Omni/Cloud/Monica.nix
index 0d6ca65..39a5ce1 100644
--- a/Omni/Cloud/Monica.nix
+++ b/Omni/Cloud/Monica.nix
@@ -2,7 +2,7 @@
rootDomain = config.networking.domain;
in {
services.monica = {
- enable = true;
+ enable = false;
hostname = "monica.${rootDomain}";
appKeyFile = "/run/keys/monica-appkey";
database = {
@@ -21,7 +21,7 @@ in {
host = "localhost";
port = 25; # Standard SMTP port
fromName = "Monica CRM";
- from = "monica@simatime.com";
+ from = "monica@bensima.com";
encryption = null; # No encryption for local mail server
};
};
diff --git a/Omni/Cloud/NostrRelay.nix b/Omni/Cloud/NostrRelay.nix
index 1bcf99b..00401a4 100644
--- a/Omni/Cloud/NostrRelay.nix
+++ b/Omni/Cloud/NostrRelay.nix
@@ -8,8 +8,8 @@
# https://git.sr.ht/~gheartsfield/nostr-rs-relay/tree/master/config.toml
cfg = pkgs.writeText "config.toml" ''
[info]
- name = "simatime"
- relay_url = "wss://nostr.simatime.com"
+ name = "bensima"
+ relay_url = "wss://nostr.bensima.com"
description = "yet another nostr relay"
[database]
diff --git a/Omni/Cloud/OpenWebui.nix b/Omni/Cloud/OpenWebui.nix
new file mode 100644
index 0000000..fc662c2
--- /dev/null
+++ b/Omni/Cloud/OpenWebui.nix
@@ -0,0 +1,43 @@
+{
+ config,
+ pkgs,
+ ...
+}: let
+ ports = import ./Ports.nix;
+in {
+ config.virtualisation.oci-containers.backend = "docker";
+
+ config.virtualisation.oci-containers.containers.open-webui-aichat = {
+ image = "ghcr.io/open-webui/open-webui:main";
+ volumes = ["/var/lib/open-webui-aichat:/app/backend/data"];
+ environment = {
+ PORT = toString ports.open-webui-aichat;
+ };
+ extraOptions = ["--network=host"];
+ };
+
+ # Add a service that updates and restarts the container
+ config.systemd.services."update-open-webui-aichat" = {
+ description = "pulling new open-webui image and restarting the service";
+ wantedBy = ["multi-user.target"];
+ after = ["network-online.target"];
+ serviceConfig = {
+ Type = "oneshot";
+ ExecStart = [
+ # Pull the latest image
+ "${pkgs.docker}/bin/docker pull ghcr.io/open-webui/open-webui:main"
+ # Restart the container
+ "${pkgs.systemd}/bin/systemctl stop docker-open-webui-aichat"
+ "${pkgs.systemd}/bin/systemctl start docker-open-webui-aichat"
+ ];
+ };
+ };
+
+ # Add a timer that runs every Sunday at 3 AM
+ config.systemd.timers."update-open-webui-aichat" = {
+ wantedBy = ["timers.target"];
+ timerConfig.OnCalendar = "Sun 03:00:00";
+ timerConfig.Persistent = true;
+ unitConfig.Description = "Weekly timer for pulling new open-webui image and restarting service.";
+ };
+}
diff --git a/Omni/Cloud/Ports.nix b/Omni/Cloud/Ports.nix
index 7f16bf3..ac67d54 100644
--- a/Omni/Cloud/Ports.nix
+++ b/Omni/Cloud/Ports.nix
@@ -33,6 +33,7 @@
nostr-relay = 8084;
ollama = 11434;
open-webui = 8088;
+ open-webui-aichat = 4242;
radicale = 5232;
sabten = 8081;
ssh = 22;
diff --git a/Omni/Cloud/Web.nix b/Omni/Cloud/Web.nix
index 79886c3..45afc4d 100644
--- a/Omni/Cloud/Web.nix
+++ b/Omni/Cloud/Web.nix
@@ -35,50 +35,6 @@ in {
port = ports.invidious;
};
- radicale = {
- enable = true;
- rights = {
- # Allow reading root collection for authenticated users
- root = {
- user = ".+";
- collection = "";
- permissions = "R";
- };
- # Allow reading and writing principal collection (same as username)
- principal = {
- user = ".+";
- collection = "{user}";
- permissions = "RW";
- };
- # Allow reading and writing calendars and address books that are direct
- # children of the principal collection
- calendars = {
- user = ".+";
- collection = "{user}/[^/]+";
- permissions = "rw";
- };
- # Allow any authenticated user to modify the public collection
- public = {
- user = ".*";
- collection = "public/.*";
- permissions = "rw";
- };
- };
- settings = {
- server = {
- hosts = [
- "0.0.0.0:${toString ports.radicale}"
- "[::]:${toString ports.radicale}"
- ];
- };
- auth = {
- type = "htpasswd";
- htpasswd_filename = "/etc/radicale/users";
- htpasswd_encryption = "plain";
- };
- };
- };
-
gmnisrv = {
enable = false;
listen = "0.0.0.0:${toString ports.gemini} [::]:${toString ports.gemini}";
@@ -86,7 +42,7 @@ in {
":tls" = {store = "/var/lib/gmnisrv";};
"bsima.me" = {"root" = "/var/web/ben";};
"${rootDomain}" = {
- "root" = "/var/web/simatime.com";
+ "root" = "/var/web/ben";
"cgi" = "on";
};
};
@@ -116,20 +72,28 @@ in {
serverName = rootDomain;
forceSSL = true;
enableACME = true;
+ serverAliases = [
+ "www.simatime.com"
+ "simatime.com"
+ "www.bsima.me"
+ "bsima.me"
+ ];
locations = {
# nostr nip-5 verification
- "/.well-known/nostr.json".return = "200 '${
- builtins.toJSON {
- names.bensima = "2fa4b9ba71b6dab17c4723745bb7850dfdafcb6ae1a8642f76f9c64fa5f43436";
- }
- }'";
+ #"/.well-known/nostr.json".return = "200 '${
+ # builtins.toJSON {
+ # names.bensima = "2fa4b9ba71b6dab17c4723745bb7850dfdafcb6ae1a8642f76f9c64fa5f43436";
+ # }
+ #}'";
"/" = {
- root = "/var/web/simatime.com";
+ root = "/var/web/ben";
+ index = "index.html index.htm";
extraConfig = ''
autoindex on;
'';
};
- # serve /~$USER paths
+ # serve /~$USER paths, yeah i'm the only user, but whatever this
+ # trick might be useful someday
"~ ^/~(.+?)(/.*)?$" = {
alias = "/var/web/$1$2";
index = "index.html index.htm";
@@ -140,26 +104,6 @@ in {
};
};
- "bensima.com" = {
- locations."/" = {
- root = "/var/web/ben";
- index = "index.html index.htm";
- extraConfig = ''
- autoindex on;
- '';
- };
- serverAliases = [
- "www.bensima.com"
- "www.bsima.me"
- "bsima.me"
- ];
- forceSSL = true;
- useACMEHost = rootDomain;
- };
-
- # Monica virtual host configuration is handled by the Monica service
- # Don't add a manual entry here to avoid conflicts
-
"hoogle.${rootDomain}" = {
locations."/".proxyPass = "http://${ports.bensIp}:${toString ports.hoogle}";
forceSSL = true;
@@ -172,34 +116,6 @@ in {
useACMEHost = rootDomain;
};
- "cal.${rootDomain}" = {
- locations."/".proxyPass = "http://localhost:${toString ports.radicale}";
- forceSSL = true;
- useACMEHost = rootDomain;
- extraConfig = ''
- proxy_set_header X-Script-Name /radicale;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header Host $host;
- proxy_pass_header Authorization;
- '';
- };
-
- "reddit.${rootDomain}" = {
- locations."/".proxyPass = "http://localhost:${toString ports.libreddit}";
- forceSSL = true;
- useACMEHost = rootDomain;
- };
- "www.reddit.${rootDomain}" = {
- forceSSL = true;
- useACMEHost = rootDomain;
- globalRedirect = "reddit.${rootDomain}";
- };
- "old.reddit.${rootDomain}" = {
- forceSSL = true;
- useACMEHost = rootDomain;
- globalRedirect = "reddit.${rootDomain}";
- };
-
"youtube.${rootDomain}" = {
locations."/".proxyPass = "http://localhost:${toString ports.invidious}";
forceSSL = true;
@@ -216,27 +132,6 @@ in {
globalRedirect = "youtube.${rootDomain}";
};
- "dandel-rovbur.${rootDomain}" = {
- locations."/".proxyPass = "http://${ports.bensIp}:${toString ports.dandel-rovbur}";
- forceSSL = true;
- useACMEHost = rootDomain;
- };
-
- "sabten.${rootDomain}" = {
- locations."/".proxyPass = "http://localhost:${toString ports.sabten}";
- forceSSL = true;
- useACMEHost = rootDomain;
- };
-
- "sd.${rootDomain}" = {
- forceSSL = true;
- useACMEHost = rootDomain;
- locations."/" = {
- proxyPass = "http://${ports.bensIp}:${toString ports.stableDiffusion}";
- proxyWebsockets = true;
- };
- };
-
"music.${rootDomain}" = {
forceSSL = true;
useACMEHost = rootDomain;
@@ -257,7 +152,7 @@ in {
};
};
- "notebook.${rootDomain}" = {
+ "jupyter.${rootDomain}" = {
forceSSL = true;
useACMEHost = rootDomain;
locations = {
@@ -275,6 +170,17 @@ in {
};
};
};
+
+ "aichat.${rootDomain}" = {
+ forceSSL = true;
+ useACMEHost = rootDomain;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:${toString ports.open-webui-aichat}";
+ proxyWebsockets = true;
+ };
+ };
+ };
};
};
};
@@ -283,26 +189,18 @@ in {
group = "nginx";
# This must contain all of the other domains we host
extraDomainNames =
- ["bensima.com" "www.bensima.com" "bsima.me" "www.bsima.me"]
+ ["simatime.com" "www.simatime.com" "bsima.me" "www.bsima.me"]
++ map (sub: "${sub}.${rootDomain}") [
- "music"
"tv"
"hoogle"
- "dandel-rovbur"
- "sabten"
"cal"
- "notebook"
- "nostr"
- "youtube"
- "www.youtube"
- "m.youtube"
- "sd"
- "gerrit"
+ "jupyter"
"git"
"monica"
# xmpp stuff
"upload"
"conference"
+ "aichat"
];
};
}
diff --git a/Omni/Cloud/Znc.nix b/Omni/Cloud/Znc.nix
index 5b927bc..036a14f 100644
--- a/Omni/Cloud/Znc.nix
+++ b/Omni/Cloud/Znc.nix
@@ -15,7 +15,7 @@ N.B.: generate znc passwords with 'nix-shell -p znc --command "znc --makepass"'
useLegacyConfig = false;
config = {
LoadModule = ["adminlog"];
- Motd = "welcome to znc.simatime.com";
+ Motd = "welcome to znc.bensima.com";
User.bsima = {
Admin = true;
Nick = "bsima";
diff --git a/Omni/Cloud/post-receive.sh b/Omni/Cloud/post-receive.sh
index 179fbd0..8df799f 100755
--- a/Omni/Cloud/post-receive.sh
+++ b/Omni/Cloud/post-receive.sh
@@ -6,11 +6,11 @@
# properly, so we have to manually deploy this like so:
#
# scp Omni/Cloud/post-receive \
-# root@simatime.com:/srv/git/.gitolite/hooks/common/post-receive
+# root@bensima.com:/srv/git/.gitolite/hooks/common/post-receive
#
# One time only:
#
-# ssh root@simatime.com "sudo -u git gitolite setup -ho"
+# ssh root@bensima.com "sudo -u git gitolite setup -ho"
#
# Also on first-time setup, might need to manually check the permissions are
# correct on $webroot/archive or wherever else.
@@ -22,13 +22,13 @@ do
then
repo=$(basename "$PWD" | sed 's/.git//g')
branch=$(git rev-parse --symbolic --abbrev-ref "$refname")
- webroot="/srv/www/simatime.com/"
+ webroot="/srv/www/bensima.com/"
outdir="$webroot/archive/$repo/$branch"
mkdir -p "$outdir"
- echo " making: https://simatime.com/archive/$repo/$branch/$newrev.tar.gz"
+ echo " making: https://git.bensima.com/archive/$repo/$branch/$newrev.tar.gz"
git archive "$branch" --prefix "$repo-$branch/" --format tar \
| gzip > "$outdir/$newrev.tar.gz"
- echo " making: https://simatime.com/archive/$repo/$branch/$newrev.sha256"
+ echo " making: https://git.bensima.com/archive/$repo/$branch/$newrev.sha256"
hash=$(nix-prefetch-url --unpack file://"$outdir"/"$newrev".tar.gz 2>/dev/null)
echo "$hash" > "$outdir/$newrev.sha256"
echo " commit: $newrev"
diff --git a/Omni/Deploy/Caddy.hs b/Omni/Deploy/Caddy.hs
new file mode 100644
index 0000000..6cedf92
--- /dev/null
+++ b/Omni/Deploy/Caddy.hs
@@ -0,0 +1,241 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE TypeApplications #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Caddy admin API integration for the mini-PaaS deployment system.
+--
+-- : out deploy-caddy
+-- : dep aeson
+-- : dep http-conduit
+-- : dep http-types
+module Omni.Deploy.Caddy
+ ( buildRoute,
+ getCurrentRoutes,
+ upsertRoute,
+ deleteRoute,
+ syncRoutes,
+ getRouteById,
+ caddyAdmin,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.Map as Map
+import qualified Data.Text as Text
+import qualified Network.HTTP.Simple as HTTP
+import qualified Network.HTTP.Types.Status as Status
+import Omni.Deploy.Manifest (Artifact (..), Exec (..), Hardening (..), Http (..), Service (..), Systemd (..))
+import qualified Omni.Test as Test
+
+caddyAdmin :: Text
+caddyAdmin = "http://localhost:2019"
+
+data Route = Route
+ { routeId :: Text,
+ routeMatch :: [RouteMatch],
+ routeHandle :: [RouteHandler],
+ routeTerminal :: Bool
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Route where
+ toJSON Route {..} =
+ Aeson.object
+ [ "@id" .= routeId,
+ "match" .= routeMatch,
+ "handle" .= routeHandle,
+ "terminal" .= routeTerminal
+ ]
+
+newtype RouteMatch = RouteMatch
+ { matchHost :: [Text]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON RouteMatch where
+ toJSON RouteMatch {..} =
+ Aeson.object ["host" .= matchHost]
+
+data RouteHandler = RouteHandler
+ { handlerType :: Text,
+ handlerUpstreams :: [Upstream]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON RouteHandler where
+ toJSON RouteHandler {..} =
+ Aeson.object
+ [ "handler" .= handlerType,
+ "upstreams" .= handlerUpstreams
+ ]
+
+newtype Upstream = Upstream
+ { upstreamDial :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.ToJSON Upstream where
+ toJSON Upstream {..} =
+ Aeson.object ["dial" .= upstreamDial]
+
+buildRoute :: Service -> Maybe Route
+buildRoute Service {..} = case serviceHttp of
+ Nothing -> Nothing
+ Just Http {..} ->
+ Just
+ <| Route
+ { routeId = "biz-" <> serviceName,
+ routeMatch = [RouteMatch [httpDomain]],
+ routeHandle =
+ [ RouteHandler
+ "reverse_proxy"
+ [Upstream <| "localhost:" <> tshow httpInternalPort]
+ ],
+ routeTerminal = True
+ }
+
+getCurrentRoutes :: Text -> IO [Aeson.Value]
+getCurrentRoutes adminUrl = do
+ let url = Text.unpack adminUrl <> "/config/apps/http/servers/srv0/routes"
+ request <- HTTP.parseRequest url
+ result <- try @SomeException <| HTTP.httpLBS request
+ case result of
+ Left _ -> pure []
+ Right response ->
+ if Status.statusIsSuccessful (HTTP.getResponseStatus response)
+ then case Aeson.decode (HTTP.getResponseBody response) of
+ Just routes -> pure routes
+ Nothing -> pure []
+ else pure []
+
+upsertRoute :: Text -> Service -> IO Bool
+upsertRoute adminUrl svc = case buildRoute svc of
+ Nothing -> pure False
+ Just route -> do
+ let routeId' = "biz-" <> serviceName svc
+ patchUrl = Text.unpack adminUrl <> "/id/" <> Text.unpack routeId'
+ postUrl = Text.unpack adminUrl <> "/config/apps/http/servers/srv0/routes"
+ body = Aeson.encode route
+
+ patchRequest <-
+ HTTP.parseRequest patchUrl
+ /> HTTP.setRequestMethod "PATCH"
+ /> HTTP.setRequestBodyLBS body
+ /> HTTP.setRequestHeader "Content-Type" ["application/json"]
+ patchResult <- try @SomeException <| HTTP.httpLBS patchRequest
+
+ case patchResult of
+ Right resp
+ | Status.statusIsSuccessful (HTTP.getResponseStatus resp) ->
+ pure True
+ _ -> do
+ postRequest <-
+ HTTP.parseRequest postUrl
+ /> HTTP.setRequestMethod "POST"
+ /> HTTP.setRequestBodyLBS body
+ /> HTTP.setRequestHeader "Content-Type" ["application/json"]
+ postResult <- try @SomeException <| HTTP.httpLBS postRequest
+ case postResult of
+ Right resp -> pure <| Status.statusIsSuccessful (HTTP.getResponseStatus resp)
+ Left _ -> pure False
+
+deleteRoute :: Text -> Text -> IO Bool
+deleteRoute adminUrl serviceName' = do
+ let routeId' = "biz-" <> serviceName'
+ url = Text.unpack adminUrl <> "/id/" <> Text.unpack routeId'
+ request <-
+ HTTP.parseRequest url
+ /> HTTP.setRequestMethod "DELETE"
+ result <- try @SomeException <| HTTP.httpLBS request
+ case result of
+ Right resp -> pure <| Status.statusIsSuccessful (HTTP.getResponseStatus resp)
+ Left _ -> pure False
+
+syncRoutes :: Text -> [Service] -> IO (Map Text Bool)
+syncRoutes adminUrl services = do
+ results <-
+ forM services <| \svc ->
+ case serviceHttp svc of
+ Nothing -> pure Nothing
+ Just _ -> do
+ success <- upsertRoute adminUrl svc
+ pure <| Just (serviceName svc, success)
+ pure <| Map.fromList <| catMaybes results
+
+getRouteById :: Text -> Text -> IO (Maybe Aeson.Value)
+getRouteById adminUrl routeId' = do
+ let url = Text.unpack adminUrl <> "/id/" <> Text.unpack routeId'
+ request <- HTTP.parseRequest url
+ result <- try @SomeException <| HTTP.httpLBS request
+ case result of
+ Right resp
+ | Status.statusIsSuccessful (HTTP.getResponseStatus resp) ->
+ pure <| Aeson.decode (HTTP.getResponseBody resp)
+ _ -> pure Nothing
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Deploy.Caddy"
+ [ test_buildRouteWithHttp,
+ test_buildRouteWithoutHttp,
+ test_buildRouteWithPath
+ ]
+
+mkTestService :: Text -> Text -> Maybe Http -> Service
+mkTestService name path http =
+ Service
+ { serviceName = name,
+ serviceArtifact = Artifact "nix-closure" path,
+ serviceHosts = ["biz"],
+ serviceExec = Exec Nothing "root" "root",
+ serviceEnv = mempty,
+ serviceEnvFile = Nothing,
+ serviceHttp = http,
+ serviceSystemd = Systemd ["network-online.target"] [] "on-failure" 5,
+ serviceHardening = Hardening False True "strict" True,
+ serviceRevision = Nothing
+ }
+
+test_buildRouteWithHttp :: Test.Tree
+test_buildRouteWithHttp =
+ Test.unit "builds route for service with HTTP" <| do
+ let svc = mkTestService "test-svc" "/nix/store/abc" (Just <| Http "example.com" "/" 8000)
+ case buildRoute svc of
+ Nothing -> Test.assertFailure "expected route"
+ Just route -> do
+ routeId route Test.@=? "biz-test-svc"
+ case (head <| routeMatch route, head <| routeHandle route) of
+ (Just m, Just h) -> do
+ matchHost m Test.@=? ["example.com"]
+ case head <| handlerUpstreams h of
+ Just u -> upstreamDial u Test.@=? "localhost:8000"
+ Nothing -> Test.assertFailure "no upstreams"
+ _ -> Test.assertFailure "no match/handle"
+
+test_buildRouteWithoutHttp :: Test.Tree
+test_buildRouteWithoutHttp =
+ Test.unit "returns Nothing for service without HTTP" <| do
+ let svc = mkTestService "worker" "/nix/store/xyz" Nothing
+ case buildRoute svc of
+ Nothing -> pure ()
+ Just _ -> Test.assertFailure "expected Nothing"
+
+test_buildRouteWithPath :: Test.Tree
+test_buildRouteWithPath =
+ Test.unit "builds route with custom path" <| do
+ let svc = mkTestService "api" "/nix/store/abc" (Just <| Http "api.example.com" "/v1" 8080)
+ case buildRoute svc of
+ Nothing -> Test.assertFailure "expected route"
+ Just route -> case head <| routeMatch route of
+ Nothing -> Test.assertFailure "no match"
+ Just m -> matchHost m Test.@=? ["api.example.com"]
+
+main :: IO ()
+main = Test.run test
diff --git a/Omni/Deploy/Deployer.hs b/Omni/Deploy/Deployer.hs
new file mode 100644
index 0000000..7e57b34
--- /dev/null
+++ b/Omni/Deploy/Deployer.hs
@@ -0,0 +1,317 @@
+{-# LANGUAGE DeriveAnyClass #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE DerivingStrategies #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Mini-PaaS deployer service.
+--
+-- Polls manifest from S3, compares to local state, pulls changed closures,
+-- generates systemd units, updates Caddy routes, and manages GC roots.
+--
+-- : out biz-deployer
+-- : dep aeson
+-- : dep amazonka
+-- : dep amazonka-core
+-- : dep amazonka-s3
+-- : dep directory
+-- : dep http-conduit
+-- : dep http-types
+-- : dep time
+module Omni.Deploy.Deployer
+ ( DeployerState (..),
+ loadState,
+ saveState,
+ pullClosure,
+ createGcRoot,
+ removeGcRoot,
+ deployService,
+ removeService,
+ reconcile,
+ runOnce,
+ runDaemon,
+ stateDir,
+ stateFile,
+ gcrootsDir,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import qualified Control.Concurrent as Concurrent
+import qualified Data.Aeson as Aeson
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Map as Map
+import qualified Data.Set as Set
+import qualified Data.Text as Text
+import qualified Network.HostName as HostName
+import qualified Omni.Cli as Cli
+import qualified Omni.Deploy.Caddy as Caddy
+import qualified Omni.Deploy.Manifest as Manifest
+import qualified Omni.Deploy.Systemd as Systemd
+import qualified Omni.Log as Log
+import qualified Omni.Test as Test
+import qualified System.Directory as Dir
+import qualified System.Exit as Exit
+import System.FilePath ((</>))
+import qualified System.Process as Process
+
+stateDir :: FilePath
+stateDir = "/var/lib/biz-deployer"
+
+stateFile :: FilePath
+stateFile = stateDir </> "state.json"
+
+gcrootsDir :: FilePath
+gcrootsDir = "/nix/var/nix/gcroots/biz"
+
+s3Url :: String
+s3Url = "s3://omni-nix-cache?profile=digitalocean&scheme=https&endpoint=nyc3.digitaloceanspaces.com"
+
+newtype DeployerState = DeployerState
+ { stateServices :: Map Text Text
+ }
+ deriving (Show, Eq, Generic)
+ deriving anyclass (Aeson.FromJSON, Aeson.ToJSON)
+
+emptyState :: DeployerState
+emptyState = DeployerState mempty
+
+loadState :: IO DeployerState
+loadState = do
+ exists <- Dir.doesFileExist stateFile
+ if exists
+ then do
+ contents <- BL.readFile stateFile
+ case Aeson.eitherDecode contents of
+ Left _ -> pure emptyState
+ Right s -> pure s
+ else pure emptyState
+
+saveState :: DeployerState -> IO ()
+saveState st = do
+ Dir.createDirectoryIfMissing True stateDir
+ BL.writeFile stateFile (Aeson.encode st)
+
+getHostname :: IO Text
+getHostname = HostName.getHostName /> Text.pack
+
+pullClosure :: Text -> IO Bool
+pullClosure storePath = do
+ -- First check if the path already exists locally
+ exists <- Dir.doesDirectoryExist (Text.unpack storePath)
+ if exists
+ then do
+ Log.info ["deployer", "path already exists locally", storePath]
+ pure True
+ else do
+ (exitCode, _, stderr') <-
+ Process.readProcessWithExitCode
+ "nix"
+ [ "copy",
+ "--extra-experimental-features",
+ "nix-command",
+ "--from",
+ s3Url,
+ Text.unpack storePath
+ ]
+ ""
+ case exitCode of
+ Exit.ExitSuccess -> pure True
+ Exit.ExitFailure _ -> do
+ Log.fail ["deployer", "pull failed", storePath, Text.pack stderr']
+ pure False
+
+createGcRoot :: Text -> Text -> IO FilePath
+createGcRoot serviceName storePath = do
+ Dir.createDirectoryIfMissing True gcrootsDir
+ let rootPath = gcrootsDir </> Text.unpack serviceName
+ exists <- Dir.doesPathExist rootPath
+ when exists <| Dir.removeFile rootPath
+ Dir.createFileLink (Text.unpack storePath) rootPath
+ pure rootPath
+
+removeGcRoot :: Text -> IO ()
+removeGcRoot serviceName = do
+ let rootPath = gcrootsDir </> Text.unpack serviceName
+ exists <- Dir.doesPathExist rootPath
+ when exists <| Dir.removeFile rootPath
+
+deployService :: Manifest.Service -> DeployerState -> IO (Bool, DeployerState)
+deployService svc st = do
+ let name = Manifest.serviceName svc
+ path = Manifest.storePath (Manifest.serviceArtifact svc)
+
+ -- Check what's actually running in systemd instead of in-memory state
+ runningPath <- Systemd.getRunningStorePath name
+
+ if runningPath == Just path
+ then do
+ Log.info ["deployer", name, "already at", path]
+ pure (True, st)
+ else do
+ Log.info ["deployer", "deploying", name, fromMaybe "new" runningPath, "->", path]
+
+ pulled <- pullClosure path
+ if don't pulled
+ then do
+ Log.fail ["deployer", "failed to pull", name]
+ pure (False, st)
+ else do
+ _ <- createGcRoot name path
+
+ _ <- Systemd.writeUnit Systemd.servicesDir svc
+ _ <- Systemd.createSymlink Systemd.servicesDir "/run/systemd/system" svc
+ Systemd.reloadAndRestart name
+
+ case Manifest.serviceHttp svc of
+ Just _ -> void <| Caddy.upsertRoute Caddy.caddyAdmin svc
+ Nothing -> pure ()
+
+ let newSt = st {stateServices = Map.insert name path (stateServices st)}
+ Log.good ["deployer", "deployed", name]
+ pure (True, newSt)
+
+removeService :: Text -> DeployerState -> IO DeployerState
+removeService svcName st = do
+ Log.info ["deployer", "removing", svcName]
+
+ Systemd.stopAndDisable svcName
+ Systemd.removeUnit Systemd.servicesDir "/run/systemd/system" svcName
+ _ <- Caddy.deleteRoute Caddy.caddyAdmin svcName
+ removeGcRoot svcName
+
+ pure <| st {stateServices = Map.delete svcName (stateServices st)}
+
+reconcile :: Manifest.Manifest -> DeployerState -> IO DeployerState
+reconcile manifest st = do
+ hostname <- getHostname
+
+ let mfstServices =
+ Set.fromList
+ [ Manifest.serviceName svc
+ | svc <- Manifest.manifestServices manifest,
+ hostname `elem` Manifest.serviceHosts svc
+ ]
+ localServices = Set.fromList <| Map.keys (stateServices st)
+ toRemove = localServices Set.\\ mfstServices
+
+ st' <- foldM (flip removeService) st (Set.toList toRemove)
+
+ foldM
+ ( \s svc ->
+ if hostname `elem` Manifest.serviceHosts svc
+ then do
+ (_, newSt) <- deployService svc s
+ pure newSt
+ else pure s
+ )
+ st'
+ (Manifest.manifestServices manifest)
+
+runOnce :: IO Bool
+runOnce = do
+ Log.info ["deployer", "starting reconciliation"]
+
+ manifest <- Manifest.loadManifestFromS3
+ case manifest of
+ Nothing -> do
+ Log.warn ["deployer", "no manifest found in S3"]
+ pure False
+ Just m -> do
+ st <- loadState
+ st' <- reconcile m st
+ saveState st'
+ Log.good ["deployer", "reconciliation complete"]
+ pure True
+
+runDaemon :: Int -> IO ()
+runDaemon intervalSeconds = do
+ Log.info ["deployer", "starting daemon", "interval=" <> tshow intervalSeconds <> "s"]
+ forever <| do
+ result <- try runOnce
+ case result of
+ Left (e :: SomeException) ->
+ Log.fail ["deployer", "error in reconciliation", tshow e]
+ Right _ -> pure ()
+ Concurrent.threadDelay (intervalSeconds * 1_000_000)
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+biz-deployer - Mini-PaaS deployment agent
+
+Usage:
+ biz-deployer test
+ biz-deployer once
+ biz-deployer daemon [<interval>]
+ biz-deployer status
+ biz-deployer (-h | --help)
+
+Commands:
+ test Run tests
+ once Run a single reconciliation cycle
+ daemon Run as daemon with interval in seconds (default: 300)
+ status Show current deployer state
+
+Options:
+ -h --help Show this help
+|]
+
+move :: Cli.Arguments -> IO ()
+move args
+ | args `Cli.has` Cli.command "once" = do
+ success <- runOnce
+ if success
+ then Exit.exitSuccess
+ else Exit.exitWith (Exit.ExitFailure 1)
+ | args `Cli.has` Cli.command "daemon" = do
+ let interval =
+ Cli.getArg args (Cli.argument "interval")
+ +> readMaybe
+ |> fromMaybe 300
+ runDaemon interval
+ | args `Cli.has` Cli.command "status" = do
+ st <- loadState
+ BL.putStr <| Aeson.encode st
+ putStrLn ("" :: String)
+ | otherwise = do
+ Log.fail ["deployer", "unknown command"]
+ Exit.exitWith (Exit.ExitFailure 1)
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Deploy.Deployer"
+ [ test_emptyState,
+ test_stateJsonRoundtrip
+ ]
+
+test_emptyState :: Test.Tree
+test_emptyState =
+ Test.unit "empty state has no services" <| do
+ let st = emptyState
+ Map.null (stateServices st) Test.@=? True
+
+test_stateJsonRoundtrip :: Test.Tree
+test_stateJsonRoundtrip =
+ Test.unit "state JSON roundtrip" <| do
+ let testState =
+ DeployerState
+ { stateServices =
+ Map.fromList
+ [ ("svc-a", "/nix/store/abc"),
+ ("svc-b", "/nix/store/xyz")
+ ]
+ }
+ let encoded = Aeson.encode testState
+ case Aeson.eitherDecode encoded of
+ Left err -> Test.assertFailure err
+ Right decoded -> stateServices decoded Test.@=? stateServices testState
+
+main :: IO ()
+main = Cli.main <| Cli.Plan help move test pure
diff --git a/Omni/Deploy/Deployer.nix b/Omni/Deploy/Deployer.nix
new file mode 100644
index 0000000..091b43b
--- /dev/null
+++ b/Omni/Deploy/Deployer.nix
@@ -0,0 +1,104 @@
+{
+ options,
+ lib,
+ config,
+ pkgs,
+ ...
+}: let
+ cfg = config.services.biz-deployer;
+in {
+ options.services.biz-deployer = {
+ enable = lib.mkEnableOption "Enable the biz-deployer mini-PaaS service";
+
+ package = lib.mkOption {
+ type = lib.types.package;
+ description = "The biz-deployer package to use";
+ };
+
+ manifestPackage = lib.mkOption {
+ type = lib.types.package;
+ description = "The deploy-manifest package for CLI operations";
+ };
+
+ interval = lib.mkOption {
+ type = lib.types.int;
+ default = 300;
+ description = "Interval in seconds between reconciliation cycles";
+ };
+
+ stateDir = lib.mkOption {
+ type = lib.types.path;
+ default = "/var/lib/biz-deployer";
+ description = "Directory for deployer state and generated unit files";
+ };
+
+ secretsDir = lib.mkOption {
+ type = lib.types.path;
+ default = "/var/lib/biz-secrets";
+ description = "Directory containing service secret .env files";
+ };
+
+ gcrootsDir = lib.mkOption {
+ type = lib.types.path;
+ default = "/nix/var/nix/gcroots/biz";
+ description = "Directory for GC roots to prevent closure garbage collection";
+ };
+ };
+
+ config = lib.mkIf cfg.enable {
+ # Create required directories
+ systemd.tmpfiles.rules = [
+ "d ${cfg.stateDir} 0755 root root -"
+ "d ${cfg.stateDir}/services 0755 root root -"
+ "d ${cfg.secretsDir} 0700 root root -"
+ "d ${cfg.gcrootsDir} 0755 root root -"
+ ];
+
+ # The deployer service runs as a timer-triggered oneshot
+ systemd.services.biz-deployer = {
+ description = "Mini-PaaS deployment agent";
+ after = ["network-online.target"];
+ wants = ["network-online.target"];
+ path = [cfg.package cfg.manifestPackage pkgs.nix pkgs.awscli2];
+
+ serviceConfig = {
+ Type = "oneshot";
+ ExecStart = "${cfg.package}/bin/biz-deployer once";
+ Environment = [
+ "HOME=/root"
+ "AWS_SHARED_CREDENTIALS_FILE=/root/.aws/credentials"
+ ];
+
+ # Note: Hardening disabled because deployer needs write access to
+ # /etc/systemd/system, /nix/store, /nix/var, /root/.cache/nix
+ PrivateTmp = true;
+ };
+ };
+
+ # Timer to run deployer every N seconds
+ systemd.timers.biz-deployer = {
+ description = "Timer for biz-deployer reconciliation";
+ wantedBy = ["timers.target"];
+ timerConfig = {
+ OnBootSec = "1min";
+ OnUnitActiveSec = "${toString cfg.interval}s";
+ Unit = "biz-deployer.service";
+ };
+ };
+
+ # Caddy reverse proxy for deployed services
+ # TODO: Generate this dynamically from manifest in the future
+ services.caddy = {
+ enable = true;
+ globalConfig = ''
+ admin localhost:2019
+ '';
+ virtualHosts."podcastitlater.bensima.com".extraConfig = ''
+ reverse_proxy localhost:8000
+ '';
+ };
+
+ # Open firewall for HTTP/HTTPS
+ networking.firewall.allowedTCPPorts = [80 443];
+ };
+}
diff --git a/Omni/Deploy/Manifest.hs b/Omni/Deploy/Manifest.hs
new file mode 100644
index 0000000..e0d0b78
--- /dev/null
+++ b/Omni/Deploy/Manifest.hs
@@ -0,0 +1,673 @@
+{-# LANGUAGE DeriveAnyClass #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Manifest schema and S3 operations for the mini-PaaS deployment system.
+--
+-- Uses aws CLI for S3 operations (simpler than amazonka, already available).
+--
+-- : out deploy-manifest
+-- : dep aeson
+-- : dep time
+-- : dep directory
+-- : dep temporary
+-- : run awscli2
+module Omni.Deploy.Manifest
+ ( Artifact (..),
+ Exec (..),
+ Http (..),
+ Systemd (..),
+ Hardening (..),
+ Service (..),
+ Manifest (..),
+ findService,
+ updateService,
+ createEmptyManifest,
+ loadManifestFromS3,
+ saveManifestToS3,
+ archiveManifest,
+ listArchivedManifests,
+ rollbackToManifest,
+ s3Bucket,
+ s3Endpoint,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import Data.Aeson ((.!=), (.:), (.:?), (.=))
+import qualified Data.Aeson as Aeson
+import qualified Data.ByteString.Lazy as BL
+import qualified Data.Text as Text
+import qualified Data.Text.Encoding as TE
+import Data.Time (UTCTime, getCurrentTime)
+import Data.Time.Format.ISO8601 (iso8601Show)
+import qualified Omni.Cli as Cli
+import qualified Omni.Log as Log
+import qualified Omni.Test as Test
+import qualified System.Exit as Exit
+import qualified System.IO as IO
+import qualified System.IO.Temp as Temp
+import qualified System.Process as Process
+
+s3Bucket :: Text
+s3Bucket = "omni-nix-cache"
+
+s3Endpoint :: Text
+s3Endpoint = "https://nyc3.digitaloceanspaces.com"
+
+data Artifact = Artifact
+ { artifactType :: Text,
+ storePath :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Artifact where
+ parseJSON =
+ Aeson.withObject "Artifact" <| \o ->
+ Artifact
+ </ (o .:? "type" .!= "nix-closure")
+ <*> o
+ .: "storePath"
+
+instance Aeson.ToJSON Artifact where
+ toJSON Artifact {..} =
+ Aeson.object
+ [ "type" .= artifactType,
+ "storePath" .= storePath
+ ]
+
+data Exec = Exec
+ { execCommand :: Maybe Text,
+ execUser :: Text,
+ execGroup :: Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Exec where
+ parseJSON =
+ Aeson.withObject "Exec" <| \o ->
+ Exec
+ </ (o .:? "command")
+ <*> o
+ .:? "user"
+ .!= "root"
+ <*> o
+ .:? "group"
+ .!= "root"
+
+instance Aeson.ToJSON Exec where
+ toJSON Exec {..} =
+ Aeson.object
+ [ "command" .= execCommand,
+ "user" .= execUser,
+ "group" .= execGroup
+ ]
+
+defaultExec :: Exec
+defaultExec = Exec Nothing "root" "root"
+
+data Http = Http
+ { httpDomain :: Text,
+ httpPath :: Text,
+ httpInternalPort :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Http where
+ parseJSON =
+ Aeson.withObject "Http" <| \o ->
+ Http
+ </ (o .: "domain")
+ <*> o
+ .:? "path"
+ .!= "/"
+ <*> o
+ .: "internalPort"
+
+instance Aeson.ToJSON Http where
+ toJSON Http {..} =
+ Aeson.object
+ [ "domain" .= httpDomain,
+ "path" .= httpPath,
+ "internalPort" .= httpInternalPort
+ ]
+
+data Systemd = Systemd
+ { systemdAfter :: [Text],
+ systemdRequires :: [Text],
+ systemdRestart :: Text,
+ systemdRestartSec :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Systemd where
+ parseJSON =
+ Aeson.withObject "Systemd" <| \o ->
+ Systemd
+ </ (o .:? "after" .!= ["network-online.target"])
+ <*> o
+ .:? "requires"
+ .!= []
+ <*> o
+ .:? "restart"
+ .!= "on-failure"
+ <*> o
+ .:? "restartSec"
+ .!= 5
+
+instance Aeson.ToJSON Systemd where
+ toJSON Systemd {..} =
+ Aeson.object
+ [ "after" .= systemdAfter,
+ "requires" .= systemdRequires,
+ "restart" .= systemdRestart,
+ "restartSec" .= systemdRestartSec
+ ]
+
+defaultSystemd :: Systemd
+defaultSystemd = Systemd ["network-online.target"] [] "on-failure" 5
+
+data Hardening = Hardening
+ { hardeningDynamicUser :: Bool,
+ hardeningPrivateTmp :: Bool,
+ hardeningProtectSystem :: Text,
+ hardeningProtectHome :: Bool
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Hardening where
+ parseJSON =
+ Aeson.withObject "Hardening" <| \o ->
+ Hardening
+ </ (o .:? "dynamicUser" .!= False)
+ <*> o
+ .:? "privateTmp"
+ .!= True
+ <*> o
+ .:? "protectSystem"
+ .!= "strict"
+ <*> o
+ .:? "protectHome"
+ .!= True
+
+instance Aeson.ToJSON Hardening where
+ toJSON Hardening {..} =
+ Aeson.object
+ [ "dynamicUser" .= hardeningDynamicUser,
+ "privateTmp" .= hardeningPrivateTmp,
+ "protectSystem" .= hardeningProtectSystem,
+ "protectHome" .= hardeningProtectHome
+ ]
+
+defaultHardening :: Hardening
+defaultHardening = Hardening False True "strict" True
+
+data Service = Service
+ { serviceName :: Text,
+ serviceArtifact :: Artifact,
+ serviceHosts :: [Text],
+ serviceExec :: Exec,
+ serviceEnv :: Map Text Text,
+ serviceEnvFile :: Maybe Text,
+ serviceHttp :: Maybe Http,
+ serviceSystemd :: Systemd,
+ serviceHardening :: Hardening,
+ serviceRevision :: Maybe Text
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Service where
+ parseJSON =
+ Aeson.withObject "Service" <| \o ->
+ Service
+ </ (o .: "name")
+ <*> o
+ .: "artifact"
+ <*> o
+ .:? "hosts"
+ .!= ["biz"]
+ <*> o
+ .:? "exec"
+ .!= defaultExec
+ <*> o
+ .:? "env"
+ .!= mempty
+ <*> o
+ .:? "envFile"
+ <*> o
+ .:? "http"
+ <*> o
+ .:? "systemd"
+ .!= defaultSystemd
+ <*> o
+ .:? "hardening"
+ .!= defaultHardening
+ <*> o
+ .:? "revision"
+
+instance Aeson.ToJSON Service where
+ toJSON Service {..} =
+ Aeson.object
+ [ "name" .= serviceName,
+ "artifact" .= serviceArtifact,
+ "hosts" .= serviceHosts,
+ "exec" .= serviceExec,
+ "env" .= serviceEnv,
+ "envFile" .= serviceEnvFile,
+ "http" .= serviceHttp,
+ "systemd" .= serviceSystemd,
+ "hardening" .= serviceHardening,
+ "revision" .= serviceRevision
+ ]
+
+data Manifest = Manifest
+ { manifestVersion :: Int,
+ manifestGeneration :: UTCTime,
+ manifestServices :: [Service]
+ }
+ deriving (Show, Eq, Generic)
+
+instance Aeson.FromJSON Manifest where
+ parseJSON =
+ Aeson.withObject "Manifest" <| \o ->
+ Manifest
+ </ (o .:? "version" .!= 1)
+ <*> o
+ .: "generation"
+ <*> o
+ .:? "services"
+ .!= []
+
+instance Aeson.ToJSON Manifest where
+ toJSON Manifest {..} =
+ Aeson.object
+ [ "version" .= manifestVersion,
+ "generation" .= manifestGeneration,
+ "services" .= manifestServices
+ ]
+
+findService :: Text -> Manifest -> Maybe Service
+findService name manifest =
+ find (\s -> serviceName s == name) (manifestServices manifest)
+
+updateService :: Text -> Text -> Maybe Text -> Manifest -> Either Text Manifest
+updateService name newStorePath revision manifest =
+ case findService name manifest of
+ Nothing -> Left <| "Service '" <> name <> "' not found in manifest"
+ Just _ -> Right <| manifest {manifestServices = updatedServices}
+ where
+ updatedServices = map updateIfMatch (manifestServices manifest)
+ updateIfMatch svc
+ | serviceName svc == name =
+ svc
+ { serviceArtifact = (serviceArtifact svc) {storePath = newStorePath},
+ serviceRevision = revision
+ }
+ | otherwise = svc
+
+createEmptyManifest :: IO Manifest
+createEmptyManifest = do
+ now <- getCurrentTime
+ pure <| Manifest 1 now []
+
+awsS3Args :: [String]
+awsS3Args =
+ [ "--endpoint-url",
+ Text.unpack s3Endpoint,
+ "--profile",
+ "digitalocean"
+ ]
+
+s3Get :: Text -> FilePath -> IO Bool
+s3Get key destPath = do
+ let url = "s3://" <> Text.unpack s3Bucket <> "/" <> Text.unpack key
+ args = ["s3", "cp"] ++ awsS3Args ++ [url, destPath]
+ (exitCode, _, _) <- Process.readProcessWithExitCode "aws" args ""
+ pure <| exitCode == Exit.ExitSuccess
+
+s3Put :: FilePath -> Text -> IO Bool
+s3Put srcPath key = do
+ let url = "s3://" <> Text.unpack s3Bucket <> "/" <> Text.unpack key
+ args = ["s3", "cp"] ++ awsS3Args ++ [srcPath, url]
+ (exitCode, _, _) <- Process.readProcessWithExitCode "aws" args ""
+ pure <| exitCode == Exit.ExitSuccess
+
+s3List :: Text -> IO [Text]
+s3List prefix = do
+ let url = "s3://" <> Text.unpack s3Bucket <> "/" <> Text.unpack prefix
+ args = ["s3", "ls"] ++ awsS3Args ++ [url]
+ (exitCode, stdout', _) <- Process.readProcessWithExitCode "aws" args ""
+ case exitCode of
+ Exit.ExitSuccess ->
+ pure <| parseS3ListOutput (Text.pack stdout')
+ Exit.ExitFailure _ -> pure []
+
+parseS3ListOutput :: Text -> [Text]
+parseS3ListOutput output =
+ output
+ |> Text.lines
+ |> map extractFilename
+ |> filter (not <. Text.null)
+ where
+ extractFilename line =
+ case Text.words line of
+ [_, _, _, filename] -> filename
+ _ -> ""
+
+loadManifestFromS3 :: IO (Maybe Manifest)
+loadManifestFromS3 = loadManifestFromS3' "manifest.json"
+
+loadManifestFromS3' :: Text -> IO (Maybe Manifest)
+loadManifestFromS3' key = do
+ Temp.withSystemTempFile "manifest.json" <| \tmpPath tmpHandle -> do
+ IO.hClose tmpHandle
+ success <- s3Get key tmpPath
+ if success
+ then do
+ contents <- BL.readFile tmpPath
+ case Aeson.eitherDecode contents of
+ Left _ -> pure Nothing
+ Right manifest -> pure <| Just manifest
+ else pure Nothing
+
+archiveManifest :: Manifest -> IO Text
+archiveManifest manifest = do
+ let timestamp =
+ iso8601Show (manifestGeneration manifest)
+ |> filter (\c -> c /= ':' && c /= '-')
+ |> Text.pack
+ archiveKey = "manifests/manifest-" <> timestamp <> ".json"
+ Temp.withSystemTempFile "manifest.json" <| \tmpPath tmpHandle -> do
+ BL.hPut tmpHandle (Aeson.encode manifest)
+ IO.hClose tmpHandle
+ _ <- s3Put tmpPath archiveKey
+ pure archiveKey
+
+listArchivedManifests :: IO [Text]
+listArchivedManifests = do
+ files <- s3List "manifests/"
+ pure <| filter (Text.isSuffixOf ".json") files
+
+rollbackToManifest :: Text -> IO Bool
+rollbackToManifest archiveKey = do
+ let fullKey =
+ if "manifests/" `Text.isPrefixOf` archiveKey
+ then archiveKey
+ else "manifests/" <> archiveKey
+ archived <- loadManifestFromS3' fullKey
+ case archived of
+ Nothing -> pure False
+ Just manifest -> do
+ saveManifestToS3 manifest
+ pure True
+
+saveManifestToS3 :: Manifest -> IO ()
+saveManifestToS3 = saveManifestToS3' "manifest.json"
+
+saveManifestToS3' :: Text -> Manifest -> IO ()
+saveManifestToS3' key manifest = do
+ existing <- loadManifestFromS3' key
+ case existing of
+ Just old -> void <| archiveManifest old
+ Nothing -> pure ()
+
+ now <- getCurrentTime
+ let updatedManifest = manifest {manifestGeneration = now}
+ Temp.withSystemTempFile "manifest.json" <| \tmpPath tmpHandle -> do
+ BL.hPut tmpHandle (Aeson.encode updatedManifest)
+ IO.hClose tmpHandle
+ _ <- s3Put tmpPath key
+ pure ()
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+deploy-manifest - Manage deployment manifest in S3
+
+Usage:
+ deploy-manifest test
+ deploy-manifest init
+ deploy-manifest show
+ deploy-manifest update <name> <store-path> [<revision>]
+ deploy-manifest add-service <json>
+ deploy-manifest list
+ deploy-manifest rollback <archive>
+ deploy-manifest (-h | --help)
+
+Commands:
+ test Run tests
+ init Initialize empty manifest in S3
+ show Show current manifest
+ update Update service store path in manifest
+ add-service Add a new service from JSON
+ list List archived manifest generations
+ rollback Restore a previous manifest version
+
+Options:
+ -h --help Show this help
+|]
+
+move :: Cli.Arguments -> IO ()
+move args
+ | args `Cli.has` Cli.command "init" = do
+ existing <- loadManifestFromS3
+ case existing of
+ Just _ -> do
+ Log.fail ["manifest", "already exists"]
+ Exit.exitWith (Exit.ExitFailure 1)
+ Nothing -> do
+ manifest <- createEmptyManifest
+ saveManifestToS3 manifest
+ Log.good ["manifest", "initialized empty manifest"]
+ | args `Cli.has` Cli.command "show" = do
+ manifest <- loadManifestFromS3
+ case manifest of
+ Nothing -> putStrLn ("no manifest found" :: String)
+ Just m -> BL.putStr <| Aeson.encode m
+ | args `Cli.has` Cli.command "update" = do
+ let name =
+ Cli.getArg args (Cli.argument "name")
+ |> fromMaybe ""
+ |> Text.pack
+ storePath' =
+ Cli.getArg args (Cli.argument "store-path")
+ |> fromMaybe ""
+ |> Text.pack
+ revision =
+ Cli.getArg args (Cli.argument "revision")
+ /> Text.pack
+ manifest <- loadManifestFromS3
+ case manifest of
+ Nothing -> do
+ Log.fail ["manifest", "no manifest found in S3"]
+ Exit.exitWith (Exit.ExitFailure 1)
+ Just m -> case updateService name storePath' revision m of
+ Left err -> do
+ Log.fail ["manifest", err]
+ Exit.exitWith (Exit.ExitFailure 1)
+ Right updated -> do
+ saveManifestToS3 updated
+ Log.good ["manifest", "updated", name, "->", storePath']
+ | args `Cli.has` Cli.command "add-service" = do
+ let jsonStr =
+ Cli.getArg args (Cli.argument "json")
+ |> fromMaybe ""
+ case Aeson.eitherDecode (BL.fromStrict <| TE.encodeUtf8 <| Text.pack jsonStr) of
+ Left err -> do
+ Log.fail ["manifest", "invalid JSON:", Text.pack err]
+ Exit.exitWith (Exit.ExitFailure 1)
+ Right svc -> do
+ manifest <- loadManifestFromS3
+ m <- maybe createEmptyManifest pure manifest
+ case findService (serviceName svc) m of
+ Just _ -> do
+ Log.fail ["manifest", "service already exists:", serviceName svc]
+ Exit.exitWith (Exit.ExitFailure 1)
+ Nothing -> do
+ let newManifest = m {manifestServices = manifestServices m ++ [svc]}
+ saveManifestToS3 newManifest
+ Log.good ["manifest", "added service", serviceName svc]
+ | args `Cli.has` Cli.command "list" = do
+ archives <- listArchivedManifests
+ if null archives
+ then putStrLn ("no archived manifests found" :: String)
+ else
+ forM_ archives <| \archive -> do
+ putStrLn <| Text.unpack archive
+ | args `Cli.has` Cli.command "rollback" = do
+ let archive =
+ Cli.getArg args (Cli.argument "archive")
+ |> fromMaybe ""
+ |> Text.pack
+ success <- rollbackToManifest archive
+ if success
+ then Log.good ["manifest", "rolled back to", archive]
+ else do
+ Log.fail ["manifest", "failed to rollback to", archive]
+ Exit.exitWith (Exit.ExitFailure 1)
+ | otherwise = do
+ Log.fail ["manifest", "unknown command"]
+ Exit.exitWith (Exit.ExitFailure 1)
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Deploy.Manifest"
+ [ test_artifactDefaults,
+ test_serviceDefaults,
+ test_manifestJsonRoundtrip,
+ test_updateService,
+ test_findService
+ ]
+
+test_artifactDefaults :: Test.Tree
+test_artifactDefaults =
+ Test.unit "artifact defaults type to nix-closure" <| do
+ let json = "{\"storePath\": \"/nix/store/abc123\"}"
+ case Aeson.eitherDecode json of
+ Left err -> Test.assertFailure err
+ Right (artifact :: Artifact) ->
+ artifactType artifact Test.@=? "nix-closure"
+
+test_serviceDefaults :: Test.Tree
+test_serviceDefaults =
+ Test.unit "service has correct defaults" <| do
+ let json = "{\"name\": \"test-svc\", \"artifact\": {\"storePath\": \"/nix/store/xyz\"}}"
+ case Aeson.eitherDecode json of
+ Left err -> Test.assertFailure err
+ Right (svc :: Service) -> do
+ serviceHosts svc Test.@=? ["biz"]
+ execUser (serviceExec svc) Test.@=? "root"
+ systemdRestart (serviceSystemd svc) Test.@=? "on-failure"
+ hardeningPrivateTmp (serviceHardening svc) Test.@=? True
+
+test_manifestJsonRoundtrip :: Test.Tree
+test_manifestJsonRoundtrip =
+ Test.unit "manifest JSON roundtrip" <| do
+ now <- getCurrentTime
+ let manifest =
+ Manifest
+ { manifestVersion = 1,
+ manifestGeneration = now,
+ manifestServices =
+ [ Service
+ { serviceName = "test-svc",
+ serviceArtifact = Artifact "nix-closure" "/nix/store/abc123",
+ serviceHosts = ["biz"],
+ serviceExec = defaultExec,
+ serviceEnv = mempty,
+ serviceEnvFile = Nothing,
+ serviceHttp = Just (Http "example.com" "/" 8000),
+ serviceSystemd = defaultSystemd,
+ serviceHardening = defaultHardening,
+ serviceRevision = Nothing
+ }
+ ]
+ }
+ encoded = Aeson.encode manifest
+ case Aeson.eitherDecode encoded of
+ Left err -> Test.assertFailure err
+ Right decoded -> do
+ length (manifestServices decoded) Test.@=? 1
+ case head <| manifestServices decoded of
+ Nothing -> Test.assertFailure "no services"
+ Just svc -> serviceName svc Test.@=? "test-svc"
+
+test_updateService :: Test.Tree
+test_updateService =
+ Test.unit "updateService updates store path" <| do
+ now <- getCurrentTime
+ let manifest =
+ Manifest
+ { manifestVersion = 1,
+ manifestGeneration = now,
+ manifestServices =
+ [ Service
+ "svc-a"
+ (Artifact "nix-closure" "/nix/store/old")
+ ["biz"]
+ defaultExec
+ mempty
+ Nothing
+ Nothing
+ defaultSystemd
+ defaultHardening
+ Nothing,
+ Service
+ "svc-b"
+ (Artifact "nix-closure" "/nix/store/other")
+ ["biz"]
+ defaultExec
+ mempty
+ Nothing
+ Nothing
+ defaultSystemd
+ defaultHardening
+ Nothing
+ ]
+ }
+ case updateService "svc-a" "/nix/store/new" (Just "abc123") manifest of
+ Left err -> Test.assertFailure (Text.unpack err)
+ Right updated -> case head <| manifestServices updated of
+ Nothing -> Test.assertFailure "no services"
+ Just svcA -> do
+ storePath (serviceArtifact svcA) Test.@=? "/nix/store/new"
+ serviceRevision svcA Test.@=? Just "abc123"
+
+test_findService :: Test.Tree
+test_findService =
+ Test.unit "findService finds service by name" <| do
+ now <- getCurrentTime
+ let manifest =
+ Manifest
+ { manifestVersion = 1,
+ manifestGeneration = now,
+ manifestServices =
+ [ Service
+ "svc-a"
+ (Artifact "nix-closure" "/nix/store/a")
+ ["biz"]
+ defaultExec
+ mempty
+ Nothing
+ Nothing
+ defaultSystemd
+ defaultHardening
+ Nothing
+ ]
+ }
+ case findService "svc-a" manifest of
+ Nothing -> Test.assertFailure "service not found"
+ Just svc -> serviceName svc Test.@=? "svc-a"
+ case findService "nonexistent" manifest of
+ Nothing -> pure ()
+ Just _ -> Test.assertFailure "found nonexistent service"
+
+main :: IO ()
+main = Cli.main <| Cli.Plan help move test pure
diff --git a/Omni/Deploy/PLAN.md b/Omni/Deploy/PLAN.md
new file mode 100644
index 0000000..1870ebd
--- /dev/null
+++ b/Omni/Deploy/PLAN.md
@@ -0,0 +1,299 @@
+# Mini-PaaS Deployment System
+
+## Overview
+
+A pull-based deployment system that allows deploying Nix-built services without full NixOS rebuilds. Services are defined in a manifest, pulled from an S3 binary cache, and managed as systemd units with Caddy for reverse proxying.
+
+## Problem Statement
+
+Current deployment (`push.sh` + full NixOS rebuild) is slow and heavyweight:
+- Every service change requires rebuilding the entire NixOS configuration
+- Adding a new service requires modifying Biz.nix and doing a full rebuild
+- Deploy time from "code ready" to "running in prod" is too long
+
+## Goals
+
+1. **Fast deploys**: Update a single service in <5 minutes without touching others
+2. **Independent services**: Deploy services without NixOS rebuild
+3. **Add services dynamically**: New services via manifest, no NixOS changes needed
+4. **Maintain NixOS for base OS**: Keep NixOS for infra (Postgres, SSH, firewall)
+5. **Clear scale-up path**: Single host now, easy migration to Nomad later
+
+## Key Design Decisions
+
+1. **Nix closures, not Docker**: Deploy Nix store paths directly, not containers. Simpler, no Docker daemon needed. Use systemd hardening for isolation.
+
+2. **Pull-based, not push-based**: Target host polls S3 for manifest changes every 5 min. No SSH needed for deploys, just update manifest.
+
+3. **Caddy, not nginx**: Caddy has admin API for dynamic route updates and automatic HTTPS. No config file regeneration needed.
+
+4. **Separation of concerns**:
+ - `bild`: Build tool, adds `--cache` flag to sign+push closures
+ - `push.sh`: Deploy orchestrator, handles both NixOS and service deploys
+ - `deployer`: Runs on target, polls manifest, manages services
+
+5. **Out-of-band secrets**: Secrets stored in `/var/lib/biz-secrets/*.env`, manifest only references paths. No secrets in S3.
+
+6. **Nix profiles for rollback**: Each service gets a Nix profile, enabling `nix-env --rollback`.
+
+## Relevant Existing Files
+
+- `Omni/Bild.hs` - Build tool, modify to add `--cache` flag
+- `Omni/Bild.nix` - Nix build library, has `bild.run` for building packages
+- `Omni/Ide/push.sh` - Current deploy script, enhance for service deploys
+- `Biz.nix` - Current NixOS config for biz host
+- `Biz/Packages.nix` - Builds all Biz packages
+- `Biz/PodcastItLater/Web.nix` - Example NixOS service module (to be replaced)
+- `Biz/PodcastItLater/Web.py` - Example Python service (deploy target)
+- `Omni/Os/Base.nix` - Base NixOS config, add S3 substituter here
+
+## Architecture
+
+```
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ DEV MACHINE │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────┐ │
+│ │ push.sh <target> │ │
+│ │ │ │
+│ │ if target.nix: (NixOS deploy - existing behavior) │ │
+│ │ bild <target> │ │
+│ │ nix copy --to ssh://host │ │
+│ │ ssh host switch-to-configuration │ │
+│ │ │ │
+│ │ else: (Service deploy - new behavior) │ │
+│ │ bild <target> --cache ──▶ sign + push closure to S3 │ │
+│ │ update manifest.json in S3 with new storePath │ │
+│ │ (deployer on target will pick up changes) │ │
+│ └─────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ Separation of concerns: │
+│ - bild: Build + sign + push to S3 cache (--cache flag) │
+│ - push.sh: Orchestrates deploy, updates manifest, handles both modes │
+└─────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ DO SPACES (S3 BINARY CACHE) - PRIVATE │
+│ │
+│ /nar/*.nar.xz ← Compressed Nix store paths │
+│ /*.narinfo ← Metadata + signatures │
+│ /nix-cache-info ← Cache metadata │
+│ /manifest.json ← Current deployment state │
+│ /manifests/ ← Historical manifests for rollback │
+│ manifest-<ts>.json │
+│ │
+│ Authentication: AWS credentials (Spaces access key) │
+│ - Dev machine: write access for pushing │
+│ - Target host: read access for pulling │
+└─────────────────────────────────────────────────────────────────────────────┘
+ │
+ poll every 5 min
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ TARGET HOST (biz) │
+│ │
+│ ┌──────────────────────────────────────────────────────────────────────┐ │
+│ │ biz-deployer │ │
+│ │ (Python systemd service, runs every 5 min via timer) │ │
+│ │ │ │
+│ │ 1. Fetch manifest.json from S3 │ │
+│ │ 2. Compare to local state │ │
+│ │ 3. For changed services: │ │
+│ │ - nix copy --from s3://... <storePath> │ │
+│ │ - Generate systemd unit file │ │
+│ │ - Create GC root │ │
+│ │ - systemctl daemon-reload && restart │ │
+│ │ 4. Update Caddy routes via API │ │
+│ │ 5. Save local state │ │
+│ └──────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ Directories: │
+│ - /var/lib/biz-deployer/services/*.service (generated units) │
+│ - /var/lib/biz-deployer/state.json (local state) │
+│ - /var/lib/biz-secrets/*.env (secret env files) │
+│ - /nix/var/nix/gcroots/biz/* (GC roots) │
+│ │
+│ NixOS manages: │
+│ - Base OS, SSH, firewall │
+│ - Caddy with admin API enabled │
+│ - PostgreSQL, Redis (infra services) │
+│ - biz-deployer service itself │
+└─────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Components
+
+### 1. S3 Binary Cache (DO Spaces)
+
+**Bucket**: `omni-nix-cache` (private)
+**Region**: `nyc3` (or nearest)
+
+**Credentials**:
+- Dev machine: `~/.aws/credentials` with `[digitalocean]` profile
+- Target host: `/root/.aws/credentials` with same profile
+
+**Signing key**:
+- Generate: `nix-store --generate-binary-cache-key omni-cache cache-priv-key.pem cache-pub-key.pem`
+- Private key: `~/.config/nix/cache-priv-key.pem` (dev machine only)
+- Public key: Added to target's `nix.settings.trusted-public-keys`
+
+**S3 URL format**:
+```
+s3://omni-nix-cache?profile=digitalocean&scheme=https&endpoint=nyc3.digitaloceanspaces.com
+```
+
+### 2. Manifest Schema (v1)
+
+```json
+{
+ "version": 1,
+ "generation": "2025-01-15T12:34:56Z",
+ "services": [
+ {
+ "name": "podcastitlater-web",
+ "artifact": {
+ "type": "nix-closure",
+ "storePath": "/nix/store/abc123-podcastitlater-web-1.2.3"
+ },
+ "hosts": ["biz"],
+ "exec": {
+ "command": "podcastitlater-web",
+ "user": "pil-web",
+ "group": "pil"
+ },
+ "env": {
+ "PORT": "8000",
+ "AREA": "Live",
+ "DATA_DIR": "/var/podcastitlater",
+ "BASE_URL": "https://podcastitlater.com"
+ },
+ "envFile": "/var/lib/biz-secrets/podcastitlater-web.env",
+ "http": {
+ "domain": "podcastitlater.com",
+ "path": "/",
+ "internalPort": 8000
+ },
+ "systemd": {
+ "after": ["network-online.target", "postgresql.service"],
+ "requires": [],
+ "restart": "on-failure",
+ "restartSec": 5
+ },
+ "hardening": {
+ "dynamicUser": false,
+ "privateTmp": true,
+ "protectSystem": "strict",
+ "protectHome": true
+ },
+ "revision": "abc123def"
+ }
+ ]
+}
+```
+
+### 3. Deployer Service (Omni/Deploy/Deployer.py)
+
+Python service that:
+- Polls manifest from S3
+- Pulls Nix closures
+- Generates systemd units
+- Updates Caddy via API
+- Manages GC roots
+- Tracks local state
+
+### 4. NixOS Module (Omni/Deploy/Deployer.nix)
+
+Configures:
+- biz-deployer systemd service + timer
+- Caddy with admin API
+- S3 substituter configuration
+- Required directories and permissions
+
+### 5. Bild Integration (Omni/Bild.hs)
+
+New `--cache` flag that:
+1. Builds the target
+2. Signs the closure with cache key (using NIX_CACHE_KEY env var)
+3. Pushes to S3 cache
+4. Outputs the store path for push.sh to use
+
+Does NOT update manifest - that's push.sh's responsibility.
+
+### 6. Push.sh Enhancement (Omni/Ide/push.sh)
+
+Detect deploy mode from target extension:
+- `.nix` → NixOS deploy (existing behavior)
+- `.py`, `.hs`, etc. → Service deploy (new behavior)
+
+For service deploys:
+1. Call `bild <target> --cache`
+2. Capture store path from bild output
+3. Fetch current manifest.json from S3
+4. Archive current manifest to manifests/manifest-<timestamp>.json
+5. Update manifest with new storePath for this service
+6. Upload new manifest.json to S3
+7. Deployer on target picks up change within 5 minutes
+
+## Migration Path
+
+### Phase 1: Infrastructure Setup
+1. Create DO Spaces bucket
+2. Generate signing keys
+3. Configure S3 substituter on target
+4. Deploy base deployer service (empty manifest)
+
+### Phase 2: Migrate First Service
+1. Choose non-critical service (e.g., podcastitlater-worker)
+2. Add to manifest with different port
+3. Verify via staging route
+4. Flip Caddy to new service
+5. Disable old NixOS-managed service
+
+### Phase 3: Migrate Remaining Services
+- Repeat Phase 2 for each service
+- Order: worker → web → storybook
+
+### Phase 4: Cleanup
+- Remove service-specific NixOS modules
+- Simplify Biz.nix to base OS only
+
+## Rollback Strategy
+
+1. Each deploy archives current manifest to `/manifests/manifest-<ts>.json`
+2. Rollback = copy old manifest back to `manifest.json`
+3. Deployer sees new generation, converges to old state
+4. GC roots keep old closures alive (last 5 versions per service)
+
+## Scale-up Path
+
+| Stage | Hosts | Changes |
+|-------|-------|---------|
+| Current | 1 | Full architecture as described |
+| 2-3 hosts | 2-3 | Add `hosts` filtering, each host runs deployer |
+| 4+ hosts | 4+ | Consider Nomad with nix-nomad for job definitions |
+
+## Security Considerations
+
+- S3 bucket is private (authenticated reads/writes)
+- Signing key never leaves dev machine
+- Secrets stored out-of-band in `/var/lib/biz-secrets/`
+- systemd hardening for service isolation
+- Deployer validates manifest schema before applying
+
+## File Locations
+
+```
+Omni/
+ Deploy/
+ PLAN.md # This document
+ Deployer.py # Main deployer service
+ Deployer.nix # NixOS module
+ Manifest.py # Manifest schema/validation
+ Systemd.py # Unit file generation
+ Caddy.py # Caddy API integration
+ S3.py # S3 operations (for deployer)
+ Bild.hs # Add --cache flag for sign+push
+ Ide/
+ push.sh # Enhanced: NixOS deploy OR service deploy + manifest update
+```
diff --git a/Omni/Deploy/Packages.nix b/Omni/Deploy/Packages.nix
new file mode 100644
index 0000000..4cc42e9
--- /dev/null
+++ b/Omni/Deploy/Packages.nix
@@ -0,0 +1,11 @@
+# Build all deployer packages independently, outside NixOS context.
+#
+# Usage:
+# nix-build Omni/Deploy/Packages.nix # builds all packages
+# nix-build Omni/Deploy/Packages.nix -A biz-deployer # builds one package
+{bild ? import ../Bild.nix {}}: {
+ biz-deployer = bild.run ./Deployer.hs;
+ deploy-manifest = bild.run ./Manifest.hs;
+ deploy-systemd = bild.run ./Systemd.hs;
+ deploy-caddy = bild.run ./Caddy.hs;
+}
diff --git a/Omni/Deploy/README.md b/Omni/Deploy/README.md
new file mode 100644
index 0000000..cabad43
--- /dev/null
+++ b/Omni/Deploy/README.md
@@ -0,0 +1,211 @@
+# Mini-PaaS Deployment System
+
+A pull-based deployment system for deploying Nix-built services without full NixOS rebuilds.
+
+## Quick Start
+
+### Deploy a Service
+
+```bash
+# Build, cache to S3, and update manifest
+Omni/Ide/push.sh Biz/PodcastItLater/Web.py
+
+# The deployer on the target host polls every 5 minutes
+# To force immediate deployment, SSH to host and run:
+ssh biz sudo systemctl start biz-deployer
+```
+
+### View Current State
+
+```bash
+# Show current manifest
+deploy-manifest show
+
+# List archived manifests (for rollback)
+deploy-manifest list
+
+# Check deployer status on target
+ssh biz sudo systemctl status biz-deployer
+ssh biz cat /var/lib/biz-deployer/state.json
+```
+
+## Deployment Workflow
+
+```
+Developer Machine S3 Cache Target Host (biz)
+ │ │ │
+ │ push.sh Biz/App.py │ │
+ ├───────────────────────────────►│ │
+ │ 1. bild builds + caches │ │
+ │ 2. deploy-manifest update │ │
+ │ │ poll every 5 min │
+ │ │◄─────────────────────────────┤
+ │ │ │
+ │ │ manifest changed? │
+ │ │ - pull closure │
+ │ │ - generate systemd unit │
+ │ │ - update Caddy route │
+ │ │ - restart service │
+ │ │─────────────────────────────►│
+ │ │ │
+```
+
+## Adding a New Service
+
+### 1. Create the Service Definition
+
+```bash
+deploy-manifest add-service '{
+ "name": "my-new-service",
+ "artifact": {"storePath": "/nix/store/placeholder"},
+ "hosts": ["biz"],
+ "exec": {"command": null, "user": "root", "group": "root"},
+ "env": {"PORT": "8080", "AREA": "Live"},
+ "envFile": "/var/lib/biz-secrets/my-new-service.env",
+ "http": {"domain": "myservice.bensima.com", "path": "/", "internalPort": 8080}
+}'
+```
+
+### 2. Create Secrets File on Target
+
+```bash
+ssh biz
+sudo mkdir -p /var/lib/biz-secrets
+sudo tee /var/lib/biz-secrets/my-new-service.env << 'EOF'
+SECRET_KEY=your-secret-here
+DATABASE_URL=postgres://...
+EOF
+sudo chmod 600 /var/lib/biz-secrets/my-new-service.env
+```
+
+### 3. Deploy the Service
+
+```bash
+Omni/Ide/push.sh Biz/MyService.py
+```
+
+## Secrets Management
+
+Secrets are stored out-of-band on the target host, never in S3 or the manifest.
+
+**Location**: `/var/lib/biz-secrets/<service-name>.env`
+
+**Format**: Standard environment file
+```
+SECRET_KEY=abc123
+DATABASE_URL=postgres://user:pass@localhost/db
+STRIPE_API_KEY=sk_live_...
+```
+
+**Permissions**: `chmod 600`, owned by root
+
+**Referencing in manifest**: Set `envFile` field to the path
+
+## Rollback
+
+### List Available Versions
+
+```bash
+deploy-manifest list
+# Output:
+# manifest-20251216T033000Z.json
+# manifest-20251216T045211.json
+# manifest-20251215T120000Z.json
+```
+
+### Rollback to Previous Version
+
+```bash
+# Restore a specific archived manifest
+deploy-manifest rollback manifest-20251215T120000Z.json
+
+# Force immediate deployment
+ssh biz sudo systemctl start biz-deployer
+```
+
+The rollback archives the current manifest before restoring, so you can always rollback the rollback.
+
+## Troubleshooting
+
+### Service Not Starting
+
+```bash
+# Check deployer logs
+ssh biz sudo journalctl -u biz-deployer -f
+
+# Check service logs
+ssh biz sudo journalctl -u <service-name> -f
+
+# Check deployer state
+ssh biz cat /var/lib/biz-deployer/state.json
+```
+
+### Manifest Update Failed
+
+```bash
+# Verify AWS credentials
+aws s3 ls s3://omni-nix-cache/ --endpoint-url https://nyc3.digitaloceanspaces.com --profile digitalocean
+
+# Check manifest exists
+deploy-manifest show
+```
+
+### Closure Not Pulling
+
+```bash
+# Check if store path exists in cache
+aws s3 ls s3://omni-nix-cache/<hash>.narinfo --endpoint-url https://nyc3.digitaloceanspaces.com --profile digitalocean
+
+# Check target can access cache
+ssh biz nix copy --from 's3://omni-nix-cache?profile=digitalocean&scheme=https&endpoint=nyc3.digitaloceanspaces.com' /nix/store/<path>
+```
+
+### Caddy Route Issues
+
+```bash
+# Check Caddy config
+ssh biz curl -s localhost:2019/config/ | jq .
+
+# Check Caddy logs
+ssh biz sudo journalctl -u caddy -f
+```
+
+## Architecture
+
+| Component | Location | Purpose |
+|-----------|----------|---------|
+| `bild` | Dev machine | Build tool, caches to S3 |
+| `push.sh` | Dev machine | Orchestrates deploys |
+| `deploy-manifest` | Dev machine | Manage manifest in S3 |
+| `biz-deployer` | Target host | Polls manifest, deploys services |
+| Caddy | Target host | Reverse proxy with auto-HTTPS |
+
+### File Locations on Target
+
+| Path | Purpose |
+|------|---------|
+| `/var/lib/biz-deployer/state.json` | Local deployer state |
+| `/var/lib/biz-deployer/services/` | Generated systemd units |
+| `/var/lib/biz-secrets/` | Service secret env files |
+| `/nix/var/nix/gcroots/biz/` | GC roots for deployed closures |
+| `/root/.aws/credentials` | S3 credentials |
+
+## Scale-Up Path
+
+| Stage | Hosts | Changes Needed |
+|-------|-------|----------------|
+| Current | 1 | Full architecture as described |
+| 2-3 hosts | 2-3 | Add `hosts` filtering (already supported) |
+| 4+ hosts | 4+ | Consider migrating to Nomad + nix-nomad |
+
+The manifest already supports multi-host deployments via the `hosts` array. Each host runs its own deployer and only deploys services where its hostname appears in the `hosts` list.
+
+## Related Files
+
+- [Omni/Deploy/Manifest.hs](Manifest.hs) - Manifest CLI and schema
+- [Omni/Deploy/Deployer.hs](Deployer.hs) - Deployer service
+- [Omni/Deploy/Deployer.nix](Deployer.nix) - NixOS module
+- [Omni/Deploy/Systemd.hs](Systemd.hs) - Systemd unit generation
+- [Omni/Deploy/Caddy.hs](Caddy.hs) - Caddy API integration
+- [Omni/Ide/push.sh](../Ide/push.sh) - Deploy script
+- [Omni/Deploy/PLAN.md](PLAN.md) - Original design document
diff --git a/Omni/Deploy/Systemd.hs b/Omni/Deploy/Systemd.hs
new file mode 100644
index 0000000..7b64d1f
--- /dev/null
+++ b/Omni/Deploy/Systemd.hs
@@ -0,0 +1,269 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Systemd unit file generator for the mini-PaaS deployment system.
+--
+-- : out deploy-systemd
+-- : dep directory
+module Omni.Deploy.Systemd
+ ( generateUnit,
+ writeUnit,
+ createSymlink,
+ reloadAndRestart,
+ stopAndDisable,
+ removeUnit,
+ getRunningStorePath,
+ servicesDir,
+ main,
+ test,
+ )
+where
+
+import Alpha
+import qualified Data.Map as Map
+import qualified Data.Text as Text
+import qualified Data.Text.IO as Text.IO
+import Omni.Deploy.Manifest (Artifact (..), Exec (..), Hardening (..), Service (..), Systemd (..))
+import qualified Omni.Test as Test
+import qualified System.Directory as Dir
+import System.FilePath ((</>))
+import qualified System.Process as Process
+
+servicesDir :: FilePath
+servicesDir = "/var/lib/biz-deployer/services"
+
+generateUnit :: Service -> Text
+generateUnit Service {..} =
+ Text.unlines <| unitSection ++ serviceSection ++ hardeningSection ++ installSection
+ where
+ binary = fromMaybe serviceName (execCommand serviceExec)
+ execStart = storePath serviceArtifact <> "/bin/" <> binary
+
+ unitSection =
+ [ "[Unit]",
+ "Description=" <> serviceName,
+ "After=" <> Text.intercalate " " (systemdAfter serviceSystemd)
+ ]
+ ++ requiresLine
+
+ requiresLine =
+ ["Requires=" <> Text.intercalate " " (systemdRequires serviceSystemd) | not (null (systemdRequires serviceSystemd))]
+
+ serviceSection =
+ [ "",
+ "[Service]",
+ "Type=simple",
+ "ExecStart=" <> execStart,
+ "User=" <> execUser serviceExec,
+ "Group=" <> execGroup serviceExec,
+ "Restart=" <> systemdRestart serviceSystemd,
+ "RestartSec=" <> tshow (systemdRestartSec serviceSystemd)
+ ]
+ ++ envLines
+ ++ envFileLine
+
+ envLines =
+ Map.toList serviceEnv
+ |> map (\(k, v) -> "Environment=\"" <> k <> "=" <> v <> "\"")
+
+ envFileLine = case serviceEnvFile of
+ Nothing -> []
+ Just path -> ["EnvironmentFile=" <> path]
+
+ hardeningSection =
+ [ "",
+ "# Hardening",
+ "PrivateTmp=" <> boolToYesNo (hardeningPrivateTmp serviceHardening),
+ "ProtectSystem=" <> hardeningProtectSystem serviceHardening,
+ "ProtectHome=" <> boolToYesNo (hardeningProtectHome serviceHardening),
+ "NoNewPrivileges=yes"
+ ]
+ ++ readWritePathsLine
+
+ readWritePathsLine =
+ case Map.lookup "DATA_DIR" serviceEnv of
+ Just dataDir -> ["ReadWritePaths=" <> dataDir]
+ Nothing -> []
+
+ installSection =
+ [ "",
+ "[Install]",
+ "WantedBy=multi-user.target"
+ ]
+
+ boolToYesNo True = "yes"
+ boolToYesNo False = "no"
+
+writeUnit :: FilePath -> Service -> IO FilePath
+writeUnit baseDir svc = do
+ Dir.createDirectoryIfMissing True baseDir
+ let path = baseDir </> Text.unpack (serviceName svc) <> ".service"
+ content = generateUnit svc
+ Text.IO.writeFile path content
+ pure path
+
+createSymlink :: FilePath -> FilePath -> Service -> IO FilePath
+createSymlink baseDir sysDir svc = do
+ let unitPath = baseDir </> Text.unpack (serviceName svc) <> ".service"
+ linkPath = sysDir </> Text.unpack (serviceName svc) <> ".service"
+ exists <- Dir.doesPathExist linkPath
+ when exists <| Dir.removeFile linkPath
+ Dir.createFileLink unitPath linkPath
+ pure linkPath
+
+reloadAndRestart :: Text -> IO ()
+reloadAndRestart serviceName' = do
+ _ <- Process.readProcessWithExitCode "systemctl" ["daemon-reload"] ""
+ _ <-
+ Process.readProcessWithExitCode
+ "systemctl"
+ ["enable", "--now", Text.unpack serviceName' <> ".service"]
+ ""
+ pure ()
+
+stopAndDisable :: Text -> IO ()
+stopAndDisable serviceName' = do
+ _ <-
+ Process.readProcessWithExitCode
+ "systemctl"
+ ["disable", "--now", Text.unpack serviceName' <> ".service"]
+ ""
+ pure ()
+
+removeUnit :: FilePath -> FilePath -> Text -> IO ()
+removeUnit baseDir sysDir serviceName' = do
+ let unitPath = baseDir </> Text.unpack serviceName' <> ".service"
+ linkPath = sysDir </> Text.unpack serviceName' <> ".service"
+ linkExists <- Dir.doesPathExist linkPath
+ when linkExists <| Dir.removeFile linkPath
+ unitExists <- Dir.doesPathExist unitPath
+ when unitExists <| Dir.removeFile unitPath
+ _ <- Process.readProcessWithExitCode "systemctl" ["daemon-reload"] ""
+ pure ()
+
+-- | Get the store path of the currently running service by reading its unit file.
+getRunningStorePath :: Text -> IO (Maybe Text)
+getRunningStorePath serviceName' = do
+ let unitPath = servicesDir </> Text.unpack serviceName' <> ".service"
+ exists <- Dir.doesFileExist unitPath
+ if not exists
+ then pure Nothing
+ else do
+ content <- Text.IO.readFile unitPath
+ pure <| extractStorePath content
+ where
+ -- Extract /nix/store/...-service-name from ExecStart=/nix/store/.../bin/...
+ extractStorePath content =
+ content
+ |> Text.lines
+ |> find (Text.isPrefixOf "ExecStart=")
+ |> fmap (Text.drop (Text.length "ExecStart="))
+ |> fmap (Text.dropWhile (/= '/'))
+ |> fmap (Text.drop 1)
+ |> fmap (Text.takeWhile (/= '/'))
+ |> fmap ("/nix/store/" <>)
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Deploy.Systemd"
+ [ test_generateBasicUnit,
+ test_generateUnitWithEnv,
+ test_generateUnitWithCustomExec,
+ test_generateUnitWithEnvFile,
+ test_generateUnitWithDependencies,
+ test_generateUnitWithHardening
+ ]
+
+mkTestService :: Text -> Text -> Service
+mkTestService name path =
+ Service
+ { serviceName = name,
+ serviceArtifact = Artifact "nix-closure" path,
+ serviceHosts = ["biz"],
+ serviceExec = Exec Nothing "root" "root",
+ serviceEnv = mempty,
+ serviceEnvFile = Nothing,
+ serviceHttp = Nothing,
+ serviceSystemd = Systemd ["network-online.target"] [] "on-failure" 5,
+ serviceHardening = Hardening False True "strict" True,
+ serviceRevision = Nothing
+ }
+
+test_generateBasicUnit :: Test.Tree
+test_generateBasicUnit =
+ Test.unit "generates basic unit file" <| do
+ let svc = mkTestService "test-service" "/nix/store/abc123-test"
+ unit = generateUnit svc
+ Text.isInfixOf "[Unit]" unit Test.@=? True
+ Text.isInfixOf "Description=test-service" unit Test.@=? True
+ Text.isInfixOf "[Service]" unit Test.@=? True
+ Text.isInfixOf "ExecStart=/nix/store/abc123-test/bin/test-service" unit Test.@=? True
+ Text.isInfixOf "[Install]" unit Test.@=? True
+ Text.isInfixOf "WantedBy=multi-user.target" unit Test.@=? True
+
+test_generateUnitWithEnv :: Test.Tree
+test_generateUnitWithEnv =
+ Test.unit "generates unit with environment" <| do
+ let svc =
+ (mkTestService "env-test" "/nix/store/xyz")
+ { serviceEnv = Map.fromList [("PORT", "8000"), ("DEBUG", "true")]
+ }
+ unit = generateUnit svc
+ Text.isInfixOf "Environment=\"PORT=8000\"" unit Test.@=? True
+ Text.isInfixOf "Environment=\"DEBUG=true\"" unit Test.@=? True
+
+test_generateUnitWithCustomExec :: Test.Tree
+test_generateUnitWithCustomExec =
+ Test.unit "generates unit with custom exec" <| do
+ let svc =
+ (mkTestService "custom-exec" "/nix/store/abc")
+ { serviceExec = Exec (Just "my-binary") "www-data" "www-data"
+ }
+ unit = generateUnit svc
+ Text.isInfixOf "ExecStart=/nix/store/abc/bin/my-binary" unit Test.@=? True
+ Text.isInfixOf "User=www-data" unit Test.@=? True
+ Text.isInfixOf "Group=www-data" unit Test.@=? True
+
+test_generateUnitWithEnvFile :: Test.Tree
+test_generateUnitWithEnvFile =
+ Test.unit "generates unit with env file" <| do
+ let svc =
+ (mkTestService "env-file-test" "/nix/store/xyz")
+ { serviceEnvFile = Just "/var/lib/biz-secrets/test.env"
+ }
+ unit = generateUnit svc
+ Text.isInfixOf "EnvironmentFile=/var/lib/biz-secrets/test.env" unit Test.@=? True
+
+test_generateUnitWithDependencies :: Test.Tree
+test_generateUnitWithDependencies =
+ Test.unit "generates unit with dependencies" <| do
+ let svc =
+ (mkTestService "dep-test" "/nix/store/abc")
+ { serviceSystemd =
+ Systemd
+ ["network-online.target", "postgresql.service"]
+ ["postgresql.service"]
+ "on-failure"
+ 5
+ }
+ unit = generateUnit svc
+ Text.isInfixOf "After=network-online.target postgresql.service" unit Test.@=? True
+ Text.isInfixOf "Requires=postgresql.service" unit Test.@=? True
+
+test_generateUnitWithHardening :: Test.Tree
+test_generateUnitWithHardening =
+ Test.unit "generates unit with hardening" <| do
+ let svc =
+ (mkTestService "hardened" "/nix/store/abc")
+ { serviceHardening = Hardening False True "full" True
+ }
+ unit = generateUnit svc
+ Text.isInfixOf "PrivateTmp=yes" unit Test.@=? True
+ Text.isInfixOf "ProtectSystem=full" unit Test.@=? True
+ Text.isInfixOf "ProtectHome=yes" unit Test.@=? True
+ Text.isInfixOf "NoNewPrivileges=yes" unit Test.@=? True
+
+main :: IO ()
+main = Test.run test
diff --git a/Omni/Dev/Beryllium.nix b/Omni/Dev/Beryllium.nix
index 9a72353..4d9ed09 100755
--- a/Omni/Dev/Beryllium.nix
+++ b/Omni/Dev/Beryllium.nix
@@ -5,13 +5,16 @@ bild.os {
../Os/Base.nix
../Packages.nix
../Users.nix
+ ./Beryllium/Ava.nix
./Beryllium/Configuration.nix
./Beryllium/Hardware.nix
./Beryllium/Ollama.nix
./Docker.nix
./Vpn.nix
./Beryllium/OpenWebui.nix
+ ./Beryllium/Live.nix
+ ../Syncthing.nix
];
networking.hostName = "beryllium";
- networking.domain = "beryl.simatime.com";
+ networking.domain = "beryl.bensima.com";
}
diff --git a/Omni/Dev/Beryllium/AVA.md b/Omni/Dev/Beryllium/AVA.md
new file mode 100644
index 0000000..620592b
--- /dev/null
+++ b/Omni/Dev/Beryllium/AVA.md
@@ -0,0 +1,111 @@
+# Ava Deployment on Beryllium
+
+Ava runs as a systemd service under the `ava` user.
+
+## Architecture
+
+```
+/home/ava/ # Ava's home directory (AVA_DATA_ROOT)
+├── omni/ # Clone of the omni repo
+├── skills/ # Ava's skills directory
+│ ├── shared/ # Skills available to all users
+│ └── <username>/ # User-specific skills
+├── outreach/ # Outreach approval queue
+│ ├── pending/
+│ ├── approved/
+│ ├── rejected/
+│ └── sent/
+├── users/ # Per-user scratch space
+│ └── <username>/
+└── .local/share/omni/
+ └── memory.db # SQLite memory database
+```
+
+## Configuration
+
+The service is configured in `Ava.nix` and requires these environment variables in `/run/secrets/ava.env`:
+
+```bash
+TELEGRAM_BOT_TOKEN=xxx
+OPENROUTER_API_KEY=xxx
+KAGI_API_KEY=xxx # optional
+ALLOWED_TELEGRAM_USER_IDS=xxx,yyy # or * for all
+```
+
+## Commands
+
+```bash
+# View logs
+journalctl -u ava -f
+
+# Restart service
+sudo systemctl restart ava
+
+# Check status
+sudo systemctl status ava
+
+# Stop/Start
+sudo systemctl stop ava
+sudo systemctl start ava
+```
+
+## SSH Access
+
+The Ava private key is at `~/.ssh/ava_ed25519`. Use it to SSH as ava:
+
+```bash
+ssh -i ~/.ssh/ava_ed25519 ava@beryl.bensima.com
+```
+
+Ben can also access ava's workspace via his own SSH key since ava is in the git group.
+
+## Git Setup
+
+Ava has its own clone of the omni repo at `/home/ava/omni`. To fetch changes from ben:
+
+```bash
+# As ava:
+cd /home/ava/omni
+git fetch origin
+git pull origin main
+```
+
+Ben can also push directly to ava's repo if needed:
+
+```bash
+# From /home/ben/omni:
+git remote add ava /home/ava/omni
+git push ava main
+```
+
+## Redeploy
+
+To redeploy Ava with code changes:
+
+```bash
+# 1. Rebuild the NixOS config
+push.sh Omni/Dev/Beryllium.nix
+
+# 2. Or just restart the service if only env changes
+sudo systemctl restart ava
+```
+
+## Migration from tmux
+
+If migrating from the old tmux-based deployment:
+
+1. Deploy the NixOS config with the new ava user
+2. Run the migration script: `sudo ./Omni/Dev/Beryllium/migrate-ava.sh`
+3. Create `/run/secrets/ava.env` with the required secrets
+4. Stop the tmux ava process
+5. Start the systemd service: `sudo systemctl start ava`
+6. Enable on boot: `sudo systemctl enable ava`
+
+## Environment Variable: AVA_DATA_ROOT
+
+The `AVA_DATA_ROOT` environment variable controls where Ava stores its data:
+
+- **Development** (unset): Uses `_/var/ava/` (relative to repo)
+- **Production**: Set to `/home/ava` via the systemd service
+
+This allows the same codebase to run in both environments without changes.
diff --git a/Omni/Dev/Beryllium/Ava.nix b/Omni/Dev/Beryllium/Ava.nix
new file mode 100644
index 0000000..f0765cd
--- /dev/null
+++ b/Omni/Dev/Beryllium/Ava.nix
@@ -0,0 +1,81 @@
+{...}: let
+ bild = import ../../Bild.nix {};
+ avaPkg = bild.run ../../Ava.hs;
+
+ # Python environment for Ava's python_exec tool
+ avaPython = bild.python.pythonWith (p: [
+ p.requests # HTTP requests
+ p.beautifulsoup4 # HTML/XML parsing
+ p.lxml # Fast XML/HTML parser
+ p.pandas # Data analysis
+ p.numpy # Numerical computing
+ p.pyyaml # YAML parsing
+ p.python-dateutil # Date utilities
+ ]);
+
+ # Wrap ava binary with tools in PATH
+ avaWithTools = bild.stdenv.mkDerivation {
+ name = "ava-wrapped";
+ buildInputs = [bild.makeWrapper];
+ phases = ["installPhase"];
+ installPhase = ''
+ mkdir -p $out/bin
+ makeWrapper ${avaPkg}/bin/ava $out/bin/ava \
+ --prefix PATH : ${bild.lib.makeBinPath [
+ avaPython
+ bild.pkgs.jq
+ bild.pkgs.ripgrep
+ bild.pkgs.coreutils
+ bild.pkgs.git
+ bild.pkgs.sqlite
+ ]}
+ '';
+ };
+in {
+ systemd.services.ava = {
+ description = "Ava Telegram assistant";
+ after = ["network-online.target" "ollama.service"];
+ wants = ["network-online.target" "ollama.service"];
+ wantedBy = ["multi-user.target"];
+
+ serviceConfig = {
+ Type = "simple";
+ User = "ava";
+ Group = "users";
+ WorkingDirectory = "/home/ava/omni";
+
+ Environment = [
+ "AVA_DATA_ROOT=/home/ava"
+ "HOME=/home/ava"
+ "OLLAMA_URL=http://localhost:11434"
+ ];
+
+ EnvironmentFile = "/run/secrets/ava.env";
+
+ ExecStart = "${avaWithTools}/bin/ava";
+
+ StandardOutput = "journal";
+ StandardError = "journal";
+
+ Restart = "on-failure";
+ RestartSec = 5;
+
+ TimeoutStopSec = 90;
+ KillMode = "mixed";
+ KillSignal = "SIGTERM";
+ };
+ };
+
+ systemd.tmpfiles.rules = [
+ "d /home/ava 0755 ava users -"
+ "d /home/ava/omni 0755 ava users -"
+ "d /home/ava/skills 0755 ava users -"
+ "d /home/ava/outreach 0755 ava users -"
+ "d /home/ava/outreach/pending 0755 ava users -"
+ "d /home/ava/outreach/approved 0755 ava users -"
+ "d /home/ava/outreach/rejected 0755 ava users -"
+ "d /home/ava/outreach/sent 0755 ava users -"
+ "d /home/ava/users 0755 ava users -"
+ "d /home/ava/.local/share/omni 0755 ava users -"
+ ];
+}
diff --git a/Omni/Dev/Beryllium/Configuration.nix b/Omni/Dev/Beryllium/Configuration.nix
index 8fa783b..3e39fe4 100644
--- a/Omni/Dev/Beryllium/Configuration.nix
+++ b/Omni/Dev/Beryllium/Configuration.nix
@@ -14,9 +14,6 @@ in {
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
- boot.kernelModules = ["v4l2loopback"];
- boot.extraModulePackages = [pkgs.linuxPackages.v4l2loopback];
-
# Enable networking
networking.networkmanager.enable = true;
@@ -52,24 +49,21 @@ in {
services.xserver = {
layout = "us";
xkbVariant = "";
+ extraConfig = ''
+ Section "InputClass"
+ Identifier "Kensington Expert Mouse"
+ MatchProduct "Kensington Expert Mouse"
+ Option "ButtonMapping" "1 8 2 4 5 6 7 3 9"
+ Option "NaturalScrolling" "true"
+ Option "ScrollMethod" "button"
+ Option "ScrollButton" "3"
+ EndSection
+ '';
};
# Enable CUPS to print documents.
services.printing.enable = true;
- # Enable sound with pipewire.
- hardware.pulseaudio.enable = false;
- security.rtkit.enable = true;
- services.pipewire = {
- enable = true;
- alsa.enable = true;
- alsa.support32Bit = true;
- pulse.enable = true;
- # If you want to use JACK applications, uncomment this
- jack.enable = true;
- wireplumber.enable = true;
- };
-
hardware.opengl.enable = true;
hardware.opengl.driSupport32Bit = true;
services.xserver.videoDrivers = ["nvidia"];
@@ -100,6 +94,8 @@ in {
services.eternal-terminal.enable = true;
+ services.pcscd.enable = true;
+
environment.systemPackages = with pkgs; [
v4l-utils
linuxPackages.v4l2loopback
diff --git a/Omni/Dev/Beryllium/Hardware.nix b/Omni/Dev/Beryllium/Hardware.nix
index 5a8b583..c9632f5 100644
--- a/Omni/Dev/Beryllium/Hardware.nix
+++ b/Omni/Dev/Beryllium/Hardware.nix
@@ -12,7 +12,6 @@
boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod"];
boot.initrd.kernelModules = [];
boot.kernelModules = ["kvm-amd"];
- boot.extraModulePackages = [];
fileSystems."/" = {
device = "/dev/disk/by-uuid/f96eaa16-d0e2-4230-aece-131ce7b630da";
diff --git a/Omni/Dev/Beryllium/Live.nix b/Omni/Dev/Beryllium/Live.nix
new file mode 100644
index 0000000..a44452f
--- /dev/null
+++ b/Omni/Dev/Beryllium/Live.nix
@@ -0,0 +1,135 @@
+{pkgs, ...}: {
+ programs.obs-studio = {
+ enable = true;
+ enableVirtualCamera = true;
+ plugins = with pkgs.obs-studio-plugins; [
+ obs-pipewire-audio-capture
+ ];
+ };
+
+ # Enable sound with pipewire.
+ hardware.pulseaudio.enable = false;
+ security.rtkit.enable = true;
+
+ # Latency optimization
+ boot.kernelParams = ["threadirqs"];
+ boot.kernel.sysctl."vm.swappiness" = 10;
+
+ environment.systemPackages = with pkgs; [
+ obs-do # cli for controlling obs
+ pamixer # cli volume control
+ patchage # another connection manager
+ pwvucontrol # gui for quick adjustments
+ qpwgraph # better than helvum
+ supercollider-with-plugins
+ ];
+
+ # Virtual sinks for routing audio
+ services.pipewire = {
+ enable = true;
+ alsa.enable = true;
+ alsa.support32Bit = true;
+ pulse.enable = true;
+ jack.enable = true;
+ wireplumber.enable = true;
+
+ extraConfig.pipewire = {
+ "10-loopback" = {
+ # loopback my mic into my headphones so i can hear myself, this creates
+ # an auditory space that encourages focus and thinking
+
+ "context.properties" = {
+ "default.clock.rate" = 48000;
+ "default.clock.quantum" = 128; # lower for less latency
+ "default.clock.min-quantum" = 32;
+ "default.clock.max-quantum" = 8192;
+ };
+
+ "context.exec" = [
+ {
+ "path" = "${pkgs.writeShellScript "setup-mic-monitor" ''
+ sleep 1
+ ${pkgs.pipewire}/bin/pw-link \
+ "alsa_input.usb-Antlion_Audio_Antlion_USB_Microphone-00.pro-input-0:capture_AUX0" \
+ "input.mic-monitor:input_FL"
+
+ ${pkgs.pipewire}/bin/pw-link \
+ "alsa_input.usb-Antlion_Audio_Antlion_USB_Microphone-00.pro-input-0:capture_AUX0" \
+ "input.mic-monitor:input_FR"
+ ''}";
+ }
+ ];
+
+ "context.modules" = [
+ {
+ name = "libpipewire-module-loopback";
+ args = {
+ "node.name" = "mic-monitor";
+ "node.description" = "Microphone Monitor";
+ "capture.props" = {
+ "target.object" = "alsa_input.usb-Antlion_Audio_Antlion_USB_Microphone-00.pro-input-0";
+ "channelmix.normalize" = true;
+ "audio.channels" = 1;
+ "audio.position" = ["FR" "FL"];
+ };
+ "playback.props" = {
+ "target.object" = "alsa_output.usb-Focusrite_Scarlett_Solo_USB-00.HiFi__Line1__sink";
+ "node.passive" = true;
+ "channelmix.normalize" = true;
+ "audio.channels" = 2;
+ "audio.position" = ["FR" "FL"];
+ };
+ };
+ }
+ ];
+ };
+
+ "10-combined" = {
+ "context.modules" = [
+ {
+ name = "libpipewire-module-loopback";
+ args = {
+ "node.name" = "combined-audio";
+ "node.description" = "Combined Mic+Desktop Audio";
+ "capture.props" = {
+ "media.class" = "Audio/Sink";
+ "audio.class" = 2;
+ "audio.position" = ["FL" "FR"];
+ "channelmix.normalize" = true;
+ };
+ "playback.props" = {
+ "media.class" = "Audio/Source";
+ "audio.channels" = 2;
+ "audio.position" = ["FL" "FR"];
+ "channelmix.normalize" = true;
+ };
+ };
+ }
+ ];
+
+ "context.exec" = [
+ {
+ "path" = "${pkgs.writeShellScript "setup-audio-routing" ''
+ sleep 1
+ ${pkgs.pipewire}/bin/pw-link \
+ "alsa_input.usb-Antlion_Audio_Antlion_USB_Microphone-00.pro-input-0:capture_AUX0" \
+ "input.combined-audio:playback_FL"
+
+ ${pkgs.pipewire}/bin/pw-link \
+ "alsa_input.usb-Antlion_Audio_Antlion_USB_Microphone-00.pro-input-0:capture_AUX0" \
+ "input.combined-audio:playback_FR"
+
+ ${pkgs.pipewire}/bin/pw-link \
+ "input.combined-audio:monitor_FL" \
+ "alsa_output.usb-Focusrite_Scarlett_Solo_USB-00.HiFi__Line1__sink:playback_FL"
+
+ ${pkgs.pipewire}/bin/pw-link \
+ "input.combined-audio:monitor_FR" \
+ "alsa_output.usb-Focusrite_Scarlett_Solo_USB-00.HiFi__Line1__sink:playback_FR"
+ ''}";
+ }
+ ];
+ };
+ };
+ };
+}
diff --git a/Omni/Dev/Beryllium/migrate-ava.sh b/Omni/Dev/Beryllium/migrate-ava.sh
new file mode 100755
index 0000000..91d2740
--- /dev/null
+++ b/Omni/Dev/Beryllium/migrate-ava.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+# Migration script: move Ava data from _/var/ava to /home/ava
+#
+# Run this ONCE after deploying the NixOS config with the new ava user.
+#
+# Usage:
+# sudo ./migrate-ava.sh
+#
+# This script:
+# 1. Copies existing data from _/var/ava to /home/ava
+# 2. Copies memory.db from ben's .local to ava's .local
+# 3. Clones the omni repo into /home/ava/omni
+# 4. Sets proper ownership
+
+set -euo pipefail
+
+GRN='\033[0;32m'
+YLW='\033[0;33m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+OMNI_REPO="/home/ben/omni"
+AVA_HOME="/home/ava"
+OLD_DATA_ROOT="$OMNI_REPO/_/var/ava"
+
+echo -e "${YLW}=== Ava Migration Script ===${NC}"
+
+# Check we're running as root
+if [[ $EUID -ne 0 ]]; then
+ echo -e "${RED}Error: This script must be run as root${NC}"
+ exit 1
+fi
+
+# Check ava user exists
+if ! id ava &>/dev/null; then
+ echo -e "${RED}Error: ava user does not exist. Deploy the NixOS config first.${NC}"
+ exit 1
+fi
+
+# Create directory structure (tmpfiles should handle this, but just in case)
+echo -e "${YLW}Creating directory structure...${NC}"
+mkdir -p "$AVA_HOME"/{skills,outreach,users,omni,.local/share/omni}
+mkdir -p "$AVA_HOME"/outreach/{pending,approved,rejected,sent}
+
+# Copy skills
+if [[ -d "$OLD_DATA_ROOT/skills" ]]; then
+ echo -e "${YLW}Copying skills...${NC}"
+ rsync -av --progress "$OLD_DATA_ROOT/skills/" "$AVA_HOME/skills/"
+else
+ echo -e "${YLW}No skills to migrate${NC}"
+fi
+
+# Copy outreach
+if [[ -d "$OLD_DATA_ROOT/outreach" ]]; then
+ echo -e "${YLW}Copying outreach data...${NC}"
+ rsync -av --progress "$OLD_DATA_ROOT/outreach/" "$AVA_HOME/outreach/"
+else
+ echo -e "${YLW}No outreach data to migrate${NC}"
+fi
+
+# Copy memory.db if it exists
+BEN_MEMORY="/home/ben/.local/share/omni/memory.db"
+AVA_MEMORY="$AVA_HOME/.local/share/omni/memory.db"
+if [[ -f "$BEN_MEMORY" ]]; then
+ echo -e "${YLW}Copying memory database...${NC}"
+ cp -v "$BEN_MEMORY" "$AVA_MEMORY"
+else
+ echo -e "${YLW}No memory.db found at $BEN_MEMORY${NC}"
+fi
+
+# Clone or update the omni repo
+if [[ -d "$AVA_HOME/omni/.git" ]]; then
+ echo -e "${YLW}Omni repo already exists, updating...${NC}"
+ cd "$AVA_HOME/omni"
+ sudo -u ava git fetch origin
+else
+ echo -e "${YLW}Cloning omni repo...${NC}"
+ sudo -u ava git clone "$OMNI_REPO" "$AVA_HOME/omni"
+fi
+
+# Set ownership
+echo -e "${YLW}Setting ownership...${NC}"
+chown -R ava:users "$AVA_HOME"
+
+# Show summary
+echo ""
+echo -e "${GRN}=== Migration Complete ===${NC}"
+echo ""
+echo "Directory structure:"
+ls -la "$AVA_HOME"
+echo ""
+echo "Next steps:"
+echo "1. Create /run/secrets/ava.env with:"
+echo " TELEGRAM_BOT_TOKEN=xxx"
+echo " OPENROUTER_API_KEY=xxx"
+echo " KAGI_API_KEY=xxx (optional)"
+echo " ALLOWED_TELEGRAM_USER_IDS=xxx (or * for all)"
+echo ""
+echo "2. Stop the tmux Ava process"
+echo "3. Start the systemd service: sudo systemctl start ava"
+echo "4. Watch logs: journalctl -u ava -f"
+echo "5. Enable on boot: sudo systemctl enable ava"
diff --git a/Omni/Dev/Lithium.nix b/Omni/Dev/Lithium.nix
index 7befc7d..6b7b9a4 100755
--- a/Omni/Dev/Lithium.nix
+++ b/Omni/Dev/Lithium.nix
@@ -16,5 +16,5 @@ bild.os {
./Vpn.nix
];
networking.hostName = "lithium";
- networking.domain = "dev.simatime.com";
+ networking.domain = "dev.bensima.com";
}
diff --git a/Omni/Dev/Lithium/Configuration.nix b/Omni/Dev/Lithium/Configuration.nix
index 92290f4..a439ec4 100644
--- a/Omni/Dev/Lithium/Configuration.nix
+++ b/Omni/Dev/Lithium/Configuration.nix
@@ -38,7 +38,7 @@ in {
services.my-hoogle.enable = true;
services.my-hoogle.port = ports.hoogle;
- services.my-hoogle.home = "//hoogle.simatime.com";
+ services.my-hoogle.home = "//hoogle.bensima.com";
services.my-hoogle.packages = pkgset:
lib.attrsets.attrVals (import ../../Bild/Deps/Haskell.nix) pkgset;
services.my-hoogle.haskellPackages = pkgs.haskell.packages.${ghcCompiler};
@@ -49,11 +49,6 @@ in {
services.k3s.enable = false;
services.k3s.role = "server";
- services.syncthing.enable = true;
- services.syncthing.guiAddress = "127.0.0.1:${toString ports.syncthing-gui}";
- services.syncthing.openDefaultPorts = true;
- services.syncthing.systemService = true;
-
services.tor.enable = true;
services.tor.client.enable = true;
services.tor.relay.role = "bridge";
@@ -66,7 +61,6 @@ in {
services.bitcoind.mainnet.enable = true;
services.bitcoind.mainnet.dataDir = "/mnt/campbell/bitcoind-mainnet/data";
services.bitcoind.mainnet.configFile = "/mnt/campbell/bitcoind-mainnet/bitcoin.conf";
- services.bitcoind.mainnet.prune = 10000;
services.pcscd.enable = true;
services.logind.lidSwitch = "ignore";
@@ -107,7 +101,7 @@ in {
services.xserver.modules = [pkgs.xf86_input_wacom];
services.xserver.wacom.enable = true;
- services.jupyter.enable = true;
+ services.jupyter.enable = false;
services.jupyter.port = ports.jupyter;
services.jupyter.ip = "*";
users.users.jupyter.group = "jupyter";
diff --git a/Omni/Dev/Vpn.nix b/Omni/Dev/Vpn.nix
index 7172d84..996b22c 100644
--- a/Omni/Dev/Vpn.nix
+++ b/Omni/Dev/Vpn.nix
@@ -6,13 +6,12 @@ inappropriate sites, as well as a ton of ads.
*/
let
ports = import ../Cloud/Ports.nix;
- domain = "headscale.simatime.com";
in {
services.headscale = {
enable = false; # don't use headscale rn, just use tailscale.com
address = "0.0.0.0";
port = ports.headscale;
- settings = {dns.base_domain = "simatime.com";};
+ settings = {dns.base_domain = "bensima.com";};
};
environment.systemPackages = [config.services.headscale.package];
diff --git a/Omni/Fact.hs b/Omni/Fact.hs
new file mode 100644
index 0000000..57db7fc
--- /dev/null
+++ b/Omni/Fact.hs
@@ -0,0 +1,81 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Fact module for the Jr knowledge base.
+--
+-- Facts are pieces of knowledge learned during task execution that can
+-- inform future work on similar tasks or files.
+module Omni.Fact
+ ( Fact (..),
+ createFact,
+ getFact,
+ getAllFacts,
+ getFactsByProject,
+ getFactsByFile,
+ updateFact,
+ deleteFact,
+ )
+where
+
+import Alpha
+import Data.Aeson (encode)
+import qualified Data.ByteString.Lazy.Char8 as BLC
+import qualified Data.Text as Text
+import Data.Time (getCurrentTime)
+import qualified Database.SQLite.Simple as SQL
+import Omni.Task.Core
+ ( Fact (..),
+ getFactsForFile,
+ getFactsForProject,
+ loadFacts,
+ saveFact,
+ withDb,
+ )
+import qualified Omni.Task.Core as TaskCore
+
+-- | Create a new fact and return its ID.
+createFact :: Text -> Text -> [Text] -> Maybe Text -> Double -> IO Int
+createFact project content relatedFiles sourceTask confidence = do
+ now <- getCurrentTime
+ let fact =
+ Fact
+ { factId = Nothing,
+ factProject = project,
+ factContent = content,
+ factRelatedFiles = relatedFiles,
+ factSourceTask = sourceTask,
+ factConfidence = confidence,
+ factCreatedAt = now
+ }
+ saveFact fact
+
+-- | Get a fact by its ID.
+getFact :: Int -> IO (Maybe Fact)
+getFact fid = do
+ facts <- getAllFacts
+ pure <| find (\f -> factId f == Just fid) facts
+
+-- | Get all facts from the database.
+getAllFacts :: IO [Fact]
+getAllFacts = loadFacts
+
+-- | Get facts for a specific project.
+getFactsByProject :: Text -> IO [Fact]
+getFactsByProject = getFactsForProject
+
+-- | Get facts related to a specific file.
+getFactsByFile :: Text -> IO [Fact]
+getFactsByFile = getFactsForFile
+
+-- | Update an existing fact.
+updateFact :: Int -> Text -> [Text] -> Double -> IO ()
+updateFact fid content relatedFiles confidence =
+ withDb <| \conn ->
+ SQL.execute
+ conn
+ "UPDATE facts SET fact = ?, related_files = ?, confidence = ? WHERE id = ?"
+ (content, Text.pack (BLC.unpack (encode relatedFiles)), confidence, fid)
+
+-- | Delete a fact by ID.
+deleteFact :: Int -> IO ()
+deleteFact = TaskCore.deleteFact
diff --git a/Omni/Ide/README.md b/Omni/Ide/README.md
new file mode 100644
index 0000000..7511090
--- /dev/null
+++ b/Omni/Ide/README.md
@@ -0,0 +1,143 @@
+# Development Tools and Workflow
+
+## Tools
+
+### run.sh
+
+`run.sh` is a convenience wrapper that builds (if needed) and runs a namespace.
+
+Examples:
+```bash
+Omni/Ide/run.sh Omni/Task.hs # Build and run task manager
+Omni/Ide/run.sh Biz/PodcastItLater/Web.py # Build and run web server
+```
+
+This script will:
+1. Check if the binary exists in `_/bin/`
+2. Build it if it doesn't exist (exits on build failure)
+3. Execute the binary with any additional arguments
+
+### lint
+
+Universal lint and formatting tool. Errors if lints fail or code is not formatted properly.
+
+Examples:
+```bash
+lint Omni/Cli.hs # Lint a namespace
+lint --fix **/*.py # Lint and fix all Python files
+```
+
+### repl.sh
+
+Like `nix-shell` but specific to this repo. Analyzes the namespace, pulls dependencies, and starts a shell or repl.
+
+Examples:
+```bash
+repl.sh Omni/Bild.hs # Start Haskell repl with namespace loaded
+repl.sh --bash Omni/Log.py # Start bash shell for namespace
+```
+
+### typecheck.sh
+
+Like `lint` but only runs type checkers. Currently just supports Python with `mypy`, but eventually will support everything that `bild` supports.
+
+Examples:
+```bash
+typecheck.sh Omni/Bild/Example.py # Run the typechecker and report any errors
+```
+
+### Test Commands
+
+Run tests:
+```bash
+bild --test Omni/Task.hs # Build and test a namespace
+```
+
+The convention for all programs in the omnirepo is to run their tests if the first argument is `test`. So for example:
+
+```bash
+# this will build a the latest executable and then run tests
+bild --test Omni/Task.hs
+
+# this will just run the tests from the existing executable
+_/bin/task test
+```
+
+## Git Workflow
+
+### Use git-branchless
+
+This repository uses **git-branchless** for a patch-based workflow instead of traditional branch-based git.
+
+Key concepts:
+- Work with **patches** (commits) directly rather than branches
+- Use **stacking** to organize related changes
+- Leverage **smartlog** to visualize commit history
+
+### Common git-branchless Commands
+
+**View commit graph:**
+```bash
+git smartlog
+```
+
+**Create a new commit:**
+```bash
+# Make your changes
+git add .
+git commit -m "Your commit message"
+```
+
+**Amend the current commit:**
+```bash
+# Make additional changes
+git add .
+git amend
+```
+
+**Move/restack commits:**
+```bash
+git move -s <source> -d <destination>
+git restack
+```
+
+### When to Record Changes in Git
+
+**DO record in git:**
+- Completed features or bug fixes
+- Working code that passes tests and linting
+- Significant milestones in task completion
+
+**DO NOT record in git:**
+- Work in progress (unless specifically requested)
+- Broken or untested code
+- Temporary debugging changes
+
+**NEVER do these git operations without explicit user request:**
+- ❌ `git push` - NEVER push to remote unless explicitly asked
+- ❌ `git pull` - NEVER pull from remote unless explicitly asked
+- ❌ Force pushes or destructive operations
+- ❌ Branch deletion or remote branch operations
+
+**Why:** The user maintains control over when code is shared with collaborators. Always ask before syncing with remote repositories.
+
+### Workflow Best Practices
+
+1. **Make small, focused commits** - Each commit should represent one logical change
+2. **Write descriptive commit messages** - Explain what and why, not just what
+3. **Rebase and clean up history** - Use `git commit --amend` and `git restack` to keep history clean
+4. **Test before committing** - Run `bild --test` and `lint` on affected namespaces
+
+### Required Checks Before Completing Tasks
+
+After completing a task, **always** run these commands for the namespace(s) you modified:
+
+```bash
+# Run tests
+bild --test Omni/YourNamespace.hs
+
+# Run linter
+lint Omni/YourNamespace.hs
+```
+
+**Fix all reported errors** related to your changes before marking the task as complete. This ensures code quality and prevents breaking the build for other contributors.
diff --git a/Omni/Ide/ailint.sh b/Omni/Ide/ailint.sh
new file mode 100755
index 0000000..a107be8
--- /dev/null
+++ b/Omni/Ide/ailint.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+set -xu
+target=${1:?}
+instructions=$(mktemp)
+echo "Fix the following lint errors, or silence them with a `# noqa:` comment if they aren't problematic:" >> "$instructions"
+if lint -f "$target" >> "$instructions" 2>&1
+then
+ echo "no bad lints"
+else
+ aider --yes --message-file "$instructions" "$target"
+fi
diff --git a/Omni/Ide/hooks/commit-msg b/Omni/Ide/hooks/commit-msg
index e07d1f4..a91b651 100755
--- a/Omni/Ide/hooks/commit-msg
+++ b/Omni/Ide/hooks/commit-msg
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
if ! gitlint --ignore-stdin --staged --msg-filename "$1" run-hook; then
backup="$CODEROOT"/.git/COMMIT_EDITMSG.backup
- cp "$CODEROOT"/.git/COMMIT_EDITMSG "$backup"
+ cp "$1" "$backup"
echo "error: gitlint failed, saved your commit msg as $backup"
exit 1
fi
diff --git a/Omni/Ide/hooks/post-checkout b/Omni/Ide/hooks/post-checkout
index 85541a2..a360517 100755
--- a/Omni/Ide/hooks/post-checkout
+++ b/Omni/Ide/hooks/post-checkout
@@ -14,6 +14,7 @@ elif [[ ${#changed[@]} -gt 0 ]]
then
MakeTags "${changed[@]}"
fi
+
## START BRANCHLESS CONFIG
git branchless hook post-checkout "$@"
diff --git a/Omni/Ide/hooks/post-merge b/Omni/Ide/hooks/post-merge
index fcfd314..bf0e996 100755
--- a/Omni/Ide/hooks/post-merge
+++ b/Omni/Ide/hooks/post-merge
@@ -1,5 +1,6 @@
#!/usr/bin/env bash
"${CODEROOT:?}"/Omni/Ide/hooks/post-checkout 'HEAD@{1}' HEAD
+
## START BRANCHLESS CONFIG
git branchless hook post-merge "$@"
diff --git a/Omni/Ide/hooks/pre-push b/Omni/Ide/hooks/pre-push
index 00110bd..adbf858 100755
--- a/Omni/Ide/hooks/pre-push
+++ b/Omni/Ide/hooks/pre-push
@@ -1,5 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail
+
+# Task manager: Ensure tasks are exported before push
+if [ -d .tasks ]; then
+ task export --flush 2>/dev/null || true
+fi
+
remote="$1"
z40=0000000000000000000000000000000000000000
IFS=" "
diff --git a/Omni/Ide/push.sh b/Omni/Ide/push.sh
index 5c22e07..f6d67f4 100755
--- a/Omni/Ide/push.sh
+++ b/Omni/Ide/push.sh
@@ -1,34 +1,136 @@
#!/usr/bin/env bash
-# Eventually convert to haskell, see:
-# - https://github.com/awakesecurity/nix-deploy/blob/master/src/Main.hs
-# - http://www.haskellforall.com/2018/08/nixos-in-production.html
-prefix=${PWD/$CODEROOT}
-if [[ "$prefix" == "" ]]
-then
- target="$1"
-else
- target="$prefix.$1"
-fi
-what=$(realpath "${CODEROOT:?}/_/nix/$target")
-# hack: get the domain from the systemd service. there does not seem to be a way
-# to get it from nix-instantiate. (or, maybe i should put this in bild --plan?)
-where=$(rg --only-matching --replace '$2' --regexp '(domainname ")(.*)(")' \
- "$what/etc/systemd/system/domainname.service")
-nix copy --to ssh://"$USER"@"$where" "$what"
-ssh "$USER"@"$where" sudo nix-env --profile /nix/var/nix/profiles/system --set "$what"
-switch_cmd=(
- systemd-run
- -E LOCALE_ARCHIVE
- --collect
- --no-ask-password
- --pipe
- --quiet
- --service-type=exec
- --unit=push-switch-to-configuration
- --wait
- "$what/bin/switch-to-configuration"
- "switch"
-)
-# shellcheck disable=SC2029
-ssh "$USER"@"$where" sudo "${switch_cmd[@]}"
-echo "${GRN}good: push: $target${NC}"
+# Deployment script for both NixOS configs and individual services.
+#
+# Usage:
+# push.sh Biz.nix # NixOS deploy (existing behavior)
+# push.sh Biz/PodcastItLater/Web.py # Service deploy (new behavior)
+#
+# For service deploys:
+# 1. Builds the target with bild (caches to S3 by default)
+# 2. Updates the manifest.json in S3 with new store path
+# 3. Deployer on target picks up change within 5 minutes
+#
+# Environment:
+# CODEROOT - Root of the codebase (required)
+# NIX_CACHE_KEY - Path to signing key (required for service deploys)
+
+set -euo pipefail
+
+# Colors
+GRN='\033[0;32m'
+YLW='\033[0;33m'
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+# Derive service name from target path
+# Biz/PodcastItLater/Web.py -> podcastitlater-web
+# Biz/Storybook.py -> storybook
+derive_service_name() {
+ local target="$1"
+ # Remove extension
+ local base="${target%.*}"
+ # Remove Biz/ prefix if present
+ base="${base#Biz/}"
+ # Convert slashes to hyphens and lowercase
+ echo "$base" | tr '/' '-' | tr '[:upper:]' '[:lower:]'
+}
+
+# NixOS deploy (existing behavior)
+nixos_deploy() {
+ local target="$1"
+ prefix=${PWD/$CODEROOT}
+ if [[ "$prefix" == "" ]]; then
+ target="$1"
+ else
+ target="$prefix.$1"
+ fi
+ what=$(realpath "${CODEROOT:?}/_/nix/$target")
+ # hack: get the domain from the systemd service
+ where=$(rg --only-matching --replace '$2' --regexp '(domainname ")(.*)(")' \
+ "$what/etc/systemd/system/domainname.service")
+ nix copy --to ssh://"$USER"@"$where" "$what"
+ ssh "$USER"@"$where" sudo nix-env --profile /nix/var/nix/profiles/system --set "$what"
+ switch_cmd=(
+ systemd-run
+ -E LOCALE_ARCHIVE
+ --setenv=XDG_RUNTIME_DIR=""
+ --collect
+ --no-ask-password
+ --pipe
+ --quiet
+ --service-type=exec
+ --unit=push-switch-to-configuration
+ --wait
+ "$what/bin/switch-to-configuration"
+ "switch"
+ )
+ # shellcheck disable=SC2029
+ ssh "$USER"@"$where" sudo "${switch_cmd[@]}"
+ echo -e "${GRN}good: push: $target${NC}"
+}
+
+# Service deploy (new behavior)
+service_deploy() {
+ local target="$1"
+ local service_name
+ service_name=$(derive_service_name "$target")
+
+ echo -e "${YLW}info: push: deploying service $service_name${NC}"
+
+ # 1. Build and cache
+ echo -e "${YLW}info: push: building $target${NC}"
+ if ! bild "$target"; then
+ echo -e "${RED}fail: push: bild failed${NC}"
+ exit 1
+ fi
+
+ # Get store path from symlink in _/nix/
+ local symlink_path="${CODEROOT}/_/nix/${service_name}"
+ if [[ ! -L "$symlink_path" ]]; then
+ echo -e "${RED}fail: push: symlink not found: $symlink_path${NC}"
+ exit 1
+ fi
+
+ local store_path
+ store_path=$(readlink "$symlink_path")
+
+ if [[ -z "$store_path" ]]; then
+ echo -e "${RED}fail: push: could not read store path from symlink${NC}"
+ exit 1
+ fi
+
+ echo -e "${YLW}info: push: cached $store_path${NC}"
+
+ # 2. Get git revision
+ local revision
+ revision=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
+
+ # 3. Update manifest in S3
+ echo -e "${YLW}info: push: updating manifest${NC}"
+ "${CODEROOT}/_/nix/deploy-manifest/bin/deploy-manifest" update "$service_name" "$store_path" "$revision" || {
+ echo -e "${RED}fail: push: manifest update failed${NC}"
+ exit 1
+ }
+
+ echo -e "${GRN}good: push: $service_name deployed (deployer will pick up in <5 min)${NC}"
+}
+
+# Main
+main() {
+ if [[ $# -lt 1 ]]; then
+ echo "Usage: push.sh <target>"
+ echo " target.nix -> NixOS deploy"
+ echo " target.py/.hs/.. -> Service deploy"
+ exit 1
+ fi
+
+ local target="$1"
+
+ if [[ "$target" == *.nix ]]; then
+ nixos_deploy "$target"
+ else
+ service_deploy "$target"
+ fi
+}
+
+main "$@"
diff --git a/Omni/Ide/repl.sh b/Omni/Ide/repl.sh
index 3b6a536..6225078 100755
--- a/Omni/Ide/repl.sh
+++ b/Omni/Ide/repl.sh
@@ -10,6 +10,7 @@
###
### Options:
### --bash start bash instead of the target language repl
+### --cmd x run 'x' instead of bash, or the target language repl
help() {
sed -rn 's/^### ?//;T;p' "$0"
}
@@ -23,9 +24,13 @@ fi
if [[ "$1" == "--bash" ]]; then
CMD="bash"
shift
+ elif [[ "$1" == "--cmd" ]]; then
+ shift
+ CMD="$1"
+ shift
fi
targets="${*:?}"
- json=$(bild --plan "${targets[@]}")
+ json=$(bild --plan "${targets[@]}" 2>&1)
mapfile -t langdeps < <(jq --raw-output '.[].langdeps | select(length > 0) | join("\n")' <<< "$json")
mapfile -t sysdeps < <(jq --raw-output '.[].sysdeps | select(length > 0) | join("\n")' <<< "$json")
mapfile -t rundeps < <(jq --raw-output '.[].rundeps | select(length > 0) | join("\n")' <<< "$json")
diff --git a/Omni/Ide/run.sh b/Omni/Ide/run.sh
index e300fcc..c426969 100755
--- a/Omni/Ide/run.sh
+++ b/Omni/Ide/run.sh
@@ -2,6 +2,6 @@
set -eu
target=$1
shift
-out=$(bild --plan "$target" | jq --raw-output ".\"${target}\".out")
-[[ -f "$out" ]] || bild "$target"
+out=$(bild --plan "$target" 2>&1 | tail -1 | jq --raw-output ".\"${target}\".out")
+bild "$target" || exit 1
exec "${CODEROOT:?}/_/bin/$out" "$@"
diff --git a/Omni/Ide/typecheck.sh b/Omni/Ide/typecheck.sh
new file mode 100755
index 0000000..fe11ef5
--- /dev/null
+++ b/Omni/Ide/typecheck.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+###
+### typecheck a given target
+###
+### > typecheck.sh <target..>
+###
+### Uses repl.sh to provision the environment for target, then runs the
+### appropriate typechecker for the given module.
+###
+help() {
+ sed -rn 's/^### ?//;T;p' "$0"
+}
+if [[ $# == 0 ]] || [[ "$1" == "-h" ]]; then
+ help
+ exit 1
+fi
+target="$1"
+
+# Determine file extension
+ext="${target##*.}"
+
+case "$ext" in
+ py)
+ # Python: use mypy via repl.sh environment
+ repl.sh --cmd "python -m mypy $target" "$target"
+ ;;
+ hs)
+ # Haskell: use ghc -fno-code for typechecking without code generation
+ # Use repl.sh to provision the right GHC environment with dependencies
+ repl.sh --cmd "ghc -fno-code -i${CODEROOT:?} $target" "$target"
+ ;;
+ *)
+ echo "Unknown file extension: $ext"
+ echo "Typechecking not supported for this file type"
+ exit 1
+ ;;
+esac
diff --git a/Omni/Jr.hs b/Omni/Jr.hs
new file mode 100755
index 0000000..48dbf90
--- /dev/null
+++ b/Omni/Jr.hs
@@ -0,0 +1,1046 @@
+#!/usr/bin/env run.sh
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : out jr
+-- : dep sqlite-simple
+-- : dep warp
+-- : dep servant-server
+-- : dep lucid
+-- : dep servant-lucid
+module Omni.Jr where
+
+import Alpha
+import qualified Data.Aeson as Aeson
+import qualified Data.ByteString.Lazy.Char8 as BLC
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Omni.Agent.Core as AgentCore
+import qualified Omni.Agent.Engine as Engine
+import qualified Omni.Agent.Worker as AgentWorker
+import qualified Omni.Cli as Cli
+import qualified Omni.Fact as Fact
+import qualified Omni.Jr.Web as Web
+import qualified Omni.Task as Task
+import qualified Omni.Task.Core as TaskCore
+import qualified Omni.Test as Test
+import qualified System.Console.Docopt as Docopt
+import qualified System.Directory as Directory
+import System.Environment (withArgs)
+import qualified System.Environment as Env
+import qualified System.Exit as Exit
+import System.FilePath (takeFileName)
+import qualified System.IO as IO
+import qualified System.Process as Process
+
+main :: IO ()
+main = Cli.main plan
+
+plan :: Cli.Plan ()
+plan =
+ Cli.Plan
+ { Cli.help = help,
+ Cli.move = move,
+ Cli.test = test,
+ Cli.tidy = \_ -> pure ()
+ }
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+jr
+
+Usage:
+ jr task [<args>...]
+ jr work [<task-id>] [--engine=ENGINE]
+ jr prompt <task-id>
+ jr web [--port=PORT]
+ jr review [<task-id>] [--auto]
+ jr loop [--delay=SECONDS]
+ jr facts list [--project=PROJECT] [--json]
+ jr facts show <fact-id> [--json]
+ jr facts add <project> <content> [--files=FILES] [--task=TASK] [--confidence=CONF] [--json]
+ jr facts delete <fact-id> [--json]
+ jr test
+ jr (-h | --help)
+
+Commands:
+ task Manage tasks
+ work Start a worker agent on a task
+ prompt Show the system prompt that would be sent to the agent
+ web Start the web UI server
+ review Review a completed task (show diff, accept/reject)
+ loop Run autonomous work+review loop
+ facts Manage knowledge base facts
+
+Options:
+ -h --help Show this help
+ --port=PORT Port for web server [default: 8080]
+ --engine=ENGINE LLM engine: openrouter, ollama, amp [default: openrouter]
+ --auto Auto-review: accept if tests pass, reject if they fail
+ --delay=SECONDS Delay between loop iterations [default: 5]
+ --project=PROJECT Filter facts by project
+ --files=FILES Comma-separated list of related files
+ --task=TASK Source task ID
+ --confidence=CONF Confidence level 0.0-1.0 [default: 0.8]
+ --json Output in JSON format
+|]
+
+move :: Cli.Arguments -> IO ()
+move args
+ | args `Cli.has` Cli.command "task" = do
+ let extraArgs = Cli.getAllArgs args (Cli.argument "args")
+ withArgs extraArgs Task.main
+ | args `Cli.has` Cli.command "web" = do
+ let port = case Cli.getArg args (Cli.longOption "port") of
+ Just p -> fromMaybe Web.defaultPort (readMaybe p)
+ Nothing -> Web.defaultPort
+ Web.run port
+ | args `Cli.has` Cli.command "prompt" = do
+ case Cli.getArg args (Cli.argument "task-id") of
+ Nothing -> do
+ IO.hPutStrLn IO.stderr "Error: task-id is required"
+ Exit.exitFailure
+ Just tidStr -> do
+ let tid = Text.pack tidStr
+ tasks <- TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> do
+ IO.hPutStrLn IO.stderr ("Error: task not found: " <> tidStr)
+ Exit.exitFailure
+ Just task -> do
+ prompt <- AgentWorker.buildFullPrompt task
+ putText prompt
+ | args `Cli.has` Cli.command "work" = do
+ -- Always run in current directory
+ let path = "."
+
+ -- Infer name from current directory
+ absPath <- Directory.getCurrentDirectory
+ let name = Text.pack (takeFileName absPath)
+
+ -- Parse engine flag
+ let engineType = case Cli.getArg args (Cli.longOption "engine") of
+ Just "ollama" -> AgentCore.EngineOllama
+ Just "amp" -> AgentCore.EngineAmp
+ _ -> AgentCore.EngineOpenRouter
+
+ let worker =
+ AgentCore.Worker
+ { AgentCore.workerName = name,
+ AgentCore.workerPid = Nothing,
+ AgentCore.workerStatus = AgentCore.Idle,
+ AgentCore.workerPath = path,
+ AgentCore.workerQuiet = False, -- Show ANSI status bar for manual work
+ AgentCore.workerEngine = engineType
+ }
+
+ let taskId = fmap Text.pack (Cli.getArg args (Cli.argument "task-id"))
+
+ AgentWorker.start worker taskId
+ | args `Cli.has` Cli.command "review" = do
+ let autoMode = args `Cli.has` Cli.longOption "auto"
+ case Cli.getArg args (Cli.argument "task-id") of
+ Just tidStr -> reviewTask (Text.pack tidStr) autoMode
+ Nothing -> do
+ -- Find tasks in Review status
+ tasks <- TaskCore.loadTasks
+ let reviewTasks = filter (\t -> TaskCore.taskStatus t == TaskCore.Review) tasks
+ case reviewTasks of
+ [] -> putText "No tasks in Review status."
+ (t : _) -> reviewTask (TaskCore.taskId t) autoMode
+ | args `Cli.has` Cli.command "loop" = do
+ let delay = case Cli.getArg args (Cli.longOption "delay") of
+ Just d -> fromMaybe 5 (readMaybe d)
+ Nothing -> 5
+ runLoop delay
+ | args `Cli.has` Cli.command "facts" = handleFacts args
+ | otherwise = putText (str <| Docopt.usage help)
+
+-- | Run the autonomous loop: work -> review -> repeat
+runLoop :: Int -> IO ()
+runLoop delaySec = do
+ putText "[loop] Starting autonomous jr loop..."
+ putText ("[loop] Delay between iterations: " <> tshow delaySec <> "s")
+ go
+ where
+ go = do
+ -- First check for tasks to review (prioritize finishing work)
+ reviewResult <- reviewPending
+ if reviewResult
+ then do
+ -- Reviewed something, continue loop immediately
+ threadDelay (delaySec * 1000000)
+ go
+ else do
+ -- No reviews, check for ready work
+ readyTasks <- TaskCore.getReadyTasks
+ case readyTasks of
+ [] -> do
+ putText "[loop] No ready tasks, no pending reviews."
+ (task : _) -> do
+ putText ""
+ putText ("[loop] === Working on: " <> TaskCore.taskId task <> " ===")
+ -- Run worker (this blocks until the engine completes)
+ absPath <- Directory.getCurrentDirectory
+ let name = Text.pack (takeFileName absPath)
+ let worker =
+ AgentCore.Worker
+ { AgentCore.workerName = name,
+ AgentCore.workerPid = Nothing,
+ AgentCore.workerStatus = AgentCore.Idle,
+ AgentCore.workerPath = ".",
+ AgentCore.workerQuiet = True, -- No ANSI status bar in loop mode
+ AgentCore.workerEngine = AgentCore.EngineOpenRouter -- Default for loop
+ }
+ putText "[loop] Starting worker..."
+ AgentWorker.start worker (Just (TaskCore.taskId task))
+ putText "[loop] Worker finished."
+
+ -- Delay and loop
+ putText ("[loop] Sleeping " <> tshow delaySec <> "s...")
+ threadDelay (delaySec * 1000000)
+ go
+
+ -- Returns True if a task was reviewed, False otherwise
+ reviewPending :: IO Bool
+ reviewPending = do
+ tasks <- TaskCore.loadTasks
+ let reviewTasks = filter (\t -> TaskCore.taskStatus t == TaskCore.Review) tasks
+ case reviewTasks of
+ [] -> pure False
+ (t : _) -> do
+ putText ""
+ putText ("[loop] === Reviewing: " <> TaskCore.taskId t <> " ===")
+ tryAutoReview (TaskCore.taskId t)
+ pure True
+
+ -- Auto-review that doesn't exit on missing commit
+ tryAutoReview :: Text -> IO ()
+ tryAutoReview tid = do
+ tasks <- TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> do
+ putText ("[review] Task " <> tid <> " not found.")
+ Just task -> do
+ let grepArg = "--grep=" <> Text.unpack tid
+ (code, shaOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%H", "-n", "1", grepArg]
+ ""
+
+ if code /= Exit.ExitSuccess || null shaOut
+ then do
+ putText "[review] No commit found for this task."
+ putText "[review] Resetting to Open for retry."
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.System
+ else do
+ let commitSha = case List.lines shaOut of
+ (x : _) -> x
+ [] -> ""
+
+ -- Check for merge conflicts
+ conflictResult <- checkMergeConflict commitSha
+ case conflictResult of
+ Just conflictFiles -> do
+ putText "[review] MERGE CONFLICT DETECTED"
+ traverse_ (\f -> putText (" - " <> f)) conflictFiles
+ handleConflict tid conflictFiles commitSha
+ Nothing -> do
+ autoReview tid task commitSha
+
+-- | Handle merge conflict during review (Gerrit-style: provide rich context)
+handleConflict :: Text -> [Text] -> String -> IO ()
+handleConflict tid conflictFiles commitSha = do
+ maybeCtx <- TaskCore.getRetryContext tid
+ let attempt = maybe 1 (\c -> TaskCore.retryAttempt c + 1) maybeCtx
+
+ let conflictComment = buildConflictComment commitSha conflictFiles attempt
+ _ <- TaskCore.addComment tid conflictComment TaskCore.Junior
+
+ if attempt > 3
+ then do
+ putText "[review] Task has failed 3 times. Needs human intervention."
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.System
+ else do
+ conflictDetails <- gatherConflictContext commitSha conflictFiles
+ maybeExistingCtx <- TaskCore.getRetryContext tid
+ let currentReason = "attempt " <> tshow attempt <> ":\n" <> conflictDetails
+ let accumulatedReason = case maybeExistingCtx of
+ Nothing -> currentReason
+ Just ctx -> TaskCore.retryReason ctx <> "\n\n" <> currentReason
+ TaskCore.setRetryContext
+ TaskCore.RetryContext
+ { TaskCore.retryTaskId = tid,
+ TaskCore.retryOriginalCommit = Text.pack commitSha,
+ TaskCore.retryConflictFiles = conflictFiles,
+ TaskCore.retryAttempt = attempt,
+ TaskCore.retryReason = accumulatedReason,
+ TaskCore.retryNotes = maybeExistingCtx +> TaskCore.retryNotes
+ }
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.System
+ putText ("[review] Task " <> tid <> " returned to queue (attempt " <> tshow attempt <> "/3).")
+
+-- | Build a review comment for merge conflicts
+buildConflictComment :: String -> [Text] -> Int -> Text
+buildConflictComment commitSha conflictFiles attempt =
+ Text.unlines
+ [ "## Auto-Review: Merge Conflict",
+ "",
+ "**Commit:** " <> Text.pack (take 8 commitSha),
+ "**Result:** ✗ MERGE CONFLICT",
+ "**Attempt:** " <> tshow attempt <> "/3",
+ "",
+ "### Conflicting Files",
+ Text.unlines (map ("- " <>) conflictFiles),
+ "Task returned to queue for conflict resolution."
+ ]
+
+-- | Gather Gerrit-style conflict context for the coder
+gatherConflictContext :: String -> [Text] -> IO Text
+gatherConflictContext commitSha conflictFiles = do
+ commitInfo <- getCommitInfo commitSha
+ currentHeadInfo <- getCurrentHeadInfo
+ fileDiffs <- traverse (getFileConflictInfo commitSha <. Text.unpack) conflictFiles
+
+ pure
+ <| Text.unlines
+ [ "MERGE CONFLICT - Your changes could not be cleanly applied",
+ "",
+ "== Your Commit ==",
+ commitInfo,
+ "",
+ "== Current HEAD ==",
+ currentHeadInfo,
+ "",
+ "== Conflicting Files ==",
+ Text.unlines fileDiffs,
+ "",
+ "== Resolution Instructions ==",
+ "1. The codebase has been updated since your work",
+ "2. Review the current state of conflicting files",
+ "3. Re-implement your changes on top of the current code",
+ "4. Ensure your changes still make sense given the updates"
+ ]
+
+-- | Get info about the commit that caused the conflict
+getCommitInfo :: String -> IO Text
+getCommitInfo sha = do
+ (_, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "-1", "--format=%h %s%n%b", sha]
+ ""
+ pure <| Text.pack out
+
+-- | Get info about current HEAD
+getCurrentHeadInfo :: IO Text
+getCurrentHeadInfo = do
+ (_, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "-1", "--format=%h %s (%cr)"]
+ ""
+ pure <| Text.pack out
+
+-- | Get file-level conflict context showing what changed in both branches
+getFileConflictInfo :: String -> FilePath -> IO Text
+getFileConflictInfo commitSha filePath = do
+ yourChanges <- getYourChangesToFile commitSha filePath
+ recentChanges <- getRecentChangesToFile filePath
+ pure
+ <| Text.unlines
+ [ "--- " <> Text.pack filePath <> " ---",
+ "",
+ "Your changes to this file:",
+ yourChanges,
+ "",
+ "Recent changes by others:",
+ recentChanges
+ ]
+
+-- | Get a summary of changes in a specific commit to a file
+getYourChangesToFile :: String -> FilePath -> IO Text
+getYourChangesToFile commitSha filePath = do
+ (code, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["show", "--stat", commitSha, "--", filePath]
+ ""
+ case code of
+ Exit.ExitSuccess -> pure <| Text.pack (take 500 out)
+ Exit.ExitFailure _ -> pure "(unable to get diff)"
+
+-- | Get recent changes to a file (last few commits)
+getRecentChangesToFile :: FilePath -> IO Text
+getRecentChangesToFile filePath = do
+ (code, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "-3", "--oneline", "--", filePath]
+ ""
+ case code of
+ Exit.ExitSuccess -> pure <| Text.pack out
+ Exit.ExitFailure _ -> pure "(unable to get history)"
+
+-- | Interactive review command (jr review <task-id>)
+reviewTask :: Text -> Bool -> IO ()
+reviewTask tid autoMode = do
+ tasks <- TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> do
+ putText ("Task " <> tid <> " not found.")
+ Exit.exitFailure
+ Just task -> do
+ unless autoMode <| TaskCore.showTaskDetailed task
+
+ let grepArg = "--grep=" <> Text.unpack tid
+ (code, shaOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%H", "-n", "1", grepArg]
+ ""
+
+ when (code /= Exit.ExitSuccess || null shaOut) <| do
+ putText "\nNo commit found for this task."
+ putText "The worker may not have completed yet, or the commit message doesn't include the task ID."
+ Exit.exitFailure
+
+ let commitSha = case List.lines shaOut of
+ (x : _) -> x
+ [] -> ""
+
+ -- Check for merge conflicts before showing diff
+ conflictResult <- checkMergeConflict commitSha
+ case conflictResult of
+ Just conflictFiles -> do
+ putText "\n=== MERGE CONFLICT DETECTED ==="
+ traverse_ (\f -> putText (" - " <> f)) conflictFiles
+ handleConflict tid conflictFiles commitSha
+ Nothing -> do
+ if autoMode
+ then autoReview tid task commitSha
+ else interactiveReview tid task commitSha
+
+-- | Auto-review: run tests on namespace, accept if pass, reject if fail
+autoReview :: Text -> TaskCore.Task -> String -> IO ()
+autoReview tid task commitSha = do
+ putText "[review] Running automated review..."
+ putText ("[review] Commit: " <> Text.pack (take 8 commitSha))
+
+ let namespace = fromMaybe "." (TaskCore.taskNamespace task)
+ let testTarget = Text.unpack namespace
+
+ putText ("[review] Testing: " <> Text.pack testTarget)
+
+ (testCode, testOut, testErr) <-
+ Process.readProcessWithExitCode
+ "bild"
+ ["--test", testTarget]
+ ""
+
+ case testCode of
+ Exit.ExitSuccess -> do
+ putText "[review] ✓ Tests passed."
+ let reviewComment = buildReviewComment commitSha testTarget True testOut testErr
+ _ <- TaskCore.addComment tid reviewComment TaskCore.Junior
+ TaskCore.clearRetryContext tid
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Done [] TaskCore.System
+ putText ("[review] Task " <> tid <> " -> Done")
+ addCompletionSummary tid commitSha
+ extractFacts tid commitSha
+ checkEpicCompletion task
+ Exit.ExitFailure code -> do
+ putText ("[review] ✗ Tests failed (exit " <> tshow code <> ")")
+ let reason = "Test failure:\n" <> Text.pack testOut <> Text.pack testErr
+
+ maybeCtx <- TaskCore.getRetryContext tid
+ let attempt = maybe 1 (\ctx -> TaskCore.retryAttempt ctx + 1) maybeCtx
+
+ let reviewComment = buildReviewComment commitSha testTarget False testOut testErr
+ _ <- TaskCore.addComment tid reviewComment TaskCore.Junior
+
+ if attempt > 3
+ then do
+ putText "[review] Task has failed 3 times. Needs human intervention."
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.System
+ else do
+ let currentReason = "attempt " <> tshow attempt <> ": " <> reason
+ let accumulatedReason = case maybeCtx of
+ Nothing -> currentReason
+ Just ctx -> TaskCore.retryReason ctx <> "\n" <> currentReason
+ TaskCore.setRetryContext
+ TaskCore.RetryContext
+ { TaskCore.retryTaskId = tid,
+ TaskCore.retryOriginalCommit = Text.pack commitSha,
+ TaskCore.retryConflictFiles = [],
+ TaskCore.retryAttempt = attempt,
+ TaskCore.retryReason = accumulatedReason,
+ TaskCore.retryNotes = maybeCtx +> TaskCore.retryNotes
+ }
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.System
+ putText ("[review] Task " <> tid <> " reopened (attempt " <> tshow attempt <> "/3).")
+
+-- | Build a review comment summarizing what was tested and the result
+buildReviewComment :: String -> String -> Bool -> String -> String -> Text
+buildReviewComment commitSha testTarget passed testOut testErr =
+ Text.unlines
+ [ "## Auto-Review",
+ "",
+ "**Commit:** " <> Text.pack (take 8 commitSha),
+ "**Test target:** " <> Text.pack testTarget,
+ "**Result:** " <> if passed then "✓ PASSED" else "✗ FAILED",
+ "",
+ if passed
+ then "All tests passed. Task accepted."
+ else
+ Text.unlines
+ [ "### Test Output",
+ "```",
+ Text.pack (truncateOutput 1000 (testOut ++ testErr)),
+ "```",
+ "",
+ "Task rejected and returned to queue for retry."
+ ]
+ ]
+
+-- | Truncate output to a maximum number of characters
+truncateOutput :: Int -> String -> String
+truncateOutput maxLen s
+ | length s <= maxLen = s
+ | otherwise = take maxLen s ++ "\n... (truncated)"
+
+-- | Interactive review with user prompts
+interactiveReview :: Text -> TaskCore.Task -> String -> IO ()
+interactiveReview tid task commitSha = do
+ putText "\n=== Diff for this task ===\n"
+ _ <- Process.rawSystem "git" ["show", commitSha]
+
+ putText "\n[a]ccept / [r]eject / [s]kip? "
+ IO.hFlush IO.stdout
+ choice <- getLine
+
+ case Text.toLower choice of
+ c
+ | "a" `Text.isPrefixOf` c -> do
+ let acceptComment = buildHumanReviewComment commitSha True Nothing
+ _ <- TaskCore.addComment tid acceptComment TaskCore.Human
+ TaskCore.clearRetryContext tid
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Done [] TaskCore.Human
+ putText ("Task " <> tid <> " marked as Done.")
+ addCompletionSummary tid commitSha
+ extractFacts tid commitSha
+ checkEpicCompletion task
+ | "r" `Text.isPrefixOf` c -> do
+ putText "Enter rejection reason: "
+ IO.hFlush IO.stdout
+ reason <- getLine
+ let rejectComment = buildHumanReviewComment commitSha False (Just reason)
+ _ <- TaskCore.addComment tid rejectComment TaskCore.Human
+ maybeCtx <- TaskCore.getRetryContext tid
+ let attempt = maybe 1 (\ctx -> TaskCore.retryAttempt ctx + 1) maybeCtx
+ let currentReason = "attempt " <> tshow attempt <> ": rejected: " <> reason
+ let accumulatedReason = case maybeCtx of
+ Nothing -> currentReason
+ Just ctx -> TaskCore.retryReason ctx <> "\n" <> currentReason
+ TaskCore.setRetryContext
+ TaskCore.RetryContext
+ { TaskCore.retryTaskId = tid,
+ TaskCore.retryOriginalCommit = Text.pack commitSha,
+ TaskCore.retryConflictFiles = [],
+ TaskCore.retryAttempt = attempt,
+ TaskCore.retryReason = accumulatedReason,
+ TaskCore.retryNotes = maybeCtx +> TaskCore.retryNotes
+ }
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.Human
+ putText ("Task " <> tid <> " reopened (attempt " <> tshow attempt <> "/3).")
+ | otherwise -> putText "Skipped; no status change."
+
+-- | Build a human review comment
+buildHumanReviewComment :: String -> Bool -> Maybe Text -> Text
+buildHumanReviewComment commitSha accepted maybeReason =
+ Text.unlines
+ [ "## Human Review",
+ "",
+ "**Commit:** " <> Text.pack (take 8 commitSha),
+ "**Result:** " <> if accepted then "✓ ACCEPTED" else "✗ REJECTED",
+ case maybeReason of
+ Just reason -> "**Reason:** " <> reason
+ Nothing -> ""
+ ]
+
+-- | Check if a commit can be cleanly cherry-picked onto live
+-- Returns Nothing if clean, Just [conflicting files] if conflict
+checkMergeConflict :: String -> IO (Maybe [Text])
+checkMergeConflict commitSha = do
+ -- Save current state
+ (_, _, _) <- Process.readProcessWithExitCode "git" ["branch", "--show-current"] ""
+ (_, origHead, _) <- Process.readProcessWithExitCode "git" ["rev-parse", "HEAD"] ""
+
+ -- Try cherry-pick
+ (cpCode, _, cpErr) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["cherry-pick", "--no-commit", commitSha]
+ ""
+
+ -- Always abort/reset regardless of result
+ _ <- Process.readProcessWithExitCode "git" ["cherry-pick", "--abort"] ""
+ _ <- Process.readProcessWithExitCode "git" ["reset", "--hard", List.head (List.lines origHead)] ""
+
+ case cpCode of
+ Exit.ExitSuccess -> pure Nothing
+ Exit.ExitFailure _ -> do
+ -- Parse conflict files from error message
+ let errLines = Text.lines (Text.pack cpErr)
+ conflictLines = filter (Text.isPrefixOf "CONFLICT") errLines
+ -- Extract file names (rough parsing)
+ files = mapMaybe extractConflictFile conflictLines
+ pure (Just (if null files then ["(unknown files)"] else files))
+
+extractConflictFile :: Text -> Maybe Text
+extractConflictFile line =
+ -- CONFLICT (content): Merge conflict in path/to/file.hs
+ case Text.breakOn "Merge conflict in " line of
+ (_, rest)
+ | not (Text.null rest) -> Just (Text.strip (Text.drop 18 rest))
+ _ -> case Text.breakOn "in " line of
+ (_, rest)
+ | not (Text.null rest) -> Just (Text.strip (Text.drop 3 rest))
+ _ -> Nothing
+
+-- | Generate and add a completion summary comment for a task
+addCompletionSummary :: Text -> String -> IO ()
+addCompletionSummary tid commitSha = do
+ -- Get the diff and commit message for this commit
+ (diffCode, diffOut, _) <- Process.readProcessWithExitCode "git" ["show", "--stat", commitSha] ""
+ (msgCode, msgOut, _) <- Process.readProcessWithExitCode "git" ["log", "-1", "--format=%B", commitSha] ""
+
+ when (diffCode == Exit.ExitSuccess && msgCode == Exit.ExitSuccess) <| do
+ -- Get list of modified files
+ (filesCode, filesOut, _) <- Process.readProcessWithExitCode "git" ["diff-tree", "--no-commit-id", "--name-only", "-r", commitSha] ""
+
+ let files = if filesCode == Exit.ExitSuccess then List.lines filesOut else []
+ commitMessage = Text.pack msgOut
+ diffSummary = Text.pack diffOut
+
+ -- Build prompt for llm
+ let prompt = buildCompletionPrompt tid commitMessage diffSummary files
+
+ -- Try to get API key
+ maybeApiKey <- Env.lookupEnv "OPENROUTER_API_KEY"
+ case maybeApiKey of
+ Nothing -> do
+ putText "[review] Warning: OPENROUTER_API_KEY not set, skipping completion summary"
+ Just apiKey -> do
+ -- Call LLM via Engine.chat
+ let llm = Engine.defaultLLM {Engine.llmApiKey = Text.pack apiKey}
+ messages = [Engine.Message Engine.User prompt Nothing Nothing]
+
+ result <- Engine.chat llm [] messages
+ case result of
+ Left err -> do
+ putText ("[review] Failed to generate completion summary: " <> err)
+ Right msg -> do
+ let summary = Text.strip (Engine.msgContent msg)
+ unless (Text.null summary) <| do
+ _ <- TaskCore.addComment tid ("## Completion Summary\n\n" <> summary) TaskCore.Junior
+ putText "[review] Added completion summary comment"
+
+-- | Build prompt for LLM to generate completion summary
+buildCompletionPrompt :: Text -> Text -> Text -> [String] -> Text
+buildCompletionPrompt tid commitMessage diffSummary files =
+ Text.unlines
+ [ "Generate a concise completion summary for this task. The summary should be 2-4 sentences.",
+ "",
+ "Task ID: " <> tid,
+ "",
+ "Commit Message:",
+ commitMessage,
+ "",
+ "Files Modified (" <> tshow (length files) <> "):",
+ Text.unlines (map Text.pack (take 10 files)),
+ if length files > 10 then "... and " <> tshow (length files - 10) <> " more files" else "",
+ "",
+ "Diff Summary:",
+ diffSummary,
+ "",
+ "Write a brief summary that includes:",
+ "- What was accomplished (from the commit message and changes)",
+ "- Key files that were modified (mention 2-3 most important ones)",
+ "",
+ "Keep it professional and concise. Do NOT include markdown headers or formatting.",
+ "Just return the plain summary text."
+ ]
+
+-- | Extract facts from completed task
+extractFacts :: Text -> String -> IO ()
+extractFacts tid commitSha = do
+ -- Get the diff for this commit
+ (_, diffOut, _) <- Process.readProcessWithExitCode "git" ["show", "--stat", commitSha] ""
+
+ -- Get task context
+ tasks <- TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> pure ()
+ Just task -> do
+ let prompt = buildFactExtractionPrompt task diffOut
+
+ -- Try to get API key
+ maybeApiKey <- Env.lookupEnv "OPENROUTER_API_KEY"
+ case maybeApiKey of
+ Nothing -> do
+ putText "[facts] Warning: OPENROUTER_API_KEY not set, skipping fact extraction"
+ Just apiKey -> do
+ -- Call LLM via Engine.chat
+ let llm = Engine.defaultLLM {Engine.llmApiKey = Text.pack apiKey}
+ messages = [Engine.Message Engine.User prompt Nothing Nothing]
+
+ result <- Engine.chat llm [] messages
+ case result of
+ Left err -> do
+ putText ("[facts] Failed to extract facts: " <> err)
+ Right msg -> do
+ parseFacts tid (Text.unpack (Engine.msgContent msg))
+
+-- | Build prompt for LLM to extract facts from completed task
+buildFactExtractionPrompt :: TaskCore.Task -> String -> Text
+buildFactExtractionPrompt task diffSummary =
+ Text.unlines
+ [ "You just completed the following task:",
+ "",
+ "Task: " <> TaskCore.taskId task,
+ "Title: " <> TaskCore.taskTitle task,
+ "Description: " <> TaskCore.taskDescription task,
+ "",
+ "Diff summary:",
+ Text.pack diffSummary,
+ "",
+ "List any facts you learned about this codebase that would be useful for future tasks.",
+ "Each fact should be on its own line, starting with 'FACT: '.",
+ "Include the relevant file paths in brackets after each fact.",
+ "Example: FACT: The Alpha module re-exports common Prelude functions [Alpha.hs]",
+ "If you didn't learn anything notable, respond with 'NO_FACTS'."
+ ]
+
+-- | Parse facts from LLM output and add them to the knowledge base
+parseFacts :: Text -> String -> IO ()
+parseFacts tid output = do
+ let outputLines = Text.lines (Text.pack output)
+ factLines = filter (Text.isPrefixOf "FACT: ") outputLines
+ traverse_ (addFactFromLine tid) factLines
+
+-- | Parse a single fact line and add it to the knowledge base
+addFactFromLine :: Text -> Text -> IO ()
+addFactFromLine tid line = do
+ let content = Text.drop 6 line -- Remove "FACT: "
+ (factText, filesRaw) = Text.breakOn " [" content
+ files = parseFiles filesRaw
+ _ <- Fact.createFact "Omni" factText files (Just tid) 0.7 -- Lower initial confidence
+ putText ("[facts] Added: " <> factText)
+
+-- | Parse file list from brackets [file1, file2, ...]
+parseFiles :: Text -> [Text]
+parseFiles raw
+ | Text.null raw = []
+ | not ("[" `Text.isInfixOf` raw) = []
+ | otherwise =
+ let stripped = Text.strip (Text.dropWhile (/= '[') raw)
+ inner = Text.dropEnd 1 (Text.drop 1 stripped) -- Remove [ and ]
+ trimmed = Text.strip inner
+ in if Text.null trimmed
+ then []
+ else map Text.strip (Text.splitOn "," inner)
+
+-- | Generate a summary comment for an epic when all children are complete
+generateEpicSummary :: Text -> TaskCore.Task -> [TaskCore.Task] -> IO ()
+generateEpicSummary epicId epic children = do
+ putText "[epic] Generating summary for completed epic..."
+
+ -- Try to get API key
+ maybeApiKey <- Env.lookupEnv "OPENROUTER_API_KEY"
+ case maybeApiKey of
+ Nothing -> do
+ putText "[epic] Warning: OPENROUTER_API_KEY not set, skipping summary generation"
+ pure ()
+ Just apiKey -> do
+ -- Build the prompt for LLM
+ prompt <- buildEpicSummaryPrompt epic children
+
+ -- Call LLM
+ let llm = Engine.defaultLLM {Engine.llmApiKey = Text.pack apiKey}
+ messages = [Engine.Message Engine.User prompt Nothing Nothing]
+
+ result <- Engine.chat llm [] messages
+ case result of
+ Left err -> do
+ putText ("[epic] Failed to generate summary: " <> err)
+ Right msg -> do
+ let summary = Engine.msgContent msg
+ _ <- TaskCore.addComment epicId summary TaskCore.Junior
+ putText "[epic] Summary comment added to epic"
+
+-- | Build a prompt for the LLM to summarize an epic
+buildEpicSummaryPrompt :: TaskCore.Task -> [TaskCore.Task] -> IO Text
+buildEpicSummaryPrompt epic children = do
+ -- Get commit info for each child task
+ childSummaries <- traverse summarizeChildTask children
+
+ pure
+ <| Text.unlines
+ [ "Generate a concise summary comment for this completed epic.",
+ "",
+ "## Epic Information",
+ "**Title:** " <> TaskCore.taskTitle epic,
+ "**Description:**",
+ TaskCore.taskDescription epic,
+ "",
+ "## Completed Child Tasks (" <> tshow (length children) <> ")",
+ Text.unlines childSummaries,
+ "",
+ "## Instructions",
+ "Create a markdown summary that includes:",
+ "1. A brief overview of what was accomplished",
+ "2. List of completed tasks with their titles",
+ "3. Key changes or files modified (if mentioned in task descriptions)",
+ "4. Any notable patterns or themes across the work",
+ "",
+ "Format the summary as a markdown comment starting with '## Epic Summary'.",
+ "Keep it concise but informative."
+ ]
+
+-- | Summarize a single child task for the epic summary
+summarizeChildTask :: TaskCore.Task -> IO Text
+summarizeChildTask task = do
+ -- Try to get commit info
+ let grepArg = "--grep=" <> Text.unpack (TaskCore.taskId task)
+ (code, shaOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%h %s", "-n", "1", grepArg]
+ ""
+
+ let commitInfo =
+ if code == Exit.ExitSuccess && not (null shaOut)
+ then " [" <> Text.pack (take 80 shaOut) <> "]"
+ else ""
+
+ -- Get files changed in the commit
+ filesInfo <- getCommitFiles (TaskCore.taskId task)
+
+ pure <| "- **" <> TaskCore.taskId task <> "**: " <> TaskCore.taskTitle task <> commitInfo <> filesInfo
+
+-- | Get files modified in a commit for a task
+getCommitFiles :: Text -> IO Text
+getCommitFiles taskId = do
+ let grepArg = "--grep=" <> Text.unpack taskId
+ (code, shaOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%H", "-n", "1", grepArg]
+ ""
+
+ if code /= Exit.ExitSuccess || null shaOut
+ then pure ""
+ else do
+ let sha = List.head (List.lines shaOut)
+ (fileCode, filesOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["diff-tree", "--no-commit-id", "--name-only", "-r", sha]
+ ""
+ if fileCode /= Exit.ExitSuccess || null filesOut
+ then pure ""
+ else do
+ let files = List.lines filesOut
+ fileList = List.take 3 files -- Limit to first 3 files
+ moreCount = length files - 3
+ filesText = Text.intercalate ", " (map Text.pack fileList)
+ suffix = if moreCount > 0 then " (+" <> tshow moreCount <> " more)" else ""
+ pure <| if null files then "" else " — " <> filesText <> suffix
+
+-- | Check if all children of an epic are Done, and if so, transition epic to Review
+checkEpicCompletion :: TaskCore.Task -> IO ()
+checkEpicCompletion task =
+ case TaskCore.taskParent task of
+ Nothing -> pure ()
+ Just parentId -> do
+ tasks <- TaskCore.loadTasks
+ case TaskCore.findTask parentId tasks of
+ Nothing -> pure ()
+ Just parentTask ->
+ when (TaskCore.taskType parentTask == TaskCore.Epic) <| do
+ let children = filter (hasParent parentId) tasks
+ allDone = all (\t -> TaskCore.taskStatus t == TaskCore.Done) children
+ when (allDone && not (null children)) <| do
+ putText ("[review] All children of epic " <> parentId <> " are Done.")
+ TaskCore.updateTaskStatusWithActor parentId TaskCore.Review [] TaskCore.System
+ putText ("[review] Epic " <> parentId <> " -> Review")
+ -- Generate summary comment for the epic
+ generateEpicSummary parentId parentTask children
+ where
+ hasParent pid t = maybe False (TaskCore.matchesId pid) (TaskCore.taskParent t)
+
+-- | Handle facts subcommands
+handleFacts :: Cli.Arguments -> IO ()
+handleFacts args
+ | args `Cli.has` Cli.command "list" = do
+ let maybeProject = Text.pack </ Cli.getArg args (Cli.longOption "project")
+ jsonMode = args `Cli.has` Cli.longOption "json"
+ facts <- maybe Fact.getAllFacts Fact.getFactsByProject maybeProject
+ if jsonMode
+ then BLC.putStrLn (Aeson.encode facts)
+ else traverse_ printFact facts
+ | args `Cli.has` Cli.command "show" = do
+ let jsonMode = args `Cli.has` Cli.longOption "json"
+ case Cli.getArg args (Cli.argument "fact-id") of
+ Nothing -> putText "fact-id required"
+ Just fidStr -> case readMaybe fidStr of
+ Nothing -> putText "Invalid fact ID (must be integer)"
+ Just fid -> do
+ maybeFact <- Fact.getFact fid
+ case maybeFact of
+ Nothing -> putText "Fact not found"
+ Just fact ->
+ if jsonMode
+ then BLC.putStrLn (Aeson.encode fact)
+ else printFactDetailed fact
+ | args `Cli.has` Cli.command "add" = do
+ let jsonMode = args `Cli.has` Cli.longOption "json"
+ case (Cli.getArg args (Cli.argument "project"), Cli.getArg args (Cli.argument "content")) of
+ (Just proj, Just content) -> do
+ let files = case Cli.getArg args (Cli.longOption "files") of
+ Just f -> Text.splitOn "," (Text.pack f)
+ Nothing -> []
+ sourceTask = Text.pack </ Cli.getArg args (Cli.longOption "task")
+ confidence = case Cli.getArg args (Cli.longOption "confidence") of
+ Just c -> fromMaybe 0.8 (readMaybe c)
+ Nothing -> 0.8
+ factId <- Fact.createFact (Text.pack proj) (Text.pack content) files sourceTask confidence
+ if jsonMode
+ then BLC.putStrLn (Aeson.encode (Aeson.object ["id" Aeson..= factId, "success" Aeson..= True]))
+ else putText ("Created fact: " <> tshow factId)
+ _ -> putText "project and content required"
+ | args `Cli.has` Cli.command "delete" = do
+ let jsonMode = args `Cli.has` Cli.longOption "json"
+ case Cli.getArg args (Cli.argument "fact-id") of
+ Nothing -> putText "fact-id required"
+ Just fidStr -> case readMaybe fidStr of
+ Nothing -> putText "Invalid fact ID (must be integer)"
+ Just fid -> do
+ Fact.deleteFact fid
+ if jsonMode
+ then BLC.putStrLn (Aeson.encode (Aeson.object ["success" Aeson..= True, "message" Aeson..= ("Deleted fact " <> tshow fid)]))
+ else putText ("Deleted fact: " <> tshow fid)
+ | otherwise = putText "Unknown facts subcommand. Use: list, show, add, or delete"
+
+-- | Print a fact in a compact format
+printFact :: TaskCore.Fact -> IO ()
+printFact fact = do
+ let fid = maybe "?" tshow (TaskCore.factId fact)
+ proj = TaskCore.factProject fact
+ content = Text.take 60 (TaskCore.factContent fact)
+ suffix = if Text.length (TaskCore.factContent fact) > 60 then "..." else ""
+ putText (fid <> "\t" <> proj <> "\t" <> content <> suffix)
+
+-- | Print a fact in detailed format
+printFactDetailed :: TaskCore.Fact -> IO ()
+printFactDetailed fact = do
+ putText ("ID: " <> maybe "?" tshow (TaskCore.factId fact))
+ putText ("Project: " <> TaskCore.factProject fact)
+ putText ("Content: " <> TaskCore.factContent fact)
+ putText ("Files: " <> Text.intercalate ", " (TaskCore.factRelatedFiles fact))
+ putText ("Source: " <> fromMaybe "-" (TaskCore.factSourceTask fact))
+ putText ("Confidence: " <> tshow (TaskCore.factConfidence fact))
+ putText ("Created: " <> tshow (TaskCore.factCreatedAt fact))
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Jr"
+ [ Test.unit "can run tests" <| True Test.@?= True,
+ Test.unit "can parse task command" <| do
+ let result = Docopt.parseArgs help ["task"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'task': " <> show err
+ Right args -> args `Cli.has` Cli.command "task" Test.@?= True,
+ Test.unit "can parse task command with args" <| do
+ let result = Docopt.parseArgs help ["task", "list", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'task list --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "task" Test.@?= True
+ Cli.getAllArgs args (Cli.argument "args") Test.@?= ["list", "--json"],
+ Test.unit "can parse work command" <| do
+ let result = Docopt.parseArgs help ["work"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'work': " <> show err
+ Right args -> args `Cli.has` Cli.command "work" Test.@?= True,
+ Test.unit "can parse work command with task id" <| do
+ let result = Docopt.parseArgs help ["work", "t-123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'work t-123': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "work" Test.@?= True
+ Cli.getArg args (Cli.argument "task-id") Test.@?= Just "t-123",
+ Test.unit "can parse facts list command" <| do
+ let result = Docopt.parseArgs help ["facts", "list"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts list': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "list" Test.@?= True,
+ Test.unit "can parse facts list with --project" <| do
+ let result = Docopt.parseArgs help ["facts", "list", "--project=myproj"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts list --project': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "list" Test.@?= True
+ Cli.getArg args (Cli.longOption "project") Test.@?= Just "myproj",
+ Test.unit "can parse facts list with --json" <| do
+ let result = Docopt.parseArgs help ["facts", "list", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts list --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "list" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "can parse facts show command" <| do
+ let result = Docopt.parseArgs help ["facts", "show", "42"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts show 42': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "show" Test.@?= True
+ Cli.getArg args (Cli.argument "fact-id") Test.@?= Just "42",
+ Test.unit "can parse facts add command" <| do
+ let result = Docopt.parseArgs help ["facts", "add", "myproj", "This is a fact"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts add': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "add" Test.@?= True
+ Cli.getArg args (Cli.argument "project") Test.@?= Just "myproj"
+ Cli.getArg args (Cli.argument "content") Test.@?= Just "This is a fact",
+ Test.unit "can parse facts add with options" <| do
+ let result = Docopt.parseArgs help ["facts", "add", "myproj", "fact", "--files=a.hs,b.hs", "--task=t-123", "--confidence=0.9"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts add' with options: " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "add" Test.@?= True
+ Cli.getArg args (Cli.longOption "files") Test.@?= Just "a.hs,b.hs"
+ Cli.getArg args (Cli.longOption "task") Test.@?= Just "t-123"
+ Cli.getArg args (Cli.longOption "confidence") Test.@?= Just "0.9",
+ Test.unit "can parse facts delete command" <| do
+ let result = Docopt.parseArgs help ["facts", "delete", "42"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'facts delete 42': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "facts" Test.@?= True
+ args `Cli.has` Cli.command "delete" Test.@?= True
+ Cli.getArg args (Cli.argument "fact-id") Test.@?= Just "42"
+ ]
diff --git a/Omni/Jr/Web.hs b/Omni/Jr/Web.hs
new file mode 100644
index 0000000..bb28e93
--- /dev/null
+++ b/Omni/Jr/Web.hs
@@ -0,0 +1,40 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Jr Web UI - Main module that re-exports the API and provides the run function.
+--
+-- The web interface is split into submodules:
+-- - Types: Data types for pages, partials, and forms
+-- - Components: Reusable UI components and helpers
+-- - Pages: Full page ToHtml instances
+-- - Partials: HTMX partial ToHtml instances
+-- - Handlers: Servant handler implementations
+-- - Style: CSS styling
+--
+-- : dep warp
+-- : dep servant-server
+-- : dep lucid
+-- : dep servant-lucid
+module Omni.Jr.Web
+ ( run,
+ defaultPort,
+ -- Re-exports for external use
+ API,
+ server,
+ )
+where
+
+import Alpha
+import qualified Network.Wai.Handler.Warp as Warp
+import Omni.Jr.Web.Handlers (API, api, server)
+import Omni.Jr.Web.Pages ()
+import Omni.Jr.Web.Partials ()
+import Servant (serve)
+
+defaultPort :: Warp.Port
+defaultPort = 8080
+
+run :: Warp.Port -> IO ()
+run port = do
+ putText <| "Starting Jr web server on port " <> tshow port
+ Warp.run port (serve api server)
diff --git a/Omni/Jr/Web/Components.hs b/Omni/Jr/Web/Components.hs
new file mode 100644
index 0000000..ac36131
--- /dev/null
+++ b/Omni/Jr/Web/Components.hs
@@ -0,0 +1,1751 @@
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep lucid
+-- : dep servant-lucid
+module Omni.Jr.Web.Components
+ ( -- * Time formatting
+ formatRelativeTime,
+ relativeText,
+ formatExactTimestamp,
+ renderRelativeTimestamp,
+
+ -- * Small components
+ metaSep,
+
+ -- * Page layout
+ pageHead,
+ pageBody,
+ pageBodyWithCrumbs,
+ navbar,
+
+ -- * JavaScript
+ navbarDropdownJs,
+ statusDropdownJs,
+ priorityDropdownJs,
+ complexityDropdownJs,
+ liveToggleJs,
+
+ -- * Breadcrumbs
+ Breadcrumb (..),
+ Breadcrumbs,
+ renderBreadcrumbs,
+ getAncestors,
+ taskBreadcrumbs,
+
+ -- * Badges
+ statusBadge,
+ complexityBadge,
+ statusBadgeWithForm,
+ clickableBadge,
+ statusDropdownOptions,
+ statusOption,
+ priorityBadgeWithForm,
+ clickablePriorityBadge,
+ priorityDropdownOptions,
+ priorityOption,
+ complexityBadgeWithForm,
+ clickableComplexityBadge,
+ complexityDropdownOptions,
+ complexityOption,
+
+ -- * Sorting
+ sortDropdown,
+ sortOption,
+
+ -- * Progress bars
+ multiColorProgressBar,
+ epicProgressBar,
+
+ -- * Task rendering
+ renderTaskCard,
+ renderBlockedTaskCard,
+ renderListGroupItem,
+ renderEpicReviewCard,
+ renderEpicCardWithStats,
+ getDescendants,
+
+ -- * Metrics
+ renderAggregatedMetrics,
+
+ -- * Retry context
+ renderRetryContextBanner,
+
+ -- * Markdown
+ MarkdownBlock (..),
+ InlinePart (..),
+ renderMarkdown,
+ parseBlocks,
+ renderBlocks,
+ renderBlock,
+ renderListItem,
+ renderInline,
+ parseInline,
+ parseBold,
+ renderInlineParts,
+ renderInlinePart,
+
+ -- * Comments
+ commentForm,
+
+ -- * Live toggles
+ renderLiveToggle,
+ renderAutoscrollToggle,
+
+ -- * Cost/Token metrics
+ aggregateCostMetrics,
+ formatCostHeader,
+ formatTokensHeader,
+
+ -- * Timeline
+ renderUnifiedTimeline,
+ renderTimelineEvent,
+ eventTypeIconAndLabel,
+ renderActorLabel,
+ renderCommentTimelineEvent,
+ renderStatusChangeEvent,
+ parseStatusChange,
+ renderActivityEvent,
+ renderErrorTimelineEvent,
+ renderAssistantTimelineEvent,
+ renderToolCallTimelineEvent,
+ renderToolResultTimelineEvent,
+ renderCheckpointEvent,
+ renderGuardrailEvent,
+ renderGenericEvent,
+ parseToolCallContent,
+ formatToolCallSummary,
+ renderCollapsibleOutput,
+ renderDecodedToolResult,
+ renderFormattedJson,
+ timelineScrollScript,
+
+ -- * Tool rendering helpers
+ renderBashToolCall,
+ renderReadToolCall,
+ renderEditToolCall,
+ renderSearchToolCall,
+ renderSearchAndReadToolCall,
+ renderWriteToolCall,
+ renderGenericToolCall,
+ extractJsonField,
+ extractJsonFieldInt,
+ shortenPath,
+ DecodedToolResult (..),
+ decodeToolResult,
+ )
+where
+
+import Alpha
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.Key as AesonKey
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy as LBS
+import qualified Data.List as List
+import qualified Data.Text as Text
+import Data.Time (NominalDiffTime, UTCTime, defaultTimeLocale, diffUTCTime, formatTime)
+import qualified Lucid
+import qualified Lucid.Base as Lucid
+import Numeric (showFFloat)
+import Omni.Jr.Web.Types (SortOrder (..), sortOrderLabel, sortOrderToParam)
+import qualified Omni.Task.Core as TaskCore
+
+-- * Time formatting
+
+formatRelativeTime :: UTCTime -> UTCTime -> Text
+formatRelativeTime now timestamp =
+ let delta = diffUTCTime now timestamp
+ in relativeText delta
+
+relativeText :: NominalDiffTime -> Text
+relativeText delta
+ | delta < 60 = "just now"
+ | delta < 3600 = tshow (round (delta / 60) :: Int) <> " minutes ago"
+ | delta < 7200 = "1 hour ago"
+ | delta < 86400 = tshow (round (delta / 3600) :: Int) <> " hours ago"
+ | delta < 172800 = "yesterday"
+ | delta < 604800 = tshow (round (delta / 86400) :: Int) <> " days ago"
+ | delta < 1209600 = "1 week ago"
+ | delta < 2592000 = tshow (round (delta / 604800) :: Int) <> " weeks ago"
+ | delta < 5184000 = "1 month ago"
+ | delta < 31536000 = tshow (round (delta / 2592000) :: Int) <> " months ago"
+ | otherwise = tshow (round (delta / 31536000) :: Int) <> " years ago"
+
+formatExactTimestamp :: UTCTime -> Text
+formatExactTimestamp = Text.pack <. formatTime defaultTimeLocale "%Y-%m-%d %H:%M:%S UTC"
+
+renderRelativeTimestamp :: (Monad m) => UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderRelativeTimestamp now timestamp =
+ Lucid.span_
+ [ Lucid.class_ "relative-time",
+ Lucid.title_ (formatExactTimestamp timestamp)
+ ]
+ (Lucid.toHtml (formatRelativeTime now timestamp))
+
+-- * Small components
+
+metaSep :: (Monad m) => Lucid.HtmlT m ()
+metaSep = Lucid.span_ [Lucid.class_ "meta-sep"] "·"
+
+-- * Page layout
+
+pageHead :: (Monad m) => Text -> Lucid.HtmlT m ()
+pageHead title =
+ Lucid.head_ <| do
+ Lucid.title_ (Lucid.toHtml title)
+ Lucid.meta_ [Lucid.charset_ "utf-8"]
+ Lucid.meta_
+ [ Lucid.name_ "viewport",
+ Lucid.content_ "width=device-width, initial-scale=1"
+ ]
+ Lucid.link_ [Lucid.rel_ "stylesheet", Lucid.href_ "/style.css"]
+ Lucid.script_
+ [ Lucid.src_ "https://unpkg.com/htmx.org@2.0.4",
+ Lucid.integrity_ "sha384-HGfztofotfshcF7+8n44JQL2oJmowVChPTg48S+jvZoztPfvwD79OC/LTtG6dMp+",
+ Lucid.crossorigin_ "anonymous"
+ ]
+ ("" :: Text)
+ Lucid.script_ [] statusDropdownJs
+ Lucid.script_ [] priorityDropdownJs
+ Lucid.script_ [] complexityDropdownJs
+ Lucid.script_ [] navbarDropdownJs
+ Lucid.script_ [] liveToggleJs
+
+-- * JavaScript
+
+navbarDropdownJs :: Text
+navbarDropdownJs =
+ Text.unlines
+ [ "document.addEventListener('DOMContentLoaded', function() {",
+ " document.querySelectorAll('.navbar-dropdown-btn').forEach(function(btn) {",
+ " btn.addEventListener('click', function(e) {",
+ " e.preventDefault();",
+ " var dropdown = btn.closest('.navbar-dropdown');",
+ " var isOpen = dropdown.classList.contains('open');",
+ " document.querySelectorAll('.navbar-dropdown.open').forEach(function(d) {",
+ " d.classList.remove('open');",
+ " });",
+ " if (!isOpen) {",
+ " dropdown.classList.add('open');",
+ " }",
+ " });",
+ " });",
+ " document.addEventListener('click', function(e) {",
+ " if (!e.target.closest('.navbar-dropdown')) {",
+ " document.querySelectorAll('.navbar-dropdown.open').forEach(function(d) {",
+ " d.classList.remove('open');",
+ " });",
+ " }",
+ " });",
+ "});"
+ ]
+
+statusDropdownJs :: Text
+statusDropdownJs =
+ Text.unlines
+ [ "function toggleStatusDropdown(el) {",
+ " var container = el.parentElement;",
+ " var isOpen = container.classList.toggle('open');",
+ " el.setAttribute('aria-expanded', isOpen);",
+ " if (isOpen) {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ "}",
+ "",
+ "function closeStatusDropdown(container) {",
+ " container.classList.remove('open');",
+ " var badge = container.querySelector('[role=\"button\"]');",
+ " if (badge) {",
+ " badge.setAttribute('aria-expanded', 'false');",
+ " badge.focus();",
+ " }",
+ "}",
+ "",
+ "function handleStatusKeydown(event, el) {",
+ " if (event.key === 'Enter' || event.key === ' ') {",
+ " event.preventDefault();",
+ " toggleStatusDropdown(el);",
+ " } else if (event.key === 'Escape') {",
+ " closeStatusDropdown(el.parentElement);",
+ " } else if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var container = el.parentElement;",
+ " if (!container.classList.contains('open')) {",
+ " toggleStatusDropdown(el);",
+ " } else {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ " }",
+ "}",
+ "",
+ "function handleMenuItemKeydown(event) {",
+ " var container = event.target.closest('.status-badge-dropdown');",
+ " var items = container.querySelectorAll('[role=\"menuitem\"]');",
+ " var currentIndex = Array.from(items).indexOf(event.target);",
+ " ",
+ " if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var next = (currentIndex + 1) % items.length;",
+ " items[next].focus();",
+ " } else if (event.key === 'ArrowUp') {",
+ " event.preventDefault();",
+ " var prev = (currentIndex - 1 + items.length) % items.length;",
+ " items[prev].focus();",
+ " } else if (event.key === 'Escape') {",
+ " event.preventDefault();",
+ " closeStatusDropdown(container);",
+ " } else if (event.key === 'Tab') {",
+ " closeStatusDropdown(container);",
+ " }",
+ "}",
+ "",
+ "document.addEventListener('click', function(e) {",
+ " var dropdowns = document.querySelectorAll('.status-badge-dropdown.open');",
+ " dropdowns.forEach(function(d) {",
+ " if (!d.contains(e.target)) {",
+ " closeStatusDropdown(d);",
+ " }",
+ " });",
+ "});"
+ ]
+
+priorityDropdownJs :: Text
+priorityDropdownJs =
+ Text.unlines
+ [ "function togglePriorityDropdown(el) {",
+ " var container = el.parentElement;",
+ " var isOpen = container.classList.toggle('open');",
+ " el.setAttribute('aria-expanded', isOpen);",
+ " if (isOpen) {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ "}",
+ "",
+ "function closePriorityDropdown(container) {",
+ " container.classList.remove('open');",
+ " var badge = container.querySelector('[role=\"button\"]');",
+ " if (badge) {",
+ " badge.setAttribute('aria-expanded', 'false');",
+ " badge.focus();",
+ " }",
+ "}",
+ "",
+ "function handlePriorityKeydown(event, el) {",
+ " if (event.key === 'Enter' || event.key === ' ') {",
+ " event.preventDefault();",
+ " togglePriorityDropdown(el);",
+ " } else if (event.key === 'Escape') {",
+ " closePriorityDropdown(el.parentElement);",
+ " } else if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var container = el.parentElement;",
+ " if (!container.classList.contains('open')) {",
+ " togglePriorityDropdown(el);",
+ " } else {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ " }",
+ "}",
+ "",
+ "function handlePriorityMenuItemKeydown(event) {",
+ " var container = event.target.closest('.priority-badge-dropdown');",
+ " var items = container.querySelectorAll('[role=\"menuitem\"]');",
+ " var currentIndex = Array.from(items).indexOf(event.target);",
+ " ",
+ " if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var next = (currentIndex + 1) % items.length;",
+ " items[next].focus();",
+ " } else if (event.key === 'ArrowUp') {",
+ " event.preventDefault();",
+ " var prev = (currentIndex - 1 + items.length) % items.length;",
+ " items[prev].focus();",
+ " } else if (event.key === 'Escape') {",
+ " event.preventDefault();",
+ " closePriorityDropdown(container);",
+ " } else if (event.key === 'Tab') {",
+ " closePriorityDropdown(container);",
+ " }",
+ "}",
+ "",
+ "document.addEventListener('click', function(e) {",
+ " var dropdowns = document.querySelectorAll('.priority-badge-dropdown.open');",
+ " dropdowns.forEach(function(d) {",
+ " if (!d.contains(e.target)) {",
+ " closePriorityDropdown(d);",
+ " }",
+ " });",
+ "});"
+ ]
+
+complexityDropdownJs :: Text
+complexityDropdownJs =
+ Text.unlines
+ [ "function toggleComplexityDropdown(el) {",
+ " var container = el.parentElement;",
+ " var isOpen = container.classList.toggle('open');",
+ " el.setAttribute('aria-expanded', isOpen);",
+ " if (isOpen) {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ "}",
+ "",
+ "function closeComplexityDropdown(container) {",
+ " container.classList.remove('open');",
+ " var badge = container.querySelector('[role=\"button\"]');",
+ " if (badge) {",
+ " badge.setAttribute('aria-expanded', 'false');",
+ " badge.focus();",
+ " }",
+ "}",
+ "",
+ "function handleComplexityKeydown(event, el) {",
+ " if (event.key === 'Enter' || event.key === ' ') {",
+ " event.preventDefault();",
+ " toggleComplexityDropdown(el);",
+ " } else if (event.key === 'Escape') {",
+ " closeComplexityDropdown(el.parentElement);",
+ " } else if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var container = el.parentElement;",
+ " if (!container.classList.contains('open')) {",
+ " toggleComplexityDropdown(el);",
+ " } else {",
+ " var firstItem = container.querySelector('[role=\"menuitem\"]');",
+ " if (firstItem) firstItem.focus();",
+ " }",
+ " }",
+ "}",
+ "",
+ "function handleComplexityMenuItemKeydown(event) {",
+ " var container = event.target.closest('.complexity-badge-dropdown');",
+ " var items = container.querySelectorAll('[role=\"menuitem\"]');",
+ " var currentIndex = Array.from(items).indexOf(event.target);",
+ " ",
+ " if (event.key === 'ArrowDown') {",
+ " event.preventDefault();",
+ " var next = (currentIndex + 1) % items.length;",
+ " items[next].focus();",
+ " } else if (event.key === 'ArrowUp') {",
+ " event.preventDefault();",
+ " var prev = (currentIndex - 1 + items.length) % items.length;",
+ " items[prev].focus();",
+ " } else if (event.key === 'Escape') {",
+ " event.preventDefault();",
+ " closeComplexityDropdown(container);",
+ " } else if (event.key === 'Tab') {",
+ " closeComplexityDropdown(container);",
+ " }",
+ "}",
+ "",
+ "document.addEventListener('click', function(e) {",
+ " var dropdowns = document.querySelectorAll('.complexity-badge-dropdown.open');",
+ " dropdowns.forEach(function(d) {",
+ " if (!d.contains(e.target)) {",
+ " closeComplexityDropdown(d);",
+ " }",
+ " });",
+ "});"
+ ]
+
+liveToggleJs :: Text
+liveToggleJs =
+ Text.unlines
+ [ "var liveUpdatesEnabled = true;",
+ "var autoscrollEnabled = true;",
+ "",
+ "function toggleLiveUpdates() {",
+ " liveUpdatesEnabled = !liveUpdatesEnabled;",
+ " var btn = document.getElementById('live-toggle');",
+ " if (btn) {",
+ " btn.classList.toggle('timeline-live-paused', !liveUpdatesEnabled);",
+ " }",
+ "}",
+ "",
+ "function toggleAutoscroll() {",
+ " autoscrollEnabled = !autoscrollEnabled;",
+ " var btn = document.getElementById('autoscroll-toggle');",
+ " if (btn) {",
+ " btn.classList.toggle('timeline-autoscroll-disabled', !autoscrollEnabled);",
+ " }",
+ "}",
+ "",
+ "document.body.addEventListener('htmx:beforeRequest', function(evt) {",
+ " var timeline = document.getElementById('unified-timeline');",
+ " if (timeline && timeline.contains(evt.target) && !liveUpdatesEnabled) {",
+ " evt.preventDefault();",
+ " }",
+ "});",
+ "",
+ "document.body.addEventListener('htmx:afterSettle', function(evt) {",
+ " if (autoscrollEnabled) {",
+ " var log = document.querySelector('.timeline-events');",
+ " if (log) {",
+ " log.scrollTop = log.scrollHeight;",
+ " }",
+ " }",
+ "});"
+ ]
+
+pageBody :: (Monad m) => Lucid.HtmlT m () -> Lucid.HtmlT m ()
+pageBody content =
+ Lucid.body_ [Lucid.makeAttribute "hx-boost" "true"] <| do
+ navbar
+ content
+
+-- * Breadcrumbs
+
+data Breadcrumb = Breadcrumb
+ { _crumbLabel :: Text,
+ _crumbHref :: Maybe Text
+ }
+
+type Breadcrumbs = [Breadcrumb]
+
+pageBodyWithCrumbs :: (Monad m) => Breadcrumbs -> Lucid.HtmlT m () -> Lucid.HtmlT m ()
+pageBodyWithCrumbs crumbs content =
+ Lucid.body_ [Lucid.makeAttribute "hx-boost" "true"] <| do
+ navbar
+ unless (null crumbs) <| do
+ Lucid.div_ [Lucid.class_ "breadcrumb-container"] <| do
+ Lucid.div_ [Lucid.class_ "container"] <| renderBreadcrumbs crumbs
+ content
+
+renderBreadcrumbs :: (Monad m) => Breadcrumbs -> Lucid.HtmlT m ()
+renderBreadcrumbs [] = pure ()
+renderBreadcrumbs crumbs =
+ Lucid.nav_ [Lucid.class_ "breadcrumbs", Lucid.makeAttribute "aria-label" "Breadcrumb"] <| do
+ Lucid.ol_ [Lucid.class_ "breadcrumb-list"] <| do
+ traverse_ renderCrumb (zip [0 ..] crumbs)
+ where
+ renderCrumb :: (Monad m') => (Int, Breadcrumb) -> Lucid.HtmlT m' ()
+ renderCrumb (idx, Breadcrumb label mHref) = do
+ Lucid.li_ [Lucid.class_ "breadcrumb-item"] <| do
+ when (idx > 0) <| Lucid.span_ [Lucid.class_ "breadcrumb-sep"] ">"
+ case mHref of
+ Just href -> Lucid.a_ [Lucid.href_ href] (Lucid.toHtml label)
+ Nothing -> Lucid.span_ [Lucid.class_ "breadcrumb-current"] (Lucid.toHtml label)
+
+getAncestors :: [TaskCore.Task] -> TaskCore.Task -> [TaskCore.Task]
+getAncestors allTasks task =
+ case TaskCore.taskParent task of
+ Nothing -> [task]
+ Just pid -> case TaskCore.findTask pid allTasks of
+ Nothing -> [task]
+ Just parent -> getAncestors allTasks parent ++ [task]
+
+taskBreadcrumbs :: [TaskCore.Task] -> TaskCore.Task -> Breadcrumbs
+taskBreadcrumbs allTasks task =
+ let ancestors = getAncestors allTasks task
+ taskCrumbs = [Breadcrumb (TaskCore.taskId t) (Just ("/tasks/" <> TaskCore.taskId t)) | t <- List.init ancestors]
+ currentCrumb = Breadcrumb (TaskCore.taskId task) Nothing
+ in [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks")]
+ ++ taskCrumbs
+ ++ [currentCrumb]
+
+-- * Navbar
+
+navbar :: (Monad m) => Lucid.HtmlT m ()
+navbar =
+ Lucid.nav_ [Lucid.class_ "navbar"] <| do
+ Lucid.a_ [Lucid.href_ "/", Lucid.class_ "navbar-brand"] "Junior"
+ Lucid.input_
+ [ Lucid.type_ "checkbox",
+ Lucid.id_ "navbar-toggle",
+ Lucid.class_ "navbar-toggle-checkbox"
+ ]
+ Lucid.label_
+ [ Lucid.for_ "navbar-toggle",
+ Lucid.class_ "navbar-hamburger"
+ ]
+ <| do
+ Lucid.span_ [Lucid.class_ "hamburger-line"] ""
+ Lucid.span_ [Lucid.class_ "hamburger-line"] ""
+ Lucid.span_ [Lucid.class_ "hamburger-line"] ""
+ Lucid.div_ [Lucid.class_ "navbar-links"] <| do
+ Lucid.a_ [Lucid.href_ "/", Lucid.class_ "navbar-link"] "Dashboard"
+ Lucid.div_ [Lucid.class_ "navbar-dropdown"] <| do
+ Lucid.button_ [Lucid.class_ "navbar-dropdown-btn"] "Tasks ▾"
+ Lucid.div_ [Lucid.class_ "navbar-dropdown-content"] <| do
+ Lucid.a_ [Lucid.href_ "/ready", Lucid.class_ "navbar-dropdown-item"] "Ready"
+ Lucid.a_ [Lucid.href_ "/blocked", Lucid.class_ "navbar-dropdown-item"] "Blocked"
+ Lucid.a_ [Lucid.href_ "/intervention", Lucid.class_ "navbar-dropdown-item"] "Human Action"
+ Lucid.a_ [Lucid.href_ "/tasks", Lucid.class_ "navbar-dropdown-item"] "All"
+ Lucid.div_ [Lucid.class_ "navbar-dropdown"] <| do
+ Lucid.button_ [Lucid.class_ "navbar-dropdown-btn"] "Plans ▾"
+ Lucid.div_ [Lucid.class_ "navbar-dropdown-content"] <| do
+ Lucid.a_ [Lucid.href_ "/epics", Lucid.class_ "navbar-dropdown-item"] "Epics"
+ Lucid.a_ [Lucid.href_ "/kb", Lucid.class_ "navbar-dropdown-item"] "KB"
+ Lucid.a_ [Lucid.href_ "/stats", Lucid.class_ "navbar-link"] "Stats"
+
+-- * Badges
+
+statusBadge :: (Monad m) => TaskCore.Status -> Lucid.HtmlT m ()
+statusBadge status =
+ let (cls, label) = case status of
+ TaskCore.Draft -> ("badge badge-draft", "Draft")
+ TaskCore.Open -> ("badge badge-open", "Open")
+ TaskCore.InProgress -> ("badge badge-inprogress", "In Progress")
+ TaskCore.Review -> ("badge badge-review", "Review")
+ TaskCore.Approved -> ("badge badge-approved", "Approved")
+ TaskCore.Done -> ("badge badge-done", "Done")
+ TaskCore.NeedsHelp -> ("badge badge-needshelp", "Needs Help")
+ in Lucid.span_ [Lucid.class_ cls] label
+
+complexityBadge :: (Monad m) => Int -> Lucid.HtmlT m ()
+complexityBadge complexity =
+ let cls = "badge badge-complexity badge-complexity-" <> tshow complexity
+ label = "ℂ " <> tshow complexity
+ in Lucid.span_ [Lucid.class_ cls, Lucid.title_ "Task Complexity (1-5)"] (Lucid.toHtml label)
+
+-- * Sort dropdown
+
+sortDropdown :: (Monad m) => Text -> SortOrder -> Lucid.HtmlT m ()
+sortDropdown basePath currentSort =
+ Lucid.div_ [Lucid.class_ "sort-dropdown"] <| do
+ Lucid.span_ [Lucid.class_ "sort-label"] "Sort:"
+ Lucid.div_ [Lucid.class_ "sort-dropdown-wrapper navbar-dropdown"] <| do
+ Lucid.button_ [Lucid.class_ "sort-dropdown-btn navbar-dropdown-btn"]
+ <| Lucid.toHtml (sortOrderLabel currentSort <> " ▾")
+ Lucid.div_ [Lucid.class_ "sort-dropdown-content navbar-dropdown-content"] <| do
+ sortOption basePath SortNewest currentSort
+ sortOption basePath SortOldest currentSort
+ sortOption basePath SortUpdated currentSort
+ sortOption basePath SortPriorityHigh currentSort
+ sortOption basePath SortPriorityLow currentSort
+
+sortOption :: (Monad m) => Text -> SortOrder -> SortOrder -> Lucid.HtmlT m ()
+sortOption basePath option currentSort =
+ let cls = "sort-dropdown-item navbar-dropdown-item" <> if option == currentSort then " active" else ""
+ href = basePath <> "?sort=" <> sortOrderToParam option
+ in Lucid.a_ [Lucid.href_ href, Lucid.class_ cls] (Lucid.toHtml (sortOrderLabel option))
+
+-- * Progress bars
+
+multiColorProgressBar :: (Monad m) => TaskCore.TaskStats -> Lucid.HtmlT m ()
+multiColorProgressBar stats =
+ let total = TaskCore.totalTasks stats
+ doneCount = TaskCore.doneTasks stats
+ inProgressCount = TaskCore.inProgressTasks stats
+ openCount = TaskCore.openTasks stats + TaskCore.reviewTasks stats + TaskCore.approvedTasks stats
+ donePct = if total == 0 then 0 else (doneCount * 100) `div` total
+ inProgressPct = if total == 0 then 0 else (inProgressCount * 100) `div` total
+ openPct = if total == 0 then 0 else (openCount * 100) `div` total
+ in Lucid.div_ [Lucid.class_ "multi-progress-container"] <| do
+ Lucid.div_ [Lucid.class_ "multi-progress-bar"] <| do
+ when (donePct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-done",
+ Lucid.style_ ("width: " <> tshow donePct <> "%"),
+ Lucid.title_ (tshow doneCount <> " done")
+ ]
+ ""
+ when (inProgressPct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-inprogress",
+ Lucid.style_ ("width: " <> tshow inProgressPct <> "%"),
+ Lucid.title_ (tshow inProgressCount <> " in progress")
+ ]
+ ""
+ when (openPct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-open",
+ Lucid.style_ ("width: " <> tshow openPct <> "%"),
+ Lucid.title_ (tshow openCount <> " open")
+ ]
+ ""
+ Lucid.div_ [Lucid.class_ "progress-legend"] <| do
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-done"] ""
+ Lucid.toHtml ("Done " <> tshow doneCount)
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-inprogress"] ""
+ Lucid.toHtml ("In Progress " <> tshow inProgressCount)
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-open"] ""
+ Lucid.toHtml ("Open " <> tshow openCount)
+
+epicProgressBar :: (Monad m) => Int -> Int -> Int -> Int -> Lucid.HtmlT m ()
+epicProgressBar doneCount inProgressCount openCount totalCount =
+ let donePct = if totalCount == 0 then 0 else (doneCount * 100) `div` totalCount
+ inProgressPct = if totalCount == 0 then 0 else (inProgressCount * 100) `div` totalCount
+ openPct = if totalCount == 0 then 0 else (openCount * 100) `div` totalCount
+ in Lucid.div_ [Lucid.class_ "multi-progress-container epic-progress"] <| do
+ Lucid.div_ [Lucid.class_ "multi-progress-bar"] <| do
+ when (donePct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-done",
+ Lucid.style_ ("width: " <> tshow donePct <> "%"),
+ Lucid.title_ (tshow doneCount <> " done")
+ ]
+ ""
+ when (inProgressPct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-inprogress",
+ Lucid.style_ ("width: " <> tshow inProgressPct <> "%"),
+ Lucid.title_ (tshow inProgressCount <> " in progress")
+ ]
+ ""
+ when (openPct > 0)
+ <| Lucid.div_
+ [ Lucid.class_ "multi-progress-segment progress-open",
+ Lucid.style_ ("width: " <> tshow openPct <> "%"),
+ Lucid.title_ (tshow openCount <> " open")
+ ]
+ ""
+ Lucid.div_ [Lucid.class_ "progress-legend"] <| do
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-done"] ""
+ Lucid.toHtml (tshow doneCount)
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-inprogress"] ""
+ Lucid.toHtml (tshow inProgressCount)
+ Lucid.span_ [Lucid.class_ "legend-item"] <| do
+ Lucid.span_ [Lucid.class_ "legend-dot legend-open"] ""
+ Lucid.toHtml (tshow openCount)
+
+-- * Status badge with form
+
+statusBadgeWithForm :: (Monad m) => TaskCore.Status -> Text -> Lucid.HtmlT m ()
+statusBadgeWithForm status tid =
+ Lucid.div_
+ [ Lucid.id_ "status-badge-container",
+ Lucid.class_ "status-badge-dropdown"
+ ]
+ <| do
+ clickableBadge status tid
+ statusDropdownOptions status tid
+
+clickableBadge :: (Monad m) => TaskCore.Status -> Text -> Lucid.HtmlT m ()
+clickableBadge status _tid =
+ let (cls, label) = case status of
+ TaskCore.Draft -> ("badge badge-draft status-badge-clickable", "Draft" :: Text)
+ TaskCore.Open -> ("badge badge-open status-badge-clickable", "Open")
+ TaskCore.InProgress -> ("badge badge-inprogress status-badge-clickable", "In Progress")
+ TaskCore.Review -> ("badge badge-review status-badge-clickable", "Review")
+ TaskCore.Approved -> ("badge badge-approved status-badge-clickable", "Approved")
+ TaskCore.Done -> ("badge badge-done status-badge-clickable", "Done")
+ TaskCore.NeedsHelp -> ("badge badge-needshelp status-badge-clickable", "Needs Help")
+ in Lucid.span_
+ [ Lucid.class_ cls,
+ Lucid.tabindex_ "0",
+ Lucid.role_ "button",
+ Lucid.makeAttribute "aria-haspopup" "true",
+ Lucid.makeAttribute "aria-expanded" "false",
+ Lucid.makeAttribute "onclick" "toggleStatusDropdown(this)",
+ Lucid.makeAttribute "onkeydown" "handleStatusKeydown(event, this)"
+ ]
+ <| do
+ Lucid.toHtml label
+ Lucid.span_ [Lucid.class_ "dropdown-arrow", Lucid.makeAttribute "aria-hidden" "true"] " ▾"
+
+statusDropdownOptions :: (Monad m) => TaskCore.Status -> Text -> Lucid.HtmlT m ()
+statusDropdownOptions currentStatus tid =
+ Lucid.div_
+ [ Lucid.class_ "status-dropdown-menu",
+ Lucid.role_ "menu",
+ Lucid.makeAttribute "aria-label" "Change task status"
+ ]
+ <| do
+ statusOption TaskCore.Draft currentStatus tid
+ statusOption TaskCore.Open currentStatus tid
+ statusOption TaskCore.InProgress currentStatus tid
+ statusOption TaskCore.Review currentStatus tid
+ statusOption TaskCore.Approved currentStatus tid
+ statusOption TaskCore.Done currentStatus tid
+ statusOption TaskCore.NeedsHelp currentStatus tid
+
+statusOption :: (Monad m) => TaskCore.Status -> TaskCore.Status -> Text -> Lucid.HtmlT m ()
+statusOption opt currentStatus tid =
+ let (cls, label) = case opt of
+ TaskCore.Draft -> ("badge badge-draft", "Draft" :: Text)
+ TaskCore.Open -> ("badge badge-open", "Open")
+ TaskCore.InProgress -> ("badge badge-inprogress", "In Progress")
+ TaskCore.Review -> ("badge badge-review", "Review")
+ TaskCore.Approved -> ("badge badge-approved", "Approved")
+ TaskCore.Done -> ("badge badge-done", "Done")
+ TaskCore.NeedsHelp -> ("badge badge-needshelp", "Needs Help")
+ isSelected = opt == currentStatus
+ optClass = cls <> " status-dropdown-option" <> if isSelected then " selected" else ""
+ in Lucid.form_
+ [ Lucid.class_ "status-option-form",
+ Lucid.role_ "none",
+ Lucid.makeAttribute "hx-post" ("/tasks/" <> tid <> "/status"),
+ Lucid.makeAttribute "hx-target" "#status-badge-container",
+ Lucid.makeAttribute "hx-swap" "outerHTML"
+ ]
+ <| do
+ Lucid.input_ [Lucid.type_ "hidden", Lucid.name_ "status", Lucid.value_ (tshow opt)]
+ Lucid.button_
+ [ Lucid.type_ "submit",
+ Lucid.class_ optClass,
+ Lucid.role_ "menuitem",
+ Lucid.tabindex_ "-1",
+ Lucid.makeAttribute "onkeydown" "handleMenuItemKeydown(event)"
+ ]
+ (Lucid.toHtml label)
+
+-- * Priority badge with form
+
+priorityBadgeWithForm :: (Monad m) => TaskCore.Priority -> Text -> Lucid.HtmlT m ()
+priorityBadgeWithForm priority tid =
+ Lucid.div_
+ [ Lucid.id_ "priority-badge-container",
+ Lucid.class_ "priority-badge-dropdown"
+ ]
+ <| do
+ clickablePriorityBadge priority tid
+ priorityDropdownOptions priority tid
+
+clickablePriorityBadge :: (Monad m) => TaskCore.Priority -> Text -> Lucid.HtmlT m ()
+clickablePriorityBadge priority _tid =
+ let (cls, label) = case priority of
+ TaskCore.P0 -> ("badge badge-p0 priority-badge-clickable", "P0 Critical" :: Text)
+ TaskCore.P1 -> ("badge badge-p1 priority-badge-clickable", "P1 High")
+ TaskCore.P2 -> ("badge badge-p2 priority-badge-clickable", "P2 Normal")
+ TaskCore.P3 -> ("badge badge-p3 priority-badge-clickable", "P3 Low")
+ TaskCore.P4 -> ("badge badge-p4 priority-badge-clickable", "P4 Defer")
+ in Lucid.span_
+ [ Lucid.class_ cls,
+ Lucid.tabindex_ "0",
+ Lucid.role_ "button",
+ Lucid.makeAttribute "aria-haspopup" "true",
+ Lucid.makeAttribute "aria-expanded" "false",
+ Lucid.makeAttribute "onclick" "togglePriorityDropdown(this)",
+ Lucid.makeAttribute "onkeydown" "handlePriorityKeydown(event, this)"
+ ]
+ <| do
+ Lucid.toHtml label
+ Lucid.span_ [Lucid.class_ "dropdown-arrow", Lucid.makeAttribute "aria-hidden" "true"] " ▾"
+
+priorityDropdownOptions :: (Monad m) => TaskCore.Priority -> Text -> Lucid.HtmlT m ()
+priorityDropdownOptions currentPriority tid =
+ Lucid.div_
+ [ Lucid.class_ "priority-dropdown-menu",
+ Lucid.role_ "menu",
+ Lucid.makeAttribute "aria-label" "Change task priority"
+ ]
+ <| do
+ priorityOption TaskCore.P0 currentPriority tid
+ priorityOption TaskCore.P1 currentPriority tid
+ priorityOption TaskCore.P2 currentPriority tid
+ priorityOption TaskCore.P3 currentPriority tid
+ priorityOption TaskCore.P4 currentPriority tid
+
+priorityOption :: (Monad m) => TaskCore.Priority -> TaskCore.Priority -> Text -> Lucid.HtmlT m ()
+priorityOption opt currentPriority tid =
+ let (cls, label) = case opt of
+ TaskCore.P0 -> ("badge badge-p0", "P0 Critical" :: Text)
+ TaskCore.P1 -> ("badge badge-p1", "P1 High")
+ TaskCore.P2 -> ("badge badge-p2", "P2 Normal")
+ TaskCore.P3 -> ("badge badge-p3", "P3 Low")
+ TaskCore.P4 -> ("badge badge-p4", "P4 Defer")
+ isSelected = opt == currentPriority
+ optClass = cls <> " priority-dropdown-option" <> if isSelected then " selected" else ""
+ in Lucid.form_
+ [ Lucid.class_ "priority-option-form",
+ Lucid.role_ "none",
+ Lucid.makeAttribute "hx-post" ("/tasks/" <> tid <> "/priority"),
+ Lucid.makeAttribute "hx-target" "#priority-badge-container",
+ Lucid.makeAttribute "hx-swap" "outerHTML"
+ ]
+ <| do
+ Lucid.input_ [Lucid.type_ "hidden", Lucid.name_ "priority", Lucid.value_ (tshow opt)]
+ Lucid.button_
+ [ Lucid.type_ "submit",
+ Lucid.class_ optClass,
+ Lucid.role_ "menuitem",
+ Lucid.tabindex_ "-1",
+ Lucid.makeAttribute "onkeydown" "handlePriorityMenuItemKeydown(event)"
+ ]
+ (Lucid.toHtml label)
+
+-- * Complexity badge with form
+
+complexityBadgeWithForm :: (Monad m) => Maybe Int -> Text -> Lucid.HtmlT m ()
+complexityBadgeWithForm complexity tid =
+ Lucid.div_
+ [ Lucid.id_ "complexity-badge-container",
+ Lucid.class_ "complexity-badge-dropdown"
+ ]
+ <| do
+ clickableComplexityBadge complexity tid
+ complexityDropdownOptions complexity tid
+
+clickableComplexityBadge :: (Monad m) => Maybe Int -> Text -> Lucid.HtmlT m ()
+clickableComplexityBadge complexity _tid =
+ let (cls, label) = case complexity of
+ Nothing -> ("badge badge-complexity-none complexity-badge-clickable", "Set Complexity" :: Text)
+ Just 1 -> ("badge badge-complexity-1 complexity-badge-clickable", "ℂ 1")
+ Just 2 -> ("badge badge-complexity-2 complexity-badge-clickable", "ℂ 2")
+ Just 3 -> ("badge badge-complexity-3 complexity-badge-clickable", "ℂ 3")
+ Just 4 -> ("badge badge-complexity-4 complexity-badge-clickable", "ℂ 4")
+ Just 5 -> ("badge badge-complexity-5 complexity-badge-clickable", "ℂ 5")
+ Just _ -> ("badge badge-complexity-none complexity-badge-clickable", "Invalid")
+ in Lucid.span_
+ [ Lucid.class_ cls,
+ Lucid.tabindex_ "0",
+ Lucid.role_ "button",
+ Lucid.makeAttribute "aria-haspopup" "true",
+ Lucid.makeAttribute "aria-expanded" "false",
+ Lucid.makeAttribute "onclick" "toggleComplexityDropdown(this)",
+ Lucid.makeAttribute "onkeydown" "handleComplexityKeydown(event, this)"
+ ]
+ <| do
+ Lucid.toHtml label
+ Lucid.span_ [Lucid.class_ "dropdown-arrow", Lucid.makeAttribute "aria-hidden" "true"] " ▾"
+
+complexityDropdownOptions :: (Monad m) => Maybe Int -> Text -> Lucid.HtmlT m ()
+complexityDropdownOptions currentComplexity tid =
+ Lucid.div_
+ [ Lucid.class_ "complexity-dropdown-menu",
+ Lucid.role_ "menu",
+ Lucid.makeAttribute "aria-label" "Change task complexity"
+ ]
+ <| do
+ complexityOption Nothing currentComplexity tid
+ complexityOption (Just 1) currentComplexity tid
+ complexityOption (Just 2) currentComplexity tid
+ complexityOption (Just 3) currentComplexity tid
+ complexityOption (Just 4) currentComplexity tid
+ complexityOption (Just 5) currentComplexity tid
+
+complexityOption :: (Monad m) => Maybe Int -> Maybe Int -> Text -> Lucid.HtmlT m ()
+complexityOption opt currentComplexity tid =
+ let (cls, label, val) = case opt of
+ Nothing -> ("badge badge-complexity-none", "None" :: Text, "none" :: Text)
+ Just 1 -> ("badge badge-complexity-1", "ℂ 1", "1")
+ Just 2 -> ("badge badge-complexity-2", "ℂ 2", "2")
+ Just 3 -> ("badge badge-complexity-3", "ℂ 3", "3")
+ Just 4 -> ("badge badge-complexity-4", "ℂ 4", "4")
+ Just 5 -> ("badge badge-complexity-5", "ℂ 5", "5")
+ Just _ -> ("badge badge-complexity-none", "Invalid", "none")
+ isSelected = opt == currentComplexity
+ optClass = cls <> " complexity-dropdown-option" <> if isSelected then " selected" else ""
+ in Lucid.form_
+ [ Lucid.class_ "complexity-option-form",
+ Lucid.role_ "none",
+ Lucid.makeAttribute "hx-post" ("/tasks/" <> tid <> "/complexity"),
+ Lucid.makeAttribute "hx-target" "#complexity-badge-container",
+ Lucid.makeAttribute "hx-swap" "outerHTML"
+ ]
+ <| do
+ Lucid.input_ [Lucid.type_ "hidden", Lucid.name_ "complexity", Lucid.value_ val]
+ Lucid.button_
+ [ Lucid.type_ "submit",
+ Lucid.class_ optClass,
+ Lucid.role_ "menuitem",
+ Lucid.tabindex_ "-1",
+ Lucid.makeAttribute "onkeydown" "handleComplexityMenuItemKeydown(event)"
+ ]
+ (Lucid.toHtml label)
+
+-- * Task rendering
+
+renderTaskCard :: (Monad m) => TaskCore.Task -> Lucid.HtmlT m ()
+renderTaskCard t =
+ Lucid.a_
+ [ Lucid.class_ "task-card task-card-link",
+ Lucid.href_ ("/tasks/" <> TaskCore.taskId t)
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "task-header"] <| do
+ Lucid.span_ [Lucid.class_ "task-id"] (Lucid.toHtml (TaskCore.taskId t))
+ statusBadge (TaskCore.taskStatus t)
+ Lucid.span_ [Lucid.class_ "priority"] (Lucid.toHtml (tshow (TaskCore.taskPriority t)))
+ Lucid.p_ [Lucid.class_ "task-title"] (Lucid.toHtml (TaskCore.taskTitle t))
+
+renderBlockedTaskCard :: (Monad m) => (TaskCore.Task, Int) -> Lucid.HtmlT m ()
+renderBlockedTaskCard (t, impact) =
+ Lucid.a_
+ [ Lucid.class_ "task-card task-card-link",
+ Lucid.href_ ("/tasks/" <> TaskCore.taskId t)
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "task-header"] <| do
+ Lucid.span_ [Lucid.class_ "task-id"] (Lucid.toHtml (TaskCore.taskId t))
+ statusBadge (TaskCore.taskStatus t)
+ Lucid.span_ [Lucid.class_ "priority"] (Lucid.toHtml (tshow (TaskCore.taskPriority t)))
+ when (impact > 0)
+ <| Lucid.span_ [Lucid.class_ "blocking-impact"] (Lucid.toHtml ("Blocks " <> tshow impact))
+ Lucid.p_ [Lucid.class_ "task-title"] (Lucid.toHtml (TaskCore.taskTitle t))
+
+renderListGroupItem :: (Monad m) => TaskCore.Task -> Lucid.HtmlT m ()
+renderListGroupItem t =
+ Lucid.a_
+ [ Lucid.class_ "list-group-item",
+ Lucid.href_ ("/tasks/" <> TaskCore.taskId t),
+ Lucid.makeAttribute "hx-boost" "true",
+ Lucid.makeAttribute "hx-target" "body",
+ Lucid.makeAttribute "hx-swap" "innerHTML"
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "list-group-item-content"] <| do
+ Lucid.span_ [Lucid.class_ "list-group-item-id"] (Lucid.toHtml (TaskCore.taskId t))
+ Lucid.span_ [Lucid.class_ "list-group-item-title"] (Lucid.toHtml (TaskCore.taskTitle t))
+ Lucid.div_ [Lucid.class_ "list-group-item-meta"] <| do
+ statusBadge (TaskCore.taskStatus t)
+ Lucid.span_ [Lucid.class_ "priority"] (Lucid.toHtml (tshow (TaskCore.taskPriority t)))
+
+renderEpicReviewCard :: (Monad m) => TaskCore.EpicForReview -> Lucid.HtmlT m ()
+renderEpicReviewCard epicReview = do
+ let task = TaskCore.epicTask epicReview
+ total = TaskCore.epicTotal epicReview
+ completed = TaskCore.epicCompleted epicReview
+ progressText = tshow completed <> "/" <> tshow total <> " subtasks done"
+ Lucid.div_ [Lucid.class_ "task-card"] <| do
+ Lucid.div_ [Lucid.class_ "task-card-header"] <| do
+ Lucid.div_ [Lucid.class_ "task-title-row"] <| do
+ Lucid.a_
+ [Lucid.href_ ("/tasks/" <> TaskCore.taskId task), Lucid.class_ "task-link"]
+ <| Lucid.toHtml (TaskCore.taskTitle task)
+ Lucid.span_ [Lucid.class_ "badge badge-epic"] "Epic"
+ Lucid.span_ [Lucid.class_ "task-id"] <| Lucid.toHtml (TaskCore.taskId task)
+ Lucid.div_ [Lucid.class_ "task-card-body"] <| do
+ Lucid.div_ [Lucid.class_ "progress-info"] <| do
+ Lucid.span_ [Lucid.class_ "badge badge-success"] <| Lucid.toHtml progressText
+ Lucid.div_ [Lucid.class_ "epic-actions"] <| do
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/tasks/" <> TaskCore.taskId task <> "/status"),
+ Lucid.class_ "inline-form"
+ ]
+ <| do
+ Lucid.input_ [Lucid.type_ "hidden", Lucid.name_ "status", Lucid.value_ "done"]
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-success btn-sm"] "Approve & Close"
+
+renderEpicCardWithStats :: (Monad m) => [TaskCore.Task] -> TaskCore.Task -> Lucid.HtmlT m ()
+renderEpicCardWithStats allTasks t =
+ let children = getDescendants allTasks (TaskCore.taskId t)
+ openCount = length [c | c <- children, TaskCore.taskStatus c == TaskCore.Open]
+ inProgressCount = length [c | c <- children, TaskCore.taskStatus c == TaskCore.InProgress]
+ reviewCount = length [c | c <- children, TaskCore.taskStatus c == TaskCore.Review]
+ doneCount = length [c | c <- children, TaskCore.taskStatus c == TaskCore.Done]
+ totalCount = length children
+ openAndReview = openCount + reviewCount
+ in Lucid.a_
+ [ Lucid.class_ "task-card task-card-link",
+ Lucid.href_ ("/tasks/" <> TaskCore.taskId t)
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "task-header"] <| do
+ Lucid.span_ [Lucid.class_ "task-id"] (Lucid.toHtml (TaskCore.taskId t))
+ statusBadge (TaskCore.taskStatus t)
+ Lucid.span_ [Lucid.class_ "priority"] (Lucid.toHtml (tshow (TaskCore.taskPriority t)))
+ Lucid.p_ [Lucid.class_ "task-title"] (Lucid.toHtml (TaskCore.taskTitle t))
+ when (totalCount > 0) <| epicProgressBar doneCount inProgressCount openAndReview totalCount
+ unless (Text.null (TaskCore.taskDescription t))
+ <| Lucid.p_ [Lucid.class_ "kb-preview"] (Lucid.toHtml (Text.take 200 (TaskCore.taskDescription t) <> "..."))
+
+getDescendants :: [TaskCore.Task] -> Text -> [TaskCore.Task]
+getDescendants allTasks parentId =
+ let children = [c | c <- allTasks, maybe False (TaskCore.matchesId parentId) (TaskCore.taskParent c)]
+ in children ++ concatMap (getDescendants allTasks <. TaskCore.taskId) children
+
+-- * Aggregated metrics
+
+renderAggregatedMetrics :: (Monad m) => [TaskCore.Task] -> TaskCore.Task -> TaskCore.AggregatedMetrics -> Lucid.HtmlT m ()
+renderAggregatedMetrics allTasks task metrics =
+ let descendants = getDescendants allTasks (TaskCore.taskId task)
+ totalCount = length descendants
+ costCents = TaskCore.aggTotalCostCents metrics
+ durationSecs = TaskCore.aggTotalDurationSeconds metrics
+ completedCount = TaskCore.aggCompletedTasks metrics
+ tokensUsed = TaskCore.aggTotalTokens metrics
+ in Lucid.div_ [Lucid.class_ "detail-section aggregated-metrics"] <| do
+ Lucid.h3_ "Execution Summary"
+ Lucid.div_ [Lucid.class_ "metrics-grid"] <| do
+ Lucid.div_ [Lucid.class_ "metric-card"] <| do
+ Lucid.div_ [Lucid.class_ "metric-value"] (Lucid.toHtml (tshow completedCount <> "/" <> tshow totalCount))
+ Lucid.div_ [Lucid.class_ "metric-label"] "Tasks Completed"
+ Lucid.div_ [Lucid.class_ "metric-card"] <| do
+ Lucid.div_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatCostMetric costCents))
+ Lucid.div_ [Lucid.class_ "metric-label"] "Total Cost"
+ Lucid.div_ [Lucid.class_ "metric-card"] <| do
+ Lucid.div_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatDurationMetric durationSecs))
+ Lucid.div_ [Lucid.class_ "metric-label"] "Total Time"
+ when (tokensUsed > 0) <| do
+ Lucid.div_ [Lucid.class_ "metric-card"] <| do
+ Lucid.div_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatTokensMetric tokensUsed))
+ Lucid.div_ [Lucid.class_ "metric-label"] "Tokens Used"
+ where
+ formatCostMetric :: Int -> Text
+ formatCostMetric cents =
+ let dollars = fromIntegral cents / 100.0 :: Double
+ in "$" <> Text.pack (showFFloat (Just 2) dollars "")
+
+ formatDurationMetric :: Int -> Text
+ formatDurationMetric secs
+ | secs < 60 = tshow secs <> "s"
+ | secs < 3600 =
+ let mins = secs `div` 60
+ remSecs = secs `mod` 60
+ in tshow mins <> "m " <> tshow remSecs <> "s"
+ | otherwise =
+ let hrs = secs `div` 3600
+ mins = (secs `mod` 3600) `div` 60
+ in tshow hrs <> "h " <> tshow mins <> "m"
+
+ formatTokensMetric :: Int -> Text
+ formatTokensMetric t
+ | t < 1000 = tshow t
+ | t < 1000000 = Text.pack (showFFloat (Just 1) (fromIntegral t / 1000.0 :: Double) "") <> "K"
+ | otherwise = Text.pack (showFFloat (Just 2) (fromIntegral t / 1000000.0 :: Double) "") <> "M"
+
+-- * Retry context banner
+
+renderRetryContextBanner :: (Monad m) => Text -> Maybe TaskCore.RetryContext -> Lucid.HtmlT m ()
+renderRetryContextBanner _ Nothing = pure ()
+renderRetryContextBanner tid (Just ctx) =
+ Lucid.div_ [Lucid.class_ bannerClass] <| do
+ Lucid.div_ [Lucid.class_ "retry-banner-header"] <| do
+ Lucid.span_ [Lucid.class_ "retry-icon"] retryIcon
+ Lucid.span_ [Lucid.class_ "retry-attempt"] (Lucid.toHtml attemptText)
+ when maxRetriesExceeded
+ <| Lucid.span_ [Lucid.class_ "retry-warning-badge"] "Needs Human Intervention"
+
+ Lucid.div_ [Lucid.class_ "retry-banner-details"] <| do
+ Lucid.div_ [Lucid.class_ "retry-detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "retry-label"] "Failure Reason:"
+ Lucid.span_ [Lucid.class_ "retry-value"] (Lucid.toHtml (summarizeReason (TaskCore.retryReason ctx)))
+
+ let commit = TaskCore.retryOriginalCommit ctx
+ unless (Text.null commit) <| do
+ Lucid.div_ [Lucid.class_ "retry-detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "retry-label"] "Original Commit:"
+ Lucid.code_ [Lucid.class_ "retry-commit"] (Lucid.toHtml (Text.take 8 commit))
+
+ let conflicts = TaskCore.retryConflictFiles ctx
+ unless (null conflicts) <| do
+ Lucid.div_ [Lucid.class_ "retry-detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "retry-label"] "Conflict Files:"
+ Lucid.ul_ [Lucid.class_ "retry-conflict-list"]
+ <| traverse_ (Lucid.li_ <. Lucid.toHtml) conflicts
+
+ when maxRetriesExceeded <| do
+ Lucid.div_
+ [Lucid.class_ "retry-warning-message"]
+ "This task has exceeded the maximum number of retries. A human must review the failure and either fix the issue manually or reset the retry count."
+
+ Lucid.p_ [Lucid.class_ "retry-hint"] "Use comments below to provide guidance for retry."
+
+ Lucid.div_ [Lucid.class_ "retry-reset-section"] <| do
+ Lucid.h4_ "Reset Retries"
+ Lucid.p_ [Lucid.class_ "notes-help"] "Clear retry context and give task a fresh start:"
+ Lucid.form_ [Lucid.method_ "POST", Lucid.action_ ("/tasks/" <> tid <> "/reset-retries")] <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "reset-btn"] "Reset Retries"
+ where
+ attempt = TaskCore.retryAttempt ctx
+ maxRetriesExceeded = attempt >= 3
+ bannerClass = if maxRetriesExceeded then "retry-banner retry-banner-critical" else "retry-banner retry-banner-warning"
+ retryIcon = if maxRetriesExceeded then "⚠" else "↻"
+ attemptText = "Attempt " <> tshow attempt <> " of 3"
+
+ summarizeReason :: Text -> Text
+ summarizeReason reason
+ | "rejected:" `Text.isPrefixOf` reason = "Rejected: " <> Text.strip (Text.drop 9 reason)
+ | "Test failure:" `Text.isPrefixOf` reason = "Test failure (see details below)"
+ | "MERGE CONFLICT" `Text.isPrefixOf` reason = "Merge conflict with concurrent changes"
+ | otherwise = Text.take 100 reason <> if Text.length reason > 100 then "..." else ""
+
+-- * Markdown rendering
+
+renderMarkdown :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderMarkdown input = renderBlocks (parseBlocks (Text.lines input))
+
+data MarkdownBlock
+ = MdHeader Int Text
+ | MdParagraph [Text]
+ | MdCodeBlock [Text]
+ | MdList [Text]
+ deriving (Show, Eq)
+
+parseBlocks :: [Text] -> [MarkdownBlock]
+parseBlocks [] = []
+parseBlocks lns = case lns of
+ (l : rest)
+ | "```" `Text.isPrefixOf` l ->
+ let (codeLines, afterCode) = List.span (not <. Text.isPrefixOf "```") rest
+ remaining = List.drop 1 afterCode
+ in MdCodeBlock codeLines : parseBlocks remaining
+ | "### " `Text.isPrefixOf` l ->
+ MdHeader 3 (Text.drop 4 l) : parseBlocks rest
+ | "## " `Text.isPrefixOf` l ->
+ MdHeader 2 (Text.drop 3 l) : parseBlocks rest
+ | "# " `Text.isPrefixOf` l ->
+ MdHeader 1 (Text.drop 2 l) : parseBlocks rest
+ | isListItem l ->
+ let (listLines, afterList) = List.span isListItem lns
+ in MdList (map stripListPrefix listLines) : parseBlocks afterList
+ | Text.null (Text.strip l) ->
+ parseBlocks rest
+ | otherwise ->
+ let (paraLines, afterPara) = List.span isParagraphLine lns
+ in MdParagraph paraLines : parseBlocks afterPara
+ where
+ isListItem t =
+ let stripped = Text.stripStart t
+ in "- " `Text.isPrefixOf` stripped || "* " `Text.isPrefixOf` stripped
+ stripListPrefix t =
+ let stripped = Text.stripStart t
+ in Text.drop 2 stripped
+ isParagraphLine t =
+ not (Text.null (Text.strip t))
+ && not ("```" `Text.isPrefixOf` t)
+ && not ("#" `Text.isPrefixOf` t)
+ && not (isListItem t)
+
+renderBlocks :: (Monad m) => [MarkdownBlock] -> Lucid.HtmlT m ()
+renderBlocks = traverse_ renderBlock
+
+renderBlock :: (Monad m) => MarkdownBlock -> Lucid.HtmlT m ()
+renderBlock block = case block of
+ MdHeader 1 txt -> Lucid.h2_ [Lucid.class_ "md-h1"] (renderInline txt)
+ MdHeader 2 txt -> Lucid.h3_ [Lucid.class_ "md-h2"] (renderInline txt)
+ MdHeader 3 txt -> Lucid.h4_ [Lucid.class_ "md-h3"] (renderInline txt)
+ MdHeader _ txt -> Lucid.h4_ (renderInline txt)
+ MdParagraph lns -> Lucid.p_ [Lucid.class_ "md-para"] (renderInline (Text.unlines lns))
+ MdCodeBlock lns -> Lucid.pre_ [Lucid.class_ "md-code"] (Lucid.code_ (Lucid.toHtml (Text.unlines lns)))
+ MdList items -> Lucid.ul_ [Lucid.class_ "md-list"] (traverse_ renderListItem items)
+
+renderListItem :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderListItem txt = Lucid.li_ (renderInline txt)
+
+renderInline :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderInline txt = renderInlineParts (parseInline txt)
+
+data InlinePart = PlainText Text | InlineCode Text | BoldText Text
+ deriving (Show, Eq)
+
+parseInline :: Text -> [InlinePart]
+parseInline t
+ | Text.null t = []
+ | otherwise = case Text.breakOn "`" t of
+ (before, rest)
+ | Text.null rest -> parseBold before
+ | otherwise ->
+ let afterTick = Text.drop 1 rest
+ in case Text.breakOn "`" afterTick of
+ (code, rest2)
+ | Text.null rest2 ->
+ parseBold before ++ [PlainText ("`" <> afterTick)]
+ | otherwise ->
+ parseBold before ++ [InlineCode code] ++ parseInline (Text.drop 1 rest2)
+
+parseBold :: Text -> [InlinePart]
+parseBold t
+ | Text.null t = []
+ | otherwise = case Text.breakOn "**" t of
+ (before, rest)
+ | Text.null rest -> [PlainText before | not (Text.null before)]
+ | otherwise ->
+ let afterBold = Text.drop 2 rest
+ in case Text.breakOn "**" afterBold of
+ (boldText, rest2)
+ | Text.null rest2 ->
+ [PlainText before | not (Text.null before)] ++ [PlainText ("**" <> afterBold)]
+ | otherwise ->
+ [PlainText before | not (Text.null before)]
+ ++ [BoldText boldText]
+ ++ parseBold (Text.drop 2 rest2)
+
+renderInlineParts :: (Monad m) => [InlinePart] -> Lucid.HtmlT m ()
+renderInlineParts = traverse_ renderInlinePart
+
+renderInlinePart :: (Monad m) => InlinePart -> Lucid.HtmlT m ()
+renderInlinePart part = case part of
+ PlainText txt -> Lucid.toHtml txt
+ InlineCode txt -> Lucid.code_ [Lucid.class_ "md-inline-code"] (Lucid.toHtml txt)
+ BoldText txt -> Lucid.strong_ (Lucid.toHtml txt)
+
+-- * Comment form
+
+commentForm :: (Monad m) => Text -> Lucid.HtmlT m ()
+commentForm tid =
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/tasks/" <> tid <> "/comment"),
+ Lucid.class_ "comment-form"
+ ]
+ <| do
+ Lucid.textarea_
+ [ Lucid.name_ "comment",
+ Lucid.placeholder_ "Add a comment...",
+ Lucid.rows_ "3",
+ Lucid.class_ "comment-textarea"
+ ]
+ ""
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-primary"] "Post Comment"
+
+-- * Live toggles
+
+renderLiveToggle :: (Monad m) => Lucid.HtmlT m ()
+renderLiveToggle =
+ Lucid.button_
+ [ Lucid.class_ "timeline-live-toggle",
+ Lucid.id_ "live-toggle",
+ Lucid.makeAttribute "onclick" "toggleLiveUpdates()",
+ Lucid.title_ "Click to pause/resume live updates"
+ ]
+ " LIVE"
+
+renderAutoscrollToggle :: (Monad m) => Lucid.HtmlT m ()
+renderAutoscrollToggle =
+ Lucid.button_
+ [ Lucid.class_ "timeline-autoscroll-toggle",
+ Lucid.id_ "autoscroll-toggle",
+ Lucid.makeAttribute "onclick" "toggleAutoscroll()",
+ Lucid.title_ "Toggle automatic scrolling to newest events"
+ ]
+ " ⬇ Auto-scroll"
+
+-- * Cost/Token metrics
+
+aggregateCostMetrics :: [TaskCore.StoredEvent] -> (Int, Int)
+aggregateCostMetrics events =
+ let costEvents = filter (\e -> TaskCore.storedEventType e == "Cost") events
+ aggregateOne (totalCents, totalTokens) event =
+ case Aeson.decode (LBS.fromStrict (str (TaskCore.storedEventContent event))) of
+ Just (Aeson.Object obj) ->
+ let cents = case KeyMap.lookup "cents" obj of
+ Just (Aeson.Number n) -> floor n
+ _ -> 0
+ tokens = case KeyMap.lookup "tokens" obj of
+ Just (Aeson.Number n) -> floor n
+ _ -> 0
+ in (totalCents + cents, totalTokens + tokens)
+ _ -> (totalCents, totalTokens)
+ in foldl' aggregateOne (0, 0) costEvents
+
+formatCostHeader :: Int -> Text
+formatCostHeader cents =
+ let dollars = fromIntegral cents / 100.0 :: Double
+ in "$" <> Text.pack (showFFloat (Just 2) dollars "")
+
+formatTokensHeader :: Int -> Text
+formatTokensHeader t
+ | t < 1000 = tshow t
+ | t < 1000000 = Text.pack (showFFloat (Just 1) (fromIntegral t / 1000.0 :: Double) "") <> "K"
+ | otherwise = Text.pack (showFFloat (Just 2) (fromIntegral t / 1000000.0 :: Double) "") <> "M"
+
+-- * Timeline
+
+renderUnifiedTimeline :: (Monad m) => Text -> [TaskCore.Comment] -> [TaskCore.StoredEvent] -> TaskCore.Status -> UTCTime -> Lucid.HtmlT m ()
+renderUnifiedTimeline tid legacyComments events status now = do
+ let isInProgress = status == TaskCore.InProgress
+ pollAttrs =
+ if isInProgress
+ then
+ [ Lucid.makeAttribute "hx-get" ("/partials/task/" <> tid <> "/events"),
+ Lucid.makeAttribute "hx-trigger" "every 3s",
+ Lucid.makeAttribute "hx-swap" "innerHTML",
+ Lucid.makeAttribute "hx-on::before-request" "var log = this.querySelector('.timeline-events'); if(log) this.dataset.scroll = log.scrollTop",
+ Lucid.makeAttribute "hx-on::after-swap" "var log = this.querySelector('.timeline-events'); if(log && this.dataset.scroll) log.scrollTop = this.dataset.scroll"
+ ]
+ else []
+ nonCostEvents = filter (\e -> TaskCore.storedEventType e /= "Cost") events
+ eventCount = length nonCostEvents + length legacyComments
+ (totalCents, totalTokens) = aggregateCostMetrics events
+ Lucid.div_ ([Lucid.class_ "unified-timeline-section", Lucid.id_ "unified-timeline"] <> pollAttrs) <| do
+ Lucid.h3_ <| do
+ Lucid.toHtml ("Timeline (" <> tshow eventCount <> ")")
+ when (totalCents > 0 || totalTokens > 0) <| do
+ Lucid.span_ [Lucid.class_ "timeline-cost-summary"] <| do
+ metaSep
+ when (totalCents > 0) <| Lucid.toHtml (formatCostHeader totalCents)
+ when (totalCents > 0 && totalTokens > 0) <| metaSep
+ when (totalTokens > 0) <| Lucid.toHtml (formatTokensHeader totalTokens <> " tokens")
+ when isInProgress <| do
+ renderLiveToggle
+ renderAutoscrollToggle
+
+ if null nonCostEvents && null legacyComments
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No activity yet."
+ else do
+ Lucid.div_ [Lucid.class_ "timeline-events"] <| do
+ traverse_ (renderTimelineEvent now) nonCostEvents
+ when isInProgress <| timelineScrollScript
+
+ commentForm tid
+
+renderTimelineEvent :: (Monad m) => UTCTime -> TaskCore.StoredEvent -> Lucid.HtmlT m ()
+renderTimelineEvent now event =
+ let eventType = TaskCore.storedEventType event
+ content = TaskCore.storedEventContent event
+ timestamp = TaskCore.storedEventTimestamp event
+ actor = TaskCore.storedEventActor event
+ eventId = TaskCore.storedEventId event
+ (icon, label) = eventTypeIconAndLabel eventType
+ in Lucid.div_
+ [ Lucid.class_ ("timeline-event timeline-event-" <> eventType),
+ Lucid.makeAttribute "data-event-id" (tshow eventId)
+ ]
+ <| do
+ case eventType of
+ "comment" -> renderCommentTimelineEvent content actor timestamp now
+ "status_change" -> renderStatusChangeEvent content actor timestamp now
+ "claim" -> renderActivityEvent icon label content actor timestamp now
+ "running" -> renderActivityEvent icon label content actor timestamp now
+ "reviewing" -> renderActivityEvent icon label content actor timestamp now
+ "retrying" -> renderActivityEvent icon label content actor timestamp now
+ "complete" -> renderActivityEvent icon label content actor timestamp now
+ "error" -> renderErrorTimelineEvent content actor timestamp now
+ "Assistant" -> renderAssistantTimelineEvent content actor timestamp now
+ "ToolCall" -> renderToolCallTimelineEvent content actor timestamp now
+ "ToolResult" -> renderToolResultTimelineEvent content actor timestamp now
+ "Cost" -> pure ()
+ "Checkpoint" -> renderCheckpointEvent content actor timestamp now
+ "Guardrail" -> renderGuardrailEvent content actor timestamp now
+ _ -> renderGenericEvent eventType content actor timestamp now
+
+eventTypeIconAndLabel :: Text -> (Text, Text)
+eventTypeIconAndLabel "comment" = ("💬", "Comment")
+eventTypeIconAndLabel "status_change" = ("🔄", "Status")
+eventTypeIconAndLabel "claim" = ("🤖", "Claimed")
+eventTypeIconAndLabel "running" = ("▶️", "Running")
+eventTypeIconAndLabel "reviewing" = ("👀", "Reviewing")
+eventTypeIconAndLabel "retrying" = ("🔁", "Retrying")
+eventTypeIconAndLabel "complete" = ("✅", "Complete")
+eventTypeIconAndLabel "error" = ("❌", "Error")
+eventTypeIconAndLabel "Assistant" = ("💭", "Thought")
+eventTypeIconAndLabel "ToolCall" = ("🔧", "Tool")
+eventTypeIconAndLabel "ToolResult" = ("📄", "Result")
+eventTypeIconAndLabel "Cost" = ("💰", "Cost")
+eventTypeIconAndLabel "Checkpoint" = ("📍", "Checkpoint")
+eventTypeIconAndLabel "Guardrail" = ("⚠️", "Guardrail")
+eventTypeIconAndLabel t = ("📝", t)
+
+renderActorLabel :: (Monad m) => TaskCore.CommentAuthor -> Lucid.HtmlT m ()
+renderActorLabel actor =
+ let (cls, label) :: (Text, Text) = case actor of
+ TaskCore.Human -> ("actor-human", "human")
+ TaskCore.Junior -> ("actor-junior", "junior")
+ TaskCore.System -> ("actor-system", "system")
+ in Lucid.span_ [Lucid.class_ ("actor-label " <> cls)] (Lucid.toHtml ("[" <> label <> "]"))
+
+renderCommentTimelineEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderCommentTimelineEvent content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-comment"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "💬"
+ renderActorLabel actor
+ renderRelativeTimestamp now timestamp
+ Lucid.div_ [Lucid.class_ "event-content comment-bubble markdown-content"] <| do
+ renderMarkdown content
+
+renderStatusChangeEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderStatusChangeEvent content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-status-change"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "🔄"
+ renderActorLabel actor
+ Lucid.span_ [Lucid.class_ "status-change-text"] (Lucid.toHtml (parseStatusChange content))
+ renderRelativeTimestamp now timestamp
+
+parseStatusChange :: Text -> Text
+parseStatusChange content =
+ case Aeson.decode (LBS.fromStrict (str content)) of
+ Just (Aeson.Object obj) ->
+ let fromStatus = case KeyMap.lookup "from" obj of
+ Just (Aeson.String s) -> s
+ _ -> "?"
+ toStatus = case KeyMap.lookup "to" obj of
+ Just (Aeson.String s) -> s
+ _ -> "?"
+ in fromStatus <> " → " <> toStatus
+ _ -> content
+
+renderActivityEvent :: (Monad m) => Text -> Text -> Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderActivityEvent icon label content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-activity"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] (Lucid.toHtml icon)
+ Lucid.span_ [Lucid.class_ "event-label"] (Lucid.toHtml label)
+ renderActorLabel actor
+ unless (Text.null content) <| Lucid.span_ [Lucid.class_ "activity-detail"] (Lucid.toHtml content)
+ renderRelativeTimestamp now timestamp
+
+renderErrorTimelineEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderErrorTimelineEvent content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-error"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "❌"
+ Lucid.span_ [Lucid.class_ "event-label"] "Error"
+ renderActorLabel actor
+ renderRelativeTimestamp now timestamp
+ Lucid.div_ [Lucid.class_ "event-content error-message"] (renderFormattedJson content)
+
+renderAssistantTimelineEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderAssistantTimelineEvent content _actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-thought"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "💭"
+ Lucid.span_ [Lucid.class_ "event-label"] "Thought"
+ renderActorLabel TaskCore.Junior
+ renderRelativeTimestamp now timestamp
+ Lucid.div_ [Lucid.class_ "event-content thought-bubble markdown-content"] <| do
+ let truncated = Text.take 2000 content
+ isTruncated = Text.length content > 2000
+ renderMarkdown truncated
+ when isTruncated <| Lucid.span_ [Lucid.class_ "event-truncated"] "..."
+
+renderToolCallTimelineEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderToolCallTimelineEvent content _actor _timestamp _now =
+ let (toolName, argsJson) = parseToolCallContent content
+ in case toolName of
+ "run_bash" -> renderBashToolCall argsJson
+ "read_file" -> renderReadToolCall argsJson
+ "edit_file" -> renderEditToolCall argsJson
+ "search_codebase" -> renderSearchToolCall argsJson
+ "search_and_read" -> renderSearchAndReadToolCall argsJson
+ "write_file" -> renderWriteToolCall argsJson
+ _ -> renderGenericToolCall toolName argsJson
+
+renderBashToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderBashToolCall argsJson =
+ let cmd = extractJsonField "command" argsJson
+ in Lucid.div_ [Lucid.class_ "tool-bash"] <| do
+ Lucid.span_ [Lucid.class_ "tool-bash-prompt"] "ϟ"
+ Lucid.code_ [Lucid.class_ "tool-bash-cmd"] (Lucid.toHtml cmd)
+
+renderReadToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderReadToolCall argsJson =
+ let path = extractJsonField "path" argsJson
+ startLine = extractJsonFieldInt "start_line" argsJson
+ endLine = extractJsonFieldInt "end_line" argsJson
+ lineRange = case (startLine, endLine) of
+ (Just s, Just e) -> " @" <> tshow s <> "-" <> tshow e
+ (Just s, Nothing) -> " @" <> tshow s <> "+"
+ _ -> ""
+ in Lucid.div_ [Lucid.class_ "tool-compact"] <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] "Read"
+ Lucid.code_ [Lucid.class_ "tool-path"] (Lucid.toHtml (shortenPath path <> lineRange))
+
+renderEditToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderEditToolCall argsJson =
+ let path = extractJsonField "path" argsJson
+ in Lucid.div_ [Lucid.class_ "tool-compact"] <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] "Edit"
+ Lucid.code_ [Lucid.class_ "tool-path"] (Lucid.toHtml (shortenPath path))
+
+renderSearchToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderSearchToolCall argsJson =
+ let searchPat = extractJsonField "pattern" argsJson
+ searchPath = extractJsonField "path" argsJson
+ pathSuffix = if Text.null searchPath || searchPath == "." then "" else " in " <> shortenPath searchPath
+ in Lucid.div_ [Lucid.class_ "tool-compact"] <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] "Grep"
+ Lucid.code_ [Lucid.class_ "tool-pattern"] (Lucid.toHtml searchPat)
+ unless (Text.null pathSuffix)
+ <| Lucid.span_ [Lucid.class_ "tool-path-suffix"] (Lucid.toHtml pathSuffix)
+
+renderWriteToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderWriteToolCall argsJson =
+ let path = extractJsonField "path" argsJson
+ in Lucid.div_ [Lucid.class_ "tool-compact"] <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] "Write"
+ Lucid.code_ [Lucid.class_ "tool-path"] (Lucid.toHtml (shortenPath path))
+
+renderSearchAndReadToolCall :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderSearchAndReadToolCall argsJson =
+ let searchPat = extractJsonField "pattern" argsJson
+ searchPath = extractJsonField "path" argsJson
+ pathSuffix = if Text.null searchPath || searchPath == "." then "" else " in " <> shortenPath searchPath
+ in Lucid.div_ [Lucid.class_ "tool-compact"] <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] "Find"
+ Lucid.code_ [Lucid.class_ "tool-pattern"] (Lucid.toHtml searchPat)
+ unless (Text.null pathSuffix)
+ <| Lucid.span_ [Lucid.class_ "tool-path-suffix"] (Lucid.toHtml pathSuffix)
+
+renderGenericToolCall :: (Monad m) => Text -> Text -> Lucid.HtmlT m ()
+renderGenericToolCall toolName argsJson =
+ Lucid.details_ [Lucid.class_ "tool-generic"] <| do
+ Lucid.summary_ <| do
+ Lucid.span_ [Lucid.class_ "tool-check"] "✓"
+ Lucid.span_ [Lucid.class_ "tool-label"] (Lucid.toHtml toolName)
+ Lucid.pre_ [Lucid.class_ "tool-args-pre"] (Lucid.toHtml argsJson)
+
+extractJsonField :: Text -> Text -> Text
+extractJsonField field jsonText =
+ case Aeson.decode (LBS.fromStrict (str jsonText)) of
+ Just (Aeson.Object obj) ->
+ case KeyMap.lookup (AesonKey.fromText field) obj of
+ Just (Aeson.String s) -> s
+ _ -> ""
+ _ -> ""
+
+extractJsonFieldInt :: Text -> Text -> Maybe Int
+extractJsonFieldInt field jsonText =
+ case Aeson.decode (LBS.fromStrict (str jsonText)) of
+ Just (Aeson.Object obj) ->
+ case KeyMap.lookup (AesonKey.fromText field) obj of
+ Just (Aeson.Number n) -> Just (floor n)
+ _ -> Nothing
+ _ -> Nothing
+
+shortenPath :: Text -> Text
+shortenPath path =
+ let parts = Text.splitOn "/" path
+ relevant = dropWhile (\p -> p `elem` ["", "home", "ben", "omni"]) parts
+ in Text.intercalate "/" relevant
+
+renderToolResultTimelineEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderToolResultTimelineEvent content _actor _timestamp _now =
+ let decoded = decodeToolResult content
+ isSuccess = toolResultIsSuccess decoded
+ output = toolResultOutput' decoded
+ lineCount = length (Text.lines output)
+ in if Text.null output || (isSuccess && lineCount <= 1)
+ then pure ()
+ else
+ Lucid.div_ [Lucid.class_ "tool-result-output"] <| do
+ when (lineCount > 10)
+ <| Lucid.details_ [Lucid.class_ "result-collapsible"]
+ <| do
+ Lucid.summary_ (Lucid.toHtml (tshow lineCount <> " lines"))
+ Lucid.pre_ [Lucid.class_ "tool-output-pre"] (Lucid.toHtml output)
+ when (lineCount <= 10)
+ <| Lucid.pre_ [Lucid.class_ "tool-output-pre"] (Lucid.toHtml output)
+
+data DecodedToolResult = DecodedToolResult
+ { toolResultIsSuccess :: Bool,
+ toolResultOutput' :: Text,
+ toolResultError' :: Maybe Text
+ }
+
+decodeToolResult :: Text -> DecodedToolResult
+decodeToolResult content =
+ case Aeson.decode (LBS.fromStrict (str content)) of
+ Just (Aeson.Object obj) ->
+ DecodedToolResult
+ { toolResultIsSuccess = case KeyMap.lookup "success" obj of
+ Just (Aeson.Bool b) -> b
+ _ -> True,
+ toolResultOutput' = case KeyMap.lookup "output" obj of
+ Just (Aeson.String s) -> s
+ _ -> "",
+ toolResultError' = case KeyMap.lookup "error" obj of
+ Just (Aeson.String s) -> Just s
+ _ -> Nothing
+ }
+ _ -> DecodedToolResult True content Nothing
+
+renderCheckpointEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderCheckpointEvent content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-checkpoint"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "📍"
+ Lucid.span_ [Lucid.class_ "event-label"] "Checkpoint"
+ renderActorLabel actor
+ renderRelativeTimestamp now timestamp
+ Lucid.div_ [Lucid.class_ "event-content checkpoint-content"] (Lucid.toHtml content)
+
+renderGuardrailEvent :: (Monad m) => Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderGuardrailEvent content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-guardrail"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "⚠️"
+ Lucid.span_ [Lucid.class_ "event-label"] "Guardrail"
+ renderActorLabel actor
+ renderRelativeTimestamp now timestamp
+ Lucid.div_ [Lucid.class_ "event-content guardrail-content"] (renderFormattedJson content)
+
+renderGenericEvent :: (Monad m) => Text -> Text -> TaskCore.CommentAuthor -> UTCTime -> UTCTime -> Lucid.HtmlT m ()
+renderGenericEvent eventType content actor timestamp now =
+ Lucid.div_ [Lucid.class_ "timeline-generic"] <| do
+ Lucid.div_ [Lucid.class_ "event-header"] <| do
+ Lucid.span_ [Lucid.class_ "event-icon"] "📝"
+ Lucid.span_ [Lucid.class_ "event-label"] (Lucid.toHtml eventType)
+ renderActorLabel actor
+ renderRelativeTimestamp now timestamp
+ unless (Text.null content) <| Lucid.div_ [Lucid.class_ "event-content"] (Lucid.toHtml content)
+
+parseToolCallContent :: Text -> (Text, Text)
+parseToolCallContent content =
+ case Text.breakOn ":" content of
+ (name, rest)
+ | Text.null rest -> (content, "")
+ | otherwise -> (Text.strip name, Text.strip (Text.drop 1 rest))
+
+formatToolCallSummary :: Text -> Text -> Text
+formatToolCallSummary toolName argsJson =
+ case Aeson.decode (LBS.fromStrict (str argsJson)) of
+ Just (Aeson.Object obj) ->
+ let keyArg = case toolName of
+ "run_bash" -> KeyMap.lookup "command" obj
+ "read_file" -> KeyMap.lookup "path" obj
+ "edit_file" -> KeyMap.lookup "path" obj
+ "write_file" -> KeyMap.lookup "path" obj
+ "search_codebase" -> KeyMap.lookup "pattern" obj
+ "glob_files" -> KeyMap.lookup "pattern" obj
+ "list_directory" -> KeyMap.lookup "path" obj
+ _ -> Nothing
+ in case keyArg of
+ Just (Aeson.String s) -> "`" <> Text.take 100 s <> "`"
+ _ -> Text.take 80 argsJson
+ _ -> Text.take 80 argsJson
+
+renderCollapsibleOutput :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderCollapsibleOutput content =
+ let lineCount = length (Text.lines content)
+ in if lineCount > 20
+ then
+ Lucid.details_ [Lucid.class_ "output-collapsible"] <| do
+ Lucid.summary_ (Lucid.toHtml (tshow lineCount <> " lines"))
+ Lucid.pre_ [Lucid.class_ "tool-output-pre"] (Lucid.toHtml content)
+ else Lucid.pre_ [Lucid.class_ "tool-output-pre"] (Lucid.toHtml content)
+
+renderDecodedToolResult :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderDecodedToolResult content =
+ case Aeson.decode (LBS.fromStrict (str content)) of
+ Just (Aeson.Object obj) ->
+ case KeyMap.lookup "output" obj of
+ Just (Aeson.String output) -> Lucid.toHtml output
+ _ -> Lucid.toHtml content
+ _ -> Lucid.toHtml content
+
+-- | Format JSON content for human-readable display.
+-- Tries to pretty-print JSON, falls back to raw text if not valid JSON.
+renderFormattedJson :: (Monad m) => Text -> Lucid.HtmlT m ()
+renderFormattedJson content =
+ case Aeson.decode (LBS.fromStrict (str content)) of
+ Just (val :: Aeson.Value) ->
+ Lucid.pre_ [Lucid.class_ "formatted-json"] <| do
+ Lucid.toHtml (decodeUtf8 (LBS.toStrict (Aeson.encode val)))
+ Nothing -> Lucid.toHtml content
+
+timelineScrollScript :: (Monad m) => Lucid.HtmlT m ()
+timelineScrollScript =
+ Lucid.script_
+ [ Lucid.type_ "text/javascript"
+ ]
+ ( Text.unlines
+ [ "(function() {",
+ " function scrollToBottom() {",
+ " if (typeof autoscrollEnabled !== 'undefined' && !autoscrollEnabled) return;",
+ " var log = document.querySelector('.timeline-events');",
+ " if (log) {",
+ " var isNearBottom = log.scrollHeight - log.scrollTop - log.clientHeight < 100;",
+ " if (isNearBottom) {",
+ " log.scrollTop = log.scrollHeight;",
+ " }",
+ " }",
+ " }",
+ " scrollToBottom();",
+ " document.body.addEventListener('htmx:afterSwap', function(e) {",
+ " if (e.target.closest('.timeline-events') || e.target.classList.contains('timeline-events')) {",
+ " scrollToBottom();",
+ " }",
+ " });",
+ "})();"
+ ]
+ )
diff --git a/Omni/Jr/Web/Handlers.hs b/Omni/Jr/Web/Handlers.hs
new file mode 100644
index 0000000..9dd5847
--- /dev/null
+++ b/Omni/Jr/Web/Handlers.hs
@@ -0,0 +1,649 @@
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE TypeOperators #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep warp
+-- : dep servant-server
+-- : dep lucid
+-- : dep servant-lucid
+-- : dep process
+-- : dep aeson
+module Omni.Jr.Web.Handlers
+ ( API,
+ server,
+ api,
+ streamAgentEvents,
+ )
+where
+
+import Alpha
+import qualified Control.Concurrent as Concurrent
+import qualified Data.Aeson as Aeson
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.Lazy as LazyText
+import Data.Time (getCurrentTime)
+import Data.Time.Clock.POSIX (posixSecondsToUTCTime, utcTimeToPOSIXSeconds)
+import qualified Omni.Fact as Fact
+import qualified Omni.Jr.Web.Style as Style
+import Omni.Jr.Web.Types
+import qualified Omni.Task.Core as TaskCore
+import Servant
+import qualified Servant.HTML.Lucid as Lucid
+import qualified Servant.Types.SourceT as Source
+import qualified System.Exit as Exit
+import qualified System.Process as Process
+
+type PostRedirect = Verb 'POST 303 '[Lucid.HTML] (Headers '[Header "Location" Text] NoContent)
+
+type API =
+ QueryParam "range" Text :> Get '[Lucid.HTML] HomePage
+ :<|> "style.css" :> Get '[CSS] LazyText.Text
+ :<|> "ready" :> QueryParam "sort" Text :> Get '[Lucid.HTML] ReadyQueuePage
+ :<|> "blocked" :> QueryParam "sort" Text :> Get '[Lucid.HTML] BlockedPage
+ :<|> "intervention" :> QueryParam "sort" Text :> Get '[Lucid.HTML] InterventionPage
+ :<|> "stats" :> QueryParam "epic" Text :> Get '[Lucid.HTML] StatsPage
+ :<|> "tasks"
+ :> QueryParam "status" Text
+ :> QueryParam "priority" Text
+ :> QueryParam "namespace" Text
+ :> QueryParam "type" Text
+ :> QueryParam "sort" Text
+ :> Get '[Lucid.HTML] TaskListPage
+ :<|> "kb" :> Get '[Lucid.HTML] KBPage
+ :<|> "kb" :> "create" :> ReqBody '[FormUrlEncoded] FactCreateForm :> PostRedirect
+ :<|> "kb" :> Capture "id" Int :> Get '[Lucid.HTML] FactDetailPage
+ :<|> "kb" :> Capture "id" Int :> "edit" :> ReqBody '[FormUrlEncoded] FactEditForm :> PostRedirect
+ :<|> "kb" :> Capture "id" Int :> "delete" :> PostRedirect
+ :<|> "epics" :> QueryParam "sort" Text :> Get '[Lucid.HTML] EpicsPage
+ :<|> "tasks" :> Capture "id" Text :> Get '[Lucid.HTML] TaskDetailPage
+ :<|> "tasks" :> Capture "id" Text :> "status" :> ReqBody '[FormUrlEncoded] StatusForm :> Post '[Lucid.HTML] StatusBadgePartial
+ :<|> "tasks" :> Capture "id" Text :> "priority" :> ReqBody '[FormUrlEncoded] PriorityForm :> Post '[Lucid.HTML] PriorityBadgePartial
+ :<|> "tasks" :> Capture "id" Text :> "complexity" :> ReqBody '[FormUrlEncoded] ComplexityForm :> Post '[Lucid.HTML] ComplexityBadgePartial
+ :<|> "tasks" :> Capture "id" Text :> "description" :> "view" :> Get '[Lucid.HTML] DescriptionViewPartial
+ :<|> "tasks" :> Capture "id" Text :> "description" :> "edit" :> Get '[Lucid.HTML] DescriptionEditPartial
+ :<|> "tasks" :> Capture "id" Text :> "description" :> ReqBody '[FormUrlEncoded] DescriptionForm :> Post '[Lucid.HTML] DescriptionViewPartial
+ :<|> "tasks" :> Capture "id" Text :> "notes" :> ReqBody '[FormUrlEncoded] NotesForm :> PostRedirect
+ :<|> "tasks" :> Capture "id" Text :> "comment" :> ReqBody '[FormUrlEncoded] CommentForm :> PostRedirect
+ :<|> "tasks" :> Capture "id" Text :> "review" :> Get '[Lucid.HTML] TaskReviewPage
+ :<|> "tasks" :> Capture "id" Text :> "diff" :> Capture "commit" Text :> Get '[Lucid.HTML] TaskDiffPage
+ :<|> "tasks" :> Capture "id" Text :> "accept" :> PostRedirect
+ :<|> "tasks" :> Capture "id" Text :> "reject" :> ReqBody '[FormUrlEncoded] RejectForm :> PostRedirect
+ :<|> "tasks" :> Capture "id" Text :> "reset-retries" :> PostRedirect
+ :<|> "partials" :> "recent-activity-new" :> QueryParam "since" Int :> Get '[Lucid.HTML] RecentActivityNewPartial
+ :<|> "partials" :> "recent-activity-more" :> QueryParam "offset" Int :> Get '[Lucid.HTML] RecentActivityMorePartial
+ :<|> "partials" :> "ready-count" :> Get '[Lucid.HTML] ReadyCountPartial
+ :<|> "partials"
+ :> "task-list"
+ :> QueryParam "status" Text
+ :> QueryParam "priority" Text
+ :> QueryParam "namespace" Text
+ :> QueryParam "type" Text
+ :> QueryParam "sort" Text
+ :> Get '[Lucid.HTML] TaskListPartial
+ :<|> "partials" :> "task" :> Capture "id" Text :> "metrics" :> Get '[Lucid.HTML] TaskMetricsPartial
+ :<|> "partials" :> "task" :> Capture "id" Text :> "events" :> QueryParam "since" Int :> Get '[Lucid.HTML] AgentEventsPartial
+ :<|> "tasks" :> Capture "id" Text :> "events" :> "stream" :> StreamGet NoFraming SSE (SourceIO ByteString)
+
+api :: Proxy API
+api = Proxy
+
+server :: Server API
+server =
+ homeHandler
+ :<|> styleHandler
+ :<|> readyQueueHandler
+ :<|> blockedHandler
+ :<|> interventionHandler
+ :<|> statsHandler
+ :<|> taskListHandler
+ :<|> kbHandler
+ :<|> factCreateHandler
+ :<|> factDetailHandler
+ :<|> factEditHandler
+ :<|> factDeleteHandler
+ :<|> epicsHandler
+ :<|> taskDetailHandler
+ :<|> taskStatusHandler
+ :<|> taskPriorityHandler
+ :<|> taskComplexityHandler
+ :<|> descriptionViewHandler
+ :<|> descriptionEditHandler
+ :<|> descriptionPostHandler
+ :<|> taskNotesHandler
+ :<|> taskCommentHandler
+ :<|> taskReviewHandler
+ :<|> taskDiffHandler
+ :<|> taskAcceptHandler
+ :<|> taskRejectHandler
+ :<|> taskResetRetriesHandler
+ :<|> recentActivityNewHandler
+ :<|> recentActivityMoreHandler
+ :<|> readyCountHandler
+ :<|> taskListPartialHandler
+ :<|> taskMetricsPartialHandler
+ :<|> agentEventsPartialHandler
+ :<|> taskEventsStreamHandler
+ where
+ styleHandler :: Servant.Handler LazyText.Text
+ styleHandler = pure Style.css
+
+ homeHandler :: Maybe Text -> Servant.Handler HomePage
+ homeHandler maybeRangeText = do
+ now <- liftIO getCurrentTime
+ let range = parseTimeRange maybeRangeText
+ maybeStart = getTimeRangeStart range now
+ allTasks <- liftIO TaskCore.loadTasks
+ let filteredTasks = case maybeStart of
+ Nothing -> allTasks
+ Just start -> [t | t <- allTasks, TaskCore.taskUpdatedAt t >= start]
+ stats = TaskCore.computeTaskStatsFromList filteredTasks
+ readyTasks <- liftIO TaskCore.getReadyTasks
+ allActivities <- liftIO <| concat </ traverse (TaskCore.getActivitiesForTask <. TaskCore.taskId) allTasks
+ let filteredActivities = case maybeStart of
+ Nothing -> allActivities
+ Just start -> [a | a <- allActivities, TaskCore.activityTimestamp a >= start]
+ globalMetrics = computeMetricsFromActivities filteredTasks filteredActivities
+ sortedTasks = List.sortBy (flip compare `on` TaskCore.taskUpdatedAt) filteredTasks
+ recentTasks = take 5 sortedTasks
+ hasMoreRecent = length filteredTasks > 5
+ pure (HomePage stats readyTasks recentTasks hasMoreRecent globalMetrics range now)
+
+ readyQueueHandler :: Maybe Text -> Servant.Handler ReadyQueuePage
+ readyQueueHandler maybeSortText = do
+ now <- liftIO getCurrentTime
+ readyTasks <- liftIO TaskCore.getReadyTasks
+ let sortOrder = parseSortOrder maybeSortText
+ sortedTasks = sortTasks sortOrder readyTasks
+ pure (ReadyQueuePage sortedTasks sortOrder now)
+
+ blockedHandler :: Maybe Text -> Servant.Handler BlockedPage
+ blockedHandler maybeSortText = do
+ now <- liftIO getCurrentTime
+ blockedTasks <- liftIO TaskCore.getBlockedTasks
+ allTasks <- liftIO TaskCore.loadTasks
+ let sortOrder = parseSortOrder maybeSortText
+ tasksWithImpact = [(t, TaskCore.getBlockingImpact allTasks t) | t <- blockedTasks]
+ sorted = List.sortBy (comparing (Down <. snd)) tasksWithImpact
+ pure (BlockedPage sorted sortOrder now)
+
+ interventionHandler :: Maybe Text -> Servant.Handler InterventionPage
+ interventionHandler maybeSortText = do
+ now <- liftIO getCurrentTime
+ actionItems <- liftIO TaskCore.getHumanActionItems
+ let sortOrder = parseSortOrder maybeSortText
+ pure (InterventionPage actionItems sortOrder now)
+
+ statsHandler :: Maybe Text -> Servant.Handler StatsPage
+ statsHandler maybeEpic = do
+ let epicId = emptyToNothing maybeEpic
+ stats <- liftIO <| TaskCore.getTaskStats epicId
+ pure (StatsPage stats epicId)
+
+ taskListHandler :: Maybe Text -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Text -> Servant.Handler TaskListPage
+ taskListHandler maybeStatusText maybePriorityText maybeNamespace maybeTypeText maybeSortText = do
+ now <- liftIO getCurrentTime
+ allTasks <- liftIO TaskCore.loadTasks
+ let maybeStatus = parseStatus =<< emptyToNothing maybeStatusText
+ maybePriority = parsePriority =<< emptyToNothing maybePriorityText
+ maybeType = parseTaskType =<< emptyToNothing maybeTypeText
+ filters = TaskFilters maybeStatus maybePriority (emptyToNothing maybeNamespace) maybeType
+ sortOrder = parseSortOrder maybeSortText
+ filteredTasks = sortTasks sortOrder (applyFilters filters allTasks)
+ pure (TaskListPage filteredTasks filters sortOrder now)
+
+ kbHandler :: Servant.Handler KBPage
+ kbHandler = do
+ facts <- liftIO Fact.getAllFacts
+ pure (KBPage facts)
+
+ factCreateHandler :: FactCreateForm -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ factCreateHandler (FactCreateForm project content filesText confText) = do
+ let files = filter (not <. Text.null) (Text.splitOn "," (Text.strip filesText))
+ confidence = fromMaybe 0.8 (readMaybe (Text.unpack confText))
+ fid <- liftIO (Fact.createFact project content files Nothing confidence)
+ pure <| addHeader ("/kb/" <> tshow fid) NoContent
+
+ factDetailHandler :: Int -> Servant.Handler FactDetailPage
+ factDetailHandler fid = do
+ now <- liftIO getCurrentTime
+ maybeFact <- liftIO (Fact.getFact fid)
+ case maybeFact of
+ Nothing -> pure (FactDetailNotFound fid)
+ Just fact -> pure (FactDetailFound fact now)
+
+ factEditHandler :: Int -> FactEditForm -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ factEditHandler fid (FactEditForm content filesText confText) = do
+ let files = filter (not <. Text.null) (Text.splitOn "," (Text.strip filesText))
+ confidence = fromMaybe 0.8 (readMaybe (Text.unpack confText))
+ liftIO (Fact.updateFact fid content files confidence)
+ pure <| addHeader ("/kb/" <> tshow fid) NoContent
+
+ factDeleteHandler :: Int -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ factDeleteHandler fid = do
+ liftIO (Fact.deleteFact fid)
+ pure <| addHeader "/kb" NoContent
+
+ epicsHandler :: Maybe Text -> Servant.Handler EpicsPage
+ epicsHandler maybeSortText = do
+ allTasks <- liftIO TaskCore.loadTasks
+ let epicTasks = filter (\t -> TaskCore.taskType t == TaskCore.Epic) allTasks
+ sortOrder = parseSortOrder maybeSortText
+ sortedEpics = sortTasks sortOrder epicTasks
+ pure (EpicsPage sortedEpics allTasks sortOrder)
+
+ parseStatus :: Text -> Maybe TaskCore.Status
+ parseStatus = readMaybe <. Text.unpack
+
+ parsePriority :: Text -> Maybe TaskCore.Priority
+ parsePriority = readMaybe <. Text.unpack
+
+ parseTaskType :: Text -> Maybe TaskCore.TaskType
+ parseTaskType = readMaybe <. Text.unpack
+
+ emptyToNothing :: Maybe Text -> Maybe Text
+ emptyToNothing (Just t) | Text.null (Text.strip t) = Nothing
+ emptyToNothing x = x
+
+ applyFilters :: TaskFilters -> [TaskCore.Task] -> [TaskCore.Task]
+ applyFilters filters = filter matchesAllFilters
+ where
+ matchesAllFilters task =
+ matchesStatus task
+ && matchesPriority task
+ && matchesNamespace task
+ && matchesType task
+
+ matchesStatus task = case filterStatus filters of
+ Nothing -> True
+ Just s -> TaskCore.taskStatus task == s
+
+ matchesPriority task = case filterPriority filters of
+ Nothing -> True
+ Just p -> TaskCore.taskPriority task == p
+
+ matchesNamespace task = case filterNamespace filters of
+ Nothing -> True
+ Just ns -> case TaskCore.taskNamespace task of
+ Nothing -> False
+ Just taskNs -> ns `Text.isPrefixOf` taskNs
+
+ matchesType task = case filterType filters of
+ Nothing -> True
+ Just t -> TaskCore.taskType task == t
+
+ taskDetailHandler :: Text -> Servant.Handler TaskDetailPage
+ taskDetailHandler tid = do
+ now <- liftIO getCurrentTime
+ tasks <- liftIO TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> pure (TaskDetailNotFound tid)
+ Just task -> do
+ activities <- liftIO (TaskCore.getActivitiesForTask tid)
+ retryCtx <- liftIO (TaskCore.getRetryContext tid)
+ commits <- liftIO (getCommitsForTask tid)
+ aggMetrics <-
+ if TaskCore.taskType task == TaskCore.Epic
+ then Just </ liftIO (TaskCore.getAggregatedMetrics tid)
+ else pure Nothing
+ agentEvents <- liftIO (TaskCore.getAllEventsForTask tid)
+ pure (TaskDetailFound task tasks activities retryCtx commits aggMetrics agentEvents now)
+
+ taskStatusHandler :: Text -> StatusForm -> Servant.Handler StatusBadgePartial
+ taskStatusHandler tid (StatusForm newStatus) = do
+ liftIO <| TaskCore.updateTaskStatusWithActor tid newStatus [] TaskCore.Human
+ pure (StatusBadgePartial newStatus tid)
+
+ taskPriorityHandler :: Text -> PriorityForm -> Servant.Handler PriorityBadgePartial
+ taskPriorityHandler tid (PriorityForm newPriority) = do
+ _ <- liftIO <| TaskCore.editTask tid (\t -> t {TaskCore.taskPriority = newPriority})
+ pure (PriorityBadgePartial newPriority tid)
+
+ taskComplexityHandler :: Text -> ComplexityForm -> Servant.Handler ComplexityBadgePartial
+ taskComplexityHandler tid (ComplexityForm newComplexity) = do
+ _ <- liftIO <| TaskCore.editTask tid (\t -> t {TaskCore.taskComplexity = newComplexity})
+ pure (ComplexityBadgePartial newComplexity tid)
+
+ descriptionViewHandler :: Text -> Servant.Handler DescriptionViewPartial
+ descriptionViewHandler tid = do
+ tasks <- liftIO TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> throwError err404
+ Just task -> pure (DescriptionViewPartial tid (TaskCore.taskDescription task) (TaskCore.taskType task == TaskCore.Epic))
+
+ descriptionEditHandler :: Text -> Servant.Handler DescriptionEditPartial
+ descriptionEditHandler tid = do
+ tasks <- liftIO TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> throwError err404
+ Just task -> pure (DescriptionEditPartial tid (TaskCore.taskDescription task) (TaskCore.taskType task == TaskCore.Epic))
+
+ descriptionPostHandler :: Text -> DescriptionForm -> Servant.Handler DescriptionViewPartial
+ descriptionPostHandler tid (DescriptionForm desc) = do
+ let descText = Text.strip desc
+ _ <- liftIO <| TaskCore.editTask tid (\t -> t {TaskCore.taskDescription = descText})
+ tasks <- liftIO TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> throwError err404
+ Just task -> pure (DescriptionViewPartial tid (TaskCore.taskDescription task) (TaskCore.taskType task == TaskCore.Epic))
+
+ taskNotesHandler :: Text -> NotesForm -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ taskNotesHandler tid (NotesForm notes) = do
+ liftIO <| TaskCore.updateRetryNotes tid notes
+ pure <| addHeader ("/tasks/" <> tid) NoContent
+
+ taskCommentHandler :: Text -> CommentForm -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ taskCommentHandler tid (CommentForm commentText) = do
+ _ <- liftIO (TaskCore.addComment tid commentText TaskCore.Human)
+ pure <| addHeader ("/tasks/" <> tid) NoContent
+
+ taskReviewHandler :: Text -> Servant.Handler TaskReviewPage
+ taskReviewHandler tid = do
+ tasks <- liftIO TaskCore.loadTasks
+ case TaskCore.findTask tid tasks of
+ Nothing -> pure (ReviewPageNotFound tid)
+ Just task -> do
+ reviewInfo <- liftIO <| getReviewInfo tid
+ pure (ReviewPageFound task reviewInfo)
+
+ taskDiffHandler :: Text -> Text -> Servant.Handler TaskDiffPage
+ taskDiffHandler tid commitSha = do
+ diffOutput <- liftIO <| getDiffForCommit commitSha
+ case diffOutput of
+ Nothing -> pure (DiffPageNotFound tid commitSha)
+ Just output -> pure (DiffPageFound tid commitSha output)
+
+ taskAcceptHandler :: Text -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ taskAcceptHandler tid = do
+ liftIO <| do
+ TaskCore.clearRetryContext tid
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Done [] TaskCore.Human
+ pure <| addHeader ("/tasks/" <> tid) NoContent
+
+ taskRejectHandler :: Text -> RejectForm -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ taskRejectHandler tid (RejectForm maybeNotes) = do
+ liftIO <| do
+ maybeCommit <- findCommitForTask tid
+ let commitSha = fromMaybe "" maybeCommit
+ maybeCtx <- TaskCore.getRetryContext tid
+ let attempt = maybe 1 (\ctx -> TaskCore.retryAttempt ctx + 1) maybeCtx
+ let currentReason = "attempt " <> tshow attempt <> ": rejected: " <> fromMaybe "(no notes)" maybeNotes
+ let accumulatedReason = case maybeCtx of
+ Nothing -> currentReason
+ Just ctx -> TaskCore.retryReason ctx <> "\n" <> currentReason
+ TaskCore.setRetryContext
+ TaskCore.RetryContext
+ { TaskCore.retryTaskId = tid,
+ TaskCore.retryOriginalCommit = commitSha,
+ TaskCore.retryConflictFiles = [],
+ TaskCore.retryAttempt = attempt,
+ TaskCore.retryReason = accumulatedReason,
+ TaskCore.retryNotes = maybeCtx +> TaskCore.retryNotes
+ }
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.Human
+ pure <| addHeader ("/tasks/" <> tid) NoContent
+
+ taskResetRetriesHandler :: Text -> Servant.Handler (Headers '[Header "Location" Text] NoContent)
+ taskResetRetriesHandler tid = do
+ liftIO <| do
+ TaskCore.clearRetryContext tid
+ TaskCore.updateTaskStatusWithActor tid TaskCore.Open [] TaskCore.Human
+ pure <| addHeader ("/tasks/" <> tid) NoContent
+
+ recentActivityNewHandler :: Maybe Int -> Servant.Handler RecentActivityNewPartial
+ recentActivityNewHandler maybeSince = do
+ allTasks <- liftIO TaskCore.loadTasks
+ let sinceTime = maybe (posixSecondsToUTCTime 0) (posixSecondsToUTCTime <. fromIntegral) maybeSince
+ sortedTasks = List.sortBy (flip compare `on` TaskCore.taskUpdatedAt) allTasks
+ newTasks = filter (\t -> TaskCore.taskUpdatedAt t > sinceTime) sortedTasks
+ newestTs = maybe maybeSince (Just <. taskToUnixTs) (head newTasks)
+ pure (RecentActivityNewPartial newTasks newestTs)
+
+ recentActivityMoreHandler :: Maybe Int -> Servant.Handler RecentActivityMorePartial
+ recentActivityMoreHandler maybeOffset = do
+ allTasks <- liftIO TaskCore.loadTasks
+ let offset = fromMaybe 0 maybeOffset
+ pageSize = 5
+ sortedTasks = List.sortBy (flip compare `on` TaskCore.taskUpdatedAt) allTasks
+ pageTasks = take pageSize <| drop offset sortedTasks
+ hasMore = length sortedTasks > offset + pageSize
+ nextOffset = offset + pageSize
+ pure (RecentActivityMorePartial pageTasks nextOffset hasMore)
+
+ readyCountHandler :: Servant.Handler ReadyCountPartial
+ readyCountHandler = do
+ readyTasks <- liftIO TaskCore.getReadyTasks
+ pure (ReadyCountPartial (length readyTasks))
+
+ taskListPartialHandler :: Maybe Text -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Text -> Servant.Handler TaskListPartial
+ taskListPartialHandler maybeStatusText maybePriorityText maybeNamespace maybeTypeText maybeSortText = do
+ allTasks <- liftIO TaskCore.loadTasks
+ let maybeStatus = parseStatus =<< emptyToNothing maybeStatusText
+ maybePriority = parsePriority =<< emptyToNothing maybePriorityText
+ maybeType = parseTaskType =<< emptyToNothing maybeTypeText
+ filters = TaskFilters maybeStatus maybePriority (emptyToNothing maybeNamespace) maybeType
+ sortOrder = parseSortOrder maybeSortText
+ filteredTasks = sortTasks sortOrder (applyFilters filters allTasks)
+ pure (TaskListPartial filteredTasks)
+
+ taskMetricsPartialHandler :: Text -> Servant.Handler TaskMetricsPartial
+ taskMetricsPartialHandler tid = do
+ now <- liftIO getCurrentTime
+ activities <- liftIO (TaskCore.getActivitiesForTask tid)
+ maybeRetry <- liftIO (TaskCore.getRetryContext tid)
+ pure (TaskMetricsPartial tid activities maybeRetry now)
+
+ agentEventsPartialHandler :: Text -> Maybe Int -> Servant.Handler AgentEventsPartial
+ agentEventsPartialHandler tid _maybeSince = do
+ now <- liftIO getCurrentTime
+ events <- liftIO (TaskCore.getAllEventsForTask tid)
+ tasks <- liftIO TaskCore.loadTasks
+ let isInProgress = case TaskCore.findTask tid tasks of
+ Nothing -> False
+ Just task -> TaskCore.taskStatus task == TaskCore.InProgress
+ pure (AgentEventsPartial tid events isInProgress now)
+
+ taskEventsStreamHandler :: Text -> Servant.Handler (SourceIO ByteString)
+ taskEventsStreamHandler tid = do
+ maybeSession <- liftIO (TaskCore.getLatestSessionForTask tid)
+ case maybeSession of
+ Nothing -> pure (Source.source [])
+ Just sid -> liftIO (streamAgentEvents tid sid)
+
+streamAgentEvents :: Text -> Text -> IO (SourceIO ByteString)
+streamAgentEvents tid sid = do
+ existingEvents <- TaskCore.getEventsForSession sid
+ let lastId = if null existingEvents then 0 else maximum (map TaskCore.storedEventId existingEvents)
+ let existingSSE = map eventToSSE existingEvents
+ pure <| Source.fromStepT <| streamEventsStep tid sid lastId existingSSE True
+
+streamEventsStep :: Text -> Text -> Int -> [ByteString] -> Bool -> Source.StepT IO ByteString
+streamEventsStep tid sid lastId buffer sendExisting = case (sendExisting, buffer) of
+ (True, b : bs) -> Source.Yield b (streamEventsStep tid sid lastId bs True)
+ (True, []) -> streamEventsStep tid sid lastId [] False
+ (False, _) ->
+ Source.Effect <| do
+ tasks <- TaskCore.loadTasks
+ let isComplete = case TaskCore.findTask tid tasks of
+ Nothing -> True
+ Just task -> TaskCore.taskStatus task /= TaskCore.InProgress
+
+ if isComplete
+ then do
+ let completeSSE = formatSSE "complete" "{}"
+ pure <| Source.Yield completeSSE Source.Stop
+ else do
+ Concurrent.threadDelay 500000
+ newEvents <- TaskCore.getEventsSince sid lastId
+ if null newEvents
+ then pure <| streamEventsStep tid sid lastId [] False
+ else do
+ let newLastId = maximum (map TaskCore.storedEventId newEvents)
+ let newSSE = map eventToSSE newEvents
+ case newSSE of
+ (e : es) -> pure <| Source.Yield e (streamEventsStep tid sid newLastId es False)
+ [] -> pure <| streamEventsStep tid sid newLastId [] False
+
+eventToSSE :: TaskCore.StoredEvent -> ByteString
+eventToSSE event =
+ let eventType = Text.toLower (TaskCore.storedEventType event)
+ content = TaskCore.storedEventContent event
+ jsonData = case eventType of
+ "assistant" -> Aeson.object ["content" Aeson..= content]
+ "toolcall" ->
+ let (tool, args) = parseToolCallContent content
+ in Aeson.object ["tool" Aeson..= tool, "args" Aeson..= Aeson.object ["data" Aeson..= args]]
+ "toolresult" ->
+ Aeson.object ["tool" Aeson..= ("unknown" :: Text), "success" Aeson..= True, "output" Aeson..= content]
+ "cost" -> Aeson.object ["cost" Aeson..= content]
+ "error" -> Aeson.object ["error" Aeson..= content]
+ "complete" -> Aeson.object []
+ _ -> Aeson.object ["content" Aeson..= content]
+ in formatSSE eventType (str (Aeson.encode jsonData))
+
+formatSSE :: Text -> ByteString -> ByteString
+formatSSE eventType jsonData =
+ str
+ <| "event: "
+ <> eventType
+ <> "\n"
+ <> "data: "
+ <> str jsonData
+ <> "\n\n"
+
+parseToolCallContent :: Text -> (Text, Text)
+parseToolCallContent content =
+ case Text.breakOn ":" content of
+ (name, rest)
+ | Text.null rest -> (content, "")
+ | otherwise -> (Text.strip name, Text.strip (Text.drop 1 rest))
+
+taskToUnixTs :: TaskCore.Task -> Int
+taskToUnixTs t = ceiling (utcTimeToPOSIXSeconds (TaskCore.taskUpdatedAt t))
+
+getReviewInfo :: Text -> IO ReviewInfo
+getReviewInfo tid = do
+ maybeCommit <- findCommitForTask tid
+ case maybeCommit of
+ Nothing -> pure ReviewNoCommit
+ Just commitSha -> do
+ conflictResult <- checkMergeConflict (Text.unpack commitSha)
+ case conflictResult of
+ Just conflictFiles -> pure (ReviewMergeConflict commitSha conflictFiles)
+ Nothing -> do
+ (_, diffOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["show", Text.unpack commitSha]
+ ""
+ pure (ReviewReady commitSha (Text.pack diffOut))
+
+getDiffForCommit :: Text -> IO (Maybe Text)
+getDiffForCommit commitSha = do
+ (code, diffOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["show", Text.unpack commitSha]
+ ""
+ case code of
+ Exit.ExitSuccess -> pure (Just (Text.pack diffOut))
+ Exit.ExitFailure _ -> pure Nothing
+
+findCommitForTask :: Text -> IO (Maybe Text)
+findCommitForTask tid = do
+ let grepArg = "--grep=" <> Text.unpack tid
+ (code, shaOut, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%H", "-n", "1", grepArg]
+ ""
+ if code /= Exit.ExitSuccess || null shaOut
+ then pure Nothing
+ else case List.lines shaOut of
+ (x : _) -> pure (Just (Text.pack x))
+ [] -> pure Nothing
+
+getCommitsForTask :: Text -> IO [GitCommit]
+getCommitsForTask tid = do
+ let grepArg = "--grep=Task-Id: " <> Text.unpack tid
+ (code, out, _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["log", "--pretty=format:%H|%h|%s|%an|%ar", grepArg]
+ ""
+ if code /= Exit.ExitSuccess || null out
+ then pure []
+ else do
+ let commitLines = filter (not <. null) (List.lines out)
+ traverse parseCommitLine commitLines
+ where
+ parseCommitLine :: String -> IO GitCommit
+ parseCommitLine line =
+ case Text.splitOn "|" (Text.pack line) of
+ [sha, shortSha, summary, author, relDate] -> do
+ filesCount <- getFilesChangedCount (Text.unpack sha)
+ pure
+ GitCommit
+ { commitHash = sha,
+ commitShortHash = shortSha,
+ commitSummary = summary,
+ commitAuthor = author,
+ commitRelativeDate = relDate,
+ commitFilesChanged = filesCount
+ }
+ _ ->
+ pure
+ GitCommit
+ { commitHash = Text.pack line,
+ commitShortHash = Text.take 7 (Text.pack line),
+ commitSummary = "(parse error)",
+ commitAuthor = "",
+ commitRelativeDate = "",
+ commitFilesChanged = 0
+ }
+
+ getFilesChangedCount :: String -> IO Int
+ getFilesChangedCount sha = do
+ (code', out', _) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["show", "--stat", "--format=", sha]
+ ""
+ pure
+ <| if code' /= Exit.ExitSuccess
+ then 0
+ else
+ let statLines = filter (not <. null) (List.lines out')
+ in max 0 (length statLines - 1)
+
+checkMergeConflict :: String -> IO (Maybe [Text])
+checkMergeConflict commitSha = do
+ (_, origHead, _) <- Process.readProcessWithExitCode "git" ["rev-parse", "HEAD"] ""
+
+ (cpCode, _, cpErr) <-
+ Process.readProcessWithExitCode
+ "git"
+ ["cherry-pick", "--no-commit", commitSha]
+ ""
+
+ _ <- Process.readProcessWithExitCode "git" ["cherry-pick", "--abort"] ""
+ _ <- Process.readProcessWithExitCode "git" ["reset", "--hard", List.head (List.lines origHead)] ""
+
+ case cpCode of
+ Exit.ExitSuccess -> pure Nothing
+ Exit.ExitFailure _ -> do
+ let errLines = Text.lines (Text.pack cpErr)
+ conflictLines = filter (Text.isPrefixOf "CONFLICT") errLines
+ files = mapMaybe extractConflictFile conflictLines
+ pure (Just (if null files then ["(unknown files)"] else files))
+
+extractConflictFile :: Text -> Maybe Text
+extractConflictFile line =
+ case Text.breakOn "Merge conflict in " line of
+ (_, rest)
+ | not (Text.null rest) -> Just (Text.strip (Text.drop 18 rest))
+ _ -> case Text.breakOn "in " line of
+ (_, rest)
+ | not (Text.null rest) -> Just (Text.strip (Text.drop 3 rest))
+ _ -> Nothing
diff --git a/Omni/Jr/Web/Pages.hs b/Omni/Jr/Web/Pages.hs
new file mode 100644
index 0000000..b3cc8ea
--- /dev/null
+++ b/Omni/Jr/Web/Pages.hs
@@ -0,0 +1,862 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+{-# OPTIONS_GHC -Wno-orphans #-}
+
+-- : dep lucid
+-- : dep servant-lucid
+module Omni.Jr.Web.Pages
+ ( -- * Re-export page types
+ module Omni.Jr.Web.Types,
+ )
+where
+
+import Alpha
+import qualified Data.Text as Text
+import Data.Time (utctDayTime)
+import qualified Lucid
+import qualified Lucid.Base as Lucid
+import Numeric (showFFloat)
+import Omni.Jr.Web.Components
+ ( Breadcrumb (..),
+ complexityBadgeWithForm,
+ metaSep,
+ multiColorProgressBar,
+ pageBody,
+ pageBodyWithCrumbs,
+ pageHead,
+ priorityBadgeWithForm,
+ renderAggregatedMetrics,
+ renderBlockedTaskCard,
+ renderEpicCardWithStats,
+ renderEpicReviewCard,
+ renderListGroupItem,
+ renderRelativeTimestamp,
+ renderRetryContextBanner,
+ renderTaskCard,
+ renderUnifiedTimeline,
+ sortDropdown,
+ statusBadge,
+ statusBadgeWithForm,
+ taskBreadcrumbs,
+ )
+import Omni.Jr.Web.Partials ()
+import Omni.Jr.Web.Types
+ ( BlockedPage (..),
+ DescriptionViewPartial (..),
+ EpicsPage (..),
+ FactDetailPage (..),
+ GitCommit (..),
+ HomePage (..),
+ InterventionPage (..),
+ KBPage (..),
+ ReadyQueuePage (..),
+ ReviewInfo (..),
+ SortOrder (..),
+ StatsPage (..),
+ TaskDetailPage (..),
+ TaskDiffPage (..),
+ TaskFilters (..),
+ TaskListPage (..),
+ TaskReviewPage (..),
+ TimeRange (..),
+ filterNamespace,
+ filterPriority,
+ filterStatus,
+ sortOrderToParam,
+ sortTasks,
+ timeRangeToParam,
+ )
+import qualified Omni.Task.Core as TaskCore
+
+taskToUnixTs :: TaskCore.Task -> Int
+taskToUnixTs t =
+ let ts = TaskCore.taskUpdatedAt t
+ in floor (realToFrac (utctDayTime ts) :: Double)
+
+instance Lucid.ToHtml HomePage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (HomePage stats readyTasks recentTasks hasMoreRecent globalMetrics currentRange _now) =
+ Lucid.doctypehtml_ <| do
+ pageHead "Jr Dashboard"
+ pageBody <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h2_ "Task Status"
+ Lucid.div_ [Lucid.class_ "time-filter"] <| do
+ timeFilterBtn "Today" Today currentRange
+ timeFilterBtn "This Week" Week currentRange
+ timeFilterBtn "This Month" Month currentRange
+ timeFilterBtn "All Time" AllTime currentRange
+ Lucid.div_ [Lucid.class_ "stats-grid"] <| do
+ statCard "Open" (TaskCore.openTasks stats) "badge-open" "/tasks?status=Open"
+ statCard "In Progress" (TaskCore.inProgressTasks stats) "badge-inprogress" "/tasks?status=InProgress"
+ statCard "Review" (TaskCore.reviewTasks stats) "badge-review" "/tasks?status=Review"
+ statCard "Approved" (TaskCore.approvedTasks stats) "badge-approved" "/tasks?status=Approved"
+ statCard "Done" (TaskCore.doneTasks stats) "badge-done" "/tasks?status=Done"
+ metricCard "Cost" (formatCost (TaskCore.aggTotalCostCents globalMetrics))
+ metricCard "Duration" (formatDuration (TaskCore.aggTotalDurationSeconds globalMetrics))
+
+ Lucid.h2_ <| do
+ "Ready Queue "
+ Lucid.span_
+ [ Lucid.class_ "ready-count",
+ Lucid.makeAttribute "hx-get" "/partials/ready-count",
+ Lucid.makeAttribute "hx-trigger" "every 5s"
+ ]
+ <| do
+ Lucid.a_ [Lucid.href_ "/ready", Lucid.class_ "ready-link"]
+ <| Lucid.toHtml ("(" <> tshow (length readyTasks) <> " tasks)")
+ if null readyTasks
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No tasks ready for work."
+ else
+ Lucid.div_ [Lucid.class_ "list-group"]
+ <| traverse_ renderListGroupItem (take 5 readyTasks)
+
+ Lucid.h2_ "Recent Activity"
+ let newestTimestamp = maybe 0 taskToUnixTs (head recentTasks)
+ Lucid.div_
+ [ Lucid.class_ "recent-activity",
+ Lucid.id_ "recent-activity",
+ Lucid.makeAttribute "data-newest-ts" (tshow newestTimestamp),
+ Lucid.makeAttribute "hx-get" "/partials/recent-activity-new",
+ Lucid.makeAttribute "hx-trigger" "every 10s",
+ Lucid.makeAttribute "hx-vals" "js:{since: document.getElementById('recent-activity')?.dataset?.newestTs || 0}",
+ Lucid.makeAttribute "hx-target" "#activity-list",
+ Lucid.makeAttribute "hx-swap" "afterbegin"
+ ]
+ <| do
+ Lucid.div_ [Lucid.id_ "activity-list", Lucid.class_ "list-group"]
+ <| traverse_ renderListGroupItem recentTasks
+ when hasMoreRecent
+ <| Lucid.button_
+ [ Lucid.id_ "activity-load-more",
+ Lucid.class_ "btn btn-secondary load-more-btn",
+ Lucid.makeAttribute "hx-get" "/partials/recent-activity-more?offset=5",
+ Lucid.makeAttribute "hx-target" "#activity-list",
+ Lucid.makeAttribute "hx-swap" "beforeend"
+ ]
+ "Load More"
+ where
+ statCard :: (Monad m) => Text -> Int -> Text -> Text -> Lucid.HtmlT m ()
+ statCard label count badgeClass href =
+ Lucid.a_ [Lucid.href_ href, Lucid.class_ ("stat-card " <> badgeClass)] <| do
+ Lucid.div_ [Lucid.class_ "stat-count"] (Lucid.toHtml (tshow count))
+ Lucid.div_ [Lucid.class_ "stat-label"] (Lucid.toHtml label)
+
+ metricCard :: (Monad m) => Text -> Text -> Lucid.HtmlT m ()
+ metricCard label value =
+ Lucid.div_ [Lucid.class_ "stat-card badge-neutral"] <| do
+ Lucid.div_ [Lucid.class_ "stat-count"] (Lucid.toHtml value)
+ Lucid.div_ [Lucid.class_ "stat-label"] (Lucid.toHtml label)
+
+ formatCost :: Int -> Text
+ formatCost cents =
+ let dollars = fromIntegral cents / 100.0 :: Double
+ in Text.pack ("$" <> showFFloat (Just 2) dollars "")
+
+ formatDuration :: Int -> Text
+ formatDuration totalSeconds
+ | totalSeconds < 60 = tshow totalSeconds <> "s"
+ | totalSeconds < 3600 =
+ let mins = totalSeconds `div` 60
+ in tshow mins <> "m"
+ | otherwise =
+ let hours = totalSeconds `div` 3600
+ mins = (totalSeconds `mod` 3600) `div` 60
+ in tshow hours <> "h " <> tshow mins <> "m"
+
+ timeFilterBtn :: (Monad m) => Text -> TimeRange -> TimeRange -> Lucid.HtmlT m ()
+ timeFilterBtn label range current =
+ let activeClass = if range == current then " active" else ""
+ href = "/?" <> "range=" <> timeRangeToParam range
+ in Lucid.a_
+ [ Lucid.href_ href,
+ Lucid.class_ ("time-filter-btn" <> activeClass)
+ ]
+ (Lucid.toHtml label)
+
+instance Lucid.ToHtml ReadyQueuePage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (ReadyQueuePage tasks currentSort _now) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Ready Queue" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Ready Queue - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "page-header-row"] <| do
+ Lucid.h1_ <| Lucid.toHtml ("Ready Queue (" <> tshow (length tasks) <> " tasks)")
+ sortDropdown "/ready" currentSort
+ if null tasks
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No tasks are ready for work."
+ else Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderTaskCard tasks
+
+instance Lucid.ToHtml BlockedPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (BlockedPage tasksWithImpact currentSort _now) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Blocked" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Blocked Tasks - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "page-header-row"] <| do
+ Lucid.h1_ <| Lucid.toHtml ("Blocked Tasks (" <> tshow (length tasksWithImpact) <> " tasks)")
+ sortDropdown "/blocked" currentSort
+ Lucid.p_ [Lucid.class_ "info-msg"] "Tasks with unmet blocking dependencies, sorted by blocking impact."
+ if null tasksWithImpact
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No blocked tasks."
+ else Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderBlockedTaskCard tasksWithImpact
+
+instance Lucid.ToHtml InterventionPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (InterventionPage actionItems currentSort _now) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Needs Human Action" Nothing]
+ failed = TaskCore.failedTasks actionItems
+ epicsReady = TaskCore.epicsInReview actionItems
+ needsHelp = TaskCore.tasksNeedingHelp actionItems
+ totalCount = length failed + length epicsReady + length needsHelp
+ in Lucid.doctypehtml_ <| do
+ pageHead "Needs Human Action - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "page-header-row"] <| do
+ Lucid.h1_ <| Lucid.toHtml ("Needs Human Action (" <> tshow totalCount <> " items)")
+ sortDropdown "/intervention" currentSort
+ if totalCount == 0
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No items need human action."
+ else do
+ unless (null failed) <| do
+ Lucid.h2_ [Lucid.class_ "section-header"] <| Lucid.toHtml ("Failed Tasks (" <> tshow (length failed) <> ")")
+ Lucid.p_ [Lucid.class_ "info-msg"] "Tasks that have failed 3+ times and need human help."
+ Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderTaskCard (sortTasks currentSort failed)
+ unless (null epicsReady) <| do
+ Lucid.h2_ [Lucid.class_ "section-header"] <| Lucid.toHtml ("Epics Ready for Review (" <> tshow (length epicsReady) <> ")")
+ Lucid.p_ [Lucid.class_ "info-msg"] "Epics with all children completed. Verify before closing."
+ Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderEpicReviewCard epicsReady
+ unless (null needsHelp) <| do
+ Lucid.h2_ [Lucid.class_ "section-header"] <| Lucid.toHtml ("Needs Help (" <> tshow (length needsHelp) <> ")")
+ Lucid.p_ [Lucid.class_ "info-msg"] "Tasks where Jr needs human guidance or decisions."
+ Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderTaskCard (sortTasks currentSort needsHelp)
+
+instance Lucid.ToHtml KBPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (KBPage facts) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Knowledge Base" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Knowledge Base - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Knowledge Base"
+ Lucid.p_ [Lucid.class_ "info-msg"] "Facts learned during task execution."
+
+ Lucid.details_ [Lucid.class_ "create-fact-section"] <| do
+ Lucid.summary_ [Lucid.class_ "btn btn-primary create-fact-toggle"] "Create New Fact"
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ "/kb/create",
+ Lucid.class_ "fact-create-form"
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "project"] "Project:"
+ Lucid.input_
+ [ Lucid.type_ "text",
+ Lucid.name_ "project",
+ Lucid.id_ "project",
+ Lucid.class_ "form-input",
+ Lucid.required_ "required",
+ Lucid.placeholder_ "e.g., Omni/Jr"
+ ]
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "content"] "Fact Content:"
+ Lucid.textarea_
+ [ Lucid.name_ "content",
+ Lucid.id_ "content",
+ Lucid.class_ "form-textarea",
+ Lucid.rows_ "4",
+ Lucid.required_ "required",
+ Lucid.placeholder_ "Describe the fact or knowledge..."
+ ]
+ ""
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "files"] "Related Files (comma-separated):"
+ Lucid.input_
+ [ Lucid.type_ "text",
+ Lucid.name_ "files",
+ Lucid.id_ "files",
+ Lucid.class_ "form-input",
+ Lucid.placeholder_ "path/to/file1.hs, path/to/file2.hs"
+ ]
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "confidence"] "Confidence (0.0 - 1.0):"
+ Lucid.input_
+ [ Lucid.type_ "number",
+ Lucid.name_ "confidence",
+ Lucid.id_ "confidence",
+ Lucid.class_ "form-input",
+ Lucid.step_ "0.1",
+ Lucid.min_ "0",
+ Lucid.max_ "1",
+ Lucid.value_ "0.8"
+ ]
+ Lucid.div_ [Lucid.class_ "form-actions"] <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-primary"] "Create Fact"
+
+ if null facts
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No facts recorded yet."
+ else Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ renderFactCard facts
+ where
+ renderFactCard :: (Monad m) => TaskCore.Fact -> Lucid.HtmlT m ()
+ renderFactCard f =
+ let factUrl = "/kb/" <> maybe "-" tshow (TaskCore.factId f)
+ in Lucid.a_
+ [ Lucid.class_ "task-card task-card-link",
+ Lucid.href_ factUrl
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "task-header"] <| do
+ Lucid.span_ [Lucid.class_ "task-id"] (Lucid.toHtml (maybe "-" tshow (TaskCore.factId f)))
+ confidenceBadge (TaskCore.factConfidence f)
+ Lucid.span_ [Lucid.class_ "priority"] (Lucid.toHtml (TaskCore.factProject f))
+ Lucid.p_ [Lucid.class_ "task-title"] (Lucid.toHtml (Text.take 80 (TaskCore.factContent f) <> if Text.length (TaskCore.factContent f) > 80 then "..." else ""))
+ unless (null (TaskCore.factRelatedFiles f)) <| do
+ Lucid.p_ [Lucid.class_ "kb-files"] <| do
+ Lucid.span_ [Lucid.class_ "files-label"] "Files: "
+ Lucid.toHtml (Text.intercalate ", " (take 3 (TaskCore.factRelatedFiles f)))
+ when (length (TaskCore.factRelatedFiles f) > 3) <| do
+ Lucid.toHtml (" +" <> tshow (length (TaskCore.factRelatedFiles f) - 3) <> " more")
+
+ confidenceBadge :: (Monad m) => Double -> Lucid.HtmlT m ()
+ confidenceBadge conf =
+ let pct = floor (conf * 100) :: Int
+ cls
+ | conf >= 0.8 = "badge badge-done"
+ | conf >= 0.5 = "badge badge-inprogress"
+ | otherwise = "badge badge-open"
+ in Lucid.span_ [Lucid.class_ cls] (Lucid.toHtml (tshow pct <> "%"))
+
+instance Lucid.ToHtml FactDetailPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (FactDetailNotFound fid) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Knowledge Base" (Just "/kb"), Breadcrumb ("Fact #" <> tshow fid) Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Fact Not Found - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Fact Not Found"
+ Lucid.p_ [Lucid.class_ "error-msg"] (Lucid.toHtml ("Fact with ID " <> tshow fid <> " not found."))
+ Lucid.a_ [Lucid.href_ "/kb", Lucid.class_ "btn btn-secondary"] "Back to Knowledge Base"
+ toHtml (FactDetailFound fact now) =
+ let fid' = maybe "-" tshow (TaskCore.factId fact)
+ crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Knowledge Base" (Just "/kb"), Breadcrumb ("Fact #" <> fid') Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Fact Detail - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "task-detail-header"] <| do
+ Lucid.h1_ <| do
+ Lucid.span_ [Lucid.class_ "detail-id"] (Lucid.toHtml ("Fact #" <> maybe "-" tshow (TaskCore.factId fact)))
+ Lucid.div_ [Lucid.class_ "task-meta-row"] <| do
+ Lucid.span_ [Lucid.class_ "meta-label"] "Project:"
+ Lucid.span_ [Lucid.class_ "meta-value"] (Lucid.toHtml (TaskCore.factProject fact))
+ Lucid.span_ [Lucid.class_ "meta-label"] "Confidence:"
+ confidenceBadgeDetail (TaskCore.factConfidence fact)
+ Lucid.span_ [Lucid.class_ "meta-label"] "Created:"
+ Lucid.span_ [Lucid.class_ "meta-value"] (renderRelativeTimestamp now (TaskCore.factCreatedAt fact))
+
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.h2_ "Content"
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/kb/" <> maybe "-" tshow (TaskCore.factId fact) <> "/edit"),
+ Lucid.class_ "fact-edit-form"
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "content"] "Fact Content:"
+ Lucid.textarea_
+ [ Lucid.name_ "content",
+ Lucid.id_ "content",
+ Lucid.class_ "form-textarea",
+ Lucid.rows_ "6"
+ ]
+ (Lucid.toHtml (TaskCore.factContent fact))
+
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "files"] "Related Files (comma-separated):"
+ Lucid.input_
+ [ Lucid.type_ "text",
+ Lucid.name_ "files",
+ Lucid.id_ "files",
+ Lucid.class_ "form-input",
+ Lucid.value_ (Text.intercalate ", " (TaskCore.factRelatedFiles fact))
+ ]
+
+ Lucid.div_ [Lucid.class_ "form-group"] <| do
+ Lucid.label_ [Lucid.for_ "confidence"] "Confidence (0.0 - 1.0):"
+ Lucid.input_
+ [ Lucid.type_ "number",
+ Lucid.name_ "confidence",
+ Lucid.id_ "confidence",
+ Lucid.class_ "form-input",
+ Lucid.step_ "0.1",
+ Lucid.min_ "0",
+ Lucid.max_ "1",
+ Lucid.value_ (tshow (TaskCore.factConfidence fact))
+ ]
+
+ Lucid.div_ [Lucid.class_ "form-actions"] <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-primary"] "Save Changes"
+
+ case TaskCore.factSourceTask fact of
+ Nothing -> pure ()
+ Just tid -> do
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.h2_ "Source Task"
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> tid), Lucid.class_ "task-link"] (Lucid.toHtml tid)
+
+ Lucid.div_ [Lucid.class_ "detail-section danger-zone"] <| do
+ Lucid.h2_ "Danger Zone"
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/kb/" <> maybe "-" tshow (TaskCore.factId fact) <> "/delete"),
+ Lucid.class_ "delete-form",
+ Lucid.makeAttribute "onsubmit" "return confirm('Are you sure you want to delete this fact?');"
+ ]
+ <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-danger"] "Delete Fact"
+
+ Lucid.div_ [Lucid.class_ "back-link"] <| do
+ Lucid.a_ [Lucid.href_ "/kb"] "← Back to Knowledge Base"
+ where
+ confidenceBadgeDetail :: (Monad m) => Double -> Lucid.HtmlT m ()
+ confidenceBadgeDetail conf =
+ let pct = floor (conf * 100) :: Int
+ cls
+ | conf >= 0.8 = "badge badge-done"
+ | conf >= 0.5 = "badge badge-inprogress"
+ | otherwise = "badge badge-open"
+ in Lucid.span_ [Lucid.class_ cls] (Lucid.toHtml (tshow pct <> "%"))
+
+instance Lucid.ToHtml EpicsPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (EpicsPage epics allTasks currentSort) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Epics" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Epics - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "page-header-row"] <| do
+ Lucid.h1_ <| Lucid.toHtml ("Epics (" <> tshow (length epics) <> ")")
+ sortDropdown "/epics" currentSort
+ Lucid.p_ [Lucid.class_ "info-msg"] "All epics (large, multi-task projects)."
+ if null epics
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No epics found."
+ else Lucid.div_ [Lucid.class_ "task-list"] <| traverse_ (renderEpicCardWithStats allTasks) epics
+
+instance Lucid.ToHtml TaskListPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (TaskListPage tasks filters currentSort _now) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Tasks - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "page-header-row"] <| do
+ Lucid.h1_ <| Lucid.toHtml ("Tasks (" <> tshow (length tasks) <> ")")
+ sortDropdown "/tasks" currentSort
+
+ Lucid.div_ [Lucid.class_ "filter-form"] <| do
+ Lucid.form_
+ [ Lucid.method_ "GET",
+ Lucid.action_ "/tasks",
+ Lucid.makeAttribute "hx-get" "/partials/task-list",
+ Lucid.makeAttribute "hx-target" "#task-list",
+ Lucid.makeAttribute "hx-push-url" "/tasks",
+ Lucid.makeAttribute "hx-trigger" "submit, change from:select"
+ ]
+ <| do
+ Lucid.div_ [Lucid.class_ "filter-row"] <| do
+ Lucid.div_ [Lucid.class_ "filter-group"] <| do
+ Lucid.label_ [Lucid.for_ "status"] "Status:"
+ Lucid.select_ [Lucid.name_ "status", Lucid.id_ "status", Lucid.class_ "filter-select"] <| do
+ Lucid.option_ ([Lucid.value_ ""] <> maybeSelected Nothing (filterStatus filters)) "All"
+ statusFilterOption TaskCore.Open (filterStatus filters)
+ statusFilterOption TaskCore.InProgress (filterStatus filters)
+ statusFilterOption TaskCore.Review (filterStatus filters)
+ statusFilterOption TaskCore.Approved (filterStatus filters)
+ statusFilterOption TaskCore.Done (filterStatus filters)
+
+ Lucid.div_ [Lucid.class_ "filter-group"] <| do
+ Lucid.label_ [Lucid.for_ "priority"] "Priority:"
+ Lucid.select_ [Lucid.name_ "priority", Lucid.id_ "priority", Lucid.class_ "filter-select"] <| do
+ Lucid.option_ ([Lucid.value_ ""] <> maybeSelected Nothing (filterPriority filters)) "All"
+ priorityFilterOption TaskCore.P0 (filterPriority filters)
+ priorityFilterOption TaskCore.P1 (filterPriority filters)
+ priorityFilterOption TaskCore.P2 (filterPriority filters)
+ priorityFilterOption TaskCore.P3 (filterPriority filters)
+ priorityFilterOption TaskCore.P4 (filterPriority filters)
+
+ Lucid.div_ [Lucid.class_ "filter-group"] <| do
+ Lucid.label_ [Lucid.for_ "namespace"] "Namespace:"
+ Lucid.input_
+ [ Lucid.type_ "text",
+ Lucid.name_ "namespace",
+ Lucid.id_ "namespace",
+ Lucid.class_ "filter-input",
+ Lucid.placeholder_ "e.g. Omni/Jr",
+ Lucid.value_ (fromMaybe "" (filterNamespace filters))
+ ]
+
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "filter-btn"] "Filter"
+ Lucid.a_
+ [ Lucid.href_ "/tasks",
+ Lucid.class_ "clear-btn",
+ Lucid.makeAttribute "hx-get" "/partials/task-list",
+ Lucid.makeAttribute "hx-target" "#task-list",
+ Lucid.makeAttribute "hx-push-url" "/tasks"
+ ]
+ "Clear"
+
+ Lucid.div_ [Lucid.id_ "task-list"] <| do
+ if null tasks
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No tasks match the current filters."
+ else Lucid.div_ [Lucid.class_ "list-group"] <| traverse_ renderListGroupItem tasks
+ where
+ maybeSelected :: (Eq a) => Maybe a -> Maybe a -> [Lucid.Attribute]
+ maybeSelected opt current = [Lucid.selected_ "selected" | opt == current]
+
+ statusFilterOption :: (Monad m) => TaskCore.Status -> Maybe TaskCore.Status -> Lucid.HtmlT m ()
+ statusFilterOption s current =
+ let attrs = [Lucid.value_ (tshow s)] <> [Lucid.selected_ "selected" | Just s == current]
+ in Lucid.option_ attrs (Lucid.toHtml (tshow s))
+
+ priorityFilterOption :: (Monad m) => TaskCore.Priority -> Maybe TaskCore.Priority -> Lucid.HtmlT m ()
+ priorityFilterOption p current =
+ let attrs = [Lucid.value_ (tshow p)] <> [Lucid.selected_ "selected" | Just p == current]
+ in Lucid.option_ attrs (Lucid.toHtml (tshow p))
+
+instance Lucid.ToHtml TaskDetailPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (TaskDetailNotFound tid) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks"), Breadcrumb tid Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Task Not Found - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Task Not Found"
+ Lucid.p_ <| do
+ "The task "
+ Lucid.code_ (Lucid.toHtml tid)
+ " could not be found."
+ toHtml (TaskDetailFound task allTasks _activities maybeRetry commits maybeAggMetrics agentEvents now) =
+ let crumbs = taskBreadcrumbs allTasks task
+ in Lucid.doctypehtml_ <| do
+ pageHead (TaskCore.taskId task <> " - Jr")
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ <| Lucid.toHtml (TaskCore.taskTitle task)
+
+ renderRetryContextBanner (TaskCore.taskId task) maybeRetry
+
+ Lucid.div_ [Lucid.class_ "task-detail"] <| do
+ Lucid.div_ [Lucid.class_ "task-meta"] <| do
+ Lucid.div_ [Lucid.class_ "task-meta-primary"] <| do
+ Lucid.code_ [Lucid.class_ "task-meta-id"] (Lucid.toHtml (TaskCore.taskId task))
+ metaSep
+ Lucid.span_ [Lucid.class_ "task-meta-type"] (Lucid.toHtml (tshow (TaskCore.taskType task)))
+ metaSep
+ statusBadgeWithForm (TaskCore.taskStatus task) (TaskCore.taskId task)
+ metaSep
+ priorityBadgeWithForm (TaskCore.taskPriority task) (TaskCore.taskId task)
+ metaSep
+ complexityBadgeWithForm (TaskCore.taskComplexity task) (TaskCore.taskId task)
+ case TaskCore.taskNamespace task of
+ Nothing -> pure ()
+ Just ns -> do
+ metaSep
+ Lucid.span_ [Lucid.class_ "task-meta-ns"] (Lucid.toHtml ns)
+
+ Lucid.div_ [Lucid.class_ "task-meta-secondary"] <| do
+ case TaskCore.taskParent task of
+ Nothing -> pure ()
+ Just pid -> do
+ Lucid.span_ [Lucid.class_ "task-meta-label"] "Parent:"
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> pid), Lucid.class_ "task-link"] (Lucid.toHtml pid)
+ metaSep
+ Lucid.span_ [Lucid.class_ "task-meta-label"] "Created"
+ renderRelativeTimestamp now (TaskCore.taskCreatedAt task)
+ metaSep
+ Lucid.span_ [Lucid.class_ "task-meta-label"] "Updated"
+ renderRelativeTimestamp now (TaskCore.taskUpdatedAt task)
+
+ let deps = TaskCore.taskDependencies task
+ unless (null deps) <| do
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.h3_ "Dependencies"
+ Lucid.ul_ [Lucid.class_ "dep-list"] <| do
+ traverse_ renderDependency deps
+
+ when (TaskCore.taskType task == TaskCore.Epic) <| do
+ for_ maybeAggMetrics (renderAggregatedMetrics allTasks task)
+
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.toHtml (DescriptionViewPartial (TaskCore.taskId task) (TaskCore.taskDescription task) (TaskCore.taskType task == TaskCore.Epic))
+
+ let children = filter (maybe False (TaskCore.matchesId (TaskCore.taskId task)) <. TaskCore.taskParent) allTasks
+ unless (null children) <| do
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.h3_ "Child Tasks"
+ Lucid.ul_ [Lucid.class_ "child-list"] <| do
+ traverse_ renderChild children
+
+ unless (null commits) <| do
+ Lucid.div_ [Lucid.class_ "detail-section"] <| do
+ Lucid.h3_ "Git Commits"
+ Lucid.div_ [Lucid.class_ "commit-list"] <| do
+ traverse_ (renderCommit (TaskCore.taskId task)) commits
+
+ when (TaskCore.taskStatus task == TaskCore.Review) <| do
+ Lucid.div_ [Lucid.class_ "review-link-section"] <| do
+ Lucid.a_
+ [ Lucid.href_ ("/tasks/" <> TaskCore.taskId task <> "/review"),
+ Lucid.class_ "review-link-btn"
+ ]
+ "Review This Task"
+
+ renderUnifiedTimeline (TaskCore.taskId task) (TaskCore.taskComments task) agentEvents (TaskCore.taskStatus task) now
+ where
+ renderDependency :: (Monad m) => TaskCore.Dependency -> Lucid.HtmlT m ()
+ renderDependency dep =
+ Lucid.li_ <| do
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> TaskCore.depId dep), Lucid.class_ "task-link"] (Lucid.toHtml (TaskCore.depId dep))
+ Lucid.span_ [Lucid.class_ "dep-type"] <| Lucid.toHtml (" [" <> tshow (TaskCore.depType dep) <> "]")
+
+ renderChild :: (Monad m) => TaskCore.Task -> Lucid.HtmlT m ()
+ renderChild child =
+ Lucid.li_ <| do
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> TaskCore.taskId child), Lucid.class_ "task-link"] (Lucid.toHtml (TaskCore.taskId child))
+ Lucid.span_ [Lucid.class_ "child-title"] <| Lucid.toHtml (" - " <> TaskCore.taskTitle child)
+ Lucid.span_ [Lucid.class_ "child-status"] <| Lucid.toHtml (" [" <> tshow (TaskCore.taskStatus child) <> "]")
+
+ renderCommit :: (Monad m) => Text -> GitCommit -> Lucid.HtmlT m ()
+ renderCommit tid c =
+ Lucid.div_ [Lucid.class_ "commit-item"] <| do
+ Lucid.div_ [Lucid.class_ "commit-header"] <| do
+ Lucid.a_
+ [ Lucid.href_ ("/tasks/" <> tid <> "/diff/" <> commitHash c),
+ Lucid.class_ "commit-hash"
+ ]
+ (Lucid.toHtml (commitShortHash c))
+ Lucid.span_ [Lucid.class_ "commit-summary"] (Lucid.toHtml (commitSummary c))
+ Lucid.div_ [Lucid.class_ "commit-meta"] <| do
+ Lucid.span_ [Lucid.class_ "commit-author"] (Lucid.toHtml (commitAuthor c))
+ Lucid.span_ [Lucid.class_ "commit-date"] (Lucid.toHtml (commitRelativeDate c))
+ Lucid.span_ [Lucid.class_ "commit-files"] (Lucid.toHtml (tshow (commitFilesChanged c) <> " files"))
+
+instance Lucid.ToHtml TaskReviewPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (ReviewPageNotFound tid) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks"), Breadcrumb tid (Just ("/tasks/" <> tid)), Breadcrumb "Review" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Task Not Found - Jr Review"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Task Not Found"
+ Lucid.p_ <| do
+ "The task "
+ Lucid.code_ (Lucid.toHtml tid)
+ " could not be found."
+ toHtml (ReviewPageFound task reviewInfo) =
+ let tid = TaskCore.taskId task
+ crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks"), Breadcrumb tid (Just ("/tasks/" <> tid)), Breadcrumb "Review" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead ("Review: " <> TaskCore.taskId task <> " - Jr")
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Review Task"
+
+ Lucid.div_ [Lucid.class_ "task-summary"] <| do
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "ID:"
+ Lucid.code_ [Lucid.class_ "detail-value"] (Lucid.toHtml (TaskCore.taskId task))
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Title:"
+ Lucid.span_ [Lucid.class_ "detail-value"] (Lucid.toHtml (TaskCore.taskTitle task))
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Status:"
+ Lucid.span_ [Lucid.class_ "detail-value"] <| statusBadge (TaskCore.taskStatus task)
+
+ case reviewInfo of
+ ReviewNoCommit ->
+ Lucid.div_ [Lucid.class_ "no-commit-msg"] <| do
+ Lucid.h3_ "No Commit Found"
+ Lucid.p_ "No commit with this task ID was found in the git history."
+ Lucid.p_ "The worker may not have completed yet, or the commit message doesn't include the task ID."
+ ReviewMergeConflict commitSha conflictFiles ->
+ Lucid.div_ [Lucid.class_ "conflict-warning"] <| do
+ Lucid.h3_ "Merge Conflict Detected"
+ Lucid.p_ <| do
+ "Commit "
+ Lucid.code_ (Lucid.toHtml (Text.take 8 commitSha))
+ " cannot be cleanly merged."
+ Lucid.p_ "Conflicting files:"
+ Lucid.ul_ <| traverse_ (Lucid.li_ <. Lucid.toHtml) conflictFiles
+ ReviewReady commitSha diffText -> do
+ Lucid.div_ [Lucid.class_ "diff-section"] <| do
+ Lucid.h3_ <| do
+ "Commit: "
+ Lucid.code_ (Lucid.toHtml (Text.take 8 commitSha))
+ Lucid.pre_ [Lucid.class_ "diff-block"] (Lucid.toHtml diffText)
+
+ Lucid.div_ [Lucid.class_ "review-actions"] <| do
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/tasks/" <> TaskCore.taskId task <> "/accept"),
+ Lucid.class_ "inline-form"
+ ]
+ <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "accept-btn"] "Accept"
+
+ Lucid.form_
+ [ Lucid.method_ "POST",
+ Lucid.action_ ("/tasks/" <> TaskCore.taskId task <> "/reject"),
+ Lucid.class_ "reject-form"
+ ]
+ <| do
+ Lucid.textarea_
+ [ Lucid.name_ "notes",
+ Lucid.class_ "reject-notes",
+ Lucid.placeholder_ "Rejection notes (optional)"
+ ]
+ ""
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "reject-btn"] "Reject"
+
+instance Lucid.ToHtml TaskDiffPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (DiffPageNotFound tid commitHash') =
+ let shortHash = Text.take 8 commitHash'
+ crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks"), Breadcrumb tid (Just ("/tasks/" <> tid)), Breadcrumb ("Diff " <> shortHash) Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Commit Not Found - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ "Commit Not Found"
+ Lucid.p_ <| do
+ "Could not find commit "
+ Lucid.code_ (Lucid.toHtml commitHash')
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> tid), Lucid.class_ "back-link"] "← Back to task"
+ toHtml (DiffPageFound tid commitHash' diffOutput) =
+ let shortHash = Text.take 8 commitHash'
+ crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Tasks" (Just "/tasks"), Breadcrumb tid (Just ("/tasks/" <> tid)), Breadcrumb ("Diff " <> shortHash) Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead ("Diff " <> shortHash <> " - Jr")
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.div_ [Lucid.class_ "diff-header"] <| do
+ Lucid.a_ [Lucid.href_ ("/tasks/" <> tid), Lucid.class_ "back-link"] "← Back to task"
+ Lucid.h1_ <| do
+ "Commit "
+ Lucid.code_ (Lucid.toHtml shortHash)
+ Lucid.pre_ [Lucid.class_ "diff-block"] (Lucid.toHtml diffOutput)
+
+instance Lucid.ToHtml StatsPage where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (StatsPage stats maybeEpic) =
+ let crumbs = [Breadcrumb "Jr" (Just "/"), Breadcrumb "Stats" Nothing]
+ in Lucid.doctypehtml_ <| do
+ pageHead "Task Statistics - Jr"
+ pageBodyWithCrumbs crumbs <| do
+ Lucid.div_ [Lucid.class_ "container"] <| do
+ Lucid.h1_ <| case maybeEpic of
+ Nothing -> "Task Statistics"
+ Just epicId -> Lucid.toHtml ("Statistics for Epic: " <> epicId)
+
+ Lucid.form_ [Lucid.method_ "GET", Lucid.action_ "/stats", Lucid.class_ "filter-form"] <| do
+ Lucid.div_ [Lucid.class_ "filter-row"] <| do
+ Lucid.div_ [Lucid.class_ "filter-group"] <| do
+ Lucid.label_ [Lucid.for_ "epic"] "Epic:"
+ Lucid.input_
+ [ Lucid.type_ "text",
+ Lucid.name_ "epic",
+ Lucid.id_ "epic",
+ Lucid.class_ "filter-input",
+ Lucid.placeholder_ "Epic ID (optional)",
+ Lucid.value_ (fromMaybe "" maybeEpic)
+ ]
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "filter-btn"] "Filter"
+ Lucid.a_ [Lucid.href_ "/stats", Lucid.class_ "clear-btn"] "Clear"
+
+ Lucid.h2_ "By Status"
+ multiColorProgressBar stats
+ Lucid.div_ [Lucid.class_ "stats-grid"] <| do
+ statCard "Open" (TaskCore.openTasks stats) (TaskCore.totalTasks stats)
+ statCard "In Progress" (TaskCore.inProgressTasks stats) (TaskCore.totalTasks stats)
+ statCard "Review" (TaskCore.reviewTasks stats) (TaskCore.totalTasks stats)
+ statCard "Approved" (TaskCore.approvedTasks stats) (TaskCore.totalTasks stats)
+ statCard "Done" (TaskCore.doneTasks stats) (TaskCore.totalTasks stats)
+
+ Lucid.h2_ "By Priority"
+ Lucid.div_ [Lucid.class_ "stats-section"] <| do
+ traverse_ (uncurry renderPriorityRow) (TaskCore.tasksByPriority stats)
+
+ Lucid.h2_ "By Namespace"
+ Lucid.div_ [Lucid.class_ "stats-section"] <| do
+ if null (TaskCore.tasksByNamespace stats)
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No namespaces found."
+ else traverse_ (uncurry (renderNamespaceRow (TaskCore.totalTasks stats))) (TaskCore.tasksByNamespace stats)
+
+ Lucid.h2_ "Summary"
+ Lucid.div_ [Lucid.class_ "summary-section"] <| do
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Total Tasks:"
+ Lucid.span_ [Lucid.class_ "detail-value"] (Lucid.toHtml (tshow (TaskCore.totalTasks stats)))
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Epics:"
+ Lucid.span_ [Lucid.class_ "detail-value"] (Lucid.toHtml (tshow (TaskCore.totalEpics stats)))
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Ready:"
+ Lucid.span_ [Lucid.class_ "detail-value"] (Lucid.toHtml (tshow (TaskCore.readyTasks stats)))
+ Lucid.div_ [Lucid.class_ "detail-row"] <| do
+ Lucid.span_ [Lucid.class_ "detail-label"] "Blocked:"
+ Lucid.span_ [Lucid.class_ "detail-value"] (Lucid.toHtml (tshow (TaskCore.blockedTasks stats)))
+ where
+ statCard :: (Monad m) => Text -> Int -> Int -> Lucid.HtmlT m ()
+ statCard label count total =
+ let pct = if total == 0 then 0 else (count * 100) `div` total
+ in Lucid.div_ [Lucid.class_ "stat-card"] <| do
+ Lucid.div_ [Lucid.class_ "stat-count"] (Lucid.toHtml (tshow count))
+ Lucid.div_ [Lucid.class_ "stat-label"] (Lucid.toHtml label)
+ Lucid.div_ [Lucid.class_ "progress-bar"] <| do
+ Lucid.div_
+ [ Lucid.class_ "progress-fill",
+ Lucid.style_ ("width: " <> tshow pct <> "%")
+ ]
+ ""
+
+ renderPriorityRow :: (Monad m) => TaskCore.Priority -> Int -> Lucid.HtmlT m ()
+ renderPriorityRow priority count =
+ let total = TaskCore.totalTasks stats
+ pct = if total == 0 then 0 else (count * 100) `div` total
+ in Lucid.div_ [Lucid.class_ "stats-row"] <| do
+ Lucid.span_ [Lucid.class_ "stats-label"] (Lucid.toHtml (tshow priority))
+ Lucid.div_ [Lucid.class_ "stats-bar-container"] <| do
+ Lucid.div_ [Lucid.class_ "progress-bar"] <| do
+ Lucid.div_
+ [ Lucid.class_ "progress-fill",
+ Lucid.style_ ("width: " <> tshow pct <> "%")
+ ]
+ ""
+ Lucid.span_ [Lucid.class_ "stats-count"] (Lucid.toHtml (tshow count))
+
+ renderNamespaceRow :: (Monad m) => Int -> Text -> Int -> Lucid.HtmlT m ()
+ renderNamespaceRow total ns count =
+ let pct = if total == 0 then 0 else (count * 100) `div` total
+ in Lucid.div_ [Lucid.class_ "stats-row"] <| do
+ Lucid.span_ [Lucid.class_ "stats-label"] (Lucid.toHtml ns)
+ Lucid.div_ [Lucid.class_ "stats-bar-container"] <| do
+ Lucid.div_ [Lucid.class_ "progress-bar"] <| do
+ Lucid.div_
+ [ Lucid.class_ "progress-fill",
+ Lucid.style_ ("width: " <> tshow pct <> "%")
+ ]
+ ""
+ Lucid.span_ [Lucid.class_ "stats-count"] (Lucid.toHtml (tshow count))
diff --git a/Omni/Jr/Web/Partials.hs b/Omni/Jr/Web/Partials.hs
new file mode 100644
index 0000000..2660441
--- /dev/null
+++ b/Omni/Jr/Web/Partials.hs
@@ -0,0 +1,274 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+{-# OPTIONS_GHC -Wno-orphans #-}
+
+-- : dep lucid
+-- : dep servant-lucid
+module Omni.Jr.Web.Partials
+ ( -- Re-export instances for use by Web.hs
+ )
+where
+
+import Alpha
+import qualified Data.Text as Text
+import Data.Time (UTCTime, diffUTCTime)
+import qualified Lucid
+import qualified Lucid.Base as Lucid
+import Numeric (showFFloat)
+import Omni.Jr.Web.Components
+ ( aggregateCostMetrics,
+ commentForm,
+ complexityBadgeWithForm,
+ formatCostHeader,
+ formatTokensHeader,
+ metaSep,
+ priorityBadgeWithForm,
+ renderAutoscrollToggle,
+ renderListGroupItem,
+ renderLiveToggle,
+ renderMarkdown,
+ renderRelativeTimestamp,
+ renderTimelineEvent,
+ statusBadgeWithForm,
+ timelineScrollScript,
+ )
+import Omni.Jr.Web.Types
+ ( AgentEventsPartial (..),
+ ComplexityBadgePartial (..),
+ DescriptionEditPartial (..),
+ DescriptionViewPartial (..),
+ PriorityBadgePartial (..),
+ ReadyCountPartial (..),
+ RecentActivityMorePartial (..),
+ RecentActivityNewPartial (..),
+ StatusBadgePartial (..),
+ TaskListPartial (..),
+ TaskMetricsPartial (..),
+ )
+import qualified Omni.Task.Core as TaskCore
+
+instance Lucid.ToHtml RecentActivityNewPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (RecentActivityNewPartial tasks maybeNewestTs) = do
+ traverse_ renderListGroupItem tasks
+ case maybeNewestTs of
+ Nothing -> pure ()
+ Just ts ->
+ Lucid.div_
+ [ Lucid.id_ "recent-activity",
+ Lucid.makeAttribute "data-newest-ts" (tshow ts),
+ Lucid.makeAttribute "hx-swap-oob" "attributes:#recent-activity data-newest-ts"
+ ]
+ ""
+
+instance Lucid.ToHtml RecentActivityMorePartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (RecentActivityMorePartial tasks nextOffset hasMore) = do
+ traverse_ renderListGroupItem tasks
+ if hasMore
+ then
+ Lucid.button_
+ [ Lucid.id_ "activity-load-more",
+ Lucid.class_ "btn btn-secondary load-more-btn",
+ Lucid.makeAttribute "hx-get" ("/partials/recent-activity-more?offset=" <> tshow nextOffset),
+ Lucid.makeAttribute "hx-target" "#activity-list",
+ Lucid.makeAttribute "hx-swap" "beforeend",
+ Lucid.makeAttribute "hx-swap-oob" "true"
+ ]
+ "Load More"
+ else Lucid.span_ [Lucid.id_ "activity-load-more", Lucid.makeAttribute "hx-swap-oob" "true"] ""
+
+instance Lucid.ToHtml ReadyCountPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (ReadyCountPartial count) =
+ Lucid.a_ [Lucid.href_ "/ready", Lucid.class_ "ready-link"]
+ <| Lucid.toHtml ("(" <> tshow count <> " tasks)")
+
+instance Lucid.ToHtml StatusBadgePartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (StatusBadgePartial status tid) =
+ statusBadgeWithForm status tid
+
+instance Lucid.ToHtml PriorityBadgePartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (PriorityBadgePartial priority tid) =
+ priorityBadgeWithForm priority tid
+
+instance Lucid.ToHtml ComplexityBadgePartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (ComplexityBadgePartial complexity tid) =
+ complexityBadgeWithForm complexity tid
+
+instance Lucid.ToHtml TaskListPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (TaskListPartial tasks) =
+ if null tasks
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No tasks match the current filters."
+ else Lucid.div_ [Lucid.class_ "list-group"] <| traverse_ renderListGroupItem tasks
+
+instance Lucid.ToHtml TaskMetricsPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (TaskMetricsPartial _tid activities maybeRetry now) =
+ let runningActs = filter (\a -> TaskCore.activityStage a == TaskCore.Running) activities
+ in if null runningActs
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No worker execution data available."
+ else
+ Lucid.div_ [Lucid.class_ "execution-details"] <| do
+ let totalCost = sum [c | act <- runningActs, Just c <- [TaskCore.activityCostCents act]]
+ totalDuration = sum [calcDurSecs act | act <- runningActs]
+ attemptCount = length runningActs
+
+ case maybeRetry of
+ Nothing -> pure ()
+ Just ctx ->
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Retry Attempt:"
+ Lucid.span_ [Lucid.class_ "metric-value retry-count"] (Lucid.toHtml (tshow (TaskCore.retryAttempt ctx) <> "/3"))
+
+ when (attemptCount > 1) <| do
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Total Attempts:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (Lucid.toHtml (tshow attemptCount))
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Total Duration:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatDurSecs totalDuration))
+ when (totalCost > 0)
+ <| Lucid.div_ [Lucid.class_ "metric-row"]
+ <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Total Cost:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatCost totalCost))
+ Lucid.hr_ [Lucid.class_ "attempts-divider"]
+
+ traverse_ (renderAttempt attemptCount now) (zip [1 ..] (reverse runningActs))
+ where
+ calcDurSecs :: TaskCore.TaskActivity -> Int
+ calcDurSecs act = case (TaskCore.activityStartedAt act, TaskCore.activityCompletedAt act) of
+ (Just start, Just end) -> floor (diffUTCTime end start)
+ _ -> 0
+
+ formatDurSecs :: Int -> Text
+ formatDurSecs secs
+ | secs < 60 = tshow secs <> "s"
+ | secs < 3600 = tshow (secs `div` 60) <> "m " <> tshow (secs `mod` 60) <> "s"
+ | otherwise = tshow (secs `div` 3600) <> "h " <> tshow ((secs `mod` 3600) `div` 60) <> "m"
+
+ renderAttempt :: (Monad m) => Int -> UTCTime -> (Int, TaskCore.TaskActivity) -> Lucid.HtmlT m ()
+ renderAttempt totalAttempts currentTime (attemptNum, act) = do
+ when (totalAttempts > 1)
+ <| Lucid.div_ [Lucid.class_ "attempt-header"] (Lucid.toHtml ("Attempt " <> tshow attemptNum :: Text))
+ case TaskCore.activityThreadUrl act of
+ Nothing -> pure ()
+ Just url ->
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Session:"
+ Lucid.a_ [Lucid.href_ url, Lucid.target_ "_blank", Lucid.class_ "amp-thread-btn"] "View in Amp ↗"
+
+ case (TaskCore.activityStartedAt act, TaskCore.activityCompletedAt act) of
+ (Just start, Just end) ->
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Duration:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatDuration start end))
+ (Just start, Nothing) ->
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Started:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (renderRelativeTimestamp currentTime start)
+ _ -> pure ()
+
+ case TaskCore.activityCostCents act of
+ Nothing -> pure ()
+ Just cents ->
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Cost:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (Lucid.toHtml (formatCost cents))
+
+ Lucid.div_ [Lucid.class_ "metric-row"] <| do
+ Lucid.span_ [Lucid.class_ "metric-label"] "Timestamp:"
+ Lucid.span_ [Lucid.class_ "metric-value"] (renderRelativeTimestamp currentTime (TaskCore.activityTimestamp act))
+
+ formatDuration :: UTCTime -> UTCTime -> Text
+ formatDuration start end =
+ let diffSecs = floor (diffUTCTime end start) :: Int
+ mins = diffSecs `div` 60
+ secs = diffSecs `mod` 60
+ in if mins > 0
+ then tshow mins <> "m " <> tshow secs <> "s"
+ else tshow secs <> "s"
+
+ formatCost :: Int -> Text
+ formatCost cents =
+ let dollars = fromIntegral cents / 100.0 :: Double
+ in "$" <> Text.pack (showFFloat (Just 2) dollars "")
+
+instance Lucid.ToHtml DescriptionViewPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (DescriptionViewPartial tid desc isEpic) =
+ Lucid.div_ [Lucid.id_ "description-block", Lucid.class_ "description-block"] <| do
+ Lucid.div_ [Lucid.class_ "description-header"] <| do
+ Lucid.h3_ (if isEpic then "Design" else "Description")
+ Lucid.a_
+ [ Lucid.href_ "#",
+ Lucid.class_ "edit-link",
+ Lucid.makeAttribute "hx-get" ("/tasks/" <> tid <> "/description/edit"),
+ Lucid.makeAttribute "hx-target" "#description-block",
+ Lucid.makeAttribute "hx-swap" "outerHTML"
+ ]
+ "Edit"
+ if Text.null desc
+ then Lucid.p_ [Lucid.class_ "empty-msg"] (if isEpic then "No design document yet." else "No description yet.")
+ else Lucid.div_ [Lucid.class_ "markdown-content"] (renderMarkdown desc)
+
+instance Lucid.ToHtml DescriptionEditPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (DescriptionEditPartial tid desc isEpic) =
+ Lucid.div_ [Lucid.id_ "description-block", Lucid.class_ "description-block editing"] <| do
+ Lucid.div_ [Lucid.class_ "description-header"] <| do
+ Lucid.h3_ (if isEpic then "Design" else "Description")
+ Lucid.button_
+ [ Lucid.type_ "button",
+ Lucid.class_ "cancel-link",
+ Lucid.makeAttribute "hx-get" ("/tasks/" <> tid <> "/description/view"),
+ Lucid.makeAttribute "hx-target" "#description-block",
+ Lucid.makeAttribute "hx-swap" "outerHTML",
+ Lucid.makeAttribute "hx-confirm" "Discard changes?"
+ ]
+ "Cancel"
+ Lucid.form_
+ [ Lucid.makeAttribute "hx-post" ("/tasks/" <> tid <> "/description"),
+ Lucid.makeAttribute "hx-target" "#description-block",
+ Lucid.makeAttribute "hx-swap" "outerHTML"
+ ]
+ <| do
+ Lucid.textarea_
+ [ Lucid.name_ "description",
+ Lucid.class_ "description-textarea",
+ Lucid.rows_ (if isEpic then "15" else "10"),
+ Lucid.placeholder_ (if isEpic then "Enter design in Markdown..." else "Enter description...")
+ ]
+ (Lucid.toHtml desc)
+ Lucid.div_ [Lucid.class_ "form-actions"] <| do
+ Lucid.button_ [Lucid.type_ "submit", Lucid.class_ "btn btn-primary"] "Save"
+
+instance Lucid.ToHtml AgentEventsPartial where
+ toHtmlRaw = Lucid.toHtml
+ toHtml (AgentEventsPartial tid events isInProgress now) = do
+ let nonCostEvents = filter (\e -> TaskCore.storedEventType e /= "Cost") events
+ eventCount = length nonCostEvents
+ (totalCents, totalTokens) = aggregateCostMetrics events
+ Lucid.h3_ <| do
+ Lucid.toHtml ("Timeline (" <> tshow eventCount <> ")")
+ when (totalCents > 0 || totalTokens > 0) <| do
+ Lucid.span_ [Lucid.class_ "timeline-cost-summary"] <| do
+ metaSep
+ when (totalCents > 0) <| Lucid.toHtml (formatCostHeader totalCents)
+ when (totalCents > 0 && totalTokens > 0) <| metaSep
+ when (totalTokens > 0) <| Lucid.toHtml (formatTokensHeader totalTokens <> " tokens")
+ when isInProgress <| do
+ renderLiveToggle
+ renderAutoscrollToggle
+ if null nonCostEvents
+ then Lucid.p_ [Lucid.class_ "empty-msg"] "No activity yet."
+ else do
+ Lucid.div_ [Lucid.class_ "timeline-events"] <| do
+ traverse_ (renderTimelineEvent now) nonCostEvents
+ when isInProgress <| timelineScrollScript
+ commentForm tid
diff --git a/Omni/Jr/Web/Style.hs b/Omni/Jr/Web/Style.hs
new file mode 100644
index 0000000..f75b33c
--- /dev/null
+++ b/Omni/Jr/Web/Style.hs
@@ -0,0 +1,2260 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep clay
+module Omni.Jr.Web.Style
+ ( css,
+ statusBadgeClass,
+ priorityBadgeClass,
+ )
+where
+
+import Alpha hiding (wrap, (**), (|>))
+import Clay
+import qualified Clay.Flexbox as Flexbox
+import qualified Clay.Media as Media
+import qualified Clay.Stylesheet as Stylesheet
+import qualified Data.Text.Lazy as LazyText
+
+css :: LazyText.Text
+css = render stylesheet
+
+stylesheet :: Css
+stylesheet = do
+ baseStyles
+ layoutStyles
+ navigationStyles
+ breadcrumbStyles
+ cardStyles
+ listGroupStyles
+ statusBadges
+ buttonStyles
+ formStyles
+ executionDetailsStyles
+ activityTimelineStyles
+ commitStyles
+ markdownStyles
+ retryBannerStyles
+ commentStyles
+ taskMetaStyles
+ timeFilterStyles
+ sortDropdownStyles
+ timelineEventStyles
+ unifiedTimelineStyles
+ responsiveStyles
+ darkModeStyles
+
+baseStyles :: Css
+baseStyles = do
+ star ? boxSizing borderBox
+ html <> body ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 0)
+ body ? do
+ fontFamily
+ [ "-apple-system",
+ "BlinkMacSystemFont",
+ "Segoe UI",
+ "Roboto",
+ "Helvetica Neue",
+ "Arial",
+ "Noto Sans",
+ "sans-serif"
+ ]
+ [sansSerif]
+ fontSize (px 14)
+ lineHeight (em 1.3)
+ color "#1f2937"
+ backgroundColor "#f5f5f5"
+ minHeight (vh 100)
+ "h1" ? do
+ fontSize (px 20)
+ fontWeight bold
+ margin (px 0) (px 0) (em 0.3) (px 0)
+ "h2" ? do
+ fontSize (px 16)
+ fontWeight (weight 600)
+ color "#374151"
+ margin (em 1) (px 0) (em 0.5) (px 0)
+ "h3" ? do
+ fontSize (px 14)
+ fontWeight (weight 600)
+ color "#374151"
+ margin (em 0.75) (px 0) (em 0.25) (px 0)
+ a ? do
+ color "#0066cc"
+ textDecoration none
+ a # hover ? textDecoration underline
+ code ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (em 0.9)
+ backgroundColor "#f3f4f6"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ pre ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ backgroundColor "#1e1e1e"
+ color "#d4d4d4"
+ padding (px 8) (px 8) (px 8) (px 8)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ overflow auto
+ whiteSpace preWrap
+ maxHeight (px 500)
+
+layoutStyles :: Css
+layoutStyles = do
+ ".container" ? do
+ width (pct 100)
+ maxWidth (px 960)
+ margin (px 0) auto (px 0) auto
+ padding (px 8) (px 12) (px 8) (px 12)
+ main_ ? do
+ Stylesheet.key "flex" ("1 0 auto" :: Text)
+ ".page-content" ? do
+ padding (px 0) (px 0) (px 0) (px 0)
+ ".stats-grid" ? do
+ display grid
+ Stylesheet.key "grid-template-columns" ("repeat(auto-fit, minmax(80px, 1fr))" :: Text)
+ Stylesheet.key "gap" ("6px" :: Text)
+ ".task-list" ? do
+ display flex
+ flexDirection column
+ Stylesheet.key "gap" ("2px" :: Text)
+ ".detail-row" ? do
+ display flex
+ flexWrap Flexbox.wrap
+ padding (px 6) (px 0) (px 6) (px 0)
+ marginBottom (px 4)
+ ".detail-label" ? do
+ fontWeight (weight 600)
+ width (px 100)
+ color "#6b7280"
+ minWidth (px 80)
+ fontSize (px 13)
+ ".detail-value" ? do
+ Stylesheet.key "flex" ("1" :: Text)
+ minWidth (px 0)
+ ".detail-section" ? do
+ marginTop (em 0.75)
+ paddingTop (em 0.75)
+ borderTop (px 1) solid "#e5e7eb"
+ ".dep-list" <> ".child-list" ? do
+ margin (px 4) (px 0) (px 4) (px 0)
+ paddingLeft (px 16)
+ (".dep-list" ** li) <> (".child-list" ** li) ? margin (px 2) (px 0) (px 2) (px 0)
+ ".dep-type" <> ".child-status" ? do
+ color "#6b7280"
+ fontSize (px 12)
+ ".child-title" ? color "#374151"
+ ".priority-desc" ? do
+ color "#6b7280"
+ marginLeft (px 4)
+
+navigationStyles :: Css
+navigationStyles = do
+ ".navbar" ? do
+ backgroundColor white
+ padding (px 6) (px 12) (px 6) (px 12)
+ borderBottom (px 1) solid "#d0d0d0"
+ marginBottom (px 8)
+ display flex
+ alignItems center
+ justifyContent spaceBetween
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".navbar-brand" ? do
+ fontSize (px 18)
+ fontWeight bold
+ color "#0066cc"
+ textDecoration none
+ ".navbar-brand" # hover ? textDecoration none
+ ".navbar-toggle-checkbox" ? display none
+ ".navbar-hamburger" ? do
+ display none
+ flexDirection column
+ justifyContent center
+ alignItems center
+ width (px 32)
+ height (px 32)
+ cursor pointer
+ Stylesheet.key "gap" ("4px" :: Text)
+ ".hamburger-line" ? do
+ display block
+ width (px 20)
+ height (px 2)
+ backgroundColor "#374151"
+ borderRadius (px 1) (px 1) (px 1) (px 1)
+ transition "all" (ms 200) ease (sec 0)
+ ".navbar-links" ? do
+ display flex
+ Stylesheet.key "gap" ("2px" :: Text)
+ flexWrap Flexbox.wrap
+ alignItems center
+ ".navbar-link" ? do
+ display inlineBlock
+ padding (px 4) (px 10) (px 4) (px 10)
+ color "#374151"
+ textDecoration none
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 13)
+ fontWeight (weight 500)
+ transition "background-color" (ms 150) ease (sec 0)
+ ".navbar-link" # hover ? do
+ backgroundColor "#f3f4f6"
+ textDecoration none
+ ".navbar-dropdown" ? do
+ position relative
+ display inlineBlock
+ ".navbar-dropdown-btn" ? do
+ display inlineBlock
+ padding (px 4) (px 10) (px 4) (px 10)
+ color "#374151"
+ backgroundColor transparent
+ border (px 0) none transparent
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 13)
+ fontWeight (weight 500)
+ cursor pointer
+ transition "background-color" (ms 150) ease (sec 0)
+ ".navbar-dropdown-btn" # hover ? backgroundColor "#f3f4f6"
+ ".navbar-dropdown-content" ? do
+ display none
+ position absolute
+ left (px 0)
+ top (pct 100)
+ backgroundColor white
+ minWidth (px 120)
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.15)" :: Text)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ zIndex 100
+ Stylesheet.key "overflow" ("hidden" :: Text)
+ ".navbar-dropdown" # hover |> ".navbar-dropdown-content" ? display block
+ ".navbar-dropdown.open" |> ".navbar-dropdown-content" ? display block
+ ".navbar-dropdown-item" ? do
+ display block
+ padding (px 8) (px 12) (px 8) (px 12)
+ color "#374151"
+ textDecoration none
+ fontSize (px 13)
+ transition "background-color" (ms 150) ease (sec 0)
+ ".navbar-dropdown-item" # hover ? do
+ backgroundColor "#f3f4f6"
+ textDecoration none
+ header ? do
+ backgroundColor white
+ padding (px 6) (px 12) (px 6) (px 12)
+ borderBottom (px 1) solid "#d0d0d0"
+ marginBottom (px 8)
+ ".nav-content" ? do
+ maxWidth (px 960)
+ margin (px 0) auto (px 0) auto
+ display flex
+ alignItems center
+ justifyContent spaceBetween
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".nav-brand" ? do
+ fontSize (px 16)
+ fontWeight bold
+ color "#1f2937"
+ textDecoration none
+ ".nav-brand" # hover ? textDecoration none
+ ".nav-links" ? do
+ display flex
+ Stylesheet.key "gap" ("4px" :: Text)
+ flexWrap Flexbox.wrap
+ ".actions" ? do
+ display flex
+ flexDirection row
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("6px" :: Text)
+ marginBottom (px 8)
+
+breadcrumbStyles :: Css
+breadcrumbStyles = do
+ ".breadcrumb-container" ? do
+ backgroundColor transparent
+ padding (px 6) (px 0) (px 6) (px 0)
+ ".breadcrumb-list" ? do
+ display flex
+ alignItems center
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("4px" :: Text)
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 0)
+ listStyleType none
+ fontSize (px 12)
+ ".breadcrumb-item" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("4px" :: Text)
+ ".breadcrumb-sep" ? do
+ color "#9ca3af"
+ Stylesheet.key "user-select" ("none" :: Text)
+ ".breadcrumb-current" ? do
+ color "#6b7280"
+ fontWeight (weight 500)
+ (".breadcrumb-list" ** a) ? do
+ color "#0066cc"
+ textDecoration none
+ (".breadcrumb-list" ** a) # hover ? textDecoration underline
+
+cardStyles :: Css
+cardStyles = do
+ ".card"
+ <> ".task-card"
+ <> ".stat-card"
+ <> ".task-detail"
+ <> ".task-summary"
+ <> ".filter-form"
+ <> ".status-form"
+ <> ".diff-section"
+ <> ".review-actions"
+ ? do
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d0d0d0"
+ ".review-actions" ? do
+ display flex
+ flexDirection row
+ flexWrap Flexbox.wrap
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".stat-card" ? textAlign center
+ ".stat-count" ? do
+ fontSize (px 22)
+ fontWeight bold
+ ".stat-label" ? do
+ fontSize (px 11)
+ color "#6b7280"
+ marginTop (px 2)
+ ".stat-card.badge-open" ? do
+ borderLeft (px 4) solid "#f59e0b"
+ (".stat-card.badge-open" |> ".stat-count") ? color "#92400e"
+ ".stat-card.badge-inprogress" ? borderLeft (px 4) solid "#3b82f6"
+ (".stat-card.badge-inprogress" |> ".stat-count") ? color "#1e40af"
+ ".stat-card.badge-review" ? borderLeft (px 4) solid "#8b5cf6"
+ (".stat-card.badge-review" |> ".stat-count") ? color "#6b21a8"
+ ".stat-card.badge-approved" ? borderLeft (px 4) solid "#06b6d4"
+ (".stat-card.badge-approved" |> ".stat-count") ? color "#0e7490"
+ ".stat-card.badge-done" ? borderLeft (px 4) solid "#10b981"
+ (".stat-card.badge-done" |> ".stat-count") ? color "#065f46"
+ ".stat-card.badge-neutral" ? borderLeft (px 4) solid "#6b7280"
+ (".stat-card.badge-neutral" |> ".stat-count") ? color "#374151"
+ ".task-card" ? do
+ transition "border-color" (ms 150) ease (sec 0)
+ ".task-card" # hover ? do
+ borderColor "#999"
+ ".task-card-link" ? do
+ display block
+ textDecoration none
+ color inherit
+ cursor pointer
+ ".task-card-link" # hover ? textDecoration none
+ ".task-header" ? do
+ display flex
+ flexWrap Flexbox.wrap
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ marginBottom (px 4)
+ ".task-id" ? do
+ fontFamily ["SF Mono", "Monaco", "monospace"] [monospace]
+ color "#0066cc"
+ textDecoration none
+ fontSize (px 12)
+ padding (px 2) (px 0) (px 2) (px 0)
+ ".task-id" # hover ? textDecoration underline
+ ".priority" ? do
+ fontSize (px 11)
+ color "#6b7280"
+ ".blocking-impact" ? do
+ fontSize (px 10)
+ color "#6b7280"
+ backgroundColor "#e5e7eb"
+ padding (px 1) (px 6) (px 1) (px 6)
+ borderRadius (px 8) (px 8) (px 8) (px 8)
+ marginLeft auto
+ ".task-title" ? do
+ fontSize (px 14)
+ margin (px 0) (px 0) (px 0) (px 0)
+ ".empty-msg" ? do
+ color "#6b7280"
+ fontStyle italic
+ ".info-msg" ? do
+ color "#6b7280"
+ marginBottom (px 12)
+ ".kb-preview" ? do
+ color "#6b7280"
+ fontSize (px 12)
+ marginTop (px 4)
+ overflow hidden
+ Stylesheet.key "text-overflow" ("ellipsis" :: Text)
+ ".ready-link" ? do
+ fontSize (px 13)
+ color "#0066cc"
+ ".count-badge" ? do
+ backgroundColor "#0066cc"
+ color white
+ padding (px 2) (px 8) (px 2) (px 8)
+ borderRadius (px 10) (px 10) (px 10) (px 10)
+ fontSize (px 12)
+ verticalAlign middle
+ ".description" ? do
+ backgroundColor "#f9fafb"
+ padding (px 8) (px 8) (px 8) (px 8)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ margin (px 0) (px 0) (px 0) (px 0)
+ color "#374151"
+ fontSize (px 13)
+ ".description-block" ? do
+ pure ()
+ ".description-header" ? do
+ display flex
+ justifyContent spaceBetween
+ alignItems center
+ marginBottom (px 8)
+ (".description-header" |> "h3") ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ ".edit-link" <> ".cancel-link" ? do
+ fontSize (px 12)
+ color "#0066cc"
+ "button.cancel-link" ? do
+ color "#dc2626"
+ backgroundColor transparent
+ border (px 0) solid transparent
+ padding (px 0) (px 0) (px 0) (px 0)
+ cursor pointer
+ textDecoration underline
+ ".diff-block" ? do
+ maxHeight (px 600)
+ overflowY auto
+ ".progress-bar" ? do
+ height (px 6)
+ backgroundColor "#e5e7eb"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ overflow hidden
+ marginTop (px 6)
+ ".progress-fill" ? do
+ height (pct 100)
+ backgroundColor "#0066cc"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ transition "width" (ms 300) ease (sec 0)
+ ".multi-progress-container" ? do
+ marginBottom (px 12)
+ ".multi-progress-bar" ? do
+ display flex
+ height (px 8)
+ backgroundColor "#e5e7eb"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ overflow hidden
+ marginTop (px 6)
+ ".multi-progress-segment" ? do
+ height (pct 100)
+ transition "width" (ms 300) ease (sec 0)
+ ".progress-done" ? backgroundColor "#10b981"
+ ".progress-inprogress" ? backgroundColor "#f59e0b"
+ ".progress-open" ? backgroundColor "#3b82f6"
+ ".progress-legend" ? do
+ display flex
+ Stylesheet.key "gap" ("16px" :: Text)
+ marginTop (px 6)
+ fontSize (px 12)
+ color "#6b7280"
+ ".legend-item" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("4px" :: Text)
+ ".legend-dot" ? do
+ display inlineBlock
+ width (px 10)
+ height (px 10)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".legend-done" ? backgroundColor "#10b981"
+ ".legend-inprogress" ? backgroundColor "#f59e0b"
+ ".legend-open" ? backgroundColor "#3b82f6"
+ ".stats-section" ? do
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d0d0d0"
+ ".stats-row" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ padding (px 4) (px 0) (px 4) (px 0)
+ marginBottom (px 2)
+ ".stats-label" ? do
+ minWidth (px 80)
+ fontWeight (weight 500)
+ fontSize (px 13)
+ ".stats-bar-container" ? do
+ Stylesheet.key "flex" ("1" :: Text)
+ ".stats-count" ? do
+ minWidth (px 32)
+ textAlign (alignSide sideRight)
+ fontWeight (weight 500)
+ fontSize (px 13)
+ ".summary-section" ? do
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d0d0d0"
+ ".no-commit-msg" ? do
+ backgroundColor "#fff3cd"
+ border (px 1) solid "#ffc107"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ margin (px 8) (px 0) (px 8) (px 0)
+ ".conflict-warning" ? do
+ backgroundColor "#fee2e2"
+ border (px 1) solid "#ef4444"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ margin (px 8) (px 0) (px 8) (px 0)
+
+listGroupStyles :: Css
+listGroupStyles = do
+ ".list-group" ? do
+ display flex
+ flexDirection column
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ border (px 1) solid "#d0d0d0"
+ overflow hidden
+ ".list-group-item" ? do
+ display flex
+ alignItems center
+ justifyContent spaceBetween
+ padding (px 8) (px 10) (px 8) (px 10)
+ borderBottom (px 1) solid "#e5e7eb"
+ textDecoration none
+ color inherit
+ transition "background-color" (ms 150) ease (sec 0)
+ ".list-group-item" # lastChild ? borderBottom (px 0) none transparent
+ ".list-group-item" # hover ? do
+ backgroundColor "#f9fafb"
+ textDecoration none
+ ".list-group-item-content" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ Stylesheet.key "flex" ("1" :: Text)
+ minWidth (px 0)
+ overflow hidden
+ ".list-group-item-id" ? do
+ fontFamily ["SF Mono", "Monaco", "monospace"] [monospace]
+ color "#0066cc"
+ fontSize (px 12)
+ flexShrink 0
+ ".list-group-item-title" ? do
+ fontSize (px 13)
+ color "#374151"
+ overflow hidden
+ Stylesheet.key "text-overflow" ("ellipsis" :: Text)
+ whiteSpace nowrap
+ ".list-group-item-meta" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ flexShrink 0
+
+statusBadges :: Css
+statusBadges = do
+ ".badge" ? do
+ display inlineBlock
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 11)
+ fontWeight (weight 500)
+ whiteSpace nowrap
+ ".badge-open" ? do
+ backgroundColor "#fef3c7"
+ color "#92400e"
+ ".badge-inprogress" ? do
+ backgroundColor "#dbeafe"
+ color "#1e40af"
+ ".badge-review" ? do
+ backgroundColor "#ede9fe"
+ color "#6b21a8"
+ ".badge-approved" ? do
+ backgroundColor "#cffafe"
+ color "#0e7490"
+ ".badge-done" ? do
+ backgroundColor "#d1fae5"
+ color "#065f46"
+ ".badge-needshelp" ? do
+ backgroundColor "#fef3c7"
+ color "#92400e"
+ ".status-badge-dropdown" ? do
+ position relative
+ display inlineBlock
+ ".status-badge-clickable" ? do
+ cursor pointer
+ Stylesheet.key "user-select" ("none" :: Text)
+ ".status-badge-clickable" # hover ? do
+ opacity 0.85
+ ".dropdown-arrow" ? do
+ fontSize (px 8)
+ marginLeft (px 2)
+ opacity 0.7
+ ".status-dropdown-menu" ? do
+ display none
+ position absolute
+ left (px 0)
+ top (pct 100)
+ marginTop (px 2)
+ backgroundColor white
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.15)" :: Text)
+ zIndex 100
+ padding (px 4) (px 4) (px 4) (px 4)
+ minWidth (px 100)
+ ".status-badge-dropdown.open" |> ".status-dropdown-menu" ? do
+ display block
+ ".status-option-form" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 0)
+ ".status-dropdown-option" ? do
+ display block
+ width (pct 100)
+ textAlign (alignSide sideLeft)
+ margin (px 2) (px 0) (px 2) (px 0)
+ border (px 0) none transparent
+ cursor pointer
+ transition "opacity" (ms 150) ease (sec 0)
+ ".status-dropdown-option" # hover ? do
+ opacity 0.7
+ ".status-dropdown-option" # focus ? do
+ opacity 0.85
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".status-dropdown-option.selected" ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".status-badge-clickable" # focus ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("2px" :: Text)
+ ".badge-p0" ? do
+ backgroundColor "#fee2e2"
+ color "#991b1b"
+ ".badge-p1" ? do
+ backgroundColor "#fef3c7"
+ color "#92400e"
+ ".badge-p2" ? do
+ backgroundColor "#dbeafe"
+ color "#1e40af"
+ ".badge-p3" ? do
+ backgroundColor "#e5e7eb"
+ color "#4b5563"
+ ".badge-p4" ? do
+ backgroundColor "#f3f4f6"
+ color "#6b7280"
+ ".priority-badge-dropdown" ? do
+ position relative
+ display inlineBlock
+ ".priority-badge-clickable" ? do
+ cursor pointer
+ Stylesheet.key "user-select" ("none" :: Text)
+ ".priority-badge-clickable" # hover ? do
+ opacity 0.85
+ ".priority-dropdown-menu" ? do
+ display none
+ position absolute
+ left (px 0)
+ top (pct 100)
+ marginTop (px 2)
+ backgroundColor white
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.15)" :: Text)
+ zIndex 100
+ padding (px 4) (px 4) (px 4) (px 4)
+ minWidth (px 100)
+ ".priority-badge-dropdown.open" |> ".priority-dropdown-menu" ? do
+ display block
+ ".priority-option-form" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 0)
+ ".priority-dropdown-option" ? do
+ display block
+ width (pct 100)
+ textAlign (alignSide sideLeft)
+ margin (px 2) (px 0) (px 2) (px 0)
+ border (px 0) none transparent
+ cursor pointer
+ transition "opacity" (ms 150) ease (sec 0)
+ ".priority-dropdown-option" # hover ? do
+ opacity 0.7
+ ".priority-dropdown-option" # focus ? do
+ opacity 0.85
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".priority-dropdown-option.selected" ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".priority-badge-clickable" # focus ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("2px" :: Text)
+ ".badge-complexity" ? do
+ backgroundColor "#f0f9ff"
+ color "#0c4a6e"
+ ".badge-complexity-1" ? do
+ backgroundColor "#f0fdf4"
+ color "#166534"
+ ".badge-complexity-2" ? do
+ backgroundColor "#f0f9ff"
+ color "#075985"
+ ".badge-complexity-3" ? do
+ backgroundColor "#fef3c7"
+ color "#92400e"
+ ".badge-complexity-4" ? do
+ backgroundColor "#fef3c7"
+ color "#b45309"
+ ".badge-complexity-5" ? do
+ backgroundColor "#fee2e2"
+ color "#991b1b"
+ ".badge-complexity-none" ? do
+ backgroundColor "#f3f4f6"
+ color "#6b7280"
+ ".complexity-badge-dropdown" ? do
+ position relative
+ display inlineBlock
+ ".complexity-badge-clickable" ? do
+ cursor pointer
+ Stylesheet.key "user-select" ("none" :: Text)
+ ".complexity-badge-clickable" # hover ? do
+ opacity 0.85
+ ".complexity-dropdown-menu" ? do
+ display none
+ position absolute
+ left (px 0)
+ top (pct 100)
+ marginTop (px 2)
+ backgroundColor white
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.15)" :: Text)
+ zIndex 100
+ padding (px 4) (px 4) (px 4) (px 4)
+ minWidth (px 100)
+ ".complexity-badge-dropdown.open" |> ".complexity-dropdown-menu" ? do
+ display block
+ ".complexity-option-form" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 0)
+ ".complexity-dropdown-option" ? do
+ display block
+ width (pct 100)
+ textAlign (alignSide sideLeft)
+ margin (px 2) (px 0) (px 2) (px 0)
+ border (px 0) none transparent
+ cursor pointer
+ transition "opacity" (ms 150) ease (sec 0)
+ ".complexity-dropdown-option" # hover ? do
+ opacity 0.7
+ ".complexity-dropdown-option" # focus ? do
+ opacity 0.85
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".complexity-dropdown-option.selected" ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("1px" :: Text)
+ ".complexity-badge-clickable" # focus ? do
+ Stylesheet.key "outline" ("2px solid #0066cc" :: Text)
+ Stylesheet.key "outline-offset" ("2px" :: Text)
+
+buttonStyles :: Css
+buttonStyles = do
+ ".btn"
+ <> ".action-btn"
+ <> ".filter-btn"
+ <> ".submit-btn"
+ <> ".accept-btn"
+ <> ".reject-btn"
+ <> ".review-link-btn"
+ ? do
+ display inlineBlock
+ minHeight (px 32)
+ padding (px 6) (px 12) (px 6) (px 12)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ border (px 0) none transparent
+ fontSize (px 13)
+ fontWeight (weight 500)
+ textDecoration none
+ cursor pointer
+ textAlign center
+ transition "all" (ms 150) ease (sec 0)
+ Stylesheet.key "touch-action" ("manipulation" :: Text)
+ ".action-btn" ? do
+ backgroundColor white
+ border (px 1) solid "#d1d5db"
+ color "#374151"
+ ".action-btn" # hover ? do
+ backgroundColor "#f9fafb"
+ borderColor "#9ca3af"
+ ".action-btn-primary" <> ".filter-btn" <> ".submit-btn" ? do
+ backgroundColor "#0066cc"
+ color white
+ borderColor "#0066cc"
+ ".action-btn-primary"
+ # hover
+ <> ".filter-btn"
+ # hover
+ <> ".submit-btn"
+ # hover
+ ? do
+ backgroundColor "#0052a3"
+ ".accept-btn" ? do
+ backgroundColor "#10b981"
+ color white
+ ".accept-btn" # hover ? backgroundColor "#059669"
+ ".reject-btn" ? do
+ backgroundColor "#ef4444"
+ color white
+ ".reject-btn" # hover ? backgroundColor "#dc2626"
+ ".clear-btn" ? do
+ display inlineBlock
+ minHeight (px 32)
+ padding (px 6) (px 10) (px 6) (px 10)
+ backgroundColor "#6b7280"
+ color white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ textDecoration none
+ fontSize (px 13)
+ cursor pointer
+ ".clear-btn" # hover ? backgroundColor "#4b5563"
+ ".review-link-btn" ? do
+ backgroundColor "#8b5cf6"
+ color white
+ ".review-link-btn" # hover ? backgroundColor "#7c3aed"
+ ".review-link-section" ? margin (px 8) (px 0) (px 8) (px 0)
+ ".btn-secondary" <> ".load-more-btn" ? do
+ backgroundColor "#6b7280"
+ color white
+ width (pct 100)
+ marginTop (px 8)
+ ".btn-secondary" # hover <> ".load-more-btn" # hover ? backgroundColor "#4b5563"
+
+formStyles :: Css
+formStyles = do
+ ".filter-row" ? do
+ display flex
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("8px" :: Text)
+ alignItems flexEnd
+ ".filter-group" ? do
+ display flex
+ flexDirection row
+ alignItems center
+ Stylesheet.key "gap" ("4px" :: Text)
+ (".filter-group" |> label) ? do
+ fontSize (px 12)
+ color "#6b7280"
+ fontWeight (weight 500)
+ whiteSpace nowrap
+ ".filter-select" <> ".filter-input" <> ".status-select" ? do
+ minHeight (px 32)
+ padding (px 6) (px 10) (px 6) (px 10)
+ border (px 1) solid "#d1d5db"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 13)
+ minWidth (px 100)
+ ".filter-input" ? minWidth (px 120)
+ ".inline-form" ? display inlineBlock
+ ".reject-form" ? do
+ display flex
+ Stylesheet.key "gap" ("6px" :: Text)
+ Stylesheet.key "flex" ("1" :: Text)
+ minWidth (px 200)
+ flexWrap Flexbox.wrap
+ ".reject-notes" ? do
+ Stylesheet.key "flex" ("1" :: Text)
+ minWidth (px 160)
+ minHeight (px 32)
+ padding (px 6) (px 10) (px 6) (px 10)
+ border (px 1) solid "#d1d5db"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 13)
+ Stylesheet.key "resize" ("vertical" :: Text)
+ ".edit-description" ? do
+ marginTop (px 8)
+ padding (px 8) (px 0) (px 0) (px 0)
+ borderTop (px 1) solid "#e5e7eb"
+ (".edit-description" |> "summary") ? do
+ cursor pointer
+ color "#0066cc"
+ fontSize (px 13)
+ fontWeight (weight 500)
+ (".edit-description" |> "summary") # hover ? textDecoration underline
+ ".description-textarea" ? do
+ width (pct 100)
+ minHeight (px 250)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d1d5db"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 13)
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ lineHeight (em 1.5)
+ Stylesheet.key "resize" ("vertical" :: Text)
+ marginTop (px 8)
+ ".form-actions" ? do
+ display flex
+ flexDirection row
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("8px" :: Text)
+ marginTop (px 8)
+ ".fact-edit-form" ? do
+ marginTop (px 8)
+ ".form-group" ? do
+ marginBottom (px 16)
+ (".form-group" |> label) ? do
+ display block
+ marginBottom (px 4)
+ fontSize (px 13)
+ fontWeight (weight 500)
+ color "#374151"
+ ".form-input" <> ".form-textarea" ? do
+ width (pct 100)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d1d5db"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 14)
+ lineHeight (em 1.5)
+ ".form-input" # focus <> ".form-textarea" # focus ? do
+ borderColor "#0066cc"
+ Stylesheet.key "outline" ("none" :: Text)
+ Stylesheet.key "box-shadow" ("0 0 0 2px rgba(0, 102, 204, 0.2)" :: Text)
+ ".form-textarea" ? do
+ minHeight (px 120)
+ Stylesheet.key "resize" ("vertical" :: Text)
+ fontFamily
+ [ "-apple-system",
+ "BlinkMacSystemFont",
+ "Segoe UI",
+ "Roboto",
+ "Helvetica Neue",
+ "Arial",
+ "sans-serif"
+ ]
+ [sansSerif]
+ ".btn" ? do
+ display inlineBlock
+ padding (px 8) (px 16) (px 8) (px 16)
+ border (px 0) none transparent
+ borderRadius (px 3) (px 3) (px 3) (px 3)
+ fontSize (px 14)
+ fontWeight (weight 500)
+ textDecoration none
+ cursor pointer
+ transition "all" (ms 150) ease (sec 0)
+ ".btn-primary" ? do
+ backgroundColor "#0066cc"
+ color white
+ ".btn-primary" # hover ? backgroundColor "#0052a3"
+ ".btn-secondary" ? do
+ backgroundColor "#6b7280"
+ color white
+ ".btn-secondary" # hover ? backgroundColor "#4b5563"
+ ".btn-danger" ? do
+ backgroundColor "#dc2626"
+ color white
+ ".btn-danger" # hover ? backgroundColor "#b91c1c"
+ ".danger-zone" ? do
+ marginTop (px 24)
+ padding (px 16) (px 16) (px 16) (px 16)
+ backgroundColor "#fef2f2"
+ border (px 1) solid "#fecaca"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ (".danger-zone" |> h2) ? do
+ color "#dc2626"
+ marginBottom (px 12)
+ ".back-link" ? do
+ marginTop (px 24)
+ paddingTop (px 16)
+ borderTop (px 1) solid "#e5e7eb"
+ (".back-link" |> a) ? do
+ color "#6b7280"
+ textDecoration none
+ (".back-link" |> a) # hover ? do
+ color "#374151"
+ textDecoration underline
+ ".task-link" ? do
+ color "#0066cc"
+ textDecoration none
+ fontWeight (weight 500)
+ ".task-link" # hover ? textDecoration underline
+ ".error-msg" ? do
+ color "#dc2626"
+ backgroundColor "#fef2f2"
+ padding (px 16) (px 16) (px 16) (px 16)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ border (px 1) solid "#fecaca"
+ ".create-fact-section" ? do
+ marginBottom (px 16)
+ ".create-fact-toggle" ? do
+ cursor pointer
+ display inlineBlock
+ ".fact-create-form" ? do
+ marginTop (px 12)
+ padding (px 16) (px 16) (px 16) (px 16)
+ backgroundColor white
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ border (px 1) solid "#d1d5db"
+
+executionDetailsStyles :: Css
+executionDetailsStyles = do
+ ".execution-section" ? do
+ marginTop (em 1)
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d0d0d0"
+ ".execution-details" ? do
+ marginTop (px 8)
+ ".metric-row" ? do
+ display flex
+ flexWrap Flexbox.wrap
+ padding (px 4) (px 0) (px 4) (px 0)
+ marginBottom (px 2)
+ ".metric-label" ? do
+ fontWeight (weight 600)
+ width (px 120)
+ color "#6b7280"
+ fontSize (px 13)
+ ".metric-value" ? do
+ Stylesheet.key "flex" ("1" :: Text)
+ fontSize (px 13)
+ ".amp-link" ? do
+ color "#0066cc"
+ textDecoration none
+ wordBreak breakAll
+ ".amp-link" # hover ? textDecoration underline
+ ".amp-thread-btn" ? do
+ display inlineBlock
+ padding (px 4) (px 10) (px 4) (px 10)
+ backgroundColor "#7c3aed"
+ color white
+ borderRadius (px 3) (px 3) (px 3) (px 3)
+ textDecoration none
+ fontSize (px 12)
+ fontWeight (weight 500)
+ transition "background-color" (ms 150) ease (sec 0)
+ ".amp-thread-btn" # hover ? do
+ backgroundColor "#6d28d9"
+ textDecoration none
+ ".retry-count" ? do
+ color "#f97316"
+ fontWeight (weight 600)
+ ".attempts-divider" ? do
+ margin (px 12) (px 0) (px 12) (px 0)
+ border (px 0) none transparent
+ borderTop (px 1) solid "#e5e7eb"
+ ".attempt-header" ? do
+ fontWeight (weight 600)
+ fontSize (px 13)
+ color "#374151"
+ marginTop (px 8)
+ marginBottom (px 4)
+ paddingBottom (px 4)
+ borderBottom (px 1) solid "#f3f4f6"
+ ".aggregated-metrics" ? do
+ marginTop (em 0.5)
+ paddingTop (em 0.75)
+ ".metrics-grid" ? do
+ display grid
+ Stylesheet.key "grid-template-columns" ("repeat(auto-fit, minmax(100px, 1fr))" :: Text)
+ Stylesheet.key "gap" ("10px" :: Text)
+ marginTop (px 8)
+ ".metric-card" ? do
+ backgroundColor "#f9fafb"
+ border (px 1) solid "#e5e7eb"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ padding (px 10) (px 12) (px 10) (px 12)
+ textAlign center
+ (".metric-card" |> ".metric-value") ? do
+ fontSize (px 20)
+ fontWeight bold
+ color "#374151"
+ display block
+ marginBottom (px 2)
+ width auto
+ (".metric-card" |> ".metric-label") ? do
+ fontSize (px 11)
+ color "#6b7280"
+ fontWeight (weight 400)
+ width auto
+
+activityTimelineStyles :: Css
+activityTimelineStyles = do
+ ".activity-section" ? do
+ marginTop (em 1)
+ backgroundColor white
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ padding (px 8) (px 10) (px 8) (px 10)
+ border (px 1) solid "#d0d0d0"
+ ".activity-timeline" ? do
+ position relative
+ paddingLeft (px 20)
+ marginTop (px 8)
+ ".activity-timeline" # before ? do
+ Stylesheet.key "content" ("''" :: Text)
+ position absolute
+ left (px 6)
+ top (px 0)
+ bottom (px 0)
+ width (px 2)
+ backgroundColor "#e5e7eb"
+ ".activity-item" ? do
+ position relative
+ display flex
+ Stylesheet.key "gap" ("8px" :: Text)
+ paddingBottom (px 10)
+ marginBottom (px 0)
+ ".activity-item" # lastChild ? paddingBottom (px 0)
+ ".activity-icon" ? do
+ position absolute
+ left (px (-16))
+ width (px 14)
+ height (px 14)
+ borderRadius (pct 50) (pct 50) (pct 50) (pct 50)
+ display flex
+ alignItems center
+ justifyContent center
+ fontSize (px 8)
+ fontWeight bold
+ backgroundColor white
+ border (px 2) solid "#e5e7eb"
+ ".activity-content" ? do
+ Stylesheet.key "flex" ("1" :: Text)
+ ".activity-header" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ marginBottom (px 2)
+ ".activity-stage" ? do
+ fontWeight (weight 600)
+ fontSize (px 12)
+ ".activity-time" ? do
+ fontSize (px 11)
+ color "#6b7280"
+ ".activity-message" ? do
+ margin (px 2) (px 0) (px 0) (px 0)
+ fontSize (px 12)
+ color "#374151"
+ ".activity-metadata" ? do
+ marginTop (px 4)
+ (".activity-metadata" |> "summary") ? do
+ fontSize (px 11)
+ color "#6b7280"
+ cursor pointer
+ ".metadata-json" ? do
+ fontSize (px 10)
+ backgroundColor "#f3f4f6"
+ padding (px 4) (px 6) (px 4) (px 6)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ marginTop (px 2)
+ maxHeight (px 150)
+ overflow auto
+ ".stage-claiming" |> ".activity-icon" ? do
+ borderColor "#3b82f6"
+ color "#3b82f6"
+ ".stage-running" |> ".activity-icon" ? do
+ borderColor "#f59e0b"
+ color "#f59e0b"
+ ".stage-reviewing" |> ".activity-icon" ? do
+ borderColor "#8b5cf6"
+ color "#8b5cf6"
+ ".stage-retrying" |> ".activity-icon" ? do
+ borderColor "#f97316"
+ color "#f97316"
+ ".stage-completed" |> ".activity-icon" ? do
+ borderColor "#10b981"
+ color "#10b981"
+ ".stage-failed" |> ".activity-icon" ? do
+ borderColor "#ef4444"
+ color "#ef4444"
+
+commitStyles :: Css
+commitStyles = do
+ ".commit-list" ? do
+ display flex
+ flexDirection column
+ Stylesheet.key "gap" ("4px" :: Text)
+ marginTop (px 8)
+ ".commit-item" ? do
+ padding (px 6) (px 8) (px 6) (px 8)
+ backgroundColor "#f9fafb"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ border (px 1) solid "#e5e7eb"
+ ".commit-header" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ marginBottom (px 2)
+ ".commit-hash" ? do
+ fontFamily ["SF Mono", "Monaco", "monospace"] [monospace]
+ fontSize (px 12)
+ color "#0066cc"
+ textDecoration none
+ backgroundColor "#e5e7eb"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".commit-hash" # hover ? textDecoration underline
+ ".commit-summary" ? do
+ fontSize (px 13)
+ color "#374151"
+ fontWeight (weight 500)
+ ".commit-meta" ? do
+ display flex
+ Stylesheet.key "gap" ("12px" :: Text)
+ fontSize (px 11)
+ color "#6b7280"
+ ".commit-author" ? fontWeight (weight 500)
+ ".commit-files" ? do
+ color "#9ca3af"
+
+markdownStyles :: Css
+markdownStyles = do
+ ".markdown-content" ? do
+ width (pct 100)
+ lineHeight (em 1.6)
+ fontSize (px 14)
+ color "#374151"
+ ".md-h1" ? do
+ fontSize (px 18)
+ fontWeight bold
+ margin (em 1) (px 0) (em 0.5) (px 0)
+ paddingBottom (em 0.3)
+ borderBottom (px 1) solid "#e5e7eb"
+ ".md-h2" ? do
+ fontSize (px 16)
+ fontWeight (weight 600)
+ margin (em 0.8) (px 0) (em 0.4) (px 0)
+ ".md-h3" ? do
+ fontSize (px 14)
+ fontWeight (weight 600)
+ margin (em 0.6) (px 0) (em 0.3) (px 0)
+ ".md-para" ? do
+ margin (em 0.5) (px 0) (em 0.5) (px 0)
+ ".md-code" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ backgroundColor "#f8f8f8"
+ color "#333333"
+ padding (px 10) (px 12) (px 10) (px 12)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ border (px 1) solid "#e1e4e8"
+ overflow auto
+ whiteSpace preWrap
+ margin (em 0.5) (px 0) (em 0.5) (px 0)
+ ".md-list" ? do
+ margin (em 0.5) (px 0) (em 0.5) (px 0)
+ paddingLeft (px 24)
+ (".md-list" ** li) ? do
+ margin (px 4) (px 0) (px 4) (px 0)
+ ".md-inline-code" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (em 0.9)
+ backgroundColor "#f3f4f6"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+
+retryBannerStyles :: Css
+retryBannerStyles = do
+ ".retry-banner" ? do
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ padding (px 12) (px 16) (px 12) (px 16)
+ margin (px 0) (px 0) (px 16) (px 0)
+ ".retry-banner-warning" ? do
+ backgroundColor "#fef3c7"
+ border (px 1) solid "#f59e0b"
+ ".retry-banner-critical" ? do
+ backgroundColor "#fee2e2"
+ border (px 1) solid "#ef4444"
+ ".retry-banner-header" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ marginBottom (px 8)
+ ".retry-icon" ? do
+ fontSize (px 18)
+ fontWeight bold
+ ".retry-attempt" ? do
+ fontSize (px 14)
+ fontWeight (weight 600)
+ color "#374151"
+ ".retry-warning-badge" ? do
+ backgroundColor "#dc2626"
+ color white
+ fontSize (px 11)
+ fontWeight (weight 600)
+ padding (px 2) (px 8) (px 2) (px 8)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ marginLeft auto
+ ".retry-banner-details" ? do
+ fontSize (px 13)
+ color "#374151"
+ ".retry-detail-row" ? do
+ display flex
+ alignItems flexStart
+ Stylesheet.key "gap" ("8px" :: Text)
+ margin (px 4) (px 0) (px 4) (px 0)
+ ".retry-label" ? do
+ fontWeight (weight 500)
+ minWidth (px 110)
+ flexShrink 0
+ ".retry-value" ? do
+ color "#4b5563"
+ ".retry-commit" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (em 0.9)
+ backgroundColor "#f3f4f6"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".retry-conflict-list" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 0) (px 0) (px 0) (px 16)
+ (".retry-conflict-list" ** li) ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ margin (px 2) (px 0) (px 2) (px 0)
+ ".retry-warning-message" ? do
+ marginTop (px 12)
+ padding (px 10) (px 12) (px 10) (px 12)
+ backgroundColor "#fecaca"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 12)
+ color "#991b1b"
+ fontWeight (weight 500)
+ ".retry-hint" ? do
+ marginTop (px 8)
+ fontSize (px 12)
+ color "#6b7280"
+ fontStyle italic
+
+commentStyles :: Css
+commentStyles = do
+ ".comments-section" ? do
+ marginTop (px 12)
+ ".comment-card" ? do
+ backgroundColor "#f9fafb"
+ border (px 1) solid "#e5e7eb"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ padding (px 10) (px 12) (px 10) (px 12)
+ marginBottom (px 8)
+ ".comment-text" ? do
+ margin (px 0) (px 0) (px 6) (px 0)
+ fontSize (px 13)
+ color "#374151"
+ whiteSpace preWrap
+ ".comment-meta" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".comment-author" ? do
+ display inlineBlock
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ fontSize (px 10)
+ fontWeight (weight 600)
+ textTransform uppercase
+ whiteSpace nowrap
+ ".author-human" ? do
+ backgroundColor "#dbeafe"
+ color "#1e40af"
+ ".author-junior" ? do
+ backgroundColor "#d1fae5"
+ color "#065f46"
+ ".comment-time" ? do
+ fontSize (px 11)
+ color "#9ca3af"
+ ".comment-form" ? do
+ marginTop (px 12)
+ display flex
+ flexDirection column
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".comment-textarea" ? do
+ width (pct 100)
+ padding (px 8) (px 10) (px 8) (px 10)
+ fontSize (px 13)
+ border (px 1) solid "#d0d0d0"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ Stylesheet.key "resize" ("vertical" :: Text)
+ minHeight (px 60)
+ ".comment-textarea" # focus ? do
+ Stylesheet.key "outline" ("none" :: Text)
+ borderColor "#0066cc"
+
+timeFilterStyles :: Css
+timeFilterStyles = do
+ ".time-filter" ? do
+ display flex
+ Stylesheet.key "gap" ("6px" :: Text)
+ marginBottom (px 12)
+ flexWrap Flexbox.wrap
+ ".time-filter-btn" ? do
+ display inlineBlock
+ padding (px 4) (px 12) (px 4) (px 12)
+ fontSize (px 12)
+ fontWeight (weight 500)
+ textDecoration none
+ borderRadius (px 12) (px 12) (px 12) (px 12)
+ border (px 1) solid "#d0d0d0"
+ backgroundColor white
+ color "#374151"
+ transition "all" (ms 150) ease (sec 0)
+ cursor pointer
+ ".time-filter-btn" # hover ? do
+ borderColor "#999"
+ backgroundColor "#f3f4f6"
+ textDecoration none
+ ".time-filter-btn.active" ? do
+ backgroundColor "#0066cc"
+ borderColor "#0066cc"
+ color white
+ ".time-filter-btn.active" # hover ? do
+ backgroundColor "#0055aa"
+ borderColor "#0055aa"
+
+sortDropdownStyles :: Css
+sortDropdownStyles = do
+ ".page-header-row" ? do
+ display flex
+ alignItems center
+ justifyContent spaceBetween
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("12px" :: Text)
+ marginBottom (px 8)
+ ".page-header-row" |> "h1" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ ".sort-dropdown" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontSize (px 13)
+ ".sort-label" ? do
+ color "#6b7280"
+ fontWeight (weight 500)
+ ".sort-dropdown-wrapper" ? do
+ position relative
+ ".sort-dropdown-btn" ? do
+ padding (px 4) (px 10) (px 4) (px 10)
+ fontSize (px 13)
+ fontWeight (weight 500)
+ border (px 1) solid "#d0d0d0"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ backgroundColor white
+ color "#374151"
+ cursor pointer
+ transition "all" (ms 150) ease (sec 0)
+ whiteSpace nowrap
+ ".sort-dropdown-btn" # hover ? do
+ borderColor "#999"
+ backgroundColor "#f3f4f6"
+ ".sort-dropdown-content" ? do
+ minWidth (px 160)
+ right (px 0)
+ left auto
+ ".sort-dropdown-item" ? do
+ padding (px 8) (px 12) (px 8) (px 12)
+ fontSize (px 13)
+ ".sort-dropdown-item.active" ? do
+ backgroundColor "#e0f2fe"
+ fontWeight (weight 600)
+
+taskMetaStyles :: Css
+taskMetaStyles = do
+ ".task-meta" ? do
+ marginBottom (px 12)
+ ".task-meta-primary" ? do
+ display flex
+ alignItems center
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontSize (px 14)
+ marginBottom (px 4)
+ ".task-meta-secondary" ? do
+ display flex
+ alignItems center
+ flexWrap Flexbox.wrap
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontSize (px 12)
+ color "#6b7280"
+ ".task-meta-id" ? do
+ fontFamily ["SF Mono", "Monaco", "monospace"] [monospace]
+ fontSize (px 13)
+ backgroundColor "#f3f4f6"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".task-meta-label" ? do
+ color "#6b7280"
+ ".meta-sep" ? do
+ color "#d1d5db"
+ Stylesheet.key "user-select" ("none" :: Text)
+
+timelineEventStyles :: Css
+timelineEventStyles = do
+ ".event-header" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ marginBottom (px 4)
+ ".event-icon" ? do
+ fontSize (px 14)
+ width (px 20)
+ textAlign center
+ ".event-label" ? do
+ fontWeight (weight 500)
+ color "#374151"
+ ".event-assistant" ? do
+ padding (px 0) (px 0) (px 0) (px 0)
+ ".event-bubble" ? do
+ backgroundColor "#f3f4f6"
+ padding (px 8) (px 12) (px 8) (px 12)
+ borderRadius (px 8) (px 8) (px 8) (px 8)
+ whiteSpace preWrap
+ lineHeight (em 1.5)
+ ".event-truncated" ? do
+ color "#6b7280"
+ fontStyle italic
+ ".event-tool-call" ? do
+ borderLeft (px 3) solid "#3b82f6"
+ paddingLeft (px 8)
+ ".event-tool-call" |> "summary" ? do
+ cursor pointer
+ listStyleType none
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ ".event-tool-call" |> "summary" # before ? do
+ content (stringContent "▶")
+ fontSize (px 10)
+ color "#6b7280"
+ transition "transform" (ms 150) ease (sec 0)
+ ".event-tool-call[open]" |> "summary" # before ? do
+ Stylesheet.key "transform" ("rotate(90deg)" :: Text)
+ ".tool-name" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ color "#3b82f6"
+ ".tool-summary" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ color "#6b7280"
+ marginLeft (px 8)
+ ".tool-args" ? do
+ marginTop (px 4)
+ paddingLeft (px 20)
+ ".tool-output-pre" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 11)
+ backgroundColor "#1e1e1e"
+ color "#d4d4d4"
+ padding (px 8) (px 10) (px 8) (px 10)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ overflowX auto
+ whiteSpace preWrap
+ maxHeight (px 300)
+ margin (px 0) (px 0) (px 0) (px 0)
+ ".event-tool-result" ? do
+ borderLeft (px 3) solid "#10b981"
+ paddingLeft (px 8)
+ ".result-header" ? do
+ fontSize (px 12)
+ ".line-count" ? do
+ fontSize (px 11)
+ color "#6b7280"
+ backgroundColor "#f3f4f6"
+ padding (px 1) (px 6) (px 1) (px 6)
+ borderRadius (px 10) (px 10) (px 10) (px 10)
+ ".result-collapsible" |> "summary" ? do
+ cursor pointer
+ fontSize (px 12)
+ color "#0066cc"
+ marginBottom (px 4)
+ ".result-collapsible" |> "summary" # hover ? textDecoration underline
+ ".tool-output" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 11)
+ backgroundColor "#1e1e1e"
+ color "#d4d4d4"
+ padding (px 8) (px 10) (px 8) (px 10)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ overflowX auto
+ whiteSpace preWrap
+ maxHeight (px 300)
+ margin (px 0) (px 0) (px 0) (px 0)
+ ".event-cost" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontSize (px 11)
+ color "#6b7280"
+ padding (px 4) (px 0) (px 4) (px 0)
+ ".cost-text" ? do
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ ".event-error" ? do
+ borderLeft (px 3) solid "#ef4444"
+ paddingLeft (px 8)
+ backgroundColor "#fef2f2"
+ padding (px 8) (px 8) (px 8) (px 12)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ ".event-error" |> ".event-label" ? color "#dc2626"
+ ".error-message" ? do
+ color "#dc2626"
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ whiteSpace preWrap
+ ".event-complete" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("8px" :: Text)
+ color "#10b981"
+ fontWeight (weight 500)
+ padding (px 8) (px 0) (px 8) (px 0)
+ ".output-collapsible" |> "summary" ? do
+ cursor pointer
+ fontSize (px 12)
+ color "#0066cc"
+ marginBottom (px 4)
+ ".output-collapsible" |> "summary" # hover ? textDecoration underline
+ Stylesheet.key "@keyframes pulse" ("0%, 100% { opacity: 1; } 50% { opacity: 0.5; }" :: Text)
+
+unifiedTimelineStyles :: Css
+unifiedTimelineStyles = do
+ ".unified-timeline-section" ? do
+ marginTop (em 1.5)
+ paddingTop (em 1)
+ borderTop (px 1) solid "#e5e7eb"
+ ".timeline-live-toggle" ? do
+ fontSize (px 10)
+ fontWeight bold
+ color "#10b981"
+ backgroundColor "#d1fae5"
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 10) (px 10) (px 10) (px 10)
+ marginLeft (px 8)
+ textTransform uppercase
+ border (px 1) solid "#6ee7b7"
+ cursor pointer
+ Stylesheet.key "transition" ("all 0.3s ease" :: Text)
+ Stylesheet.key "animation" ("pulse 2s infinite" :: Text)
+ ".timeline-live-toggle:hover" ? do
+ Stylesheet.key "box-shadow" ("0 0 8px rgba(16,185,129,0.4)" :: Text)
+ ".timeline-live-toggle.timeline-live-paused" ? do
+ color "#6b7280"
+ backgroundColor "#f3f4f6"
+ border (px 1) solid "#d1d5db"
+ Stylesheet.key "animation" ("none" :: Text)
+ ".timeline-autoscroll-toggle" ? do
+ fontSize (px 10)
+ fontWeight bold
+ color "#3b82f6"
+ backgroundColor "#dbeafe"
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 10) (px 10) (px 10) (px 10)
+ marginLeft (px 4)
+ border (px 1) solid "#93c5fd"
+ cursor pointer
+ Stylesheet.key "transition" ("all 0.2s ease" :: Text)
+ ".timeline-autoscroll-toggle:hover" ? do
+ Stylesheet.key "box-shadow" ("0 0 6px rgba(59,130,246,0.3)" :: Text)
+ ".timeline-autoscroll-toggle.timeline-autoscroll-disabled" ? do
+ color "#6b7280"
+ backgroundColor "#f3f4f6"
+ border (px 1) solid "#d1d5db"
+ ".timeline-live" ? do
+ fontSize (px 10)
+ fontWeight bold
+ color "#10b981"
+ backgroundColor "#d1fae5"
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 10) (px 10) (px 10) (px 10)
+ marginLeft (px 8)
+ textTransform uppercase
+ Stylesheet.key "animation" ("pulse 2s infinite" :: Text)
+ ".timeline-events" ? do
+ maxHeight (px 600)
+ overflowY auto
+ display flex
+ flexDirection column
+ Stylesheet.key "gap" ("12px" :: Text)
+ padding (px 12) (px 0) (px 12) (px 0)
+ ".timeline-event" ? do
+ fontSize (px 13)
+ lineHeight (em 1.4)
+ ".actor-label" ? do
+ fontSize (px 11)
+ fontWeight (weight 500)
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 3) (px 3) (px 3) (px 3)
+ marginLeft (px 4)
+ marginRight (px 4)
+ ".actor-human" ? do
+ color "#7c3aed"
+ backgroundColor "#f3e8ff"
+ ".actor-junior" ? do
+ color "#0369a1"
+ backgroundColor "#e0f2fe"
+ ".actor-system" ? do
+ color "#6b7280"
+ backgroundColor "#f3f4f6"
+ ".timeline-comment" ? do
+ paddingLeft (px 4)
+ ".timeline-comment" |> ".comment-bubble" ? do
+ backgroundColor "#f3f4f6"
+ color "#1f2937"
+ padding (px 10) (px 14) (px 10) (px 14)
+ borderRadius (px 8) (px 8) (px 8) (px 8)
+ whiteSpace preWrap
+ marginTop (px 6)
+ ".timeline-status-change" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ flexWrap Flexbox.wrap
+ padding (px 6) (px 8) (px 6) (px 8)
+ backgroundColor "#f0fdf4"
+ borderRadius (px 6) (px 6) (px 6) (px 6)
+ borderLeft (px 3) solid "#22c55e"
+ ".status-change-text" ? do
+ fontWeight (weight 500)
+ color "#166534"
+ ".timeline-activity" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ flexWrap Flexbox.wrap
+ padding (px 4) (px 0) (px 4) (px 0)
+ color "#6b7280"
+ ".activity-detail" ? do
+ fontSize (px 11)
+ color "#9ca3af"
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ ".timeline-error" ? do
+ borderLeft (px 3) solid "#ef4444"
+ backgroundColor "#fef2f2"
+ padding (px 8) (px 12) (px 8) (px 12)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ ".timeline-error" |> ".error-message" ? do
+ marginTop (px 6)
+ color "#dc2626"
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ whiteSpace preWrap
+ ".timeline-thought" ? do
+ paddingLeft (px 4)
+ ".timeline-thought" |> ".thought-bubble" ? do
+ backgroundColor "#fef3c7"
+ color "#78350f"
+ padding (px 8) (px 12) (px 8) (px 12)
+ borderRadius (px 8) (px 8) (px 8) (px 8)
+ whiteSpace preWrap
+ marginTop (px 6)
+ fontSize (px 12)
+ lineHeight (em 1.5)
+ ".timeline-tool-call" ? do
+ borderLeft (px 3) solid "#3b82f6"
+ paddingLeft (px 8)
+ ".timeline-tool-call" |> "summary" ? do
+ cursor pointer
+ listStyleType none
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ ".timeline-tool-call" |> "summary" # before ? do
+ content (stringContent "▶")
+ fontSize (px 10)
+ color "#6b7280"
+ transition "transform" (ms 150) ease (sec 0)
+ ".timeline-tool-call[open]" |> "summary" # before ? do
+ Stylesheet.key "transform" ("rotate(90deg)" :: Text)
+ ".timeline-tool-result" ? do
+ borderLeft (px 3) solid "#10b981"
+ paddingLeft (px 8)
+ ".timeline-tool-result" |> "summary" ? do
+ cursor pointer
+ listStyleType none
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ ".timeline-cost" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontSize (px 11)
+ color "#6b7280"
+ padding (px 2) (px 0) (px 2) (px 0)
+ ".timeline-checkpoint" ? do
+ borderLeft (px 3) solid "#8b5cf6"
+ backgroundColor "#faf5ff"
+ padding (px 8) (px 12) (px 8) (px 12)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ ".timeline-checkpoint" |> ".checkpoint-content" ? do
+ marginTop (px 6)
+ fontSize (px 12)
+ whiteSpace preWrap
+ ".timeline-guardrail" ? do
+ borderLeft (px 3) solid "#f59e0b"
+ backgroundColor "#fffbeb"
+ padding (px 8) (px 12) (px 8) (px 12)
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ ".timeline-guardrail" |> ".guardrail-content" ? do
+ marginTop (px 6)
+ fontSize (px 12)
+ color "#92400e"
+ ".timeline-generic" ? do
+ padding (px 4) (px 0) (px 4) (px 0)
+ color "#6b7280"
+ ".formatted-json" ? do
+ margin (px 0) (px 0) (px 0) (px 0)
+ padding (px 8) (px 8) (px 8) (px 8)
+ backgroundColor "#f9fafb"
+ borderRadius (px 4) (px 4) (px 4) (px 4)
+ overflowX auto
+ fontSize (px 12)
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ whiteSpace preWrap
+ overflowWrap breakWord
+ compactToolStyles
+
+compactToolStyles :: Css
+compactToolStyles = do
+ ".tool-compact" ? do
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ padding (px 2) (px 0) (px 2) (px 0)
+ ".tool-check" ? do
+ color "#10b981"
+ fontWeight bold
+ ".tool-label" ? do
+ color "#6b7280"
+ fontWeight (weight 500)
+ ".tool-path" ? do
+ color "#3b82f6"
+ ".tool-pattern" ? do
+ color "#8b5cf6"
+ backgroundColor "#f5f3ff"
+ padding (px 1) (px 4) (px 1) (px 4)
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".tool-path-suffix" ? do
+ color "#6b7280"
+ fontSize (px 11)
+ ".tool-bash" ? do
+ display flex
+ alignItems flexStart
+ Stylesheet.key "gap" ("6px" :: Text)
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ fontSize (px 12)
+ padding (px 2) (px 0) (px 2) (px 0)
+ ".tool-bash-prompt" ? do
+ color "#f59e0b"
+ fontWeight bold
+ fontSize (px 14)
+ ".tool-bash-cmd" ? do
+ color "#374151"
+ backgroundColor "#f3f4f6"
+ padding (px 2) (px 6) (px 2) (px 6)
+ borderRadius (px 3) (px 3) (px 3) (px 3)
+ wordBreak breakAll
+ ".tool-generic" ? do
+ fontSize (px 12)
+ fontFamily ["SF Mono", "Monaco", "Consolas", "monospace"] [monospace]
+ ".tool-generic" |> "summary" ? do
+ cursor pointer
+ display flex
+ alignItems center
+ Stylesheet.key "gap" ("6px" :: Text)
+ ".tool-args-pre" ? do
+ margin (px 4) (px 0) (px 0) (px 16)
+ padding (px 6) (px 8) (px 6) (px 8)
+ backgroundColor "#f9fafb"
+ borderRadius (px 3) (px 3) (px 3) (px 3)
+ fontSize (px 11)
+ whiteSpace preWrap
+ maxHeight (px 200)
+ overflowY auto
+ ".tool-result-output" ? do
+ marginLeft (px 16)
+ marginTop (px 2)
+
+responsiveStyles :: Css
+responsiveStyles = do
+ query Media.screen [Media.maxWidth (px 600)] <| do
+ body ? fontSize (px 13)
+ ".container" ? padding (px 6) (px 8) (px 6) (px 8)
+ ".navbar" ? do
+ padding (px 6) (px 8) (px 6) (px 8)
+ flexWrap Flexbox.wrap
+ ".navbar-hamburger" ? do
+ display flex
+ Stylesheet.key "order" ("2" :: Text)
+ ".navbar-links" ? do
+ display none
+ width (pct 100)
+ Stylesheet.key "order" ("3" :: Text)
+ flexDirection column
+ alignItems flexStart
+ paddingTop (px 8)
+ Stylesheet.key "gap" ("0" :: Text)
+ ".navbar-toggle-checkbox" # checked |+ ".navbar-hamburger" |+ ".navbar-links" ? do
+ display flex
+ ".navbar-link" ? do
+ padding (px 8) (px 6) (px 8) (px 6)
+ fontSize (px 13)
+ width (pct 100)
+ ".navbar-dropdown" ? do
+ width (pct 100)
+ ".navbar-dropdown-btn" ? do
+ padding (px 8) (px 6) (px 8) (px 6)
+ fontSize (px 13)
+ width (pct 100)
+ textAlign (alignSide sideLeft)
+ ".navbar-dropdown-content" ? do
+ position static
+ Stylesheet.key "box-shadow" ("none" :: Text)
+ paddingLeft (px 12)
+ backgroundColor transparent
+ ".navbar-dropdown-item" ? do
+ padding (px 6) (px 10) (px 6) (px 10)
+ fontSize (px 12)
+ ".nav-content" ? do
+ flexDirection column
+ alignItems flexStart
+ ".stats-grid" ? do
+ Stylesheet.key "grid-template-columns" ("repeat(2, 1fr)" :: Text)
+ ".detail-row" ? do
+ flexDirection column
+ Stylesheet.key "gap" ("2px" :: Text)
+ ".detail-label" ? width auto
+ ".filter-row" ? do
+ flexWrap Flexbox.wrap
+ ".filter-group" ? do
+ width auto
+ flexWrap Flexbox.nowrap
+ ".filter-select" <> ".filter-input" ? minWidth (px 80)
+ ".review-actions" ? do
+ flexDirection column
+ ".reject-form" ? do
+ width (pct 100)
+ flexDirection column
+ ".reject-notes" ? width (pct 100)
+ ".actions" ? flexDirection column
+ ".action-btn" ? width (pct 100)
+
+darkModeStyles :: Css
+darkModeStyles =
+ query Media.screen [prefersDark] <| do
+ body ? do
+ backgroundColor "#111827"
+ color "#f3f4f6"
+ ".card"
+ <> ".task-card"
+ <> ".stat-card"
+ <> ".task-detail"
+ <> ".task-summary"
+ <> ".filter-form"
+ <> ".status-form"
+ <> ".diff-section"
+ <> ".review-actions"
+ <> ".list-group"
+ ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".list-group-item" ? borderBottomColor "#374151"
+ ".list-group-item" # hover ? backgroundColor "#374151"
+ ".list-group-item-id" ? color "#60a5fa"
+ ".list-group-item-title" ? color "#d1d5db"
+ header ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".navbar" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".navbar-brand" ? color "#60a5fa"
+ ".navbar-link" ? color "#d1d5db"
+ ".navbar-link" # hover ? backgroundColor "#374151"
+ ".navbar-dropdown-btn" ? color "#d1d5db"
+ ".navbar-dropdown-btn" # hover ? backgroundColor "#374151"
+ ".navbar-dropdown-content" ? do
+ backgroundColor "#1f2937"
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.3)" :: Text)
+ ".navbar-dropdown-item" ? color "#d1d5db"
+ ".navbar-dropdown-item" # hover ? backgroundColor "#374151"
+ ".status-dropdown-menu" ? do
+ backgroundColor "#1f2937"
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.3)" :: Text)
+ ".hamburger-line" ? backgroundColor "#d1d5db"
+ ".nav-brand" ? color "#f3f4f6"
+ "h2" <> "h3" ? color "#d1d5db"
+ a ? color "#60a5fa"
+ ".breadcrumb-container" ? backgroundColor transparent
+ ".breadcrumb-sep" ? color "#6b7280"
+ ".breadcrumb-current" ? color "#9ca3af"
+
+ ".detail-label"
+ <> ".priority"
+ <> ".dep-type"
+ <> ".child-status"
+ <> ".empty-msg"
+ <> ".stat-label"
+ <> ".priority-desc"
+ ? color "#9ca3af"
+ ".child-title" ? color "#d1d5db"
+ code ? do
+ backgroundColor "#374151"
+ color "#f3f4f6"
+ ".task-meta-id" ? do
+ backgroundColor "#374151"
+ color "#e5e7eb"
+ ".task-meta-secondary" ? color "#9ca3af"
+ ".meta-sep" ? color "#4b5563"
+ ".task-meta-label" ? color "#9ca3af"
+ ".detail-section" ? borderTopColor "#374151"
+ ".description" ? do
+ backgroundColor "#374151"
+ color "#e5e7eb"
+ ".badge-open" ? do
+ backgroundColor "#78350f"
+ color "#fcd34d"
+ ".badge-inprogress" ? do
+ backgroundColor "#1e3a8a"
+ color "#93c5fd"
+ ".badge-review" ? do
+ backgroundColor "#4c1d95"
+ color "#c4b5fd"
+ ".badge-approved" ? do
+ backgroundColor "#164e63"
+ color "#67e8f9"
+ ".badge-done" ? do
+ backgroundColor "#064e3b"
+ color "#6ee7b7"
+ ".badge-needshelp" ? do
+ backgroundColor "#78350f"
+ color "#fcd34d"
+ ".badge-p0" ? do
+ backgroundColor "#7f1d1d"
+ color "#fca5a5"
+ ".badge-p1" ? do
+ backgroundColor "#78350f"
+ color "#fcd34d"
+ ".badge-p2" ? do
+ backgroundColor "#1e3a8a"
+ color "#93c5fd"
+ ".badge-p3" ? do
+ backgroundColor "#374151"
+ color "#d1d5db"
+ ".badge-p4" ? do
+ backgroundColor "#1f2937"
+ color "#9ca3af"
+ ".blocking-impact" ? do
+ backgroundColor "#374151"
+ color "#9ca3af"
+ ".priority-dropdown-menu" ? do
+ backgroundColor "#1f2937"
+ Stylesheet.key "box-shadow" ("0 2px 8px rgba(0,0,0,0.3)" :: Text)
+ ".action-btn" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#f3f4f6"
+ ".action-btn" # hover ? backgroundColor "#4b5563"
+ ".filter-select" <> ".filter-input" <> ".status-select" <> ".reject-notes" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#f3f4f6"
+ ".stats-section" <> ".summary-section" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+
+ (".stat-card.badge-open" |> ".stat-count") ? color "#fbbf24"
+ (".stat-card.badge-inprogress" |> ".stat-count") ? color "#60a5fa"
+ (".stat-card.badge-review" |> ".stat-count") ? color "#a78bfa"
+ (".stat-card.badge-approved" |> ".stat-count") ? color "#22d3ee"
+ (".stat-card.badge-done" |> ".stat-count") ? color "#34d399"
+ (".stat-card.badge-neutral" |> ".stat-count") ? color "#9ca3af"
+
+ ".progress-bar" ? backgroundColor "#374151"
+ ".progress-fill" ? backgroundColor "#60a5fa"
+ ".multi-progress-bar" ? backgroundColor "#374151"
+ ".progress-legend" ? color "#9ca3af"
+ ".activity-section" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".activity-timeline" # before ? backgroundColor "#374151"
+ ".activity-icon" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".activity-time" ? color "#9ca3af"
+ ".activity-message" ? color "#d1d5db"
+ (".activity-metadata" |> "summary") ? color "#9ca3af"
+ ".metadata-json" ? backgroundColor "#374151"
+ ".execution-section" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+
+ ".metric-label" ? color "#9ca3af"
+ ".metric-value" ? color "#d1d5db"
+ ".metric-card" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ (".metric-card" |> ".metric-value") ? color "#f3f4f6"
+ (".metric-card" |> ".metric-label") ? color "#9ca3af"
+ ".amp-link" ? color "#60a5fa"
+ ".amp-thread-btn" ? do
+ backgroundColor "#8b5cf6"
+ ".amp-thread-btn" # hover ? backgroundColor "#7c3aed"
+ ".markdown-content" ? color "#d1d5db"
+ ".commit-item" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ ".commit-hash" ? do
+ backgroundColor "#4b5563"
+ color "#60a5fa"
+ ".commit-summary" ? color "#d1d5db"
+ ".commit-meta" ? color "#9ca3af"
+ ".md-h1" ? borderBottomColor "#374151"
+ ".md-code" ? do
+ backgroundColor "#1e1e1e"
+ color "#d4d4d4"
+ borderColor "#374151"
+ ".md-inline-code" ? do
+ backgroundColor "#374151"
+ color "#f3f4f6"
+ ".edit-description" ? borderTopColor "#374151"
+ (".edit-description" |> "summary") ? color "#60a5fa"
+ ".edit-link" ? color "#60a5fa"
+ "button.cancel-link" ? do
+ color "#f87171"
+ backgroundColor transparent
+ border (px 0) solid transparent
+ ".description-textarea" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#f3f4f6"
+ ".fact-create-form" ? do
+ backgroundColor "#1f2937"
+ borderColor "#374151"
+ ".time-filter-btn" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#d1d5db"
+ ".time-filter-btn" # hover ? do
+ backgroundColor "#4b5563"
+ borderColor "#6b7280"
+ ".time-filter-btn.active" ? do
+ backgroundColor "#3b82f6"
+ borderColor "#3b82f6"
+ color white
+ ".time-filter-btn.active" # hover ? do
+ backgroundColor "#2563eb"
+ borderColor "#2563eb"
+ ".sort-label" ? color "#9ca3af"
+ ".sort-dropdown-btn" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#d1d5db"
+ ".sort-dropdown-btn" # hover ? do
+ backgroundColor "#4b5563"
+ borderColor "#6b7280"
+ ".sort-dropdown-item.active" ? do
+ backgroundColor "#1e3a5f"
+ ".comment-card" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ ".comment-text" ? color "#d1d5db"
+ ".author-human" ? do
+ backgroundColor "#1e3a8a"
+ color "#93c5fd"
+ ".author-junior" ? do
+ backgroundColor "#064e3b"
+ color "#6ee7b7"
+ ".comment-time" ? color "#9ca3af"
+ ".comment-textarea" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#f3f4f6"
+ ".form-input" <> ".form-textarea" ? do
+ backgroundColor "#374151"
+ borderColor "#4b5563"
+ color "#f3f4f6"
+ (".form-group" |> label) ? color "#d1d5db"
+ ".danger-zone" ? do
+ backgroundColor "#450a0a"
+ borderColor "#991b1b"
+ (".danger-zone" |> h2) ? color "#f87171"
+ ".retry-banner-warning" ? do
+ backgroundColor "#451a03"
+ borderColor "#b45309"
+ ".retry-banner-critical" ? do
+ backgroundColor "#450a0a"
+ borderColor "#dc2626"
+ ".retry-attempt" ? color "#d1d5db"
+ ".retry-banner-details" ? color "#d1d5db"
+ ".retry-value" ? color "#9ca3af"
+ ".retry-commit" ? backgroundColor "#374151"
+ ".event-bubble" ? backgroundColor "#374151"
+ ".comment-bubble" ? do
+ backgroundColor "#374151"
+ color "#d1d5db"
+ ".thought-bubble" ? do
+ backgroundColor "#292524"
+ color "#a8a29e"
+ borderRadius (px 2) (px 2) (px 2) (px 2)
+ ".event-label" ? color "#d1d5db"
+ ".tool-bash-cmd" ? do
+ backgroundColor "#292524"
+ color "#a8a29e"
+ ".tool-label" ? color "#9ca3af"
+ ".tool-path" ? color "#60a5fa"
+ ".tool-pattern" ? do
+ backgroundColor "#3b2f5e"
+ color "#c4b5fd"
+ ".output-collapsible" |> "summary" ? color "#60a5fa"
+ ".timeline-tool-call" |> "summary" # before ? color "#9ca3af"
+ ".line-count" ? do
+ backgroundColor "#374151"
+ color "#9ca3af"
+ ".event-error" ? do
+ backgroundColor "#450a0a"
+ borderColor "#dc2626"
+ ".event-error" |> ".event-label" ? color "#f87171"
+ ".error-message" ? color "#f87171"
+ ".timeline-error" |> ".event-label" ? color "#fca5a5"
+ ".timeline-guardrail" |> ".event-label" ? color "#fbbf24"
+ ".timeline-guardrail" ? do
+ backgroundColor "#451a03"
+ borderColor "#f59e0b"
+ ".timeline-guardrail" |> ".guardrail-content" ? color "#fcd34d"
+ ".formatted-json" ? do
+ backgroundColor "#1e1e1e"
+ color "#d4d4d4"
+ -- Responsive dark mode: dropdown content needs background on mobile
+ query Media.screen [Media.maxWidth (px 600)] <| do
+ ".navbar-dropdown-content" ? do
+ backgroundColor "#1f2937"
+ ".navbar-dropdown-item" # hover ? do
+ backgroundColor "#374151"
+
+prefersDark :: Stylesheet.Feature
+prefersDark =
+ Stylesheet.Feature "prefers-color-scheme" (Just (Clay.value ("dark" :: Text)))
+
+statusBadgeClass :: Text -> Text
+statusBadgeClass status = case status of
+ "Open" -> "badge badge-open"
+ "InProgress" -> "badge badge-inprogress"
+ "Review" -> "badge badge-review"
+ "Approved" -> "badge badge-approved"
+ "Done" -> "badge badge-done"
+ _ -> "badge"
+
+priorityBadgeClass :: Text -> Text
+priorityBadgeClass priority = case priority of
+ "P0" -> "badge badge-p0"
+ "P1" -> "badge badge-p1"
+ "P2" -> "badge badge-p2"
+ "P3" -> "badge badge-p3"
+ "P4" -> "badge badge-p4"
+ _ -> "badge"
diff --git a/Omni/Jr/Web/Types.hs b/Omni/Jr/Web/Types.hs
new file mode 100644
index 0000000..93c8d85
--- /dev/null
+++ b/Omni/Jr/Web/Types.hs
@@ -0,0 +1,365 @@
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep servant-server
+-- : dep lucid
+-- : dep http-api-data
+-- : dep aeson
+module Omni.Jr.Web.Types
+ ( TaskFilters (..),
+ TimeRange (..),
+ SortOrder (..),
+ parseSortOrder,
+ sortOrderToParam,
+ sortOrderLabel,
+ sortTasks,
+ parseTimeRange,
+ timeRangeToParam,
+ getTimeRangeStart,
+ startOfDay,
+ startOfWeek,
+ addDays,
+ fromGregorian,
+ daysSinceEpoch,
+ startOfMonth,
+ computeMetricsFromActivities,
+ HomePage (..),
+ ReadyQueuePage (..),
+ BlockedPage (..),
+ InterventionPage (..),
+ TaskListPage (..),
+ TaskDetailPage (..),
+ GitCommit (..),
+ TaskReviewPage (..),
+ ReviewInfo (..),
+ TaskDiffPage (..),
+ StatsPage (..),
+ KBPage (..),
+ FactDetailPage (..),
+ EpicsPage (..),
+ RecentActivityNewPartial (..),
+ RecentActivityMorePartial (..),
+ ReadyCountPartial (..),
+ StatusBadgePartial (..),
+ PriorityBadgePartial (..),
+ ComplexityBadgePartial (..),
+ TaskListPartial (..),
+ TaskMetricsPartial (..),
+ AgentEventsPartial (..),
+ DescriptionViewPartial (..),
+ DescriptionEditPartial (..),
+ FactEditForm (..),
+ FactCreateForm (..),
+ RejectForm (..),
+ StatusForm (..),
+ PriorityForm (..),
+ ComplexityForm (..),
+ DescriptionForm (..),
+ NotesForm (..),
+ CommentForm (..),
+ Breadcrumb (..),
+ Breadcrumbs,
+ CSS,
+ SSE,
+ )
+where
+
+import Alpha
+import qualified Data.ByteString as BS
+import qualified Data.ByteString.Lazy as LBS
+import qualified Data.List as List
+import qualified Data.Text as Text
+import qualified Data.Text.Lazy as LazyText
+import qualified Data.Text.Lazy.Encoding as LazyText
+import Data.Time (Day, DayOfWeek (..), UTCTime (..), dayOfWeek, diffUTCTime, toGregorian)
+import qualified Omni.Task.Core as TaskCore
+import Servant (Accept (..), MimeRender (..))
+import Web.FormUrlEncoded (FromForm (..), lookupUnique, parseUnique)
+
+data TaskFilters = TaskFilters
+ { filterStatus :: Maybe TaskCore.Status,
+ filterPriority :: Maybe TaskCore.Priority,
+ filterNamespace :: Maybe Text,
+ filterType :: Maybe TaskCore.TaskType
+ }
+ deriving (Show, Eq)
+
+data TimeRange = Today | Week | Month | AllTime
+ deriving (Show, Eq)
+
+data SortOrder
+ = SortNewest
+ | SortOldest
+ | SortUpdated
+ | SortPriorityHigh
+ | SortPriorityLow
+ deriving (Show, Eq)
+
+parseSortOrder :: Maybe Text -> SortOrder
+parseSortOrder (Just "oldest") = SortOldest
+parseSortOrder (Just "updated") = SortUpdated
+parseSortOrder (Just "priority-high") = SortPriorityHigh
+parseSortOrder (Just "priority-low") = SortPriorityLow
+parseSortOrder _ = SortNewest
+
+sortOrderToParam :: SortOrder -> Text
+sortOrderToParam SortNewest = "newest"
+sortOrderToParam SortOldest = "oldest"
+sortOrderToParam SortUpdated = "updated"
+sortOrderToParam SortPriorityHigh = "priority-high"
+sortOrderToParam SortPriorityLow = "priority-low"
+
+sortOrderLabel :: SortOrder -> Text
+sortOrderLabel SortNewest = "Newest First"
+sortOrderLabel SortOldest = "Oldest First"
+sortOrderLabel SortUpdated = "Recently Updated"
+sortOrderLabel SortPriorityHigh = "Priority (High to Low)"
+sortOrderLabel SortPriorityLow = "Priority (Low to High)"
+
+sortTasks :: SortOrder -> [TaskCore.Task] -> [TaskCore.Task]
+sortTasks SortNewest = List.sortBy (comparing (Down <. TaskCore.taskCreatedAt))
+sortTasks SortOldest = List.sortBy (comparing TaskCore.taskCreatedAt)
+sortTasks SortUpdated = List.sortBy (comparing (Down <. TaskCore.taskUpdatedAt))
+sortTasks SortPriorityHigh = List.sortBy (comparing TaskCore.taskPriority)
+sortTasks SortPriorityLow = List.sortBy (comparing (Down <. TaskCore.taskPriority))
+
+parseTimeRange :: Maybe Text -> TimeRange
+parseTimeRange (Just "today") = Today
+parseTimeRange (Just "week") = Week
+parseTimeRange (Just "month") = Month
+parseTimeRange _ = AllTime
+
+timeRangeToParam :: TimeRange -> Text
+timeRangeToParam Today = "today"
+timeRangeToParam Week = "week"
+timeRangeToParam Month = "month"
+timeRangeToParam AllTime = "all"
+
+getTimeRangeStart :: TimeRange -> UTCTime -> Maybe UTCTime
+getTimeRangeStart AllTime _ = Nothing
+getTimeRangeStart Today now = Just (startOfDay now)
+getTimeRangeStart Week now = Just (startOfWeek now)
+getTimeRangeStart Month now = Just (startOfMonth now)
+
+startOfDay :: UTCTime -> UTCTime
+startOfDay t = UTCTime (utctDay t) 0
+
+startOfWeek :: UTCTime -> UTCTime
+startOfWeek t =
+ let day = utctDay t
+ dow = dayOfWeek day
+ daysBack = case dow of
+ Monday -> 0
+ Tuesday -> 1
+ Wednesday -> 2
+ Thursday -> 3
+ Friday -> 4
+ Saturday -> 5
+ Sunday -> 6
+ in UTCTime (addDays (negate daysBack) day) 0
+
+addDays :: Integer -> Day -> Day
+addDays n d =
+ let (y, m, dayNum) = toGregorian d
+ in fromGregorian y m (dayNum + fromInteger n)
+
+fromGregorian :: Integer -> Int -> Int -> Day
+fromGregorian y m d = toEnum (fromInteger (daysSinceEpoch y m d))
+
+daysSinceEpoch :: Integer -> Int -> Int -> Integer
+daysSinceEpoch y m d =
+ let a = (14 - m) `div` 12
+ y' = y + 4800 - toInteger a
+ m' = m + 12 * a - 3
+ jdn = d + (153 * m' + 2) `div` 5 + 365 * fromInteger y' + fromInteger y' `div` 4 - fromInteger y' `div` 100 + fromInteger y' `div` 400 - 32045
+ in toInteger jdn - 2440588
+
+startOfMonth :: UTCTime -> UTCTime
+startOfMonth t =
+ let day = utctDay t
+ (y, m, _) = toGregorian day
+ in UTCTime (fromGregorian y m 1) 0
+
+computeMetricsFromActivities :: [TaskCore.Task] -> [TaskCore.TaskActivity] -> TaskCore.AggregatedMetrics
+computeMetricsFromActivities tasks activities =
+ let completedCount = length [t | t <- tasks, TaskCore.taskStatus t == TaskCore.Done]
+ totalCost = sum [c | act <- activities, Just c <- [TaskCore.activityCostCents act]]
+ totalTokens = sum [t | act <- activities, Just t <- [TaskCore.activityTokensUsed act]]
+ totalDuration = sum [calcDuration act | act <- activities]
+ in TaskCore.AggregatedMetrics
+ { TaskCore.aggTotalCostCents = totalCost,
+ TaskCore.aggTotalDurationSeconds = totalDuration,
+ TaskCore.aggCompletedTasks = completedCount,
+ TaskCore.aggTotalTokens = totalTokens
+ }
+ where
+ calcDuration act = case (TaskCore.activityStartedAt act, TaskCore.activityCompletedAt act) of
+ (Just start, Just end) -> floor (diffUTCTime end start)
+ _ -> 0
+
+data CSS
+
+instance Accept CSS where
+ contentType _ = "text/css"
+
+instance MimeRender CSS LazyText.Text where
+ mimeRender _ = LazyText.encodeUtf8
+
+data SSE
+
+instance Accept SSE where
+ contentType _ = "text/event-stream"
+
+instance MimeRender SSE BS.ByteString where
+ mimeRender _ = LBS.fromStrict
+
+data HomePage = HomePage TaskCore.TaskStats [TaskCore.Task] [TaskCore.Task] Bool TaskCore.AggregatedMetrics TimeRange UTCTime
+
+data ReadyQueuePage = ReadyQueuePage [TaskCore.Task] SortOrder UTCTime
+
+data BlockedPage = BlockedPage [(TaskCore.Task, Int)] SortOrder UTCTime
+
+data InterventionPage = InterventionPage TaskCore.HumanActionItems SortOrder UTCTime
+
+data TaskListPage = TaskListPage [TaskCore.Task] TaskFilters SortOrder UTCTime
+
+data TaskDetailPage
+ = TaskDetailFound TaskCore.Task [TaskCore.Task] [TaskCore.TaskActivity] (Maybe TaskCore.RetryContext) [GitCommit] (Maybe TaskCore.AggregatedMetrics) [TaskCore.StoredEvent] UTCTime
+ | TaskDetailNotFound Text
+
+data GitCommit = GitCommit
+ { commitHash :: Text,
+ commitShortHash :: Text,
+ commitSummary :: Text,
+ commitAuthor :: Text,
+ commitRelativeDate :: Text,
+ commitFilesChanged :: Int
+ }
+ deriving (Show, Eq)
+
+data TaskReviewPage
+ = ReviewPageFound TaskCore.Task ReviewInfo
+ | ReviewPageNotFound Text
+
+data ReviewInfo
+ = ReviewNoCommit
+ | ReviewMergeConflict Text [Text]
+ | ReviewReady Text Text
+
+data TaskDiffPage
+ = DiffPageFound Text Text Text
+ | DiffPageNotFound Text Text
+
+data StatsPage = StatsPage TaskCore.TaskStats (Maybe Text)
+
+newtype KBPage = KBPage [TaskCore.Fact]
+
+data FactDetailPage
+ = FactDetailFound TaskCore.Fact UTCTime
+ | FactDetailNotFound Int
+
+data FactEditForm = FactEditForm Text Text Text
+
+instance FromForm FactEditForm where
+ fromForm form = do
+ content <- parseUnique "content" form
+ let files = fromRight "" (lookupUnique "files" form)
+ let confidence = fromRight "0.8" (lookupUnique "confidence" form)
+ Right (FactEditForm content files confidence)
+
+data FactCreateForm = FactCreateForm Text Text Text Text
+
+instance FromForm FactCreateForm where
+ fromForm form = do
+ project <- parseUnique "project" form
+ content <- parseUnique "content" form
+ let files = fromRight "" (lookupUnique "files" form)
+ let confidence = fromRight "0.8" (lookupUnique "confidence" form)
+ Right (FactCreateForm project content files confidence)
+
+data EpicsPage = EpicsPage [TaskCore.Task] [TaskCore.Task] SortOrder
+
+data RecentActivityNewPartial = RecentActivityNewPartial [TaskCore.Task] (Maybe Int)
+
+data RecentActivityMorePartial = RecentActivityMorePartial [TaskCore.Task] Int Bool
+
+newtype ReadyCountPartial = ReadyCountPartial Int
+
+data StatusBadgePartial = StatusBadgePartial TaskCore.Status Text
+
+data PriorityBadgePartial = PriorityBadgePartial TaskCore.Priority Text
+
+data ComplexityBadgePartial = ComplexityBadgePartial (Maybe Int) Text
+
+newtype TaskListPartial = TaskListPartial [TaskCore.Task]
+
+data TaskMetricsPartial = TaskMetricsPartial Text [TaskCore.TaskActivity] (Maybe TaskCore.RetryContext) UTCTime
+
+data AgentEventsPartial = AgentEventsPartial Text [TaskCore.StoredEvent] Bool UTCTime
+
+data DescriptionViewPartial = DescriptionViewPartial Text Text Bool
+
+data DescriptionEditPartial = DescriptionEditPartial Text Text Bool
+
+newtype RejectForm = RejectForm (Maybe Text)
+
+instance FromForm RejectForm where
+ fromForm form = Right (RejectForm (either (const Nothing) Just (lookupUnique "notes" form)))
+
+newtype StatusForm = StatusForm TaskCore.Status
+
+instance FromForm StatusForm where
+ fromForm form = do
+ statusText <- parseUnique "status" form
+ case readMaybe (Text.unpack statusText) of
+ Just s -> Right (StatusForm s)
+ Nothing -> Left "Invalid status"
+
+newtype PriorityForm = PriorityForm TaskCore.Priority
+
+instance FromForm PriorityForm where
+ fromForm form = do
+ priorityText <- parseUnique "priority" form
+ case readMaybe (Text.unpack priorityText) of
+ Just p -> Right (PriorityForm p)
+ Nothing -> Left "Invalid priority"
+
+newtype ComplexityForm = ComplexityForm (Maybe Int)
+
+instance FromForm ComplexityForm where
+ fromForm form = do
+ complexityText <- parseUnique "complexity" form
+ if complexityText == "none"
+ then Right (ComplexityForm Nothing)
+ else case readMaybe (Text.unpack complexityText) of
+ Just c | c >= 1 && c <= 5 -> Right (ComplexityForm (Just c))
+ _ -> Left "Invalid complexity"
+
+newtype DescriptionForm = DescriptionForm Text
+
+instance FromForm DescriptionForm where
+ fromForm form = do
+ desc <- parseUnique "description" form
+ Right (DescriptionForm desc)
+
+newtype NotesForm = NotesForm Text
+
+instance FromForm NotesForm where
+ fromForm form = do
+ notes <- parseUnique "notes" form
+ Right (NotesForm notes)
+
+newtype CommentForm = CommentForm Text
+
+instance FromForm CommentForm where
+ fromForm form = do
+ commentText <- parseUnique "comment" form
+ Right (CommentForm commentText)
+
+data Breadcrumb = Breadcrumb
+ { breadcrumbLabel :: Text,
+ breadcrumbUrl :: Maybe Text
+ }
+
+type Breadcrumbs = [Breadcrumb]
diff --git a/Omni/Keys/Ava.pub b/Omni/Keys/Ava.pub
new file mode 100644
index 0000000..77c314c
--- /dev/null
+++ b/Omni/Keys/Ava.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOOv/fKFUS4exJtmnWqi5Taa3W5jTxqTmAZBtvisKMKH ava@beryl.bensima.com
diff --git a/Omni/Lint.hs b/Omni/Lint.hs
index c6b6878..7a0a888 100755
--- a/Omni/Lint.hs
+++ b/Omni/Lint.hs
@@ -260,7 +260,36 @@ data Result
| NoOp Namespace.Ext
run :: Mode -> Map Namespace.Ext [Namespace] -> IO [Result]
-run mode nsmap = nsmap |> Map.assocs |> traverse (runOne mode) /> concat
+run mode nsmap = do
+ -- Run large file check first (warns but doesn't fail)
+ let allNamespaces = concat (Map.elems nsmap)
+ largeFileResults <- checkLargeFiles allNamespaces
+ traverse_ printResult largeFileResults
+ -- Then run per-extension linters
+ lintResults <- nsmap |> Map.assocs |> traverse (runOne mode) /> concat
+ pure (largeFileResults ++ lintResults)
+
+-- | Check for files exceeding the line limit
+-- Large files cause agent token bloat and edit_file failures
+checkLargeFiles :: [Namespace] -> IO [Result]
+checkLargeFiles ns's = catMaybes </ traverse checkOne ns's
+ where
+ maxLines = 1000 :: Int
+ checkOne ns = do
+ let path = Namespace.toPath ns
+ contents <- readFile path
+ let lineCount = length (lines contents)
+ if lineCount > maxLines
+ then do
+ let msg =
+ Text.pack path
+ <> " has "
+ <> tshow lineCount
+ <> " lines (max "
+ <> tshow maxLines
+ <> "), consider splitting"
+ pure (Just (Warn msg))
+ else pure Nothing
runOne :: Mode -> (Ext, [Namespace]) -> IO [Result]
runOne mode (ext, ns's) = results +> traverse printResult
diff --git a/Omni/Llamacpp.py b/Omni/Llamacpp.py
index eac08fd..c7e9078 100755
--- a/Omni/Llamacpp.py
+++ b/Omni/Llamacpp.py
@@ -9,7 +9,10 @@ sure llama-cpp still works in case I need/want to switch at some point.
# : out llamacpp-test
# : run llama-cpp
-
+import logging
+import Omni.App as App
+import Omni.Log as Log
+import Omni.Test as Test
import os
import sys
import unittest
@@ -23,18 +26,11 @@ class TestLlamaCpp(unittest.TestCase):
self.assertIn("llama-cpp", os.environ.get("PATH", ""))
-def test() -> None:
- """Run this module's test suite."""
- suite = unittest.TestSuite()
- suite.addTests(
- unittest.defaultTestLoader.loadTestsFromTestCase(TestLlamaCpp),
- )
- unittest.TextTestRunner().run(suite)
-
-
def main() -> None:
"""Entrypoint."""
if sys.argv[1] == "test":
- test()
+ logger = logging.getLogger(__name__)
+ Log.setup(logger)
+ Test.run(App.Area.Test, [TestLlamaCpp])
else:
sys.exit(0)
diff --git a/Omni/Log.hs b/Omni/Log.hs
index 91fcb55..c42d5e8 100644
--- a/Omni/Log.hs
+++ b/Omni/Log.hs
@@ -15,6 +15,12 @@
-- * often use `br` after `warn`, unless its really unimportant
--
-- * labels should be roughly hierarchical from general->specific
+--
+-- Future improvements to consider:
+-- * Add timestamps (set via LOG_TIMESTAMPS=1 env var)
+-- * Add log level filtering (set via LOG_LEVEL=warn to suppress info)
+-- * Add structured JSON output (set via LOG_FORMAT=json for machine parsing)
+-- * Add a `debug` level below `info` for verbose debugging
module Omni.Log
( Lvl (..),
good,
@@ -22,6 +28,7 @@ module Omni.Log
info,
warn,
fail,
+ debug,
wipe,
-- * Debugging
@@ -50,7 +57,8 @@ import qualified System.Environment as Env
import qualified System.IO as IO
import System.IO.Unsafe (unsafePerformIO)
-data Lvl = Good | Pass | Info | Warn | Fail | Mark
+data Lvl = Debug | Good | Pass | Info | Warn | Fail | Mark
+ deriving (Eq, Ord)
-- | Get the environment. This should probably return 'Omni.App.Area' instead of
-- 'String', but I don't want to depend on everything in 'Omni.App', so some kind
@@ -60,19 +68,36 @@ area =
Env.lookupEnv "AREA"
/> maybe "Test" identity
+-- | Get the minimum log level from LOG_LEVEL env var (default: Info)
+-- Set LOG_LEVEL=debug to see debug messages, LOG_LEVEL=warn to suppress info
+minLogLevel :: Lvl
+minLogLevel =
+ unsafePerformIO <| do
+ Env.lookupEnv "LOG_LEVEL" /> \case
+ Just "debug" -> Debug
+ Just "info" -> Info
+ Just "warn" -> Warn
+ Just "fail" -> Fail
+ _ -> Info
+{-# NOINLINE minLogLevel #-}
+
msg :: Lvl -> [Text] -> IO ()
-msg lvl labels =
- area +> \case
- "Live" -> putDumb
- _ ->
- Env.getEnv "TERM" +> \case
- "dumb" -> putDumb
- _ -> Rainbow.hPutChunks IO.stderr [fore color <| clear <> chunk txt <> "\r"]
+msg lvl labels
+ | lvl < minLogLevel = pure () -- Skip messages below minimum level
+ | otherwise =
+ area +> \case
+ "Live" -> putDumb
+ _ ->
+ Env.lookupEnv "TERM" +> \case
+ Just "dumb" -> putDumb
+ Nothing -> putDumb
+ _ -> Rainbow.hPutChunks IO.stderr [fore color <| clear <> chunk txt <> "\r"]
where
-- For systemd-journal, emacs *compilation* buffers, etc.
putDumb = putStr <| txt <> "\n"
txt = fmt (label : labels)
(color, label) = case lvl of
+ Debug -> (white, "debg")
Good -> (green, "good")
Pass -> (green, "pass")
Info -> (white, "info")
@@ -94,12 +119,13 @@ br = Rainbow.hPutChunks stderr ["\n"] >> IO.hFlush stderr
wipe :: IO ()
wipe = hPutStr stderr ("\r" :: Text) >> IO.hFlush stderr
-good, pass, info, warn, fail :: [Text] -> IO ()
+good, pass, info, warn, fail, debug :: [Text] -> IO ()
good = msg Good
pass = msg Pass
info = msg Info
warn = msg Warn
fail = msg Fail
+debug = msg Debug
-- | Like 'Debug.trace' but follows the patterns in this module
mark :: (Show a) => Text -> a -> a
diff --git a/Omni/Log.py b/Omni/Log.py
index 7e3fdd3..5b3a618 100644
--- a/Omni/Log.py
+++ b/Omni/Log.py
@@ -7,13 +7,13 @@ import typing
class LowerFormatter(logging.Formatter):
"""A logging formatter that formats logs how I like."""
- def format(self: "LowerFormatter", record: typing.Any) -> typing.Any:
+ def format(self: "LowerFormatter", record: logging.LogRecord) -> typing.Any:
"""Use the format I like for logging."""
record.levelname = record.levelname.lower()
- return super(logging.Formatter, self).format(record) # type: ignore[misc]
+ return super().format(record)
-def setup(level: int = logging.INFO) -> logging.Logger:
+def setup(logger: logging.Logger, level: int = logging.INFO) -> logging.Logger:
"""Run this in your `main()` function."""
logging.basicConfig(
level=level,
@@ -22,7 +22,6 @@ def setup(level: int = logging.INFO) -> logging.Logger:
logging.addLevelName(logging.DEBUG, "dbug")
logging.addLevelName(logging.ERROR, "fail")
logging.addLevelName(logging.INFO, "info")
- logger = logging.getLogger(__name__)
formatter = LowerFormatter()
handler = logging.StreamHandler()
handler.setFormatter(formatter)
@@ -32,5 +31,6 @@ def setup(level: int = logging.INFO) -> logging.Logger:
def main() -> None:
"""Entrypoint to test that this kinda works."""
- logger = setup()
+ logger = logging.getLogger(__name__)
+ setup(logger)
logger.debug("i am doing testing")
diff --git a/Omni/Log/Concurrent.hs b/Omni/Log/Concurrent.hs
new file mode 100644
index 0000000..77131ef
--- /dev/null
+++ b/Omni/Log/Concurrent.hs
@@ -0,0 +1,243 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Concurrent logging with multi-line output support
+module Omni.Log.Concurrent
+ ( LineManager,
+ BuildState (..),
+ withLineManager,
+ initializeLines,
+ updateLine,
+ updateLineState,
+ )
+where
+
+import Alpha
+import Data.IORef (IORef, newIORef, readIORef, writeIORef)
+import qualified Data.Map as Map
+import qualified Data.Text as Text
+import Omni.Log.Terminal (OutputMode (..), TerminalInfo (..), detectTerminal, truncateToWidth)
+import Omni.Namespace (Namespace)
+import qualified Omni.Namespace as Namespace
+import Rainbow (chunk, fore, green, red, white, yellow)
+import qualified Rainbow
+import qualified System.Console.ANSI as ANSI
+import qualified System.IO as IO
+import System.IO.Unsafe (unsafePerformIO)
+
+data BuildState = Analyzing | Pending | Building | Success | Failed | Skipped
+ deriving (Eq, Show)
+
+data LineManager = LineManager
+ { lmNamespaces :: [Namespace],
+ lmTermInfo :: TerminalInfo
+ }
+
+{-# NOINLINE currentLineManager #-}
+currentLineManager :: IORef (Maybe LineManager)
+currentLineManager = unsafePerformIO (newIORef Nothing)
+
+{-# NOINLINE namespaceLines #-}
+namespaceLines :: IORef (Map Namespace Int)
+namespaceLines = unsafePerformIO (newIORef Map.empty)
+
+-- | Tracks if the last output was transient (no newline printed)
+-- When True, cleanup should not add a newline since next manager will overwrite
+{-# NOINLINE lastOutputTransient #-}
+lastOutputTransient :: IORef Bool
+lastOutputTransient = unsafePerformIO (newIORef False)
+
+-- | Tracks if lines have been initialized (prevents duplicate initialization)
+{-# NOINLINE linesInitialized #-}
+linesInitialized :: IORef Bool
+linesInitialized = unsafePerformIO (newIORef False)
+
+-- | Global lock for all terminal operations
+-- ANSI terminal library (ncurses) is not thread-safe, so we must serialize all calls
+-- to prevent segfaults during concurrent builds
+{-# NOINLINE terminalLock #-}
+terminalLock :: MVar ()
+terminalLock = unsafePerformIO (newMVar ())
+
+withLineManager :: [Namespace] -> (LineManager -> IO a) -> IO a
+withLineManager nss action = do
+ -- Check if a manager is already active (reentrant call)
+ existingMgr <- readIORef currentLineManager
+ maybe createNewManager action existingMgr
+ where
+ createNewManager = do
+ termInfo <- detectTerminal
+
+ case tiMode termInfo of
+ SingleLine -> do
+ -- Single-line mode: no reservations, updates in place
+ let mgr = LineManager {lmNamespaces = nss, lmTermInfo = termInfo}
+ writeIORef currentLineManager (Just mgr)
+ writeIORef lastOutputTransient False
+ writeIORef linesInitialized False
+ result <- action mgr
+ -- Only print final newline if last output wasn't transient
+ -- (transient outputs expect to be overwritten by next manager)
+ wasTransient <- readIORef lastOutputTransient
+ unless wasTransient (IO.hPutStrLn IO.stderr "")
+ writeIORef currentLineManager Nothing
+ writeIORef namespaceLines Map.empty
+ writeIORef linesInitialized False
+ pure result
+ MultiLine -> do
+ -- Multi-line mode: reserve lines for each namespace
+ let numLines = min (length nss) (tiHeight termInfo - 2)
+ replicateM_ numLines (IO.hPutStrLn IO.stderr "")
+ withMVar terminalLock <| \_ -> ANSI.hCursorUp IO.stderr numLines
+
+ let mgr = LineManager {lmNamespaces = nss, lmTermInfo = termInfo}
+ writeIORef currentLineManager (Just mgr)
+ writeIORef linesInitialized False
+
+ -- Initialize the namespace -> line mapping
+ writeIORef namespaceLines (Map.fromList <| zip nss [0 ..])
+
+ result <- action mgr
+
+ IO.hPutStrLn IO.stderr ""
+ writeIORef currentLineManager Nothing
+ writeIORef namespaceLines Map.empty
+ writeIORef linesInitialized False
+ pure result
+
+-- | Initialize all lines with pending status
+-- Only initializes once per manager session (prevents duplicate output on reentrant calls)
+initializeLines :: LineManager -> IO ()
+initializeLines LineManager {..} = do
+ alreadyInit <- readIORef linesInitialized
+ unless alreadyInit
+ <| case (tiMode lmTermInfo, tiSupportsANSI lmTermInfo) of
+ (_, False) -> pure () -- No ANSI support, skip initialization
+ (SingleLine, _) -> writeIORef linesInitialized True -- Mark as done even if no-op
+ (MultiLine, _) -> do
+ writeIORef linesInitialized True
+ nsMap <- readIORef namespaceLines
+ forM_ (Map.toList nsMap) <| \(ns, _) ->
+ withMVar terminalLock <| \_ -> do
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ let nsText = Text.pack (Namespace.toPath ns)
+ let msg = "[.] " <> nsText -- Pending state before analysis starts
+ let truncated = truncateToWidth (tiWidth lmTermInfo - 1) msg
+ IO.hPutStrLn IO.stderr (Text.unpack truncated)
+ IO.hFlush IO.stderr
+
+updateLine :: Namespace -> Text -> IO ()
+updateLine ns output = do
+ mMgr <- readIORef currentLineManager
+ case mMgr of
+ Nothing -> do
+ IO.hPutStr IO.stderr (Text.unpack <| output <> "\r")
+ IO.hFlush IO.stderr
+ Just LineManager {..} ->
+ case tiMode lmTermInfo of
+ SingleLine ->
+ -- Single line: update in place
+ -- Lock all terminal output to prevent interleaved writes
+ withMVar terminalLock <| \_ -> do
+ let nsText = Text.pack (Namespace.toPath ns)
+ let msg =
+ if Text.null output
+ then "[~] " <> nsText
+ else "[~] " <> nsText <> ": " <> output
+ let truncated = truncateToWidth (tiWidth lmTermInfo - 1) msg
+ -- Clear line and write
+ IO.hPutStr IO.stderr "\r"
+ IO.hPutStr IO.stderr (Text.unpack truncated)
+ -- Pad to clear previous longer text
+ let padding = replicate (tiWidth lmTermInfo - Text.length truncated - 1) ' '
+ IO.hPutStr IO.stderr padding
+ IO.hPutStr IO.stderr "\r"
+ IO.hPutStr IO.stderr (Text.unpack truncated)
+ IO.hFlush IO.stderr
+ MultiLine ->
+ -- Multi-line: use reserved lines with truncation
+ -- Lock covers IORef read + all terminal operations to prevent races
+ withMVar terminalLock <| \_ -> do
+ nsMap <- readIORef namespaceLines
+ case Map.lookup ns nsMap of
+ Nothing -> pure ()
+ Just lineNum -> do
+ let numLines = length lmNamespaces
+ -- Move to the target line from bottom
+ ANSI.hCursorUp IO.stderr (numLines - lineNum)
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ let nsText = Text.pack (Namespace.toPath ns)
+ let msg =
+ if Text.null output
+ then "[~] " <> nsText
+ else "[~] " <> nsText <> ": " <> output
+ let truncated = truncateToWidth (tiWidth lmTermInfo - 1) msg
+ IO.hPutStr IO.stderr (Text.unpack truncated)
+ IO.hFlush IO.stderr
+ -- Move back to bottom
+ ANSI.hCursorDown IO.stderr (numLines - lineNum)
+
+updateLineState :: Namespace -> BuildState -> IO ()
+updateLineState ns buildState = do
+ mMgr <- readIORef currentLineManager
+ case mMgr of
+ Nothing -> pure ()
+ Just LineManager {..} ->
+ case tiMode lmTermInfo of
+ SingleLine ->
+ -- Single line: show completion, keep visible for success/failure
+ -- Lock all terminal output to prevent interleaved writes
+ withMVar terminalLock <| \_ -> do
+ let nsText = Text.pack (Namespace.toPath ns)
+ let (symbol, color) = case buildState of
+ Success -> ("✓", green)
+ Failed -> ("x", red)
+ Skipped -> ("_", yellow)
+ Analyzing -> ("+", white)
+ Pending -> (".", white)
+ Building -> ("~", white)
+ let msg = "[" <> symbol <> "] " <> nsText
+ let truncated = truncateToWidth (tiWidth lmTermInfo - 1) msg
+
+ IO.hPutStr IO.stderr "\r"
+ Rainbow.hPutChunks IO.stderr [fore color <| chunk truncated]
+ case buildState of
+ Success -> do
+ IO.hPutStrLn IO.stderr "" -- Keep successes visible
+ writeIORef lastOutputTransient False
+ Failed -> do
+ IO.hPutStrLn IO.stderr "" -- Keep failures visible
+ writeIORef lastOutputTransient False
+ Skipped -> do
+ IO.hPutStrLn IO.stderr "" -- Keep skipped visible
+ writeIORef lastOutputTransient False
+ _ -> writeIORef lastOutputTransient True -- Transient states overwrite
+ IO.hFlush IO.stderr
+ MultiLine ->
+ -- Multi-line: use reserved lines with truncation
+ -- Lock covers IORef read + all terminal operations to prevent races
+ withMVar terminalLock <| \_ -> do
+ nsMap <- readIORef namespaceLines
+ case Map.lookup ns nsMap of
+ Nothing -> pure ()
+ Just lineNum -> do
+ let numLines = length lmNamespaces
+ ANSI.hCursorUp IO.stderr (numLines - lineNum)
+ ANSI.hSetCursorColumn IO.stderr 0
+ ANSI.hClearLine IO.stderr
+ let nsText = Text.pack (Namespace.toPath ns)
+ let (symbol, colorFn) = case buildState of
+ Success -> ("✓", fore green)
+ Failed -> ("x", fore red)
+ Skipped -> ("_", fore yellow)
+ Analyzing -> ("+", identity)
+ Pending -> (".", identity)
+ Building -> ("~", identity)
+ let msg = "[" <> symbol <> "] " <> nsText
+ let truncated = truncateToWidth (tiWidth lmTermInfo - 1) msg
+ Rainbow.hPutChunks IO.stderr [colorFn <| chunk truncated]
+ IO.hFlush IO.stderr
+ ANSI.hCursorDown IO.stderr (numLines - lineNum)
diff --git a/Omni/Log/Terminal.hs b/Omni/Log/Terminal.hs
new file mode 100644
index 0000000..1a4c717
--- /dev/null
+++ b/Omni/Log/Terminal.hs
@@ -0,0 +1,75 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- | Terminal detection and output mode selection
+module Omni.Log.Terminal
+ ( TerminalInfo (..),
+ OutputMode (..),
+ detectTerminal,
+ truncateToWidth,
+ )
+where
+
+import Alpha
+import qualified Control.Exception as Exception
+import qualified Data.Text as Text
+import qualified System.Console.ANSI as ANSI
+import qualified System.Environment as Env
+
+data OutputMode
+ = MultiLine -- Wide terminals (≥80 cols) - reserved lines per namespace
+ | SingleLine -- Narrow terminals (<80 cols) - rotating single line
+ deriving (Eq, Show)
+
+data TerminalInfo = TerminalInfo
+ { tiWidth :: Int,
+ tiHeight :: Int,
+ tiMode :: OutputMode,
+ tiSupportsANSI :: Bool
+ }
+ deriving (Eq, Show)
+
+detectTerminal :: IO TerminalInfo
+detectTerminal = do
+ term <- Env.lookupEnv "TERM"
+ area <- Env.lookupEnv "AREA"
+ noColor <- Env.lookupEnv "NO_COLOR"
+
+ -- Check if we support ANSI
+ let supportsANSI = case (term, area, noColor) of
+ (_, _, Just _) -> False -- NO_COLOR set
+ (Just "dumb", _, _) -> False
+ (_, Just "Live", _) -> False -- production logs
+ (Nothing, _, _) -> False
+ _ -> True
+
+ -- Get terminal size, catching exceptions from stdin issues
+ -- When NO_COLOR is set or ANSI is not supported, skip terminal size detection
+ -- to avoid outputting escape codes
+ mSize <-
+ if supportsANSI
+ then Exception.catch ANSI.getTerminalSize <| \(_ :: Exception.IOException) -> pure Nothing
+ else pure Nothing -- Skip if no ANSI support
+ let (width, height) = case mSize of
+ Just (h, w) -> (w, h)
+ Nothing -> (80, 24) -- sensible default
+
+ -- Determine mode based on ANSI support
+ let mode
+ | not supportsANSI = SingleLine -- Fallback to single line for dumb terminals
+ | otherwise = MultiLine
+
+ pure
+ TerminalInfo
+ { tiWidth = width,
+ tiHeight = height,
+ tiMode = mode,
+ tiSupportsANSI = supportsANSI
+ }
+
+-- | Truncate text to fit width with ellipsis
+truncateToWidth :: Int -> Text -> Text
+truncateToWidth maxWidth text
+ | Text.length text <= maxWidth = text
+ | maxWidth <= 3 = Text.take maxWidth text
+ | otherwise = Text.take (maxWidth - 3) text <> "..."
diff --git a/Omni/Namespace.hs b/Omni/Namespace.hs
index 5884507..a0f8a8e 100644
--- a/Omni/Namespace.hs
+++ b/Omni/Namespace.hs
@@ -111,7 +111,10 @@ toHaskellModule :: Namespace -> String
toHaskellModule = toModule
fromHaskellModule :: String -> Namespace
-fromHaskellModule s = Namespace (List.splitOn "." s) Hs
+fromHaskellModule s =
+ let s' = if ".hs" `List.isSuffixOf` s then List.take (length s - 3) s else s
+ s'' = map (\c -> if c == '/' then '.' else c) s'
+ in Namespace (List.splitOn "." s'') Hs
toSchemeModule :: Namespace -> String
toSchemeModule = toModule
diff --git a/Omni/Os/Base.nix b/Omni/Os/Base.nix
index c18ca1d..a186772 100644
--- a/Omni/Os/Base.nix
+++ b/Omni/Os/Base.nix
@@ -5,7 +5,18 @@ let
in {
boot.tmp.cleanOnBoot = true;
networking.firewall.allowPing = true;
- nix.settings.substituters = ["https://cache.nixos.org"]; # "ssh://dev.simatime.com" ];
+ networking.firewall.allowedTCPPorts = [ports.et];
+ nix.settings.substituters = [
+ "https://cache.nixos.org"
+ "https://nix-community.cachix.org"
+ "s3://omni-nix-cache?profile=digitalocean&scheme=https&endpoint=nyc3.digitaloceanspaces.com"
+ ];
+ nix.settings.trusted-public-keys = [
+ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+ "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+ "omni-cache:vyAhEFT7D8si2T1SjKHcg6BpU37Qj5klMDRagfNHpUI="
+ ];
+ nix.settings.experimental-features = ["nix-command" "flakes"];
nix.gc.automatic = true;
nix.gc.dates = "Sunday 02:15";
nix.optimise.automatic = true;
@@ -15,11 +26,12 @@ in {
programs.ccache.enable = true;
programs.mosh.enable = true;
programs.mosh.withUtempter = true;
- security.acme.defaults.email = "ben@bsima.me";
security.acme.acceptTerms = true;
+ security.acme.defaults.email = "bsima@icloud.com"; # fallback to apple-hosted
security.sudo.wheelNeedsPassword = false;
services.clamav.daemon.enable = true; # security
services.clamav.updater.enable = true; # security
+ services.eternal-terminal.enable = true;
services.fail2ban.enable = true; # security
services.fail2ban.ignoreIP = [ports.bensIp]; # my home IP
services.fail2ban.maxretry = 10;
diff --git a/Omni/Repl.py b/Omni/Repl.py
index d7d2fb4..49b6c1e 100755
--- a/Omni/Repl.py
+++ b/Omni/Repl.py
@@ -20,8 +20,8 @@ additional files to load.
import importlib
import importlib.util
import inspect
+import logging
import mypy.api
-import Omni.Log as Log
import os
import pathlib
import pydoc
@@ -33,7 +33,7 @@ import types
import typing
import unittest
-LOG = Log.setup()
+LOG = logging.getLogger(__name__)
class ReplError(Exception):
@@ -246,7 +246,6 @@ def test() -> None:
def move() -> None:
"""Actual entrypoint."""
- Log.setup()
ns = sys.argv[1]
path = sys.argv[2]
editor = os.environ.get("EDITOR", "$EDITOR")
diff --git a/Omni/Sentry.sh b/Omni/Sentry.sh
index 5c9e0ac..0bad6d2 100755
--- a/Omni/Sentry.sh
+++ b/Omni/Sentry.sh
@@ -14,12 +14,8 @@
clear
printf "%s sentry\n\n" "$(date +%Y.%m.%d..%H.%M)"
urls=(
- http://que.run
- https://dragons.dev
- https://simatime.com
- https://tv.simatime.com
- https://bsima.me
- # https://herocomics.app
+ https://bensima.com
+ https://tv.bensima.com
)
for url in "${urls[@]}"
do
diff --git a/Omni/Syncthing.nix b/Omni/Syncthing.nix
new file mode 100644
index 0000000..fab013b
--- /dev/null
+++ b/Omni/Syncthing.nix
@@ -0,0 +1,19 @@
+{...}: let
+ ports = import ./Cloud/Ports.nix;
+in {
+ services.syncthing = {
+ enable = true;
+ guiAddress = "0.0.0.0:${toString ports.syncthing-gui}";
+ openDefaultPorts = true;
+ systemService = true;
+ configDir = "/var/lib/syncthing/.config/syncthing";
+
+ # Default settings for new users
+ settings = {
+ options = {
+ relaysEnabled = true;
+ urAccepted = -1; # Usage reporting: -1 = not decided, 0 = no, 1 = yes
+ };
+ };
+ };
+}
diff --git a/Omni/Task.hs b/Omni/Task.hs
new file mode 100644
index 0000000..0d8d6c8
--- /dev/null
+++ b/Omni/Task.hs
@@ -0,0 +1,1208 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE QuasiQuotes #-}
+{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep sqlite-simple
+module Omni.Task where
+
+import Alpha
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KM
+import qualified Data.ByteString.Lazy.Char8 as BLC
+import qualified Data.Text as T
+import qualified Data.Text.Encoding as TE
+import Data.Time (defaultTimeLocale, formatTime)
+import qualified Omni.Cli as Cli
+import qualified Omni.Namespace as Namespace
+import Omni.Task.Core
+import qualified Omni.Task.MigrationTest as MigrationTest
+import qualified Omni.Task.RaceTest as RaceTest
+import qualified Omni.Test as Test
+import qualified System.Console.Docopt as Docopt
+import System.Directory (createDirectoryIfMissing, doesFileExist, removeFile)
+import System.Environment (setEnv)
+import qualified Test.Tasty as Tasty
+import Web.HttpApiData (parseQueryParam)
+import Prelude (read)
+
+main :: IO ()
+main = Cli.main plan
+
+plan :: Cli.Plan ()
+plan =
+ Cli.Plan
+ { help = help,
+ move = move,
+ test = test,
+ tidy = \_ -> pure ()
+ }
+
+help :: Cli.Docopt
+help =
+ [Cli.docopt|
+task
+
+Usage:
+ task init [--quiet]
+ task create <title> [options]
+ task edit <id> [options]
+ task delete <id> [--json]
+ task comment <id> <message> [--json]
+ task list [options]
+ task ready [--json]
+ task show <id> [--json]
+ task update <id> <status> [options] [--verified]
+ task deps <id> [--json]
+ task tree [<id>] [--json]
+ task progress <id> [--json]
+ task stats [--epic=<id>] [--json]
+ task log <id> [--session=<sid>] [--follow] [--json]
+ task export [-o <file>]
+ task import -i <file>
+ task test
+ task (-h | --help)
+
+Commands:
+ init Initialize task database
+ create Create a new task or epic
+ edit Edit an existing task
+ delete Delete a task
+ comment Add a comment to a task
+ list List all tasks
+ ready Show ready tasks (not blocked)
+ show Show detailed task information
+ update Update task status
+ deps Show dependency tree
+ tree Show task tree (epics with children, or all epics if no ID given)
+ progress Show progress for an epic
+ stats Show task statistics
+ log Show agent event log for a task
+ export Export tasks to JSONL
+ import Import tasks from JSONL file
+ test Run tests
+
+Options:
+ -h --help Show this help
+ --title=<title> Task title
+ --type=<type> Task type: epic or task (default: task)
+ --parent=<id> Parent epic ID
+ --priority=<p> Priority: 0-4 (0=critical, 4=backlog, default: 2)
+ --complexity=<c> Complexity: 1-5 for model selection (1=trivial, 5=expert)
+ --status=<status> Filter by status: draft, open, in-progress, review, approved, done, needs-help
+ --epic=<id> Filter stats by epic (recursive)
+ --deps=<ids> Comma-separated list of dependency IDs
+ --dep-type=<type> Dependency type: blocks, discovered-from, parent-child, related
+ --discovered-from=<id> Shortcut for --deps=<id> --dep-type=discovered-from
+ --namespace=<ns> Optional namespace (e.g., Omni/Task, Biz/Cloud)
+ --description=<desc> Task description
+ --db=<file> Path to SQLite database (overrides TASK_DB_PATH)
+ --flush Force immediate export
+ --json Output in JSON format (for agent use)
+ --quiet Non-interactive mode (for agents)
+ --verified Mark task as verified (code compiles, tests pass, feature works)
+ --session=<sid> Show events for specific session ID
+ --follow Stream events in real-time (like tail -f)
+ -i <file> Input file for import
+ -o <file> Output file for export
+
+Arguments:
+ <title> Task title
+ <id> Task ID
+ <status> Task status (draft, open, in-progress, review, approved, done, needs-help)
+ <message> Comment message
+ <file> JSONL file to import
+|]
+
+-- Helper to check if JSON output is requested
+isJsonMode :: Cli.Arguments -> Bool
+isJsonMode args = args `Cli.has` Cli.longOption "json"
+
+-- Helper to output JSON
+outputJson :: (Aeson.ToJSON a) => a -> IO ()
+outputJson val = BLC.putStrLn <| Aeson.encode val
+
+-- Helper for success message in JSON
+outputSuccess :: Text -> IO ()
+outputSuccess msg = outputJson <| Aeson.object ["success" Aeson..= True, "message" Aeson..= msg]
+
+move :: Cli.Arguments -> IO ()
+move args = do
+ -- Handle --db flag globally
+ for_
+ (Cli.getArg args (Cli.longOption "db"))
+ (setEnv "TASK_DB_PATH")
+
+ move' args
+
+move' :: Cli.Arguments -> IO ()
+move' args
+ | args `Cli.has` Cli.command "init" = do
+ let quiet = args `Cli.has` Cli.longOption "quiet"
+ initTaskDb
+ unless quiet <| putText "Task database initialized. Use 'task create' to add tasks."
+ | args `Cli.has` Cli.command "create" = do
+ title <- getArgText args "title"
+ taskType <- case Cli.getArg args (Cli.longOption "type") of
+ Nothing -> pure WorkTask
+ Just "epic" -> pure Epic
+ Just "task" -> pure WorkTask
+ Just other -> panic <| "Invalid task type: " <> T.pack other <> ". Use: epic or task"
+ parent <- case Cli.getArg args (Cli.longOption "parent") of
+ Nothing -> pure Nothing
+ Just p -> pure <| Just (T.pack p)
+
+ -- Handle --discovered-from as shortcut
+ (depIds, depType) <- case Cli.getArg args (Cli.longOption "discovered-from") of
+ Just discoveredId -> pure ([T.pack discoveredId], DiscoveredFrom)
+ Nothing -> do
+ -- Parse regular --deps and --dep-type
+ ids <- case Cli.getArg args (Cli.longOption "deps") of
+ Nothing -> pure []
+ Just depStr -> pure <| T.splitOn "," (T.pack depStr)
+ dtype <- case Cli.getArg args (Cli.longOption "dep-type") of
+ Nothing -> pure Blocks
+ Just "blocks" -> pure Blocks
+ Just "discovered-from" -> pure DiscoveredFrom
+ Just "parent-child" -> pure ParentChild
+ Just "related" -> pure Related
+ Just other -> panic <| "Invalid dependency type: " <> T.pack other <> ". Use: blocks, discovered-from, parent-child, or related"
+ pure (ids, dtype)
+
+ let deps = map (\did -> Dependency {depId = did, depType = depType}) depIds
+
+ -- Parse priority (default to P2 = medium)
+ priority <- case Cli.getArg args (Cli.longOption "priority") of
+ Nothing -> pure P2
+ Just "0" -> pure P0
+ Just "1" -> pure P1
+ Just "2" -> pure P2
+ Just "3" -> pure P3
+ Just "4" -> pure P4
+ Just other -> panic <| "Invalid priority: " <> T.pack other <> ". Use: 0-4"
+
+ -- Parse complexity (1-5 scale)
+ complexity <- case Cli.getArg args (Cli.longOption "complexity") of
+ Nothing -> pure Nothing
+ Just c -> case readMaybe c of
+ Just n | n >= 1 && n <= 5 -> pure (Just n)
+ _ -> panic <| "Invalid complexity: " <> T.pack c <> ". Use: 1-5"
+
+ namespace <- case Cli.getArg args (Cli.longOption "namespace") of
+ Nothing -> pure Nothing
+ Just ns -> do
+ -- Validate it's a proper namespace by parsing it
+ let validNs = Namespace.fromHaskellModule ns
+ nsPath = T.pack <| Namespace.toPath validNs
+ pure <| Just nsPath
+
+ description <- case Cli.getArg args (Cli.longOption "description") of
+ Nothing -> panic "--description is required for task create"
+ Just d -> pure (T.pack d)
+
+ createdTask <- createTask title taskType parent namespace priority complexity deps description
+ if isJsonMode args
+ then outputJson createdTask
+ else putStrLn <| "Created task: " <> T.unpack (taskId createdTask)
+ | args `Cli.has` Cli.command "edit" = do
+ tid <- getArgText args "id"
+
+ -- Parse optional edits
+ maybeTitle <- pure <| Cli.getArg args (Cli.longOption "title")
+ maybeType <- case Cli.getArg args (Cli.longOption "type") of
+ Nothing -> pure Nothing
+ Just "epic" -> pure <| Just Epic
+ Just "task" -> pure <| Just WorkTask
+ Just other -> panic <| "Invalid task type: " <> T.pack other <> ". Use: epic or task"
+ maybeParent <- pure <| fmap T.pack (Cli.getArg args (Cli.longOption "parent"))
+ maybePriority <- case Cli.getArg args (Cli.longOption "priority") of
+ Nothing -> pure Nothing
+ Just "0" -> pure <| Just P0
+ Just "1" -> pure <| Just P1
+ Just "2" -> pure <| Just P2
+ Just "3" -> pure <| Just P3
+ Just "4" -> pure <| Just P4
+ Just other -> panic <| "Invalid priority: " <> T.pack other <> ". Use: 0-4"
+ maybeComplexity <- case Cli.getArg args (Cli.longOption "complexity") of
+ Nothing -> pure Nothing
+ Just c -> case readMaybe c of
+ Just n | n >= 1 && n <= 5 -> pure (Just (Just n))
+ _ -> panic <| "Invalid complexity: " <> T.pack c <> ". Use: 1-5"
+ maybeStatus <- case Cli.getArg args (Cli.longOption "status") of
+ Nothing -> pure Nothing
+ Just "draft" -> pure <| Just Draft
+ Just "open" -> pure <| Just Open
+ Just "in-progress" -> pure <| Just InProgress
+ Just "review" -> pure <| Just Review
+ Just "done" -> pure <| Just Done
+ Just other -> panic <| "Invalid status: " <> T.pack other <> ". Use: draft, open, in-progress, review, or done"
+ maybeNamespace <- case Cli.getArg args (Cli.longOption "namespace") of
+ Nothing -> pure Nothing
+ Just ns -> do
+ let validNs = Namespace.fromHaskellModule ns
+ nsPath = T.pack <| Namespace.toPath validNs
+ pure <| Just nsPath
+ maybeDesc <- pure <| fmap T.pack (Cli.getArg args (Cli.longOption "description"))
+
+ maybeDeps <- case Cli.getArg args (Cli.longOption "discovered-from") of
+ Just discoveredId -> pure <| Just [Dependency {depId = T.pack discoveredId, depType = DiscoveredFrom}]
+ Nothing -> case Cli.getArg args (Cli.longOption "deps") of
+ Nothing -> pure Nothing
+ Just depStr -> do
+ let ids = T.splitOn "," (T.pack depStr)
+ dtype <- case Cli.getArg args (Cli.longOption "dep-type") of
+ Nothing -> pure Blocks
+ Just "blocks" -> pure Blocks
+ Just "discovered-from" -> pure DiscoveredFrom
+ Just "parent-child" -> pure ParentChild
+ Just "related" -> pure Related
+ Just other -> panic <| "Invalid dependency type: " <> T.pack other
+ pure <| Just (map (\did -> Dependency {depId = did, depType = dtype}) ids)
+
+ let modifyFn task =
+ task
+ { taskTitle = maybe (taskTitle task) T.pack maybeTitle,
+ taskType = fromMaybe (taskType task) maybeType,
+ taskParent = case maybeParent of Nothing -> taskParent task; Just p -> Just p,
+ taskNamespace = case maybeNamespace of Nothing -> taskNamespace task; Just ns -> Just ns,
+ taskStatus = fromMaybe (taskStatus task) maybeStatus,
+ taskPriority = fromMaybe (taskPriority task) maybePriority,
+ taskComplexity = fromMaybe (taskComplexity task) maybeComplexity,
+ taskDescription = fromMaybe (taskDescription task) maybeDesc,
+ taskDependencies = fromMaybe (taskDependencies task) maybeDeps
+ }
+
+ updatedTask <- editTask tid modifyFn
+ if isJsonMode args
+ then outputJson updatedTask
+ else putStrLn <| "Updated task: " <> T.unpack (taskId updatedTask)
+ | args `Cli.has` Cli.command "delete" = do
+ tid <- getArgText args "id"
+ deleteTask tid
+ if isJsonMode args
+ then outputSuccess ("Deleted task " <> tid)
+ else putStrLn <| "Deleted task: " <> T.unpack tid
+ | args `Cli.has` Cli.command "comment" = do
+ tid <- getArgText args "id"
+ message <- getArgText args "message"
+ updatedTask <- addComment tid message Human
+ if isJsonMode args
+ then outputJson updatedTask
+ else putStrLn <| "Added comment to task: " <> T.unpack tid
+ | args `Cli.has` Cli.command "list" = do
+ maybeType <- case Cli.getArg args (Cli.longOption "type") of
+ Nothing -> pure Nothing
+ Just "epic" -> pure <| Just Epic
+ Just "task" -> pure <| Just WorkTask
+ Just other -> panic <| "Invalid task type: " <> T.pack other
+ maybeParent <- case Cli.getArg args (Cli.longOption "parent") of
+ Nothing -> pure Nothing
+ Just p -> pure <| Just (T.pack p)
+ maybeStatus <- case Cli.getArg args (Cli.longOption "status") of
+ Nothing -> pure Nothing
+ Just "draft" -> pure <| Just Draft
+ Just "open" -> pure <| Just Open
+ Just "in-progress" -> pure <| Just InProgress
+ Just "review" -> pure <| Just Review
+ Just "approved" -> pure <| Just Approved
+ Just "done" -> pure <| Just Done
+ Just "needs-help" -> pure <| Just NeedsHelp
+ Just other -> panic <| "Invalid status: " <> T.pack other <> ". Use: draft, open, in-progress, review, approved, done, or needs-help"
+ maybeNamespace <- case Cli.getArg args (Cli.longOption "namespace") of
+ Nothing -> pure Nothing
+ Just ns -> do
+ let validNs = Namespace.fromHaskellModule ns
+ nsPath = T.pack <| Namespace.toPath validNs
+ pure <| Just nsPath
+ tasks <- listTasks maybeType maybeParent maybeStatus maybeNamespace
+ if isJsonMode args
+ then outputJson tasks
+ else traverse_ printTask tasks
+ | args `Cli.has` Cli.command "ready" = do
+ tasks <- getReadyTasks
+ if isJsonMode args
+ then outputJson tasks
+ else do
+ putText "Ready tasks:"
+ traverse_ printTask tasks
+ | args `Cli.has` Cli.command "show" = do
+ tid <- getArgText args "id"
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> putText "Task not found"
+ Just task ->
+ if isJsonMode args
+ then outputJson task
+ else showTaskDetailed task
+ | args `Cli.has` Cli.command "update" = do
+ tid <- getArgText args "id"
+ statusStr <- getArgText args "status"
+ let isVerified = args `Cli.has` Cli.longOption "verified"
+
+ -- Handle complexity update
+ maybeComplexity <- case Cli.getArg args (Cli.longOption "complexity") of
+ Nothing -> pure Nothing
+ Just c -> case readMaybe c of
+ Just n | n >= 1 && n <= 5 -> pure (Just n)
+ _ -> panic <| "Invalid complexity: " <> T.pack c <> ". Use: 1-5"
+
+ -- Handle update dependencies
+ deps <- do
+ -- Parse --deps and --dep-type
+ ids <- case Cli.getArg args (Cli.longOption "deps") of
+ Nothing -> pure []
+ Just depStr -> pure <| T.splitOn "," (T.pack depStr)
+ dtype <- case Cli.getArg args (Cli.longOption "dep-type") of
+ Nothing -> pure Blocks
+ Just "blocks" -> pure Blocks
+ Just "discovered-from" -> pure DiscoveredFrom
+ Just "parent-child" -> pure ParentChild
+ Just "related" -> pure Related
+ Just other -> panic <| "Invalid dependency type: " <> T.pack other <> ". Use: blocks, discovered-from, parent-child, or related"
+ pure (map (\d -> Dependency {depId = d, depType = dtype}) ids)
+
+ let newStatus = case statusStr of
+ "draft" -> Draft
+ "open" -> Open
+ "in-progress" -> InProgress
+ "review" -> Review
+ "approved" -> Approved
+ "done" -> Done
+ "needs-help" -> NeedsHelp
+ _ -> panic "Invalid status. Use: draft, open, in-progress, review, approved, done, or needs-help"
+
+ -- Show verification checklist warning when marking Done without --verified
+ when (newStatus == Done && not isVerified && not (isJsonMode args)) <| do
+ putText ""
+ putText "⚠️ VERIFICATION CHECKLIST (use --verified to skip):"
+ putText " [ ] Code compiles (bild succeeds)"
+ putText " [ ] Tests pass (bild --test)"
+ putText " [ ] Feature works in production (manual verification)"
+ putText ""
+
+ updateTaskStatusWithActor tid newStatus deps Human
+
+ -- Update complexity if provided
+ case maybeComplexity of
+ Nothing -> pure ()
+ Just c -> void <| editTask tid (\t -> t {taskComplexity = Just c})
+
+ -- Record verification in activity log if verified
+ when (newStatus == Done && isVerified)
+ <| logActivity tid Completed (Just "{\"verified\":true}")
+
+ if isJsonMode args
+ then
+ if newStatus == Done && isVerified
+ then outputJson <| Aeson.object ["success" Aeson..= True, "message" Aeson..= ("Updated task " <> tid), "verified" Aeson..= True]
+ else outputSuccess <| "Updated task " <> tid
+ else
+ if newStatus == Done && isVerified
+ then putStrLn <| "Updated task " <> T.unpack tid <> " (verified ✓)"
+ else putStrLn <| "Updated task " <> T.unpack tid
+ | args `Cli.has` Cli.command "deps" = do
+ tid <- getArgText args "id"
+ if isJsonMode args
+ then do
+ deps <- getDependencyTree tid
+ outputJson deps
+ else showDependencyTree tid
+ | args `Cli.has` Cli.command "tree" = do
+ maybeId <- case Cli.getArg args (Cli.argument "id") of
+ Nothing -> pure Nothing
+ Just idStr -> pure <| Just (T.pack idStr)
+ if isJsonMode args
+ then do
+ tree <- getTaskTree maybeId
+ outputJson tree
+ else showTaskTree maybeId
+ | args `Cli.has` Cli.command "progress" = do
+ tid <- getArgText args "id"
+ if isJsonMode args
+ then do
+ progress <- getTaskProgress tid
+ outputJson progress
+ else showTaskProgress tid
+ | args `Cli.has` Cli.command "stats" = do
+ maybeEpic <- case Cli.getArg args (Cli.longOption "epic") of
+ Nothing -> pure Nothing
+ Just e -> pure <| Just (T.pack e)
+ if isJsonMode args
+ then do
+ stats <- getTaskStats maybeEpic
+ outputJson stats
+ else showTaskStats maybeEpic
+ | args `Cli.has` Cli.command "log" = do
+ tid <- getArgText args "id"
+ let maybeSession = T.pack </ Cli.getArg args (Cli.longOption "session")
+ followMode = args `Cli.has` Cli.longOption "follow"
+ if followMode
+ then followTaskLog tid maybeSession
+ else showTaskLog tid maybeSession (isJsonMode args)
+ | args `Cli.has` Cli.command "export" = do
+ file <- case Cli.getArg args (Cli.shortOption 'o') of
+ Nothing -> pure Nothing
+ Just f -> pure (Just f)
+ exportTasks file
+ case file of
+ Just f -> putText <| "Exported tasks to " <> T.pack f
+ Nothing -> pure ()
+ | args `Cli.has` Cli.command "import" = do
+ -- Note: -i <file> means the value is stored in option 'i', not argument "file"
+ file <- case Cli.getArg args (Cli.shortOption 'i') of
+ Nothing -> panic "import requires -i <file>"
+ Just f -> pure (T.pack f)
+ importTasks (T.unpack file)
+ putText <| "Imported tasks from " <> file
+ | otherwise = putText (T.pack <| Cli.usage help)
+ where
+ getArgText :: Cli.Arguments -> String -> IO Text
+ getArgText argz name = do
+ maybeArg <- pure <| Cli.getArg argz (Cli.argument name)
+ case maybeArg of
+ Nothing -> panic (T.pack name <> " required")
+ Just val -> pure (T.pack val)
+
+-- | Show task log for a given task ID and optional session
+showTaskLog :: Text -> Maybe Text -> Bool -> IO ()
+showTaskLog tid maybeSession jsonMode = do
+ events <- case maybeSession of
+ Just sid -> getEventsForSession sid
+ Nothing -> getEventsForTask tid
+
+ when (null events && not jsonMode) <| do
+ putText "No events found for this task."
+
+ if jsonMode
+ then outputJson events
+ else traverse_ printEvent events
+
+-- | Follow task log in real-time (poll for new events)
+followTaskLog :: Text -> Maybe Text -> IO ()
+followTaskLog tid maybeSession = do
+ -- Get session ID (use provided or get latest)
+ sid <- getSid
+
+ -- Print initial events
+ events <- getEventsForSession sid
+ traverse_ printEvent events
+
+ -- Start polling for new events
+ let lastEventId = if null events then 0 else maximum (map storedEventId events)
+ pollEvents sid lastEventId
+ where
+ getSid = case maybeSession of
+ Just s -> pure s
+ Nothing -> do
+ maybeSid <- getLatestSessionForTask tid
+ case maybeSid of
+ Nothing -> do
+ putText "No session found for this task. Waiting for events..."
+ threadDelay 1000000
+ getSid -- Recursively retry
+ Just s -> pure s
+
+ pollEvents sid lastId = do
+ threadDelay 500000 -- Poll every 500ms
+ newEvents <- getEventsSince sid lastId
+ unless (null newEvents) <| do
+ traverse_ printEvent newEvents
+ let newLastId = if null newEvents then lastId else maximum (map storedEventId newEvents)
+ pollEvents sid newLastId
+
+-- | Print a single event in human-readable format
+printEvent :: StoredEvent -> IO ()
+printEvent event = do
+ let timestamp = storedEventTimestamp event
+ eventType = storedEventType event
+ content = storedEventContent event
+
+ -- Format timestamp as HH:MM:SS
+ let timeStr = T.pack <| formatTime defaultTimeLocale "%H:%M:%S" timestamp
+
+ -- Parse and format the content based on event type
+ let formatted = case eventType of
+ "Assistant" -> formatAssistant content
+ "ToolCall" -> formatToolCall content
+ "ToolResult" -> formatToolResult content
+ "Cost" -> formatCost content
+ "Error" -> formatError content
+ "Complete" -> "Complete"
+ _ -> eventType <> ": " <> content
+
+ putText ("[" <> timeStr <> "] " <> formatted)
+
+-- Format Assistant messages
+formatAssistant :: Text -> Text
+formatAssistant content =
+ case Aeson.decode (BLC.pack <| T.unpack content) of
+ Just (Aeson.String msg) -> "Assistant: " <> truncateText 200 msg
+ _ -> "Assistant: " <> truncateText 200 content
+
+-- Format ToolCall events
+formatToolCall :: Text -> Text
+formatToolCall content =
+ case Aeson.decode (BLC.pack <| T.unpack content) of
+ Just (Aeson.String msg) -> "Tool: " <> msg
+ Just (Aeson.Object obj) ->
+ let toolName = case KM.lookup "tool" obj of
+ Just (Aeson.String n) -> n
+ _ -> "<unknown>"
+ args = case KM.lookup "args" obj of
+ Just val -> " " <> TE.decodeUtf8 (BLC.toStrict (Aeson.encode val))
+ _ -> ""
+ in "Tool: " <> toolName <> args
+ _ -> "Tool: " <> truncateText 100 content
+
+-- Format ToolResult events
+formatToolResult :: Text -> Text
+formatToolResult content =
+ case Aeson.decode (BLC.pack <| T.unpack content) of
+ Just (Aeson.Object obj) ->
+ let toolName = case KM.lookup "tool" obj of
+ Just (Aeson.String n) -> n
+ _ -> "<unknown>"
+ success = case KM.lookup "success" obj of
+ Just (Aeson.Bool True) -> "ok"
+ Just (Aeson.Bool False) -> "failed"
+ _ -> "?"
+ output = case KM.lookup "output" obj of
+ Just (Aeson.String s) -> " (" <> tshow (T.length s) <> " bytes)"
+ _ -> ""
+ in "Result: " <> toolName <> " (" <> success <> ")" <> output
+ _ -> "Result: " <> truncateText 100 content
+
+-- Format Cost events
+formatCost :: Text -> Text
+formatCost content =
+ case Aeson.decode (BLC.pack <| T.unpack content) of
+ Just (Aeson.Object obj) ->
+ let tokens = case KM.lookup "tokens" obj of
+ Just (Aeson.Number n) -> tshow (round n :: Int)
+ _ -> "?"
+ cents = case KM.lookup "cents" obj of
+ Just (Aeson.Number n) -> tshow (round n :: Int)
+ _ -> "?"
+ in "Cost: " <> tokens <> " tokens, " <> cents <> " cents"
+ _ -> "Cost: " <> content
+
+-- Format Error events
+formatError :: Text -> Text
+formatError content =
+ case Aeson.decode (BLC.pack <| T.unpack content) of
+ Just (Aeson.String msg) -> "Error: " <> msg
+ _ -> "Error: " <> content
+
+-- Truncate text to a maximum length
+truncateText :: Int -> Text -> Text
+truncateText maxLen txt =
+ if T.length txt > maxLen
+ then T.take maxLen txt <> "..."
+ else txt
+
+test :: Test.Tree
+test =
+ Test.group
+ "Omni.Task"
+ [ unitTests,
+ cliTests,
+ Tasty.after Tasty.AllSucceed "Unit tests" RaceTest.test,
+ Tasty.after Tasty.AllSucceed "Unit tests" MigrationTest.test
+ ]
+
+unitTests :: Test.Tree
+unitTests =
+ Test.group
+ "Unit tests"
+ [ Test.unit "setup test database" <| do
+ -- Set up test mode for all tests (uses _/tmp/tasks-test.db)
+ setEnv "TASK_TEST_MODE" "1"
+
+ -- Clean up test database before all tests
+ let testFile = "_/tmp/tasks-test.db"
+ createDirectoryIfMissing True "_/tmp"
+ exists <- doesFileExist testFile
+ when exists <| removeFile testFile
+ initTaskDb
+ True Test.@?= True,
+ Test.unit "can create task" <| do
+ task <- createTask "Test task" WorkTask Nothing Nothing P2 Nothing [] "Test description"
+ taskTitle task Test.@?= "Test task"
+ taskType task Test.@?= WorkTask
+ taskStatus task Test.@?= Open
+ taskPriority task Test.@?= P2
+ null (taskDependencies task) Test.@?= True,
+ Test.unit "ready tasks exclude NeedsHelp tasks" <| do
+ task <- createTask "Needs Help Task" WorkTask Nothing Nothing P2 Nothing [] "Task needing help"
+ updateTaskStatus (taskId task) NeedsHelp []
+ ready <- getReadyTasks
+ (taskId task `notElem` map taskId ready) Test.@?= True,
+ Test.unit "ready tasks exclude draft tasks" <| do
+ task <- createTask "Draft Task" WorkTask Nothing Nothing P2 Nothing [] "Draft description"
+ updateTaskStatus (taskId task) Draft []
+ ready <- getReadyTasks
+ (taskId task `notElem` map taskId ready) Test.@?= True,
+ Test.unit "can create task with description" <| do
+ task <- createTask "Test task" WorkTask Nothing Nothing P2 Nothing [] "My description"
+ taskDescription task Test.@?= "My description",
+ Test.unit "can create task with complexity" <| do
+ task <- createTask "Complex task" WorkTask Nothing Nothing P2 (Just 4) [] "High complexity task"
+ taskComplexity task Test.@?= Just 4,
+ Test.unit "complexity is persisted" <| do
+ task <- createTask "Persisted complexity" WorkTask Nothing Nothing P2 (Just 3) [] "Medium complexity"
+ tasks <- loadTasks
+ case findTask (taskId task) tasks of
+ Nothing -> Test.assertFailure "Could not reload task"
+ Just reloaded -> taskComplexity reloaded Test.@?= Just 3,
+ Test.unit "can list tasks" <| do
+ _ <- createTask "Test task for list" WorkTask Nothing Nothing P2 Nothing [] "List test"
+ tasks <- listTasks Nothing Nothing Nothing Nothing
+ not (null tasks) Test.@?= True,
+ Test.unit "ready tasks exclude blocked ones" <| do
+ task1 <- createTask "First task" WorkTask Nothing Nothing P2 Nothing [] "First description"
+ let blockingDep = Dependency {depId = taskId task1, depType = Blocks}
+ task2 <- createTask "Blocked task" WorkTask Nothing Nothing P2 Nothing [blockingDep] "Blocked description"
+ ready <- getReadyTasks
+ (taskId task1 `elem` map taskId ready) Test.@?= True
+ (taskId task2 `notElem` map taskId ready) Test.@?= True,
+ Test.unit "discovered-from dependencies don't block" <| do
+ task1 <- createTask "Original task" WorkTask Nothing Nothing P2 Nothing [] "Original"
+ let discDep = Dependency {depId = taskId task1, depType = DiscoveredFrom}
+ task2 <- createTask "Discovered work" WorkTask Nothing Nothing P2 Nothing [discDep] "Discovered"
+ ready <- getReadyTasks
+ -- Both should be ready since DiscoveredFrom doesn't block
+ (taskId task1 `elem` map taskId ready) Test.@?= True
+ (taskId task2 `elem` map taskId ready) Test.@?= True,
+ Test.unit "related dependencies don't block" <| do
+ task1 <- createTask "Task A" WorkTask Nothing Nothing P2 Nothing [] "Task A description"
+ let relDep = Dependency {depId = taskId task1, depType = Related}
+ task2 <- createTask "Task B" WorkTask Nothing Nothing P2 Nothing [relDep] "Task B description"
+ ready <- getReadyTasks
+ -- Both should be ready since Related doesn't block
+ (taskId task1 `elem` map taskId ready) Test.@?= True
+ (taskId task2 `elem` map taskId ready) Test.@?= True,
+ Test.unit "ready tasks exclude epics" <| do
+ epic <- createTask "Epic task" Epic Nothing Nothing P2 Nothing [] "Epic description"
+ ready <- getReadyTasks
+ (taskId epic `notElem` map taskId ready) Test.@?= True,
+ Test.unit "ready tasks exclude tasks needing intervention (retry >= 3)" <| do
+ task <- createTask "Failing task" WorkTask Nothing Nothing P2 Nothing [] "Failing description"
+ ready1 <- getReadyTasks
+ (taskId task `elem` map taskId ready1) Test.@?= True
+ setRetryContext
+ RetryContext
+ { retryTaskId = taskId task,
+ retryOriginalCommit = "abc123",
+ retryConflictFiles = [],
+ retryAttempt = 3,
+ retryReason = "test_failure",
+ retryNotes = Nothing
+ }
+ ready2 <- getReadyTasks
+ (taskId task `notElem` map taskId ready2) Test.@?= True,
+ Test.unit "child task gets sequential ID" <| do
+ parent <- createTask "Parent" Epic Nothing Nothing P2 Nothing [] "Parent epic"
+ child1 <- createTask "Child 1" WorkTask (Just (taskId parent)) Nothing P2 Nothing [] "Child 1 description"
+ child2 <- createTask "Child 2" WorkTask (Just (taskId parent)) Nothing P2 Nothing [] "Child 2 description"
+ taskId child1 Test.@?= taskId parent <> ".1"
+ taskId child2 Test.@?= taskId parent <> ".2",
+ Test.unit "grandchild task gets sequential ID" <| do
+ parent <- createTask "Grandparent" Epic Nothing Nothing P2 Nothing [] "Grandparent epic"
+ child <- createTask "Parent" Epic (Just (taskId parent)) Nothing P2 Nothing [] "Parent epic"
+ grandchild <- createTask "Grandchild" WorkTask (Just (taskId child)) Nothing P2 Nothing [] "Grandchild task"
+ taskId grandchild Test.@?= taskId parent <> ".1.1",
+ Test.unit "siblings of grandchild task get sequential ID" <| do
+ parent <- createTask "Grandparent" Epic Nothing Nothing P2 Nothing [] "Grandparent"
+ child <- createTask "Parent" Epic (Just (taskId parent)) Nothing P2 Nothing [] "Parent"
+ grandchild1 <- createTask "Grandchild 1" WorkTask (Just (taskId child)) Nothing P2 Nothing [] "Grandchild 1"
+ grandchild2 <- createTask "Grandchild 2" WorkTask (Just (taskId child)) Nothing P2 Nothing [] "Grandchild 2"
+ taskId grandchild1 Test.@?= taskId parent <> ".1.1"
+ taskId grandchild2 Test.@?= taskId parent <> ".1.2",
+ Test.unit "child ID generation skips gaps" <| do
+ parent <- createTask "Parent with gaps" Epic Nothing Nothing P2 Nothing [] "Parent with gaps"
+ child1 <- createTask "Child 1" WorkTask (Just (taskId parent)) Nothing P2 Nothing [] "Child 1"
+ -- Manually create a task with .3 suffix to simulate a gap (or deleted task)
+ let child3Id = taskId parent <> ".3"
+ child3 =
+ Task
+ { taskId = child3Id,
+ taskTitle = "Child 3",
+ taskType = WorkTask,
+ taskParent = Just (taskId parent),
+ taskNamespace = Nothing,
+ taskStatus = Open,
+ taskPriority = P2,
+ taskComplexity = Nothing,
+ taskDependencies = [],
+ taskDescription = "Child 3",
+ taskComments = [],
+ taskCreatedAt = taskCreatedAt child1,
+ taskUpdatedAt = taskUpdatedAt child1
+ }
+ saveTask child3
+
+ -- Create a new child, it should get .4, not .2
+ child4 <- createTask "Child 4" WorkTask (Just (taskId parent)) Nothing P2 Nothing [] "Child 4"
+ taskId child4 Test.@?= taskId parent <> ".4",
+ Test.unit "can edit task" <| do
+ task <- createTask "Original Title" WorkTask Nothing Nothing P2 Nothing [] "Original"
+ let modifyFn t = t {taskTitle = "New Title", taskPriority = P0}
+ updated <- editTask (taskId task) modifyFn
+ taskTitle updated Test.@?= "New Title"
+ taskPriority updated Test.@?= P0
+ -- Check persistence
+ tasks <- loadTasks
+ case findTask (taskId task) tasks of
+ Nothing -> Test.assertFailure "Could not reload task"
+ Just reloaded -> do
+ taskTitle reloaded Test.@?= "New Title"
+ taskPriority reloaded Test.@?= P0,
+ Test.unit "task lookup is case insensitive" <| do
+ task <- createTask "Case sensitive" WorkTask Nothing Nothing P2 Nothing [] "Case sensitive description"
+ let tid = taskId task
+ upperTid = T.toUpper tid
+ tasks <- loadTasks
+ let found = findTask upperTid tasks
+ case found of
+ Just t -> taskId t Test.@?= tid
+ Nothing -> Test.assertFailure "Could not find task with upper case ID",
+ Test.unit "namespace normalization handles .hs suffix" <| do
+ let ns = "Omni/Task.hs"
+ validNs = Namespace.fromHaskellModule ns
+ Namespace.toPath validNs Test.@?= "Omni/Task.hs",
+ Test.unit "generated IDs are lowercase" <| do
+ task <- createTask "Lowercase check" WorkTask Nothing Nothing P2 Nothing [] "Lowercase description"
+ let tid = taskId task
+ tid Test.@?= T.toLower tid
+ -- check it matches regex for base36 (t-[0-9a-z]+)
+ let isLowerBase36 = T.all (\c -> c `elem` ['0' .. '9'] ++ ['a' .. 'z'] || c == 't' || c == '-') tid
+ isLowerBase36 Test.@?= True,
+ Test.unit "dependencies are case insensitive" <| do
+ task1 <- createTask "Blocker" WorkTask Nothing Nothing P2 Nothing [] "Blocker description"
+ let tid1 = taskId task1
+ -- Use uppercase ID for dependency
+ upperTid1 = T.toUpper tid1
+ dep = Dependency {depId = upperTid1, depType = Blocks}
+ task2 <- createTask "Blocked" WorkTask Nothing Nothing P2 Nothing [dep] "Blocked description"
+
+ -- task1 is Open, so task2 should NOT be ready
+ ready <- getReadyTasks
+ (taskId task2 `notElem` map taskId ready) Test.@?= True
+
+ updateTaskStatus tid1 Done []
+
+ -- task2 should now be ready because dependency check normalizes IDs
+ ready2 <- getReadyTasks
+ (taskId task2 `elem` map taskId ready2) Test.@?= True,
+ Test.unit "can create task with lowercase ID" <| do
+ -- This verifies that lowercase IDs are accepted and not rejected
+ let lowerId = "t-lowercase"
+ let task = Task lowerId "Lower" WorkTask Nothing Nothing Open P2 Nothing [] "Lower description" [] (read "2025-01-01 00:00:00 UTC") (read "2025-01-01 00:00:00 UTC")
+ saveTask task
+ tasks <- loadTasks
+ case findTask lowerId tasks of
+ Just t -> taskId t Test.@?= lowerId
+ Nothing -> Test.assertFailure "Should find task with lowercase ID",
+ Test.unit "generateId produces valid ID" <| do
+ tid <- generateId
+ let task = Task tid "Auto" WorkTask Nothing Nothing Open P2 Nothing [] "Auto description" [] (read "2025-01-01 00:00:00 UTC") (read "2025-01-01 00:00:00 UTC")
+ saveTask task
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Just _ -> pure ()
+ Nothing -> Test.assertFailure "Should find generated task",
+ Test.unit "generateId produces sequential IDs" <| do
+ tid1 <- generateId
+ tid2 <- generateId
+ tid3 <- generateId
+ T.isPrefixOf "t-" tid1 Test.@?= True
+ T.isPrefixOf "t-" tid2 Test.@?= True
+ T.isPrefixOf "t-" tid3 Test.@?= True
+ let num1 = readMaybe (T.unpack (T.drop 2 tid1)) :: Maybe Int
+ num2 = readMaybe (T.unpack (T.drop 2 tid2)) :: Maybe Int
+ num3 = readMaybe (T.unpack (T.drop 2 tid3)) :: Maybe Int
+ case (num1, num2, num3) of
+ (Just n1, Just n2, Just n3) -> do
+ (n2 == n1 + 1) Test.@?= True
+ (n3 == n2 + 1) Test.@?= True
+ _ -> Test.assertFailure "IDs should be sequential integers",
+ Test.unit "lowercase ID does not clash with existing uppercase ID" <| do
+ -- Setup: Create task with Uppercase ID
+ let upperId = "t-UPPER"
+ let task1 = Task upperId "Upper Task" WorkTask Nothing Nothing Open P2 Nothing [] "Upper description" [] (read "2025-01-01 00:00:00 UTC") (read "2025-01-01 00:00:00 UTC")
+ saveTask task1
+
+ -- Action: Try to create task with Lowercase ID (same letters)
+ -- Note: In the current implementation, saveTask blindly appends.
+ -- Ideally, we should be checking for existence if we want to avoid clash.
+ -- OR, we accept that they are the SAME task and this is an update?
+ -- But if they are different tasks (different titles, created at different times),
+ -- treating them as the same is dangerous.
+
+ let lowerId = "t-upper"
+ let task2 = Task lowerId "Lower Task" WorkTask Nothing Nothing Open P2 Nothing [] "Lower description" [] (read "2025-01-01 00:00:01 UTC") (read "2025-01-01 00:00:01 UTC")
+ saveTask task2
+
+ tasks <- loadTasks
+ -- What do we expect?
+ -- If we expect them to be distinct:
+ -- let foundUpper = List.find (\t -> taskId t == upperId) tasks
+ -- let foundLower = List.find (\t -> taskId t == lowerId) tasks
+ -- foundUpper /= Nothing
+ -- foundLower /= Nothing
+
+ -- BUT findTask uses case-insensitive search.
+ -- So findTask upperId returns task1 (probably, as it's first).
+ -- findTask lowerId returns task1.
+ -- task2 is effectively hidden/lost to findTask.
+
+ -- So, "do not clash" implies we shouldn't end up in this state.
+ -- The test should probably fail if we have multiple tasks that match the same ID case-insensitively.
+
+ let matches = filter (\t -> matchesId (taskId t) upperId) tasks
+ length matches Test.@?= 2,
+ Test.unit "FromHttpApiData Priority: empty string returns Left" <| do
+ let result = parseQueryParam "" :: Either Text Priority
+ case result of
+ Left _ -> pure ()
+ Right _ -> Test.assertFailure "Empty string should return Left",
+ Test.unit "FromHttpApiData Priority: valid values parse correctly" <| do
+ (parseQueryParam "P0" :: Either Text Priority) Test.@?= Right P0
+ (parseQueryParam "P1" :: Either Text Priority) Test.@?= Right P1
+ (parseQueryParam "P2" :: Either Text Priority) Test.@?= Right P2
+ (parseQueryParam "P3" :: Either Text Priority) Test.@?= Right P3
+ (parseQueryParam "P4" :: Either Text Priority) Test.@?= Right P4,
+ Test.unit "FromHttpApiData Status: empty string returns Left" <| do
+ let result = parseQueryParam "" :: Either Text Status
+ case result of
+ Left _ -> pure ()
+ Right _ -> Test.assertFailure "Empty string should return Left",
+ Test.unit "FromHttpApiData Status: valid values parse correctly" <| do
+ (parseQueryParam "Open" :: Either Text Status) Test.@?= Right Open
+ (parseQueryParam "InProgress" :: Either Text Status) Test.@?= Right InProgress
+ (parseQueryParam "Done" :: Either Text Status) Test.@?= Right Done,
+ Test.unit "can add comment to task" <| do
+ task <- createTask "Task with comment" WorkTask Nothing Nothing P2 Nothing [] "Description"
+ updatedTask <- addComment (taskId task) "This is a test comment" Human
+ length (taskComments updatedTask) Test.@?= 1
+ case taskComments updatedTask of
+ (c : _) -> do
+ commentText c Test.@?= "This is a test comment"
+ commentAuthor c Test.@?= Human
+ [] -> Test.assertFailure "Expected at least one comment",
+ Test.unit "can add multiple comments to task" <| do
+ task <- createTask "Task with comments" WorkTask Nothing Nothing P2 Nothing [] "Description"
+ _ <- addComment (taskId task) "First comment" Junior
+ updatedTask <- addComment (taskId task) "Second comment" Human
+ length (taskComments updatedTask) Test.@?= 2
+ case taskComments updatedTask of
+ (c1 : c2 : _) -> do
+ commentText c1 Test.@?= "First comment"
+ commentAuthor c1 Test.@?= Junior
+ commentText c2 Test.@?= "Second comment"
+ commentAuthor c2 Test.@?= Human
+ _ -> Test.assertFailure "Expected at least two comments",
+ Test.unit "comments are persisted" <| do
+ task <- createTask "Persistent comments" WorkTask Nothing Nothing P2 Nothing [] "Description"
+ _ <- addComment (taskId task) "Persisted comment" Junior
+ tasks <- loadTasks
+ case findTask (taskId task) tasks of
+ Nothing -> Test.assertFailure "Could not reload task"
+ Just reloaded -> do
+ length (taskComments reloaded) Test.@?= 1
+ case taskComments reloaded of
+ (c : _) -> commentText c Test.@?= "Persisted comment"
+ [] -> Test.assertFailure "Expected at least one comment"
+ ]
+
+-- | Test CLI argument parsing to ensure docopt string matches actual usage
+cliTests :: Test.Tree
+cliTests =
+ Test.group
+ "CLI argument parsing"
+ [ Test.unit "init command" <| do
+ let result = Docopt.parseArgs help ["init"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'init': " <> show err
+ Right args -> args `Cli.has` Cli.command "init" Test.@?= True,
+ Test.unit "init with --quiet flag" <| do
+ let result = Docopt.parseArgs help ["init", "--quiet"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'init --quiet': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "init" Test.@?= True
+ args `Cli.has` Cli.longOption "quiet" Test.@?= True,
+ Test.unit "create with title" <| do
+ let result = Docopt.parseArgs help ["create", "Test task"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ Cli.getArg args (Cli.argument "title") Test.@?= Just "Test task",
+ Test.unit "create with --json flag" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "create with --namespace flag" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--namespace=Omni/Task"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create --namespace': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ Cli.getArg args (Cli.longOption "namespace") Test.@?= Just "Omni/Task",
+ Test.unit "create with --discovered-from flag" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--discovered-from=t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create --discovered-from': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ Cli.getArg args (Cli.longOption "discovered-from") Test.@?= Just "t-abc123",
+ Test.unit "create with --priority flag" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--priority=1"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create --priority': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ Cli.getArg args (Cli.longOption "priority") Test.@?= Just "1",
+ Test.unit "create with --complexity flag" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--complexity=3"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create --complexity': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ Cli.getArg args (Cli.longOption "complexity") Test.@?= Just "3",
+ Test.unit "edit with --complexity flag" <| do
+ let result = Docopt.parseArgs help ["edit", "t-abc123", "--complexity=4"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'edit --complexity': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "edit" Test.@?= True
+ Cli.getArg args (Cli.longOption "complexity") Test.@?= Just "4",
+ Test.unit "edit command" <| do
+ let result = Docopt.parseArgs help ["edit", "t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'edit': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "edit" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123",
+ Test.unit "edit with options" <| do
+ let result = Docopt.parseArgs help ["edit", "t-abc123", "--title=New Title", "--priority=0"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'edit' with options: " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "edit" Test.@?= True
+ Cli.getArg args (Cli.longOption "title") Test.@?= Just "New Title"
+ Cli.getArg args (Cli.longOption "priority") Test.@?= Just "0",
+ Test.unit "list command" <| do
+ let result = Docopt.parseArgs help ["list"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'list': " <> show err
+ Right args -> args `Cli.has` Cli.command "list" Test.@?= True,
+ Test.unit "list with --json flag" <| do
+ let result = Docopt.parseArgs help ["list", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'list --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "list" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "list with --status filter" <| do
+ let result = Docopt.parseArgs help ["list", "--status=open"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'list --status': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "list" Test.@?= True
+ Cli.getArg args (Cli.longOption "status") Test.@?= Just "open",
+ Test.unit "list with --status=approved filter" <| do
+ let result = Docopt.parseArgs help ["list", "--status=approved"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'list --status=approved': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "list" Test.@?= True
+ Cli.getArg args (Cli.longOption "status") Test.@?= Just "approved",
+ Test.unit "list with --status=draft filter" <| do
+ let result = Docopt.parseArgs help ["list", "--status=draft"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'list --status=draft': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "list" Test.@?= True
+ Cli.getArg args (Cli.longOption "status") Test.@?= Just "draft",
+ Test.unit "ready command" <| do
+ let result = Docopt.parseArgs help ["ready"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'ready': " <> show err
+ Right args -> args `Cli.has` Cli.command "ready" Test.@?= True,
+ Test.unit "ready with --json flag" <| do
+ let result = Docopt.parseArgs help ["ready", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'ready --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "ready" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "update command" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "done"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123"
+ Cli.getArg args (Cli.argument "status") Test.@?= Just "done",
+ Test.unit "update command with approved" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "approved"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update ... approved': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123"
+ Cli.getArg args (Cli.argument "status") Test.@?= Just "approved",
+ Test.unit "update command with draft" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "draft"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update ... draft': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123"
+ Cli.getArg args (Cli.argument "status") Test.@?= Just "draft",
+ Test.unit "update with --json flag" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "done", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "update with --verified flag" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "done", "--verified"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update --verified': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ args `Cli.has` Cli.longOption "verified" Test.@?= True,
+ Test.unit "update with --verified and --json flags" <| do
+ let result = Docopt.parseArgs help ["update", "t-abc123", "done", "--verified", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'update --verified --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "update" Test.@?= True
+ args `Cli.has` Cli.longOption "verified" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "deps command" <| do
+ let result = Docopt.parseArgs help ["deps", "t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'deps': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "deps" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123",
+ Test.unit "tree command" <| do
+ let result = Docopt.parseArgs help ["tree"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'tree': " <> show err
+ Right args -> args `Cli.has` Cli.command "tree" Test.@?= True,
+ Test.unit "tree with id" <| do
+ let result = Docopt.parseArgs help ["tree", "t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'tree <id>': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "tree" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123",
+ Test.unit "export command" <| do
+ let result = Docopt.parseArgs help ["export"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'export': " <> show err
+ Right args -> args `Cli.has` Cli.command "export" Test.@?= True,
+ Test.unit "import command" <| do
+ let result = Docopt.parseArgs help ["import", "-i", "tasks.jsonl"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'import': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "import" Test.@?= True
+ -- Note: -i is a short option, not an argument
+ Cli.getArg args (Cli.shortOption 'i') Test.@?= Just "tasks.jsonl",
+ Test.unit "show command" <| do
+ let result = Docopt.parseArgs help ["show", "t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'show': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "show" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123",
+ Test.unit "show with --json flag" <| do
+ let result = Docopt.parseArgs help ["show", "t-abc123", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'show --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "show" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "stats command" <| do
+ let result = Docopt.parseArgs help ["stats"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'stats': " <> show err
+ Right args -> args `Cli.has` Cli.command "stats" Test.@?= True,
+ Test.unit "stats with --json flag" <| do
+ let result = Docopt.parseArgs help ["stats", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'stats --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "stats" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "stats with --epic flag" <| do
+ let result = Docopt.parseArgs help ["stats", "--epic=t-abc123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'stats --epic': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "stats" Test.@?= True
+ Cli.getArg args (Cli.longOption "epic") Test.@?= Just "t-abc123",
+ Test.unit "create with flags in different order" <| do
+ let result = Docopt.parseArgs help ["create", "Test", "--json", "--priority=1", "--namespace=Omni/Task"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'create' with reordered flags: " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "create" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True
+ Cli.getArg args (Cli.longOption "priority") Test.@?= Just "1"
+ Cli.getArg args (Cli.longOption "namespace") Test.@?= Just "Omni/Task",
+ Test.unit "comment command" <| do
+ let result = Docopt.parseArgs help ["comment", "t-abc123", "This is a comment"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'comment': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "comment" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-abc123"
+ Cli.getArg args (Cli.argument "message") Test.@?= Just "This is a comment",
+ Test.unit "comment with --json flag" <| do
+ let result = Docopt.parseArgs help ["comment", "t-abc123", "Test comment", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'comment --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "comment" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True,
+ Test.unit "log command" <| do
+ let result = Docopt.parseArgs help ["log", "t-123"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'log': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "log" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-123",
+ Test.unit "log command with --session flag" <| do
+ let result = Docopt.parseArgs help ["log", "t-123", "--session=s-456"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'log --session': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "log" Test.@?= True
+ Cli.getArg args (Cli.argument "id") Test.@?= Just "t-123"
+ Cli.getArg args (Cli.longOption "session") Test.@?= Just "s-456",
+ Test.unit "log command with --follow flag" <| do
+ let result = Docopt.parseArgs help ["log", "t-123", "--follow"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'log --follow': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "log" Test.@?= True
+ args `Cli.has` Cli.longOption "follow" Test.@?= True,
+ Test.unit "log command with --json flag" <| do
+ let result = Docopt.parseArgs help ["log", "t-123", "--json"]
+ case result of
+ Left err -> Test.assertFailure <| "Failed to parse 'log --json': " <> show err
+ Right args -> do
+ args `Cli.has` Cli.command "log" Test.@?= True
+ args `Cli.has` Cli.longOption "json" Test.@?= True
+ ]
diff --git a/Omni/Task/Core.hs b/Omni/Task/Core.hs
new file mode 100644
index 0000000..0a1187f
--- /dev/null
+++ b/Omni/Task/Core.hs
@@ -0,0 +1,1826 @@
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+-- : dep http-api-data
+module Omni.Task.Core where
+
+import Alpha
+import Data.Aeson (FromJSON, ToJSON, decode, encode)
+import qualified Data.Aeson as Aeson
+import qualified Data.Aeson.KeyMap as KeyMap
+import qualified Data.ByteString.Lazy.Char8 as BLC
+import qualified Data.List as List
+import qualified Data.Set as Set
+import qualified Data.Text as T
+import qualified Data.Text.IO as TIO
+import Data.Time (UTCTime, diffUTCTime, getCurrentTime)
+import qualified Database.SQLite.Simple as SQL
+import qualified Database.SQLite.Simple.FromField as SQL
+import qualified Database.SQLite.Simple.Ok as SQLOk
+import qualified Database.SQLite.Simple.ToField as SQL
+import GHC.Generics ()
+import System.Directory (XdgDirectory (..), createDirectoryIfMissing, doesFileExist, getXdgDirectory)
+import System.Environment (lookupEnv)
+import System.FilePath (takeDirectory, (</>))
+import System.IO.Unsafe (unsafePerformIO)
+import Web.HttpApiData (FromHttpApiData (..))
+
+-- Core data types
+data Task = Task
+ { taskId :: Text,
+ taskTitle :: Text,
+ taskType :: TaskType,
+ taskParent :: Maybe Text, -- Parent epic ID
+ taskNamespace :: Maybe Text, -- Optional namespace (e.g., "Omni/Task", "Biz/Cloud")
+ taskStatus :: Status,
+ taskPriority :: Priority, -- Priority level (0-4)
+ taskComplexity :: Maybe Int, -- Complexity 1-5 for model selection
+ taskDependencies :: [Dependency], -- List of dependencies with types
+ taskDescription :: Text, -- Required description
+ taskComments :: [Comment], -- Timestamped comments for extra context
+ taskCreatedAt :: UTCTime,
+ taskUpdatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+data TaskType = Epic | WorkTask
+ deriving (Show, Eq, Read, Generic)
+
+data Status = Draft | Open | InProgress | Review | Approved | Done | NeedsHelp
+ deriving (Show, Eq, Read, Generic)
+
+-- Priority levels (matching beads convention)
+data Priority = P0 | P1 | P2 | P3 | P4
+ deriving (Show, Eq, Ord, Read, Generic)
+
+data Dependency = Dependency
+ { depId :: Text, -- ID of the task this depends on
+ depType :: DependencyType -- Type of dependency relationship
+ }
+ deriving (Show, Eq, Generic)
+
+data DependencyType
+ = Blocks -- Hard dependency, blocks ready work queue
+ | DiscoveredFrom -- Work discovered during other work
+ | ParentChild -- Epic/subtask relationship
+ | Related -- Soft relationship, doesn't block
+ deriving (Show, Eq, Read, Generic)
+
+data TaskProgress = TaskProgress
+ { progressTaskId :: Text,
+ progressTotal :: Int,
+ progressCompleted :: Int,
+ progressPercentage :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+data EpicForReview = EpicForReview
+ { epicTask :: Task,
+ epicTotal :: Int,
+ epicCompleted :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+data HumanActionItems = HumanActionItems
+ { failedTasks :: [Task],
+ epicsInReview :: [EpicForReview],
+ tasksNeedingHelp :: [Task]
+ }
+ deriving (Show, Eq, Generic)
+
+data AggregatedMetrics = AggregatedMetrics
+ { aggTotalCostCents :: Int,
+ aggTotalDurationSeconds :: Int,
+ aggCompletedTasks :: Int,
+ aggTotalTokens :: Int
+ }
+ deriving (Show, Eq, Generic)
+
+-- Retry context for tasks that failed due to merge conflicts
+data RetryContext = RetryContext
+ { retryTaskId :: Text,
+ retryOriginalCommit :: Text,
+ retryConflictFiles :: [Text],
+ retryAttempt :: Int,
+ retryReason :: Text, -- "merge_conflict" | "ci_failure" | "rejected"
+ retryNotes :: Maybe Text -- Human notes/guidance for intervention
+ }
+ deriving (Show, Eq, Generic)
+
+-- Activity stage for task_activity tracking
+data ActivityStage = Claiming | Running | Reviewing | Retrying | Completed | Failed
+ deriving (Show, Eq, Read, Generic)
+
+-- Task activity log entry
+data TaskActivity = TaskActivity
+ { activityId :: Maybe Int, -- NULL for new entries, set by DB
+ activityTaskId :: Text,
+ activityTimestamp :: UTCTime,
+ activityStage :: ActivityStage,
+ activityMessage :: Maybe Text,
+ activityMetadata :: Maybe Text, -- JSON for extra data
+ activityThreadUrl :: Maybe Text, -- Link to agent session (unused with native Engine)
+ activityStartedAt :: Maybe UTCTime, -- When work started
+ activityCompletedAt :: Maybe UTCTime, -- When work completed
+ activityCostCents :: Maybe Int, -- API cost in cents
+ activityTokensUsed :: Maybe Int -- Total tokens used
+ }
+ deriving (Show, Eq, Generic)
+
+-- Fact for knowledge base
+data Fact = Fact
+ { factId :: Maybe Int,
+ factProject :: Text,
+ factContent :: Text,
+ factRelatedFiles :: [Text],
+ factSourceTask :: Maybe Text,
+ factConfidence :: Double,
+ factCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+-- Comment/event author (also used as Actor for timeline events)
+data CommentAuthor = Human | Junior | System
+ deriving (Show, Eq, Read, Generic)
+
+-- Comment for task notes/context
+data Comment = Comment
+ { commentText :: Text,
+ commentAuthor :: CommentAuthor,
+ commentCreatedAt :: UTCTime
+ }
+ deriving (Show, Eq, Generic)
+
+instance ToJSON TaskType
+
+instance FromJSON TaskType
+
+instance ToJSON Status
+
+instance FromJSON Status
+
+instance ToJSON Priority
+
+instance FromJSON Priority
+
+instance ToJSON DependencyType
+
+instance FromJSON DependencyType
+
+instance ToJSON Dependency
+
+instance FromJSON Dependency
+
+instance ToJSON Task
+
+instance FromJSON Task
+
+instance ToJSON TaskProgress
+
+instance FromJSON TaskProgress
+
+instance ToJSON AggregatedMetrics
+
+instance FromJSON AggregatedMetrics
+
+instance ToJSON RetryContext
+
+instance FromJSON RetryContext
+
+instance ToJSON ActivityStage
+
+instance FromJSON ActivityStage
+
+instance ToJSON TaskActivity
+
+instance FromJSON TaskActivity
+
+instance ToJSON Fact
+
+instance FromJSON Fact
+
+instance ToJSON CommentAuthor
+
+instance FromJSON CommentAuthor
+
+instance ToJSON Comment
+
+instance FromJSON Comment
+
+-- HTTP API Instances (for Servant query params)
+
+instance FromHttpApiData Status where
+ parseQueryParam t
+ | T.null t = Left "No status provided"
+ | otherwise = case readMaybe (T.unpack t) of
+ Just s -> Right s
+ Nothing -> Left ("Invalid status: " <> t)
+
+instance FromHttpApiData Priority where
+ parseQueryParam t
+ | T.null t = Left "No priority provided"
+ | otherwise = case readMaybe (T.unpack t) of
+ Just p -> Right p
+ Nothing -> Left ("Invalid priority: " <> t)
+
+-- SQLite Instances
+
+instance SQL.FromField TaskType where
+ fromField f = do
+ t <- SQL.fromField f :: SQLOk.Ok String
+ case readMaybe t of
+ Just x -> pure x
+ Nothing -> SQL.returnError SQL.ConversionFailed f "Invalid TaskType"
+
+instance SQL.ToField TaskType where
+ toField x = SQL.toField (show x :: String)
+
+instance SQL.FromField Status where
+ fromField f = do
+ t <- SQL.fromField f :: SQLOk.Ok String
+ case readMaybe t of
+ Just x -> pure x
+ Nothing -> SQL.returnError SQL.ConversionFailed f "Invalid Status"
+
+instance SQL.ToField Status where
+ toField x = SQL.toField (show x :: String)
+
+instance SQL.FromField Priority where
+ fromField f = do
+ t <- SQL.fromField f :: SQLOk.Ok String
+ case readMaybe t of
+ Just x -> pure x
+ Nothing -> SQL.returnError SQL.ConversionFailed f "Invalid Priority"
+
+instance SQL.ToField Priority where
+ toField x = SQL.toField (show x :: String)
+
+instance SQL.FromField ActivityStage where
+ fromField f = do
+ t <- SQL.fromField f :: SQLOk.Ok String
+ case readMaybe t of
+ Just x -> pure x
+ Nothing -> SQL.returnError SQL.ConversionFailed f "Invalid ActivityStage"
+
+instance SQL.ToField ActivityStage where
+ toField x = SQL.toField (show x :: String)
+
+-- Store dependencies as JSON text
+instance SQL.FromField [Dependency] where
+ fromField f = do
+ mt <- SQL.fromField f :: SQLOk.Ok (Maybe String)
+ case mt of
+ Nothing -> pure []
+ Just t -> case Aeson.decode (BLC.pack t) of
+ Just x -> pure x
+ Nothing -> pure []
+
+instance SQL.ToField [Dependency] where
+ toField deps = SQL.toField (BLC.unpack (encode deps))
+
+-- Store comments as JSON text
+instance SQL.FromField [Comment] where
+ fromField f = do
+ mt <- SQL.fromField f :: SQLOk.Ok (Maybe String)
+ case mt of
+ Nothing -> pure []
+ Just t -> case Aeson.decode (BLC.pack t) of
+ Just x -> pure x
+ Nothing -> pure []
+
+instance SQL.ToField [Comment] where
+ toField comments = SQL.toField (BLC.unpack (encode comments))
+
+instance SQL.FromRow Task where
+ fromRow =
+ Task
+ </ SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field -- complexity
+ <*> SQL.field
+ <*> (fromMaybe "" </ SQL.field) -- Handle NULL description from legacy data
+ <*> SQL.field -- comments
+ <*> SQL.field
+ <*> SQL.field
+
+instance SQL.ToRow Task where
+ toRow t =
+ [ SQL.toField (taskId t),
+ SQL.toField (taskTitle t),
+ SQL.toField (taskType t),
+ SQL.toField (taskParent t),
+ SQL.toField (taskNamespace t),
+ SQL.toField (taskStatus t),
+ SQL.toField (taskPriority t),
+ SQL.toField (taskComplexity t),
+ SQL.toField (taskDependencies t),
+ SQL.toField (taskDescription t),
+ SQL.toField (taskComments t),
+ SQL.toField (taskCreatedAt t),
+ SQL.toField (taskUpdatedAt t)
+ ]
+
+instance SQL.FromRow TaskActivity where
+ fromRow =
+ TaskActivity
+ </ SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+instance SQL.ToRow TaskActivity where
+ toRow a =
+ [ SQL.toField (activityId a),
+ SQL.toField (activityTaskId a),
+ SQL.toField (activityTimestamp a),
+ SQL.toField (activityStage a),
+ SQL.toField (activityMessage a),
+ SQL.toField (activityMetadata a),
+ SQL.toField (activityThreadUrl a),
+ SQL.toField (activityStartedAt a),
+ SQL.toField (activityCompletedAt a),
+ SQL.toField (activityCostCents a),
+ SQL.toField (activityTokensUsed a)
+ ]
+
+instance SQL.FromRow Fact where
+ fromRow = do
+ fid <- SQL.field
+ proj <- SQL.field
+ content <- SQL.field
+ (relatedFilesJson :: String) <- SQL.field
+ sourceTask <- SQL.field
+ confidence <- SQL.field
+ createdAt <- SQL.field
+ let relatedFiles = fromMaybe [] (decode (BLC.pack relatedFilesJson))
+ pure
+ Fact
+ { factId = fid,
+ factProject = proj,
+ factContent = content,
+ factRelatedFiles = relatedFiles,
+ factSourceTask = sourceTask,
+ factConfidence = confidence,
+ factCreatedAt = createdAt
+ }
+
+instance SQL.ToRow Fact where
+ toRow f =
+ [ SQL.toField (factId f),
+ SQL.toField (factProject f),
+ SQL.toField (factContent f),
+ SQL.toField (BLC.unpack (encode (factRelatedFiles f))),
+ SQL.toField (factSourceTask f),
+ SQL.toField (factConfidence f),
+ SQL.toField (factCreatedAt f)
+ ]
+
+-- | Case-insensitive ID comparison
+matchesId :: Text -> Text -> Bool
+matchesId id1 id2 = normalizeId id1 == normalizeId id2
+
+-- | Normalize ID to lowercase
+normalizeId :: Text -> Text
+normalizeId = T.toLower
+
+-- | Find a task by ID (case-insensitive)
+findTask :: Text -> [Task] -> Maybe Task
+findTask tid = List.find (\t -> matchesId (taskId t) tid)
+
+-- | Normalize task IDs (self, parent, dependencies) to lowercase
+normalizeTask :: Task -> Task
+normalizeTask t =
+ t
+ { taskId = normalizeId (taskId t),
+ taskParent = fmap normalizeId (taskParent t),
+ taskDependencies = map normalizeDependency (taskDependencies t)
+ }
+
+normalizeDependency :: Dependency -> Dependency
+normalizeDependency d = d {depId = normalizeId (depId d)}
+
+-- Lock for application-level thread safety (Read-Calc-Write cycles)
+taskLock :: MVar ()
+taskLock = unsafePerformIO (newMVar ())
+{-# NOINLINE taskLock #-}
+
+withTaskLock :: IO a -> IO a
+withTaskLock action = withMVar taskLock (const action)
+
+-- Get the tasks database file path
+getTasksDbPath :: IO FilePath
+getTasksDbPath = do
+ customPath <- lookupEnv "TASK_DB_PATH"
+ testMode <- lookupEnv "TASK_TEST_MODE"
+ case (testMode, customPath) of
+ (Just "1", _) -> pure "_/tmp/tasks-test.db"
+ (_, Just p) -> pure p
+ _ -> do
+ xdgData <- getXdgDirectory XdgData "jr"
+ pure (xdgData </> "jr.db")
+
+-- DB Helper
+withDb :: (SQL.Connection -> IO a) -> IO a
+withDb action = do
+ dbPath <- getTasksDbPath
+ SQL.withConnection dbPath <| \conn -> do
+ SQL.execute_ conn "PRAGMA busy_timeout = 5000"
+ action conn
+
+-- Initialize the task database
+initTaskDb :: IO ()
+initTaskDb = do
+ dbPath <- getTasksDbPath
+ createDirectoryIfMissing True (takeDirectory dbPath)
+ withDb <| \conn -> do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS tasks (\
+ \ id TEXT PRIMARY KEY, \
+ \ title TEXT NOT NULL, \
+ \ type TEXT NOT NULL, \
+ \ parent TEXT, \
+ \ namespace TEXT, \
+ \ status TEXT NOT NULL, \
+ \ priority TEXT NOT NULL, \
+ \ complexity INTEGER, \
+ \ dependencies TEXT NOT NULL, \
+ \ description TEXT, \
+ \ comments TEXT NOT NULL DEFAULT '[]', \
+ \ created_at TIMESTAMP NOT NULL, \
+ \ updated_at TIMESTAMP NOT NULL \
+ \)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS id_counter (\
+ \ id INTEGER PRIMARY KEY CHECK (id = 1), \
+ \ counter INTEGER NOT NULL DEFAULT 0 \
+ \)"
+ SQL.execute_
+ conn
+ "INSERT OR IGNORE INTO id_counter (id, counter) VALUES (1, 0)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS retry_context (\
+ \ task_id TEXT PRIMARY KEY, \
+ \ original_commit TEXT NOT NULL, \
+ \ conflict_files TEXT NOT NULL, \
+ \ attempt INTEGER NOT NULL DEFAULT 1, \
+ \ reason TEXT NOT NULL \
+ \)"
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS facts (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT, \
+ \ project TEXT NOT NULL, \
+ \ fact TEXT NOT NULL, \
+ \ related_files TEXT NOT NULL, \
+ \ source_task TEXT, \
+ \ confidence REAL NOT NULL, \
+ \ created_at DATETIME DEFAULT CURRENT_TIMESTAMP \
+ \)"
+ runMigrations conn
+
+-- | Run schema migrations to add missing columns to existing tables
+runMigrations :: SQL.Connection -> IO ()
+runMigrations conn = do
+ migrateTable conn "tasks" tasksColumns
+ migrateTable conn "retry_context" retryContextColumns
+ migrateTable conn "facts" factsColumns
+ createAgentEventsTable conn
+
+-- | Create agent_events table if it doesn't exist
+createAgentEventsTable :: SQL.Connection -> IO ()
+createAgentEventsTable conn = do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS agent_events (\
+ \ id INTEGER PRIMARY KEY AUTOINCREMENT, \
+ \ task_id TEXT NOT NULL, \
+ \ session_id TEXT NOT NULL, \
+ \ timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, \
+ \ event_type TEXT NOT NULL, \
+ \ content TEXT NOT NULL, \
+ \ actor TEXT NOT NULL DEFAULT 'junior' \
+ \)"
+ SQL.execute_ conn "CREATE INDEX IF NOT EXISTS idx_agent_events_task ON agent_events(task_id)"
+ SQL.execute_ conn "CREATE INDEX IF NOT EXISTS idx_agent_events_session ON agent_events(session_id)"
+ -- Add actor column to existing tables (migration)
+ SQL.execute_ conn "ALTER TABLE agent_events ADD COLUMN actor TEXT NOT NULL DEFAULT 'junior'" `catch` ignoreAlterError
+ where
+ ignoreAlterError :: SQL.SQLError -> IO ()
+ ignoreAlterError _ = pure () -- Column already exists
+
+-- | Expected columns for tasks table
+tasksColumns :: [(Text, Text)]
+tasksColumns =
+ [ ("id", "TEXT"),
+ ("title", "TEXT"),
+ ("type", "TEXT"),
+ ("parent", "TEXT"),
+ ("namespace", "TEXT"),
+ ("status", "TEXT"),
+ ("priority", "TEXT"),
+ ("complexity", "INTEGER"),
+ ("dependencies", "TEXT"),
+ ("description", "TEXT"),
+ ("comments", "TEXT"),
+ ("created_at", "TIMESTAMP"),
+ ("updated_at", "TIMESTAMP")
+ ]
+
+-- | Expected columns for retry_context table
+retryContextColumns :: [(Text, Text)]
+retryContextColumns =
+ [ ("task_id", "TEXT"),
+ ("original_commit", "TEXT"),
+ ("conflict_files", "TEXT"),
+ ("attempt", "INTEGER"),
+ ("reason", "TEXT"),
+ ("notes", "TEXT")
+ ]
+
+-- | Expected columns for facts table
+factsColumns :: [(Text, Text)]
+factsColumns =
+ [ ("id", "INTEGER"),
+ ("project", "TEXT"),
+ ("fact", "TEXT"),
+ ("related_files", "TEXT"),
+ ("source_task", "TEXT"),
+ ("confidence", "REAL"),
+ ("created_at", "DATETIME")
+ ]
+
+-- | Migrate a table by adding any missing columns
+migrateTable :: SQL.Connection -> Text -> [(Text, Text)] -> IO ()
+migrateTable conn tableName expectedCols = do
+ existingCols <- getTableColumns conn tableName
+ let missingCols = filter (\(name, _) -> name `notElem` existingCols) expectedCols
+ traverse_ (addColumn conn tableName) missingCols
+
+-- | Get list of column names for a table using PRAGMA table_info
+getTableColumns :: SQL.Connection -> Text -> IO [Text]
+getTableColumns conn tableName = do
+ let query = SQL.Query <| "PRAGMA table_info(" <> tableName <> ")"
+ rows <- SQL.query_ conn query :: IO [(Int, Text, Text, Int, Maybe Text, Int)]
+ pure [colName | (_, colName, _, _, _, _) <- rows]
+
+-- | Add a column to a table
+addColumn :: SQL.Connection -> Text -> (Text, Text) -> IO ()
+addColumn conn tableName (colName, colType) = do
+ let sql = "ALTER TABLE " <> tableName <> " ADD COLUMN " <> colName <> " " <> colType
+ SQL.execute_ conn (SQL.Query sql)
+
+-- Generate a sequential task ID (t-1, t-2, t-3, ...)
+generateId :: IO Text
+generateId = do
+ counter <- getNextCounter
+ pure <| "t-" <> T.pack (show counter)
+
+-- Get the next counter value (atomically increments)
+getNextCounter :: IO Int
+getNextCounter =
+ withDb <| \conn -> do
+ SQL.execute_
+ conn
+ "CREATE TABLE IF NOT EXISTS id_counter (\
+ \ id INTEGER PRIMARY KEY CHECK (id = 1), \
+ \ counter INTEGER NOT NULL DEFAULT 0 \
+ \)"
+ SQL.execute_ conn "INSERT OR IGNORE INTO id_counter (id, counter) VALUES (1, 0)"
+ SQL.execute_ conn "UPDATE id_counter SET counter = counter + 1 WHERE id = 1"
+ [SQL.Only c] <- SQL.query_ conn "SELECT counter FROM id_counter WHERE id = 1" :: IO [SQL.Only Int]
+ pure c
+
+-- Generate a child ID based on parent ID
+generateChildId :: Text -> IO Text
+generateChildId parentId = do
+ tasks <- loadTasks
+ pure <| computeNextChildId tasks (normalizeId parentId)
+
+computeNextChildId :: [Task] -> Text -> Text
+computeNextChildId tasks parentId =
+ let suffixes = mapMaybe (getSuffix parentId <. taskId) tasks
+ nextSuffix = case suffixes of
+ [] -> 1
+ s -> maximum s + 1
+ in parentId <> "." <> T.pack (show nextSuffix)
+
+getSuffix :: Text -> Text -> Maybe Int
+getSuffix parent childId =
+ if parent `T.isPrefixOf` childId && T.length childId > T.length parent
+ then
+ let rest = T.drop (T.length parent) childId
+ in if T.head rest == '.'
+ then readMaybe (T.unpack (T.tail rest))
+ else Nothing
+ else Nothing
+
+-- Load all tasks from DB
+loadTasks :: IO [Task]
+loadTasks =
+ withDb <| \conn -> do
+ SQL.query_ conn "SELECT id, title, type, parent, namespace, status, priority, complexity, dependencies, description, comments, created_at, updated_at FROM tasks"
+
+-- Save a single task (UPSERT)
+saveTask :: Task -> IO ()
+saveTask task =
+ withDb <| \conn -> do
+ SQL.execute
+ conn
+ "INSERT OR REPLACE INTO tasks \
+ \ (id, title, type, parent, namespace, status, priority, complexity, dependencies, description, comments, created_at, updated_at) \
+ \ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+ task
+
+-- Create a new task
+createTask :: Text -> TaskType -> Maybe Text -> Maybe Text -> Priority -> Maybe Int -> [Dependency] -> Text -> IO Task
+createTask title taskType parent namespace priority complexity deps description =
+ withTaskLock <| do
+ let parent' = fmap normalizeId parent
+ deps' = map normalizeDependency deps
+
+ tid <- case parent' of
+ Nothing -> generateUniqueId
+ Just pid -> do
+ tasks <- loadTasks
+ pure <| computeNextChildId tasks pid
+ now <- getCurrentTime
+ let task =
+ Task
+ { taskId = normalizeId tid,
+ taskTitle = title,
+ taskType = taskType,
+ taskParent = parent',
+ taskNamespace = namespace,
+ taskStatus = Open,
+ taskPriority = priority,
+ taskComplexity = complexity,
+ taskDependencies = deps',
+ taskDescription = description,
+ taskComments = [],
+ taskCreatedAt = now,
+ taskUpdatedAt = now
+ }
+ saveTask task
+ pure task
+
+-- Generate a unique ID
+generateUniqueId :: IO Text
+generateUniqueId = do
+ -- We can query DB directly to check existence
+ go
+ where
+ go = do
+ tid <- generateId
+ exists <-
+ withDb <| \conn -> do
+ [SQL.Only c] <- SQL.query conn "SELECT COUNT(*) FROM tasks WHERE id = ?" (SQL.Only tid) :: IO [SQL.Only Int]
+ pure (c > 0)
+ if exists then go else pure tid
+
+-- Update task status
+updateTaskStatus :: Text -> Status -> [Dependency] -> IO ()
+updateTaskStatus tid newStatus newDeps = updateTaskStatusWithActor tid newStatus newDeps System
+
+updateTaskStatusWithActor :: Text -> Status -> [Dependency] -> CommentAuthor -> IO ()
+updateTaskStatusWithActor tid newStatus newDeps actor =
+ withTaskLock <| do
+ maybeOldStatus <-
+ withDb <| \conn -> do
+ rows <- SQL.query conn "SELECT status, dependencies FROM tasks WHERE id = ?" (SQL.Only tid) :: IO [(Status, [Dependency])]
+ case rows of
+ [] -> pure Nothing
+ ((oldStatus, existingDeps) : _) -> do
+ now <- getCurrentTime
+ let finalDeps = if null newDeps then existingDeps else newDeps
+ SQL.execute
+ conn
+ "UPDATE tasks SET status = ?, updated_at = ?, dependencies = ? WHERE id = ?"
+ (newStatus, now, finalDeps, tid)
+ pure (Just oldStatus)
+ case maybeOldStatus of
+ Nothing -> pure ()
+ Just oldStatus ->
+ when (oldStatus /= newStatus) <| do
+ let content = "{\"from\":\"" <> T.pack (show oldStatus) <> "\",\"to\":\"" <> T.pack (show newStatus) <> "\"}"
+ sessionId <- getOrCreateCommentSession tid
+ insertAgentEvent tid sessionId "status_change" content actor
+
+-- Edit a task
+editTask :: Text -> (Task -> Task) -> IO Task
+editTask tid modifyFn =
+ withTaskLock <| do
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> panic "Task not found"
+ Just original -> do
+ now <- getCurrentTime
+ let modified = modifyFn original
+ finalTask = modified {taskUpdatedAt = now}
+ saveTask finalTask
+ pure finalTask
+
+-- Delete a task
+deleteTask :: Text -> IO ()
+deleteTask tid =
+ withDb <| \conn ->
+ SQL.execute conn "DELETE FROM tasks WHERE id = ?" (SQL.Only tid)
+
+-- Add a comment to a task
+addComment :: Text -> Text -> CommentAuthor -> IO Task
+addComment tid commentTextContent author =
+ withTaskLock <| do
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> panic "Task not found"
+ Just task -> do
+ now <- getCurrentTime
+ sessionId <- getOrCreateCommentSession tid
+ insertAgentEvent tid sessionId "comment" commentTextContent author
+ let updatedTask = task {taskUpdatedAt = now}
+ saveTask updatedTask
+ pure updatedTask
+
+-- | Get or create a session ID for comments on a task
+-- Uses a dedicated "comments" session so comments are grouped together
+getOrCreateCommentSession :: Text -> IO Text
+getOrCreateCommentSession taskId = do
+ let sessionId = "comments-" <> taskId
+ pure sessionId
+
+-- List tasks
+listTasks :: Maybe TaskType -> Maybe Text -> Maybe Status -> Maybe Text -> IO [Task]
+listTasks maybeType maybeParent maybeStatus maybeNamespace = do
+ -- Implementing specific filters in SQL would be more efficient, but for MVP and API compat:
+ tasks <- loadTasks
+ let filtered =
+ tasks
+ |> filterByType maybeType
+ |> filterByParent maybeParent
+ |> filterByStatus maybeStatus
+ |> filterByNamespace maybeNamespace
+ pure filtered
+ where
+ filterByType Nothing ts = ts
+ filterByType (Just typ) ts = filter (\t -> taskType t == typ) ts
+ filterByParent Nothing ts = ts
+ filterByParent (Just pid) ts = filter (\t -> taskParent t == Just pid) ts
+ filterByStatus Nothing ts = ts
+ filterByStatus (Just status) ts = filter (\t -> taskStatus t == status) ts
+ filterByNamespace Nothing ts = ts
+ filterByNamespace (Just ns) ts = filter (\t -> taskNamespace t == Just ns) ts
+
+-- Get ready tasks
+getReadyTasks :: IO [Task]
+getReadyTasks = do
+ allTasks <- loadTasks
+ retryContexts <- getAllRetryContexts
+ let openTasks = filter (\t -> taskStatus t `elem` [Open, InProgress]) allTasks
+ doneIds = map taskId <| filter (\t -> taskStatus t == Done) allTasks
+
+ parentIds = mapMaybe taskParent allTasks
+ isParent tid = tid `elem` parentIds
+
+ -- Tasks with retry_attempt >= 3 need human intervention
+ needsInterventionIds = [retryTaskId ctx | ctx <- retryContexts, retryAttempt ctx >= 3]
+
+ blockingDepIds task = [depId dep | dep <- taskDependencies task, depType dep `elem` [Blocks, ParentChild]]
+ isReady task =
+ taskType task
+ /= Epic
+ && not (isParent (taskId task))
+ && all (`elem` doneIds) (blockingDepIds task)
+ && taskStatus task
+ /= NeedsHelp
+ && taskId task
+ `notElem` needsInterventionIds
+ readyTasks = filter isReady openTasks
+ -- Sort by priority (P0 first) then by creation time (oldest first)
+ sorted = List.sortBy (comparing taskPriority <> comparing taskCreatedAt) readyTasks
+ pure sorted
+
+-- Get dependency tree
+getDependencyTree :: Text -> IO [Task]
+getDependencyTree tid = do
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> pure []
+ Just task -> pure <| collectDeps tasks task
+ where
+ collectDeps allTasks task =
+ let depIds = map depId (taskDependencies task)
+ deps = filter (\t -> any (matchesId (taskId t)) depIds) allTasks
+ in task : concatMap (collectDeps allTasks) deps
+
+-- Get task progress
+getTaskProgress :: Text -> IO TaskProgress
+getTaskProgress tidRaw = do
+ let tid = normalizeId tidRaw
+ -- Could be SQL optimized
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> panic "Task not found"
+ Just _ -> do
+ let children = filter (\child -> taskParent child == Just tid) tasks
+ total = length children
+ completed = length <| filter (\child -> taskStatus child == Done) children
+ percentage = if total == 0 then 0 else (completed * 100) `div` total
+ pure
+ TaskProgress
+ { progressTaskId = tid,
+ progressTotal = total,
+ progressCompleted = completed,
+ progressPercentage = percentage
+ }
+
+showTaskProgress :: Text -> IO ()
+showTaskProgress tid = do
+ progress <- getTaskProgress tid
+ putText <| "Progress for " <> tid <> ": " <> T.pack (show (progressCompleted progress)) <> "/" <> T.pack (show (progressTotal progress)) <> " (" <> T.pack (show (progressPercentage progress)) <> "%)"
+
+showDependencyTree :: Text -> IO ()
+showDependencyTree tid = do
+ tasks <- loadTasks
+ case findTask tid tasks of
+ Nothing -> putText "Task not found"
+ Just task -> printTree tasks task 0
+ where
+ printTree :: [Task] -> Task -> Int -> IO ()
+ printTree allTasks task indent = do
+ putText <| T.pack (replicate (indent * 2) ' ') <> taskId task <> ": " <> taskTitle task
+ let depIds = map depId (taskDependencies task)
+ deps = filter (\t -> any (matchesId (taskId t)) depIds) allTasks
+ traverse_ (\dep -> printTree allTasks dep (indent + 1)) deps
+
+getTaskTree :: Maybe Text -> IO [Task]
+getTaskTree maybeId = do
+ tasks <- loadTasks
+ case maybeId of
+ Nothing -> do
+ let epics = filter (\t -> taskType t == Epic) tasks
+ in pure <| concatMap (collectChildren tasks) epics
+ Just tid -> do
+ case findTask tid tasks of
+ Nothing -> pure []
+ Just task -> pure <| collectChildren tasks task
+ where
+ collectChildren allTasks task =
+ let children = filter (maybe False (`matchesId` taskId task) <. taskParent) allTasks
+ in task : concatMap (collectChildren allTasks) children
+
+showTaskTree :: Maybe Text -> IO ()
+showTaskTree maybeId = do
+ tasks <- loadTasks
+ case maybeId of
+ Nothing -> do
+ let epics = filter (\t -> taskType t == Epic) tasks
+ if null epics
+ then putText "No epics found"
+ else traverse_ (printEpicTree tasks) epics
+ Just tid -> do
+ case findTask tid tasks of
+ Nothing -> putText "Task not found"
+ Just task -> printEpicTree tasks task
+ where
+ printEpicTree allTasks task = printTreeNode allTasks task (0 :: Int)
+
+ printTreeNode allTasks task indent = printTreeNode' allTasks task indent []
+
+ printTreeNode' allTasks task indent ancestry = do
+ let children = filter (maybe False (`matchesId` taskId task) <. taskParent) allTasks
+ prefix =
+ if indent == 0
+ then ""
+ else
+ let ancestorPrefixes = map (\hasMore -> if hasMore then "│ " else " ") (List.init ancestry)
+ myPrefix = if List.last ancestry then "├── " else "└── "
+ in T.pack <| concat ancestorPrefixes ++ myPrefix
+ statusStr = case taskType task of
+ Epic ->
+ let total = length children
+ completed = length <| filter (\t -> taskStatus t == Done) children
+ in "[" <> T.pack (show completed) <> "/" <> T.pack (show total) <> "]"
+ _ -> case taskStatus task of
+ Draft -> "[.]"
+ Open -> "[ ]"
+ InProgress -> "[~]"
+ Review -> "[?]"
+ Approved -> "[+]"
+ Done -> "[✓]"
+ NeedsHelp -> "[!]"
+
+ coloredStatusStr = case taskType task of
+ Epic -> magenta statusStr
+ _ -> case taskStatus task of
+ Draft -> gray statusStr
+ Open -> bold statusStr
+ InProgress -> yellow statusStr
+ Review -> magenta statusStr
+ Approved -> green statusStr
+ Done -> green statusStr
+ NeedsHelp -> yellow statusStr
+
+ nsStr = case taskNamespace task of
+ Nothing -> ""
+ Just ns -> "[" <> ns <> "] "
+
+ coloredNsStr = case taskNamespace task of
+ Nothing -> ""
+ Just _ -> gray nsStr
+
+ usedWidth = T.length prefix + T.length (taskId task) + T.length statusStr + T.length nsStr + 2
+ availableWidth = max 20 (80 - usedWidth)
+ truncatedTitle =
+ if T.length (taskTitle task) > availableWidth
+ then T.take (availableWidth - 3) (taskTitle task) <> "..."
+ else taskTitle task
+
+ coloredTitle = if taskType task == Epic then bold truncatedTitle else truncatedTitle
+
+ putText <| prefix <> cyan (taskId task) <> " " <> coloredStatusStr <> " " <> coloredNsStr <> coloredTitle
+
+ let indexedChildren = zip [1 ..] children
+ totalChildren = length children
+ traverse_
+ ( \(idx, child) ->
+ let hasMoreSiblings = idx < totalChildren
+ in printTreeNode' allTasks child (indent + 1) (ancestry ++ [hasMoreSiblings])
+ )
+ indexedChildren
+
+printTask :: Task -> IO ()
+printTask t = do
+ tasks <- loadTasks
+ let progressInfo =
+ if taskType t == Epic
+ then
+ let children = filter (maybe False (`matchesId` taskId t) <. taskParent) tasks
+ total = length children
+ completed = length <| filter (\child -> taskStatus child == Done) children
+ in " [" <> T.pack (show completed) <> "/" <> T.pack (show total) <> "]"
+ else ""
+
+ parentInfo = case taskParent t of
+ Nothing -> ""
+ Just p -> " (parent: " <> p <> ")"
+
+ namespaceInfo = case taskNamespace t of
+ Nothing -> ""
+ Just ns -> " [" <> ns <> "]"
+
+ coloredStatus =
+ let s = "[" <> T.pack (show (taskStatus t)) <> "]"
+ in case taskStatus t of
+ Draft -> gray s
+ Open -> bold s
+ InProgress -> yellow s
+ Review -> magenta s
+ Approved -> green s
+ Done -> green s
+ NeedsHelp -> yellow s
+
+ coloredTitle = if taskType t == Epic then bold (taskTitle t) else taskTitle t
+ coloredProgress = if taskType t == Epic then magenta progressInfo else progressInfo
+ coloredNamespace = case taskNamespace t of
+ Nothing -> ""
+ Just _ -> gray namespaceInfo
+ coloredParent = case taskParent t of
+ Nothing -> ""
+ Just _ -> gray parentInfo
+
+ putText
+ <| cyan (taskId t)
+ <> " ["
+ <> T.pack (show (taskType t))
+ <> "] "
+ <> coloredStatus
+ <> coloredProgress
+ <> " "
+ <> coloredTitle
+ <> coloredParent
+ <> coloredNamespace
+
+showTaskDetailed :: Task -> IO ()
+showTaskDetailed t = do
+ tasks <- loadTasks
+ putText ""
+ putText <| "Title: " <> taskTitle t <> " (ID: " <> taskId t <> ")"
+ putText <| "Type: " <> T.pack (show (taskType t))
+ putText <| "Status: " <> T.pack (show (taskStatus t))
+ putText <| "Priority: " <> T.pack (show (taskPriority t)) <> priorityDesc
+ case taskComplexity t of
+ Nothing -> pure ()
+ Just c -> putText ("Complexity: " <> T.pack (show c) <> "/5")
+
+ when (taskType t == Epic) <| do
+ let children = filter (maybe False (`matchesId` taskId t) <. taskParent) tasks
+ total = length children
+ completed = length <| filter (\child -> taskStatus child == Done) children
+ percentage = if total == 0 then 0 else (completed * 100) `div` total
+ putText <| "Progress: " <> T.pack (show completed) <> "/" <> T.pack (show total) <> " (" <> T.pack (show percentage) <> "%)"
+
+ case taskParent t of
+ Nothing -> pure ()
+ Just p -> putText <| "Parent: " <> p
+ case taskNamespace t of
+ Nothing -> pure ()
+ Just ns -> putText <| "Namespace: " <> ns
+ putText <| "Created: " <> T.pack (show (taskCreatedAt t))
+ putText <| "Updated: " <> T.pack (show (taskUpdatedAt t))
+
+ unless (null (taskDependencies t)) <| do
+ putText ""
+ putText "Dependencies:"
+ traverse_ printDependency (taskDependencies t)
+
+ unless (T.null (taskDescription t)) <| do
+ putText ""
+ putText "Description:"
+ let indented = T.unlines <| map (" " <>) (T.lines (taskDescription t))
+ putText indented
+
+ unless (null (taskComments t)) <| do
+ putText ""
+ putText "Comments:"
+ traverse_ printComment (taskComments t)
+
+ putText ""
+ where
+ priorityDesc = case taskPriority t of
+ P0 -> " (Critical)"
+ P1 -> " (High)"
+ P2 -> " (Medium)"
+ P3 -> " (Low)"
+ P4 -> " (Backlog)"
+
+ printDependency dep =
+ putText <| " - " <> depId dep <> " [" <> T.pack (show (depType dep)) <> "]"
+
+ printComment c =
+ putText <| " [" <> T.pack (show (commentCreatedAt c)) <> "] [" <> T.pack (show (commentAuthor c)) <> "] " <> commentText c
+
+red, green, yellow, blue, magenta, cyan, gray, bold :: Text -> Text
+red t = "\ESC[31m" <> t <> "\ESC[0m"
+green t = "\ESC[32m" <> t <> "\ESC[0m"
+yellow t = "\ESC[33m" <> t <> "\ESC[0m"
+blue t = "\ESC[34m" <> t <> "\ESC[0m"
+magenta t = "\ESC[35m" <> t <> "\ESC[0m"
+cyan t = "\ESC[36m" <> t <> "\ESC[0m"
+gray t = "\ESC[90m" <> t <> "\ESC[0m"
+bold t = "\ESC[1m" <> t <> "\ESC[0m"
+
+-- Export tasks: Dump SQLite to JSONL
+exportTasks :: Maybe FilePath -> IO ()
+exportTasks maybePath = do
+ tasks <- loadTasks
+ case maybePath of
+ Just path -> do
+ TIO.writeFile path ""
+ traverse_ (saveTaskToJsonl path) tasks
+ Nothing ->
+ -- Stream to stdout
+ traverse_ (BLC.putStrLn <. encode) tasks
+
+saveTaskToJsonl :: FilePath -> Task -> IO ()
+saveTaskToJsonl path task = do
+ let json = encode task
+ BLC.appendFile path (json <> "\n")
+
+data TaskStats = TaskStats
+ { totalTasks :: Int,
+ draftTasks :: Int,
+ openTasks :: Int,
+ inProgressTasks :: Int,
+ reviewTasks :: Int,
+ approvedTasks :: Int,
+ doneTasks :: Int,
+ totalEpics :: Int,
+ readyTasks :: Int,
+ blockedTasks :: Int,
+ tasksByPriority :: [(Priority, Int)],
+ tasksByNamespace :: [(Text, Int)]
+ }
+ deriving (Show, Eq, Generic)
+
+instance ToJSON TaskStats
+
+instance FromJSON TaskStats
+
+getTaskStats :: Maybe Text -> IO TaskStats
+getTaskStats maybeEpicId = do
+ allTasks <- loadTasks
+
+ targetTasks <- case maybeEpicId of
+ Nothing -> pure allTasks
+ Just epicId ->
+ case findTask epicId allTasks of
+ Nothing -> panic "Epic not found"
+ Just task -> pure <| getAllDescendants allTasks (taskId task)
+
+ globalReady <- getReadyTasks
+ let readyIds = map taskId globalReady
+ readyCount = length <| filter (\t -> taskId t `elem` readyIds) targetTasks
+
+ tasks = targetTasks
+ total = length tasks
+ draft = length <| filter (\t -> taskStatus t == Draft) tasks
+ open = length <| filter (\t -> taskStatus t == Open) tasks
+ inProg = length <| filter (\t -> taskStatus t == InProgress) tasks
+ review = length <| filter (\t -> taskStatus t == Review) tasks
+ approved = length <| filter (\t -> taskStatus t == Approved) tasks
+ done = length <| filter (\t -> taskStatus t == Done) tasks
+ epics = length <| filter (\t -> taskType t == Epic) tasks
+ readyCount' = readyCount
+ blockedCount = total - readyCount' - done - draft
+ byPriority =
+ [ (P0, length <| filter (\t -> taskPriority t == P0) tasks),
+ (P1, length <| filter (\t -> taskPriority t == P1) tasks),
+ (P2, length <| filter (\t -> taskPriority t == P2) tasks),
+ (P3, length <| filter (\t -> taskPriority t == P3) tasks),
+ (P4, length <| filter (\t -> taskPriority t == P4) tasks)
+ ]
+ namespaces = mapMaybe taskNamespace tasks
+ uniqueNs = List.nub namespaces
+ byNamespace = map (\ns -> (ns, length <| filter (\t -> taskNamespace t == Just ns) tasks)) uniqueNs
+ pure
+ TaskStats
+ { totalTasks = total,
+ draftTasks = draft,
+ openTasks = open,
+ inProgressTasks = inProg,
+ reviewTasks = review,
+ approvedTasks = approved,
+ doneTasks = done,
+ totalEpics = epics,
+ readyTasks = readyCount',
+ blockedTasks = blockedCount,
+ tasksByPriority = byPriority,
+ tasksByNamespace = byNamespace
+ }
+
+getAllDescendants :: [Task] -> Text -> [Task]
+getAllDescendants allTasks parentId =
+ let children = filter (maybe False (`matchesId` parentId) <. taskParent) allTasks
+ in children ++ concatMap (getAllDescendants allTasks <. taskId) children
+
+computeTaskStatsFromList :: [Task] -> TaskStats
+computeTaskStatsFromList tasks =
+ let total = length tasks
+ draft = length [t | t <- tasks, taskStatus t == Draft]
+ open = length [t | t <- tasks, taskStatus t == Open]
+ inProg = length [t | t <- tasks, taskStatus t == InProgress]
+ review = length [t | t <- tasks, taskStatus t == Review]
+ approved = length [t | t <- tasks, taskStatus t == Approved]
+ done = length [t | t <- tasks, taskStatus t == Done]
+ epics = length [t | t <- tasks, taskType t == Epic]
+ readyCount = open + inProg
+ blockedCount = 0
+ byPriority =
+ [ (P0, length [t | t <- tasks, taskPriority t == P0]),
+ (P1, length [t | t <- tasks, taskPriority t == P1]),
+ (P2, length [t | t <- tasks, taskPriority t == P2]),
+ (P3, length [t | t <- tasks, taskPriority t == P3]),
+ (P4, length [t | t <- tasks, taskPriority t == P4])
+ ]
+ namespaces = mapMaybe taskNamespace tasks
+ uniqueNs = List.nub namespaces
+ byNamespace = [(ns, length [t | t <- tasks, taskNamespace t == Just ns]) | ns <- uniqueNs]
+ in TaskStats
+ { totalTasks = total,
+ draftTasks = draft,
+ openTasks = open,
+ inProgressTasks = inProg,
+ reviewTasks = review,
+ approvedTasks = approved,
+ doneTasks = done,
+ totalEpics = epics,
+ readyTasks = readyCount,
+ blockedTasks = blockedCount,
+ tasksByPriority = byPriority,
+ tasksByNamespace = byNamespace
+ }
+
+showTaskStats :: Maybe Text -> IO ()
+showTaskStats maybeEpicId = do
+ stats <- getTaskStats maybeEpicId
+ putText ""
+ case maybeEpicId of
+ Nothing -> putText "Task Statistics"
+ Just epicId -> putText <| "Task Statistics for Epic " <> epicId
+ putText ""
+ putText <| "Total tasks: " <> T.pack (show (totalTasks stats))
+ putText <| " Draft: " <> T.pack (show (draftTasks stats))
+ putText <| " Open: " <> T.pack (show (openTasks stats))
+ putText <| " In Progress: " <> T.pack (show (inProgressTasks stats))
+ putText <| " Review: " <> T.pack (show (reviewTasks stats))
+ putText <| " Approved: " <> T.pack (show (approvedTasks stats))
+ putText <| " Done: " <> T.pack (show (doneTasks stats))
+ putText ""
+ putText <| "Epics: " <> T.pack (show (totalEpics stats))
+ putText ""
+ putText <| "Ready to work: " <> T.pack (show (readyTasks stats))
+ putText <| "Blocked: " <> T.pack (show (blockedTasks stats))
+ putText ""
+ putText "By Priority:"
+ traverse_ printPriority (tasksByPriority stats)
+ unless (null (tasksByNamespace stats)) <| do
+ putText ""
+ putText "By Namespace:"
+ traverse_ printNamespace (tasksByNamespace stats)
+ putText ""
+ where
+ printPriority (p, count) =
+ let label = case p of
+ P0 -> "P0 (Critical)"
+ P1 -> "P1 (High)"
+ P2 -> "P2 (Medium)"
+ P3 -> "P3 (Low)"
+ P4 -> "P4 (Backlog)"
+ in putText <| " " <> T.pack (show count) <> " " <> label
+ printNamespace (ns, count) =
+ putText <| " " <> T.pack (show count) <> " " <> ns
+
+-- Import tasks: Read from JSONL and insert/update DB
+importTasks :: FilePath -> IO ()
+importTasks filePath = do
+ exists <- doesFileExist filePath
+ unless exists <| panic (T.pack filePath <> " does not exist")
+
+ content <- TIO.readFile filePath
+ let importLines = T.lines content
+ importedTasks = map normalizeTask (mapMaybe decodeTask importLines)
+
+ -- Save all imported tasks (UPSERT logic handles updates)
+ traverse_ saveTask importedTasks
+ where
+ decodeTask :: Text -> Maybe Task
+ decodeTask line =
+ if T.null line
+ then Nothing
+ else decode (BLC.pack <| T.unpack line)
+
+-- Retry context management
+
+-- | Get retry context for a task (if any)
+getRetryContext :: Text -> IO (Maybe RetryContext)
+getRetryContext tid =
+ withDb <| \conn -> do
+ rows <-
+ SQL.query
+ conn
+ "SELECT task_id, original_commit, conflict_files, attempt, reason, notes FROM retry_context WHERE task_id = ?"
+ (SQL.Only tid) ::
+ IO [(Text, Text, Text, Int, Text, Maybe Text)]
+ case rows of
+ [] -> pure Nothing
+ ((taskId', commit, filesJson, attempt, reason, notes) : _) ->
+ let files = fromMaybe [] (decode (BLC.pack <| T.unpack filesJson))
+ in pure
+ <| Just
+ RetryContext
+ { retryTaskId = taskId',
+ retryOriginalCommit = commit,
+ retryConflictFiles = files,
+ retryAttempt = attempt,
+ retryReason = reason,
+ retryNotes = notes
+ }
+
+-- | Set retry context for a task (upsert)
+setRetryContext :: RetryContext -> IO ()
+setRetryContext ctx =
+ withDb <| \conn -> do
+ let filesJson = T.pack <| BLC.unpack <| encode (retryConflictFiles ctx)
+ SQL.execute
+ conn
+ "INSERT OR REPLACE INTO retry_context (task_id, original_commit, conflict_files, attempt, reason, notes) VALUES (?, ?, ?, ?, ?, ?)"
+ (retryTaskId ctx, retryOriginalCommit ctx, filesJson, retryAttempt ctx, retryReason ctx, retryNotes ctx)
+
+-- | Clear retry context for a task (on successful merge)
+clearRetryContext :: Text -> IO ()
+clearRetryContext tid =
+ withDb <| \conn ->
+ SQL.execute conn "DELETE FROM retry_context WHERE task_id = ?" (SQL.Only tid)
+
+-- | Increment retry attempt and return new count
+incrementRetryAttempt :: Text -> IO Int
+incrementRetryAttempt tid = do
+ maybeCtx <- getRetryContext tid
+ case maybeCtx of
+ Nothing -> pure 1
+ Just ctx -> do
+ let newAttempt = retryAttempt ctx + 1
+ setRetryContext ctx {retryAttempt = newAttempt}
+ pure newAttempt
+
+-- | Map ActivityStage to event_type string for agent_events
+activityStageToEventType :: ActivityStage -> Text
+activityStageToEventType Claiming = "claim"
+activityStageToEventType Running = "running"
+activityStageToEventType Reviewing = "reviewing"
+activityStageToEventType Retrying = "retrying"
+activityStageToEventType Completed = "complete"
+activityStageToEventType Failed = "error"
+
+-- | Log activity to agent_events table (unified timeline)
+logActivity :: Text -> ActivityStage -> Maybe Text -> IO ()
+logActivity tid stage metadata = do
+ sessionId <- getOrCreateCommentSession tid
+ let eventType = activityStageToEventType stage
+ content = fromMaybe "" metadata
+ insertAgentEvent tid sessionId eventType content Junior
+
+-- | Log activity with worker metrics (timing, cost stored in metadata JSON)
+logActivityWithMetrics :: Text -> ActivityStage -> Maybe Text -> Maybe Text -> Maybe UTCTime -> Maybe UTCTime -> Maybe Int -> Maybe Int -> IO Int
+logActivityWithMetrics tid stage baseMetadata _ampUrl startedAt completedAt costCents tokens = do
+ sessionId <- getOrCreateCommentSession tid
+ let eventType = activityStageToEventType stage
+ metricsJson = buildMetricsJson baseMetadata startedAt completedAt costCents tokens
+ withDb <| \conn -> do
+ SQL.execute
+ conn
+ "INSERT INTO agent_events (task_id, session_id, event_type, content, actor) VALUES (?, ?, ?, ?, ?)"
+ (tid, sessionId, eventType, metricsJson, Junior)
+ [SQL.Only actId] <- SQL.query_ conn "SELECT last_insert_rowid()" :: IO [SQL.Only Int]
+ pure actId
+
+-- | Build metrics JSON for activity metadata
+buildMetricsJson :: Maybe Text -> Maybe UTCTime -> Maybe UTCTime -> Maybe Int -> Maybe Int -> Text
+buildMetricsJson baseMetadata startedAt completedAt costCents tokens =
+ let base = fromMaybe "{}" baseMetadata
+ additions =
+ catMaybes
+ [ fmap (\t -> "\"started_at\":\"" <> T.pack (show t) <> "\"") startedAt,
+ fmap (\t -> "\"completed_at\":\"" <> T.pack (show t) <> "\"") completedAt,
+ fmap (\c -> "\"cost_cents\":" <> T.pack (show c)) costCents,
+ fmap (\t -> "\"tokens_used\":" <> T.pack (show t)) tokens
+ ]
+ in if null additions
+ then base
+ else
+ if base == "{}"
+ then "{" <> T.intercalate "," additions <> "}"
+ else T.init base <> "," <> T.intercalate "," additions <> "}"
+
+-- | Update an existing activity record with metrics (in agent_events)
+updateActivityMetrics :: Int -> Maybe Text -> Maybe UTCTime -> Maybe Int -> Maybe Int -> IO ()
+updateActivityMetrics actId _ampUrl completedAt costCents tokens =
+ withDb <| \conn -> do
+ [SQL.Only currentContent] <- SQL.query conn "SELECT content FROM agent_events WHERE id = ?" (SQL.Only actId) :: IO [SQL.Only Text]
+ let updatedContent = buildMetricsJson (Just currentContent) Nothing completedAt costCents tokens
+ SQL.execute conn "UPDATE agent_events SET content = ? WHERE id = ?" (updatedContent, actId)
+
+-- | Get all activities for a task from agent_events, ordered by timestamp descending
+-- Returns TaskActivity for backward compatibility
+getActivitiesForTask :: Text -> IO [TaskActivity]
+getActivitiesForTask tid = do
+ events <- getAllEventsForTask tid
+ let activityEvents = filter (isActivityEvent <. storedEventType) events
+ pure <| map storedEventToActivity (reverse activityEvents)
+
+-- | Check if an event type is an activity event
+isActivityEvent :: Text -> Bool
+isActivityEvent t = t `elem` ["claim", "running", "reviewing", "retrying", "complete", "error"]
+
+-- | Convert StoredEvent to TaskActivity for backward compatibility
+storedEventToActivity :: StoredEvent -> TaskActivity
+storedEventToActivity evt =
+ let stage = eventTypeToActivityStage (storedEventType evt)
+ (startedAt, completedAt, costCents, tokens) = parseMetricsFromContent (storedEventContent evt)
+ in TaskActivity
+ { activityId = Just (storedEventId evt),
+ activityTaskId = storedEventTaskId evt,
+ activityTimestamp = storedEventTimestamp evt,
+ activityStage = stage,
+ activityMessage = Nothing,
+ activityMetadata = Just (storedEventContent evt),
+ activityThreadUrl = Nothing,
+ activityStartedAt = startedAt,
+ activityCompletedAt = completedAt,
+ activityCostCents = costCents,
+ activityTokensUsed = tokens
+ }
+
+-- | Map event_type back to ActivityStage
+eventTypeToActivityStage :: Text -> ActivityStage
+eventTypeToActivityStage "claim" = Claiming
+eventTypeToActivityStage "running" = Running
+eventTypeToActivityStage "reviewing" = Reviewing
+eventTypeToActivityStage "retrying" = Retrying
+eventTypeToActivityStage "complete" = Completed
+eventTypeToActivityStage "error" = Failed
+eventTypeToActivityStage _ = Running
+
+-- | Parse metrics from content JSON (best effort)
+parseMetricsFromContent :: Text -> (Maybe UTCTime, Maybe UTCTime, Maybe Int, Maybe Int)
+parseMetricsFromContent content =
+ case Aeson.decode (BLC.pack (T.unpack content)) of
+ Just (Aeson.Object obj) ->
+ let getCents = case KeyMap.lookup "cost_cents" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ getTokens = case KeyMap.lookup "tokens_used" obj of
+ Just (Aeson.Number n) -> Just (round n)
+ _ -> Nothing
+ in (Nothing, Nothing, getCents, getTokens)
+ _ -> (Nothing, Nothing, Nothing, Nothing)
+
+-- | Get the most recent running activity for a task (for metrics display)
+getLatestRunningActivity :: Text -> IO (Maybe TaskActivity)
+getLatestRunningActivity tid = do
+ activities <- getActivitiesForTask tid
+ pure <| List.find (\a -> activityStage a == Running) activities
+
+-- | Get aggregated metrics for all descendants of an epic
+getAggregatedMetrics :: Text -> IO AggregatedMetrics
+getAggregatedMetrics epicId = do
+ allTasks <- loadTasks
+ let descendants = getAllDescendants allTasks epicId
+ descendantIds = map taskId descendants
+ completedCount = length [t | t <- descendants, taskStatus t == Done]
+ activities <- concat </ traverse getActivitiesForTask descendantIds
+ let totalCost = sum [c | act <- activities, Just c <- [activityCostCents act]]
+ totalTokens = sum [t | act <- activities, Just t <- [activityTokensUsed act]]
+ totalDuration = sum [calcDuration act | act <- activities]
+ pure
+ AggregatedMetrics
+ { aggTotalCostCents = totalCost,
+ aggTotalDurationSeconds = totalDuration,
+ aggCompletedTasks = completedCount,
+ aggTotalTokens = totalTokens
+ }
+ where
+ calcDuration act = case (activityStartedAt act, activityCompletedAt act) of
+ (Just start, Just end) -> floor (diffUTCTime end start)
+ _ -> 0
+
+-- | Get aggregated metrics for all tasks globally (not scoped to an epic)
+getGlobalAggregatedMetrics :: IO AggregatedMetrics
+getGlobalAggregatedMetrics = do
+ allTasks <- loadTasks
+ let completedCount = length [t | t <- allTasks, taskStatus t == Done]
+ taskIds = map taskId allTasks
+ activities <- concat </ traverse getActivitiesForTask taskIds
+ let totalCost = sum [c | act <- activities, Just c <- [activityCostCents act]]
+ totalTokens = sum [t | act <- activities, Just t <- [activityTokensUsed act]]
+ totalDuration = sum [calcDuration act | act <- activities]
+ pure
+ AggregatedMetrics
+ { aggTotalCostCents = totalCost,
+ aggTotalDurationSeconds = totalDuration,
+ aggCompletedTasks = completedCount,
+ aggTotalTokens = totalTokens
+ }
+ where
+ calcDuration act = case (activityStartedAt act, activityCompletedAt act) of
+ (Just start, Just end) -> floor (diffUTCTime end start)
+ _ -> 0
+
+-- | Get tasks with unmet blocking dependencies (not ready, not done)
+getBlockedTasks :: IO [Task]
+getBlockedTasks = do
+ allTasks <- loadTasks
+ readyTasks <- getReadyTasks
+ let readyIds = map taskId readyTasks
+ doneIds = [taskId t | t <- allTasks, taskStatus t == Done]
+ isBlocked task =
+ taskStatus task
+ `elem` [Open, InProgress]
+ && taskId task
+ `notElem` readyIds
+ && taskId task
+ `notElem` doneIds
+ pure [t | t <- allTasks, isBlocked t]
+
+-- | Count how many tasks are transitively blocked by this task
+getBlockingImpact :: [Task] -> Task -> Int
+getBlockingImpact allTasks task =
+ length (getTransitiveDependents allTasks (taskId task))
+
+-- | Get all tasks that depend on this task (directly or transitively)
+-- Uses a Set to track visited nodes and avoid infinite loops from circular deps
+getTransitiveDependents :: [Task] -> Text -> [Task]
+getTransitiveDependents allTasks tid = go Set.empty [tid]
+ where
+ go :: Set.Set Text -> [Text] -> [Task]
+ go _ [] = []
+ go visited (current : rest)
+ | Set.member current visited = go visited rest
+ | otherwise =
+ let directDeps = [t | t <- allTasks, dependsOnTask current t]
+ newIds = [taskId t | t <- directDeps, not (Set.member (taskId t) visited)]
+ visited' = Set.insert current visited
+ in directDeps ++ go visited' (newIds ++ rest)
+
+-- | Check if task depends on given ID with Blocks dependency type
+dependsOnTask :: Text -> Task -> Bool
+dependsOnTask tid task =
+ any (\d -> matchesId (depId d) tid && depType d == Blocks) (taskDependencies task)
+
+-- | Get tasks that have failed 3+ times and need human intervention
+getInterventionTasks :: IO [Task]
+getInterventionTasks = do
+ allTasks <- loadTasks
+ retryContexts <- getAllRetryContexts
+ let highRetryIds = [retryTaskId ctx | ctx <- retryContexts, retryAttempt ctx >= 3]
+ pure [t | t <- allTasks, taskId t `elem` highRetryIds]
+
+-- | Get all items needing human action
+getHumanActionItems :: IO HumanActionItems
+getHumanActionItems = do
+ allTasks <- loadTasks
+ retryContexts <- getAllRetryContexts
+ let highRetryIds = [retryTaskId ctx | ctx <- retryContexts, retryAttempt ctx >= 3]
+ failed = [t | t <- allTasks, taskId t `elem` highRetryIds]
+ epics = [t | t <- allTasks, taskType t == Epic, taskStatus t /= Done]
+ epicsReady =
+ [ EpicForReview
+ { epicTask = e,
+ epicTotal = total,
+ epicCompleted = completed
+ }
+ | e <- epics,
+ let children = [c | c <- allTasks, taskParent c == Just (taskId e)],
+ let total = length children,
+ total > 0,
+ let completed = length [c | c <- children, taskStatus c == Done],
+ completed == total
+ ]
+ needingHelp = [t | t <- allTasks, taskStatus t == NeedsHelp]
+ pure
+ HumanActionItems
+ { failedTasks = failed,
+ epicsInReview = epicsReady,
+ tasksNeedingHelp = needingHelp
+ }
+
+-- | Get all retry contexts from the database
+getAllRetryContexts :: IO [RetryContext]
+getAllRetryContexts =
+ withDb <| \conn -> do
+ rows <-
+ SQL.query_
+ conn
+ "SELECT task_id, original_commit, conflict_files, attempt, reason, notes FROM retry_context" ::
+ IO [(Text, Text, Text, Int, Text, Maybe Text)]
+ pure
+ [ RetryContext
+ { retryTaskId = tid,
+ retryOriginalCommit = commit,
+ retryConflictFiles = fromMaybe [] (decode (BLC.pack (T.unpack filesJson))),
+ retryAttempt = attempt,
+ retryReason = reason,
+ retryNotes = notes
+ }
+ | (tid, commit, filesJson, attempt, reason, notes) <- rows
+ ]
+
+-- | Update just the notes field for a retry context
+updateRetryNotes :: Text -> Text -> IO ()
+updateRetryNotes tid notes = do
+ maybeCtx <- getRetryContext tid
+ case maybeCtx of
+ Nothing ->
+ setRetryContext
+ RetryContext
+ { retryTaskId = tid,
+ retryOriginalCommit = "",
+ retryConflictFiles = [],
+ retryAttempt = 0,
+ retryReason = "",
+ retryNotes = Just notes
+ }
+ Just ctx ->
+ setRetryContext ctx {retryNotes = Just notes}
+
+-- Fact management
+
+-- | Save a fact to the database
+saveFact :: Fact -> IO Int
+saveFact f =
+ withDb <| \conn -> do
+ let filesJson = T.pack <| BLC.unpack <| encode (factRelatedFiles f)
+ SQL.execute
+ conn
+ "INSERT INTO facts (project, fact, related_files, source_task, confidence, created_at) \
+ \VALUES (?, ?, ?, ?, ?, ?)"
+ (factProject f, factContent f, filesJson, factSourceTask f, factConfidence f, factCreatedAt f)
+ [SQL.Only factIdVal] <- SQL.query_ conn "SELECT last_insert_rowid()" :: IO [SQL.Only Int]
+ pure factIdVal
+
+-- | Load all facts from the database
+loadFacts :: IO [Fact]
+loadFacts =
+ withDb <| \conn ->
+ SQL.query_
+ conn
+ "SELECT id, project, fact, related_files, source_task, confidence, created_at FROM facts"
+
+-- | Get facts for a specific project
+getFactsForProject :: Text -> IO [Fact]
+getFactsForProject proj =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, project, fact, related_files, source_task, confidence, created_at \
+ \FROM facts WHERE project = ? ORDER BY confidence DESC"
+ (SQL.Only proj)
+
+-- | Get facts related to a specific file
+getFactsForFile :: Text -> IO [Fact]
+getFactsForFile filePath =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, project, fact, related_files, source_task, confidence, created_at \
+ \FROM facts WHERE related_files LIKE ? ORDER BY confidence DESC"
+ (SQL.Only ("%" <> filePath <> "%"))
+
+-- | Delete a fact by ID
+deleteFact :: Int -> IO ()
+deleteFact fid =
+ withDb <| \conn ->
+ SQL.execute conn "DELETE FROM facts WHERE id = ?" (SQL.Only fid)
+
+-- ============================================================================
+-- Agent Events (for observability)
+-- ============================================================================
+
+instance SQL.FromField CommentAuthor where
+ fromField f = do
+ t <- SQL.fromField f :: SQLOk.Ok String
+ case t of
+ "human" -> pure Human
+ "junior" -> pure Junior
+ "system" -> pure System
+ _ -> SQL.returnError SQL.ConversionFailed f "Invalid CommentAuthor"
+
+instance SQL.ToField CommentAuthor where
+ toField Human = SQL.toField ("human" :: String)
+ toField Junior = SQL.toField ("junior" :: String)
+ toField System = SQL.toField ("system" :: String)
+
+-- | Stored agent event record
+data StoredEvent = StoredEvent
+ { storedEventId :: Int,
+ storedEventTaskId :: Text,
+ storedEventSessionId :: Text,
+ storedEventTimestamp :: UTCTime,
+ storedEventType :: Text,
+ storedEventContent :: Text,
+ storedEventActor :: CommentAuthor
+ }
+ deriving (Show, Eq, Generic)
+
+instance ToJSON StoredEvent
+
+instance FromJSON StoredEvent
+
+instance SQL.FromRow StoredEvent where
+ fromRow =
+ StoredEvent
+ </ SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+ <*> SQL.field
+
+-- | Generate a new session ID (timestamp-based for simplicity)
+generateSessionId :: IO Text
+generateSessionId = do
+ now <- getCurrentTime
+ pure <| "s-" <> T.pack (show now)
+
+-- | Insert an agent event with actor
+insertAgentEvent :: Text -> Text -> Text -> Text -> CommentAuthor -> IO ()
+insertAgentEvent taskId sessionId eventType content actor =
+ withDb <| \conn ->
+ SQL.execute
+ conn
+ "INSERT INTO agent_events (task_id, session_id, event_type, content, actor) VALUES (?, ?, ?, ?, ?)"
+ (taskId, sessionId, eventType, content, actor)
+
+-- | Get all events for a task (most recent session)
+getEventsForTask :: Text -> IO [StoredEvent]
+getEventsForTask taskId = do
+ maybeSession <- getLatestSessionForTask taskId
+ case maybeSession of
+ Nothing -> pure []
+ Just sid -> getEventsForSession sid
+
+-- | Get all events for a specific session
+getEventsForSession :: Text -> IO [StoredEvent]
+getEventsForSession sessionId =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, task_id, session_id, timestamp, event_type, content, actor \
+ \FROM agent_events WHERE session_id = ? ORDER BY id ASC"
+ (SQL.Only sessionId)
+
+-- | Get all sessions for a task
+getSessionsForTask :: Text -> IO [Text]
+getSessionsForTask taskId =
+ withDb <| \conn -> do
+ rows <-
+ SQL.query
+ conn
+ "SELECT DISTINCT session_id FROM agent_events WHERE task_id = ? ORDER BY session_id DESC"
+ (SQL.Only taskId) ::
+ IO [SQL.Only Text]
+ pure [sid | SQL.Only sid <- rows]
+
+-- | Get the most recent session ID for a task
+getLatestSessionForTask :: Text -> IO (Maybe Text)
+getLatestSessionForTask taskId =
+ withDb <| \conn -> do
+ rows <-
+ SQL.query
+ conn
+ "SELECT session_id FROM agent_events WHERE task_id = ? ORDER BY id DESC LIMIT 1"
+ (SQL.Only taskId) ::
+ IO [SQL.Only Text]
+ pure <| case rows of
+ [SQL.Only sid] -> Just sid
+ _ -> Nothing
+
+-- | Get events for a task since a given event ID (for streaming/polling)
+getEventsSince :: Text -> Int -> IO [StoredEvent]
+getEventsSince sessionId lastId =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, task_id, session_id, timestamp, event_type, content, actor \
+ \FROM agent_events WHERE session_id = ? AND id > ? ORDER BY id ASC"
+ (sessionId, lastId)
+
+-- | Insert a checkpoint event (for progress tracking)
+insertCheckpoint :: Text -> Text -> Text -> IO ()
+insertCheckpoint taskId sessionId content =
+ insertAgentEvent taskId sessionId "Checkpoint" content Junior
+
+-- | Get all checkpoints for a task (across all sessions)
+getCheckpointsForTask :: Text -> IO [StoredEvent]
+getCheckpointsForTask taskId =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, task_id, session_id, timestamp, event_type, content, actor \
+ \FROM agent_events WHERE task_id = ? AND event_type = 'Checkpoint' ORDER BY id ASC"
+ (SQL.Only taskId)
+
+-- | Get progress summary for a task (concatenated checkpoint contents)
+getProgressSummary :: Text -> IO (Maybe Text)
+getProgressSummary taskId = do
+ checkpoints <- getCheckpointsForTask taskId
+ if null checkpoints
+ then pure Nothing
+ else pure <| Just <| T.intercalate "\n\n---\n\n" [storedEventContent e | e <- checkpoints]
+
+-- | Get all comments for a task (from agent_events)
+getCommentsForTask :: Text -> IO [StoredEvent]
+getCommentsForTask taskId =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, task_id, session_id, timestamp, event_type, content, actor \
+ \FROM agent_events WHERE task_id = ? AND event_type = 'comment' ORDER BY id ASC"
+ (SQL.Only taskId)
+
+-- | Convert stored events to Comment type for backward compatibility
+storedEventToComment :: StoredEvent -> Comment
+storedEventToComment evt =
+ Comment
+ { commentText = storedEventContent evt,
+ commentAuthor = storedEventActor evt,
+ commentCreatedAt = storedEventTimestamp evt
+ }
+
+-- | Get all timeline events for a task (across all sessions)
+-- Includes: comments, status changes, tool calls, checkpoints, errors, etc.
+getAllEventsForTask :: Text -> IO [StoredEvent]
+getAllEventsForTask taskId =
+ withDb <| \conn ->
+ SQL.query
+ conn
+ "SELECT id, task_id, session_id, timestamp, event_type, content, actor \
+ \FROM agent_events WHERE task_id = ? ORDER BY timestamp ASC, id ASC"
+ (SQL.Only taskId)
diff --git a/Omni/Task/DESIGN.md b/Omni/Task/DESIGN.md
new file mode 100644
index 0000000..0dbf3b5
--- /dev/null
+++ b/Omni/Task/DESIGN.md
@@ -0,0 +1,232 @@
+# Task Manager Improvement Plan
+
+Based on beads project planning patterns, here are proposed improvements for Omni/Task.hs.
+
+## Current State
+
+**What we have:**
+- ✅ Basic CRUD operations (create, list, update, ready)
+- ✅ Dependency tracking (--deps for blocking)
+- ✅ JSONL storage with git sync
+- ✅ Short base62 task IDs
+- ✅ Optional namespace field
+- ✅ Project field for grouping
+- ✅ Three status levels (open, in-progress, done)
+
+**What we're missing:**
+- ❌ Multiple dependency types (blocks, discovered-from, parent-child, related)
+- ❌ Hierarchical task IDs (parent.1, parent.2)
+- ❌ Task types (epic vs task) - epics will replace "project"
+- ❌ Dependency tree visualization
+- ❌ Work discovery tracking
+- ❌ Epic/child task relationships
+
+## Proposed Improvements (Priority Order)
+
+### Phase 1: Core Features (High Priority)
+
+#### 1.1 Add Task Types (Epic vs Task)
+```haskell
+data TaskType = Epic | Task
+ deriving (Show, Eq, Generic)
+```
+
+**Benefits:**
+- Epics are containers for related tasks (replace "project" concept)
+- Tasks are the actual work items
+- Simple two-level hierarchy
+- Epic-based planning support
+
+**Schema Changes:**
+- Replace `taskProject :: Text` with `taskType :: TaskType`
+- Add `taskParent :: Maybe Text` for parent epic
+- Epics can contain tasks or other epics (for nested structure)
+
+**Commands:**
+```bash
+# Create an epic (container)
+task create "User Authentication System" --type=epic
+
+# Create tasks within an epic
+task create "Design API" --type=task --parent=t-abc123
+task create "Implement JWT" --type=task --parent=t-abc123
+
+# Create a sub-epic (optional, for complex projects)
+task create "OAuth Integration" --type=epic --parent=t-abc123
+```
+
+#### 1.2 Enhanced Dependency Types
+```haskell
+data DependencyType = Blocks | DiscoveredFrom | ParentChild | Related
+ deriving (Show, Eq, Generic)
+```
+
+**Benefits:**
+- Track work discovery context
+- Maintain audit trail
+- Support epic hierarchies
+
+**Commands:**
+```bash
+task create "Fix bug" project --discovered-from=t-abc123
+task create "Subtask 1" project --parent=t-epic-id
+task dep add t-123 t-124 --type=related
+```
+
+### Phase 2: Hierarchical Tasks (Medium Priority)
+
+#### 2.1 Parent-Child Task IDs
+**Pattern:** `t-abc123.1`, `t-abc123.2`, `t-abc123.3`
+
+**Benefits:**
+- Human-friendly sequential IDs under epic
+- Natural work breakdown
+- Up to 3 levels of nesting
+
+**Schema Changes:**
+```haskell
+data Task = Task
+ { ...
+ taskParent :: Maybe Text -- Parent task ID
+ ...
+ }
+
+-- New table for child counters
+CREATE TABLE child_counters (
+ parent_id TEXT PRIMARY KEY,
+ last_child INTEGER NOT NULL DEFAULT 0,
+ FOREIGN KEY (parent_id) REFERENCES tasks(id) ON DELETE CASCADE
+);
+```
+
+**Commands:**
+```bash
+task create "Design auth API" project --parent=t-abc123
+# Creates: t-abc123.1
+
+task create "Implement JWT" project --parent=t-abc123
+# Creates: t-abc123.2
+```
+
+#### 2.2 Dependency Tree Visualization
+```bash
+task tree t-epic-id
+# Shows:
+# t-abc123 [Epic] User Authentication System
+# t-abc123.1 [Task] Design auth API
+# t-abc123.2 [Task] Implement JWT
+# t-abc123.2.1 [Task] Add token generation
+# t-abc123.2.2 [Task] Add token validation
+# t-abc123.3 [Task] Add password hashing
+```
+
+### Phase 3: Project Management (Lower Priority)
+
+#### 3.1 Task Filtering and Queries
+```bash
+task list --type=epic
+task list --status=open
+task list --parent=t-epic-id # List all children
+```
+
+#### 3.2 Epic Statistics
+```bash
+task stats # Overall stats
+task stats --epic=t-abc123 # Epic-specific
+task progress t-epic-id # Epic completion %
+```
+
+#### 3.3 Discovered Work Tracking
+```bash
+task create "Found memory leak" project --discovered-from=t-abc123
+# Automatically links context
+# Shows in dependency tree as "discovered during t-abc123"
+```
+
+## Implementation Strategy
+
+### Milestone 1: Type System Foundations
+- [ ] Add TaskType enum (Epic | Task)
+- [ ] Add DependencyType enum
+- [ ] Update Task data structure (replace project with type and parent)
+- [ ] Update CLI commands
+- [ ] Update tests
+- [ ] Update AGENTS.md
+- [ ] Migration: existing tasks default to type=Task, project becomes epic name
+
+### Milestone 2: Enhanced Dependencies
+- [ ] Add `discovered-from` support
+- [ ] Add `related` dependency type
+- [ ] Track dependency metadata (who, when, why)
+- [ ] Update ready work algorithm to respect dependency types
+
+### Milestone 3: Hierarchical Structure
+- [ ] Add parent field to Task
+- [ ] Implement child ID generation (t-abc123.1)
+- [ ] Add child_counters table/storage
+- [ ] Update createTask to handle --parent flag
+- [ ] Implement parent-child dependency creation
+
+### Milestone 4: Visualization & Reporting
+- [ ] Implement `task tree` command
+- [ ] Implement `task stats` command
+- [ ] Implement `task progress` for epics
+- [ ] Add filtering by type, priority
+- [ ] Improve task list display with colors/formatting
+
+## Open Questions
+
+1. **Storage Format**: Should we keep JSONL or move to SQLite like beads?
+ - JSONL: Simple, git-friendly, human-readable
+ - SQLite: Fast queries, complex relationships, beads-compatible
+ - **Recommendation**: Start with JSONL, can add SQLite later for caching
+
+2. **Child Counter Storage**: Where to store child counters?
+ - Option A: Separate .tasks/counters.jsonl file
+ - Option B: In-memory during session, persist to JSONL
+ - Option C: Add SQLite just for this
+ - **Recommendation**: Option A - separate JSONL file
+
+3. **Dependency Storage**: How to store complex dependencies?
+ - Current: List of text IDs in task
+ - Beads: Separate dependencies table
+ - **Recommendation**: Add dependencies field with type info:
+ ```haskell
+ data Dependency = Dependency
+ { depId :: Text
+ , depType :: DependencyType
+ }
+ ```
+
+4. **Backward Compatibility**: How to handle existing tasks?
+ - Add sensible defaults (type=Task, priority=Medium)
+ - Migration script or auto-upgrade on load?
+ - **Recommendation**: Auto-upgrade with defaults on import
+
+## Benefits Summary
+
+**For AI Agents:**
+- Better work discovery and context tracking
+- Clearer project structure
+- Easier to understand what work is related
+- Natural way to break down large tasks
+
+**For Humans:**
+- Epic-based planning for large features
+- Priority-driven work queue
+- Visual dependency trees
+- Better project tracking and reporting
+
+**For Collaboration:**
+- Discovered work maintains context
+- Related work is easily found
+- Epic structure provides clear organization
+- Dependency tracking prevents duplicate work
+
+## Next Steps
+
+1. Create tasks for each milestone
+2. Start with Milestone 1 (Type System Foundations)
+3. Get feedback on hierarchical ID format
+4. Implement incrementally, test thoroughly
+5. Update AGENTS.md with new patterns
diff --git a/Omni/Task/MigrationTest.hs b/Omni/Task/MigrationTest.hs
new file mode 100644
index 0000000..f16f782
--- /dev/null
+++ b/Omni/Task/MigrationTest.hs
@@ -0,0 +1,42 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+module Omni.Task.MigrationTest where
+
+import Alpha
+import qualified Data.Set as Set
+import Omni.Task.Core
+import qualified Omni.Test as Test
+import System.Directory (doesFileExist, removeFile)
+import System.Environment (setEnv)
+
+test :: Test.Tree
+test = Test.group "Omni.Task.Migration" [migrationStartupTest]
+
+migrationStartupTest :: Test.Tree
+migrationStartupTest =
+ Test.unit "database initializes with schema migrations" <| do
+ setEnv "TASK_TEST_MODE" "1"
+
+ let testFile = "_/tmp/tasks-test.db"
+ exists <- doesFileExist testFile
+ when exists <| removeFile testFile
+
+ initTaskDb
+
+ withDb <| \conn -> do
+ tasksCols <- getTableColumns conn "tasks"
+ activityCols <- getTableColumns conn "task_activity"
+ retryCols <- getTableColumns conn "retry_context"
+
+ Set.fromList ["id", "title", "status"]
+ `Set.isSubsetOf` Set.fromList tasksCols
+ Test.@?= True
+ Set.fromList ["id", "task_id", "stage"]
+ `Set.isSubsetOf` Set.fromList activityCols
+ Test.@?= True
+ Set.fromList ["task_id", "attempt", "reason"]
+ `Set.isSubsetOf` Set.fromList retryCols
+ Test.@?= True
+
+ removeFile testFile
diff --git a/Omni/Task/README.md b/Omni/Task/README.md
new file mode 100644
index 0000000..4025b74
--- /dev/null
+++ b/Omni/Task/README.md
@@ -0,0 +1,374 @@
+# Task Manager for AI Agents
+
+The task manager is a dependency-aware issue tracker inspired by beads. It uses:
+- **Storage**: SQLite database (`~/.local/share/jr/jr.db`)
+- **Dependencies**: Tasks can block other tasks
+- **Ready work detection**: Automatically finds unblocked tasks
+
+**IMPORTANT**: You MUST use `task` for ALL issue tracking. NEVER use markdown TODOs, todo_write, task lists, or any other tracking methods.
+
+## Human Setup vs Agent Usage
+
+**If you see "database not found" or similar errors:**
+```bash
+task init --quiet # Non-interactive, auto-setup, no prompts
+```
+
+**Why `--quiet`?** The regular `task init` may have interactive prompts. The `--quiet` flag makes it fully non-interactive and safe for agent-driven setup.
+
+**If `task init --quiet` fails:** Ask the human to run `task init` manually, then continue.
+
+## Create a Task
+```bash
+task create "<title>" [--type=<type>] [--parent=<id>] [--deps=<ids>] [--dep-type=<type>] [--discovered-from=<id>] [--namespace=<ns>]
+```
+
+Examples:
+```bash
+# Create an epic (container for tasks)
+task create "User Authentication System" --type=epic
+
+# Create a task within an epic
+task create "Design auth API" --parent=t-abc123
+
+# Create a task with blocking dependency
+task create "Write tests" --deps=t-a1b2c3 --dep-type=blocks
+
+# Create work discovered during implementation (shortcut)
+task create "Fix memory leak" --discovered-from=t-abc123
+
+# Create related work (doesn't block)
+task create "Update documentation" --deps=t-abc123 --dep-type=related
+
+# Associate with a namespace
+task create "Fix type errors" --namespace="Omni/Task"
+```
+
+**Task Types:**
+- `epic` - Container for related tasks
+- `task` - Individual work item (default)
+- `human` - Task specifically for human operators (excluded from agent work queues)
+
+**Dependency Types:**
+- `blocks` - Hard dependency, blocks ready work queue (default)
+- `discovered-from` - Work discovered during other work, doesn't block
+- `parent-child` - Epic/subtask relationship, blocks ready work
+- `related` - Soft relationship, doesn't block
+
+The `--namespace` option associates the task with a specific namespace in the monorepo (e.g., `Omni/Task`, `Biz/Cloud`). This helps organize tasks by the code they relate to.
+
+## List Tasks
+```bash
+task list [options] # Flags can be in any order
+```
+
+Examples:
+```bash
+task list # All tasks
+task list --type=epic # All epics
+task list --parent=t-abc123 # All tasks in an epic
+task list --status=open # All open tasks
+task list --status=done # All completed tasks
+task list --namespace="Omni/Task" # All tasks for a namespace
+task list --parent=t-abc123 --status=open # Combine filters: open tasks in epic
+```
+
+## Get Ready Work
+```bash
+task ready
+```
+
+Shows all tasks that are:
+- Not closed
+- Not blocked by incomplete dependencies
+
+## Update Task Status
+```bash
+task update <id> <status>
+```
+
+Status values: `open`, `in-progress`, `done`
+
+Examples:
+```bash
+task update t-20241108120000 in-progress
+task update t-20241108120000 done
+```
+
+**Note**: Task updates are immediately saved to the SQLite database.
+
+## View Dependencies
+```bash
+task deps <id>
+```
+
+Shows the dependency tree for a task.
+
+## View Task Tree
+```bash
+task tree [<id>]
+```
+
+Shows task hierarchy with visual status indicators:
+- `[ ]` - Open
+- `[~]` - In Progress
+- `[✓]` - Done
+
+Examples:
+```bash
+task tree # Show all epics with their children
+task tree t-abc123 # Show specific epic/task with its children
+```
+
+## Export Tasks
+```bash
+task export [-o <file>]
+```
+
+Exports tasks to JSONL format (stdout by default, or to a file with `-o`).
+
+## Import Tasks
+```bash
+task import -i <file>
+```
+
+Imports tasks from a JSONL file, merging with existing tasks. Newer tasks (based on `updatedAt` timestamp) take precedence.
+
+Examples:
+```bash
+task import -i /path/to/backup.jsonl
+```
+
+## Initialize (First Time)
+```bash
+task init
+```
+
+Creates the SQLite database at `~/.cache/omni/tasks/tasks.db`.
+
+## Common Workflows
+
+### Starting New Work
+
+1. **Find what's ready to work on:**
+ ```bash
+ task ready
+ ```
+
+2. **Pick a task and mark it in progress:**
+ ```bash
+ task update t-20241108120000 in-progress
+ ```
+
+3. **When done, mark it complete:**
+ ```bash
+ task update t-20241108120000 done
+ ```
+
+### Creating Dependent Tasks
+
+When you discover work that depends on other work:
+
+```bash
+# Create the blocking task first
+task create "Design API" --type=task
+
+# Note the ID (e.g., t-20241108120000)
+
+# Create dependent task with blocking dependency
+task create "Implement API client" --deps=t-20241108120000 --dep-type=blocks
+```
+
+The dependent task won't show up in `task ready` until the blocker is marked `done`.
+
+### Discovered Work Pattern
+
+When you find work during implementation, use the `--discovered-from` flag:
+
+```bash
+# While working on t-abc123, you discover a bug
+task create "Fix memory leak in parser" --discovered-from=t-abc123
+
+# This is equivalent to:
+task create "Fix memory leak in parser" --deps=t-abc123 --dep-type=discovered-from
+```
+
+The `discovered-from` dependency type maintains context but **doesn't block** the ready work queue. This allows AI agents to track what work was found during other work while still being able to work on it immediately.
+
+### Working with Epics
+
+```bash
+# Create an epic for a larger feature
+task create "User Authentication System" --type=epic
+# Note ID: t-abc123
+
+# Create child tasks within the epic
+task create "Design login flow" --parent=t-abc123
+task create "Implement OAuth" --parent=t-abc123
+task create "Add password reset" --parent=t-abc123
+
+# List all tasks in an epic
+task list --parent=t-abc123
+
+# List all epics
+task list --type=epic
+```
+
+## Agent Best Practices
+
+### 1. ALWAYS Check Ready Work First
+Before asking what to do, you MUST check `task ready --json` to see unblocked tasks.
+
+### 2. ALWAYS Create Tasks for Discovered Work
+When you encounter work during implementation, you MUST create linked tasks:
+```bash
+task create "Fix type error in auth module" --discovered-from=t-abc123 --json
+task create "Add missing test coverage" --discovered-from=t-abc123 --json
+```
+
+**Bug Discovery Pattern**
+
+When you discover a bug or unexpected behavior:
+```bash
+# CORRECT: Immediately file a task
+task create "Command X fails when Y" --discovered-from=<current-task-id> --json
+
+# WRONG: Ignoring it and moving on
+# WRONG: Leaving a TODO comment
+# WRONG: Mentioning it but not filing a task
+```
+
+**Examples of bugs you MUST file:**
+- "Expected `--flag value` to work but only `--flag=value` works"
+- "Documentation says X but actual behavior is Y"
+- "Combining two flags causes parsing error"
+- "Feature is missing that would be useful"
+
+**CRITICAL: File bugs immediately when you discover them:**
+- If a command doesn't work as documented → create a task
+- If a command doesn't work as you expected → create a task
+- If behavior is inconsistent or confusing → create a task
+- If documentation is wrong or misleading → create a task
+- If you find yourself working around a limitation → create a task
+
+**NEVER leave TODO comments in code.** Create a task instead.
+
+**NEVER ignore bugs or unexpected behavior.** File a task for it immediately.
+
+### 3. Forbidden Patterns
+
+**Markdown checklist (NEVER do this):**
+```markdown
+❌ Wrong:
+- [ ] Refactor auth module
+- [ ] Add tests
+- [ ] Update docs
+
+✅ Correct:
+task create "Refactor auth module" -p 2 --json
+task create "Add tests for auth" -p 2 --json
+task create "Update auth docs" -p 3 --json
+```
+
+**todo_write tool (NEVER do this):**
+```
+❌ Wrong: todo_write({todos: [{content: "Fix bug", ...}]})
+✅ Correct: task create "Fix bug in parser" -p 1 --json
+```
+
+**Inline code comments (NEVER do this):**
+```python
+❌ Wrong:
+# TODO: write tests for this function
+# FIXME: handle edge case
+
+✅ Correct:
+# Create task instead:
+task create "Write tests for parse_config" -p 2 --namespace="Omni/Config" --json
+task create "Handle edge case in parser" -p 1 --discovered-from=<current-id> --json
+```
+
+### 4. Track Dependencies
+If work depends on other work, use `--deps`:
+```bash
+# Can't write tests until implementation is done
+task create "Test auth flow" --deps=t-20241108120000 --dep-type=blocks --json
+```
+
+### 5. Use Descriptive Titles
+Good: `"Add JWT token validation to auth middleware"`
+Bad: `"Fix auth"`
+
+### 6. Use Epics for Organization
+Organize related work using epics:
+- Create an epic for larger features: `task create "Feature Name" --type=epic --json`
+- Add tasks to the epic using `--parent=<epic-id>`
+- Use `--discovered-from` to track work found during implementation
+
+### 7. ALWAYS Store AI Planning Docs in `_/llm` Directory
+AI assistants often create planning and design documents during development:
+- PLAN.md, DESIGN.md, TESTING_GUIDE.md, tmp, and similar files
+- **You MUST use a dedicated directory for these ephemeral files**
+- Store ALL AI-generated planning/design docs in `_/llm`
+- The `_` directory is ignored by git and all of our temporary files related to the omnirepo go there
+- NEVER commit planning docs to the repo root
+
+## Dependency Rules
+
+- A task is **blocked** if any of its dependencies are not `done`
+- A task is **ready** if all its dependencies are `done` (or it has no dependencies)
+- `task ready` only shows tasks with status `open` or `in-progress` that are not blocked
+
+## Storage
+
+Tasks are stored in a SQLite database at `~/.cache/omni/tasks/tasks.db`. This is a local database, not git-tracked.
+
+To back up or transfer tasks, use `task export` and `task import`.
+
+## Testing and Development
+
+**CRITICAL**: When manually testing task functionality, use the test database:
+
+```bash
+# Set test mode to protect production database
+export TASK_TEST_MODE=1
+
+# Now all task operations use _/tmp/tasks-test.db
+task create "Test task" --type=task
+task list
+task tree
+
+# Unset when done
+unset TASK_TEST_MODE
+```
+
+**The test suite automatically uses test mode** - you don't need to set it manually when running `task test` or `bild --test Omni/Task.hs`.
+
+## Troubleshooting
+
+### "Task not found"
+- Check the task ID is correct with `task list`
+- Ensure you've run `task init`
+
+### "Database not initialized"
+Run: `task init`
+
+### Dependencies not working
+- Verify dependency IDs exist: `task list`
+- Check dependency tree: `task deps <id>`
+
+## Reinforcement: Critical Rules
+
+Remember these non-negotiable rules:
+
+- ✅ Use `task` for ALL task tracking (with `--json` flag)
+- ✅ Link discovered work with `--discovered-from` dependencies
+- ✅ File bugs IMMEDIATELY when you discover unexpected behavior
+- ✅ Check `task ready --json` before asking "what should I work on?"
+- ❌ NEVER create markdown TODO lists or task checklists
+- ❌ NEVER put TODOs or FIXMEs in code comments
+- ❌ NEVER use external issue trackers
+- ❌ NEVER duplicate tracking systems
+- ❌ NEVER clutter repo root with planning documents
+
+**If you find yourself about to use todo_write or create a markdown checklist, STOP and use `task create` instead.**
diff --git a/Omni/Task/RaceTest.hs b/Omni/Task/RaceTest.hs
new file mode 100644
index 0000000..8ab797a
--- /dev/null
+++ b/Omni/Task/RaceTest.hs
@@ -0,0 +1,58 @@
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NoImplicitPrelude #-}
+
+module Omni.Task.RaceTest where
+
+import Alpha
+import Control.Concurrent.Async (mapConcurrently)
+import Data.List (nub)
+import qualified Data.Text as T
+import Omni.Task.Core
+import qualified Omni.Test as Test
+import System.Directory (doesFileExist, removeFile)
+import System.Environment (setEnv)
+
+test :: Test.Tree
+test = Test.group "Omni.Task.Race" [raceTest]
+
+raceTest :: Test.Tree
+raceTest =
+ Test.unit "concurrent child creation (race condition)" <| do
+ -- Set up test mode (uses _/tmp/tasks-test.db)
+ setEnv "TASK_TEST_MODE" "1"
+
+ -- Clean up test database
+ let testFile = "_/tmp/tasks-test.db"
+ exists <- doesFileExist testFile
+ when exists <| removeFile testFile
+ initTaskDb
+
+ -- Create a parent epic
+ parent <- createTask "Parent Epic" Epic Nothing Nothing P2 Nothing [] "Parent Epic description"
+ let parentId = taskId parent
+
+ -- Create multiple children concurrently
+ -- We'll create 10 children in parallel
+ let childCount = 10
+ indices = [1 .. childCount]
+
+ -- Run concurrent creations
+ children <-
+ mapConcurrently
+ (\i -> createTask ("Child " <> tshow i) WorkTask (Just parentId) Nothing P2 Nothing [] ("Child " <> tshow i <> " description"))
+ indices
+
+ -- Check for duplicates in generated IDs
+ let ids = map taskId children
+ uniqueIds = nub ids
+
+ -- If there was a race condition, we'd have fewer unique IDs than children
+ length uniqueIds Test.@?= length children
+ length uniqueIds Test.@?= childCount
+
+ -- Verify IDs follow the pattern parentId.N
+ for_ ids <| \tid -> do
+ (parentId `T.isPrefixOf` tid) Test.@?= True
+
+ -- Cleanup
+ removeFile testFile
diff --git a/Omni/Test.py b/Omni/Test.py
index 495334a..71ac32a 100644
--- a/Omni/Test.py
+++ b/Omni/Test.py
@@ -6,10 +6,17 @@ import Omni.Log as Log
import typing
import unittest
+TestCase = unittest.TestCase
+
+
+class TestError(Exception):
+ """When the test environment or harness encounters a problem."""
+
def run(area: App.Area, tests: list[typing.Any]) -> None:
"""Run the given tests with loglevel determined by area."""
- Log.setup(logging.DEBUG if area == App.Area.Test else logging.ERROR)
+ logger = logging.getLogger(__name__)
+ Log.setup(logger, logging.DEBUG if area == App.Area.Test else logging.ERROR)
suite = unittest.TestSuite()
suite.addTests([
unittest.defaultTestLoader.loadTestsFromTestCase(tc) for tc in tests
diff --git a/Omni/Users.nix b/Omni/Users.nix
index 043c5fd..4ae8c17 100644
--- a/Omni/Users.nix
+++ b/Omni/Users.nix
@@ -30,6 +30,13 @@ in {
openssh.authorizedKeys.keys = readKeys ./Keys/Deploy.pub;
extraGroups = ["wheel"];
};
+ ava = {
+ description = "Ava Telegram bot";
+ isNormalUser = true;
+ home = "/home/ava";
+ openssh.authorizedKeys.keys = readKeys ./Keys/Ava.pub;
+ extraGroups = ["git"];
+ };
#
# humans
#
@@ -40,7 +47,7 @@ in {
isNormalUser = true;
home = "/home/ben";
openssh.authorizedKeys.keys = readKeys ./Keys/Ben.pub;
- extraGroups = ["wheel" "docker" "bitcoind-mainnet" "git"];
+ extraGroups = ["wheel" "docker" "bitcoind-mainnet" "git" "audio" "video"];
hashedPassword = "$6$SGhdoRB6DhWe$elW8RQE1ebe8JKf1ALW8jGZTPCyn2rpq/0J8MV/A9y8qFMEhA.Z2eiexMgptohZAK5kcGOc6HIUgNzJqnDUvY.";
};
dre = {