From 22d0caa44651b4ae3746d54f061857e2ad2467ae Mon Sep 17 00:00:00 2001
From: Vivek Kumar
Date: Tue, 10 Feb 2026 12:36:33 +0530
Subject: [PATCH 1/4] feat: add NVIDIA provider (embedder + LLM) via OpenAI-compatible API
---
engram/embeddings/nvidia.py | 40 +++++++++++++++++++++++++++++++
engram/llms/nvidia.py | 47 +++++++++++++++++++++++++++++++++++++
engram/utils/factory.py | 8 +++++++
3 files changed, 95 insertions(+)
create mode 100644 engram/embeddings/nvidia.py
create mode 100644 engram/llms/nvidia.py
diff --git a/engram/embeddings/nvidia.py b/engram/embeddings/nvidia.py
new file mode 100644
index 0000000..64ca9f4
--- /dev/null
+++ b/engram/embeddings/nvidia.py
@@ -0,0 +1,40 @@
+import os
+from typing import List, Optional
+
+from engram.embeddings.base import BaseEmbedder
+
+
+class NvidiaEmbedder(BaseEmbedder):
+ """Embedding provider for NVIDIA API (OpenAI-compatible). Default model: nv-embedqa-e5-v5."""
+
+ def __init__(self, config: Optional[dict] = None):
+ super().__init__(config)
+ try:
+ from openai import OpenAI
+ except Exception as exc:
+ raise ImportError("openai package is required for NvidiaEmbedder") from exc
+
+ api_key = self.config.get("api_key") or "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"
+ if not api_key:
+ raise ValueError(
+ "NVIDIA API key required. Set config['api_key'] or NVIDIA_API_KEY env var."
+ )
+
+ base_url = self.config.get("base_url", "https://integrate.api.nvidia.com/v1")
+ self.client = OpenAI(base_url=base_url, api_key=api_key)
+ self.model = self.config.get("model", "nvidia/nv-embedqa-e5-v5")
+
+ def embed(self, text: str, memory_action: Optional[str] = None) -> List[float]:
+ # NVIDIA embedding models distinguish between passage and query input types
+ if memory_action in ("search", "forget"):
+ input_type = "query"
+ else:
+ input_type = "passage"
+
+ response = self.client.embeddings.create(
+ input=[text],
+ model=self.model,
+ encoding_format="float",
+ extra_body={"input_type": input_type, "truncate": "NONE"},
+ )
+ return response.data[0].embedding
diff --git a/engram/llms/nvidia.py b/engram/llms/nvidia.py
new file mode 100644
index 0000000..f657573
--- /dev/null
+++ b/engram/llms/nvidia.py
@@ -0,0 +1,47 @@
+import os
+from typing import Optional
+
+from engram.llms.base import BaseLLM
+
+
+class NvidiaLLM(BaseLLM):
+ """LLM provider for NVIDIA API (OpenAI-compatible). Default model: Kimi K2.5."""
+
+ def __init__(self, config: Optional[dict] = None):
+ super().__init__(config)
+ try:
+ from openai import OpenAI
+ except Exception as exc:
+ raise ImportError("openai package is required for NvidiaLLM") from exc
+
+ api_key = self.config.get("api_key") or "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"
+ if not api_key:
+ raise ValueError(
+ "NVIDIA API key required. Set config['api_key'] or NVIDIA_API_KEY env var."
+ )
+
+ base_url = self.config.get("base_url", "https://integrate.api.nvidia.com/v1")
+ self.client = OpenAI(base_url=base_url, api_key=api_key)
+ self.model = self.config.get("model", "moonshotai/kimi-k2.5")
+ self.temperature = self.config.get("temperature", 1.0)
+ self.max_tokens = self.config.get("max_tokens", 16384)
+ self.top_p = self.config.get("top_p", 0.7)
+ self.enable_thinking = self.config.get("enable_thinking", False)
+
+ def generate(self, prompt: str) -> str:
+ extra_kwargs = {}
+ if self.enable_thinking:
+ extra_kwargs["extra_body"] = {
+ "chat_template_kwargs": {"enable_thinking": True}
+ }
+
+ response = self.client.chat.completions.create(
+ model=self.model,
+ messages=[{"role": "user", "content": prompt}],
+ temperature=self.temperature,
+ top_p=self.top_p,
+ max_tokens=self.max_tokens,
+ stream=False,
+ **extra_kwargs,
+ )
+ return response.choices[0].message.content
diff --git a/engram/utils/factory.py b/engram/utils/factory.py
index 1a6a995..8e9d57a 100644
--- a/engram/utils/factory.py
+++ b/engram/utils/factory.py
@@ -19,6 +19,10 @@ def create(cls, provider: str, config: Dict[str, Any]):
from engram.embeddings.ollama import OllamaEmbedder
return OllamaEmbedder(config)
+ if provider == "nvidia":
+ from engram.embeddings.nvidia import NvidiaEmbedder
+
+ return NvidiaEmbedder(config)
raise ValueError(f"Unsupported embedder provider: {provider}")
@@ -41,6 +45,10 @@ def create(cls, provider: str, config: Dict[str, Any]):
from engram.llms.ollama import OllamaLLM
return OllamaLLM(config)
+ if provider == "nvidia":
+ from engram.llms.nvidia import NvidiaLLM
+
+ return NvidiaLLM(config)
raise ValueError(f"Unsupported LLM provider: {provider}")
From 97689523c4fcb2a3468b80522a6597d765b3cf36 Mon Sep 17 00:00:00 2001
From: Ashish-dwi99
Date: Tue, 10 Feb 2026 15:40:28 +0530
Subject: [PATCH 2/4] docs: remove "free forever" banner from README, restore
package name
Co-Authored-By: Claude Opus 4.6
---
README.md | 4 ----
pyproject.toml | 2 +-
2 files changed, 1 insertion(+), 5 deletions(-)
diff --git a/README.md b/README.md
index a93a21b..6fd93a3 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,6 @@
---
-> **100% free, forever.** No Pro tier, no usage limits, no license keys. Bring your own API key (Gemini, OpenAI, or Ollama). Everything runs locally by default.
-
----
-
## Why Engram
Every AI agent you use starts with amnesia. Your coding assistant forgets your preferences between sessions. Your planning agent has no idea what your research agent discovered yesterday. You end up re-explaining context that should already be known.
diff --git a/pyproject.toml b/pyproject.toml
index 1b4fcc8..04daa47 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
-name = "engram"
+name = "engram-memory"
version = "0.4.0"
description = "Memory layer for AI agents — biologically-inspired forgetting, multi-agent trust, and plug-and-play integrations"
readme = "README.md"
From bcdc556d7777583290b1a3383427773bb3c42eef Mon Sep 17 00:00:00 2001
From: Ashish-dwi99
Date: Tue, 10 Feb 2026 15:41:59 +0530
Subject: [PATCH 3/4] chore: bump version to 0.4.1
Co-Authored-By: Claude Opus 4.6
---
engram/__init__.py | 2 +-
pyproject.toml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/engram/__init__.py b/engram/__init__.py
index 7cd6f19..b6b6c74 100644
--- a/engram/__init__.py
+++ b/engram/__init__.py
@@ -21,7 +21,7 @@
from engram.core.echo import EchoProcessor, EchoDepth, EchoResult
from engram.configs.base import MemoryConfig, FadeMemConfig, EchoMemConfig, CategoryMemConfig, ScopeConfig
-__version__ = "0.4.0" # Product release: Docker, CI, CLI improvements
+__version__ = "0.4.1"
__all__ = [
# Simplified interface (recommended)
"Engram",
diff --git a/pyproject.toml b/pyproject.toml
index 04daa47..7cc6649 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "engram-memory"
-version = "0.4.0"
+version = "0.4.1"
description = "Memory layer for AI agents — biologically-inspired forgetting, multi-agent trust, and plug-and-play integrations"
readme = "README.md"
requires-python = ">=3.9"
From 84df0dfee02b81b796e4a35ccd7743610c37fdb4 Mon Sep 17 00:00:00 2001
From: Ashish-dwi99
Date: Tue, 10 Feb 2026 16:25:34 +0530
Subject: [PATCH 4/4] docs: expand README (PyPI install options, API key setup, LongMemEval on Colab)
---
README.md | 118 +++++++++++++++++++++++++++++++++++++++++++++++--
pyproject.toml | 7 +--
2 files changed, 119 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index 6fd93a3..47ab43e 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@
-
+
@@ -28,6 +28,7 @@
Architecture ·
Integrations ·
API & SDK ·
+ LongMemEval ·
Changelog
@@ -60,13 +61,60 @@ But unlike "store everything forever" approaches, Engram treats agents as **untr
## Quick Start
```bash
-pip install -e ".[all]" # 1. Install
-export GEMINI_API_KEY="your-key" # 2. Set one API key (or OPENAI_API_KEY, or OLLAMA_HOST)
+pip install engram-memory # 1. Install from PyPI
+export GEMINI_API_KEY="your-key" # 2. Set one key before starting Engram
engram install # 3. Auto-configure Claude Code, Cursor, Codex
```
Restart your agent. Done — it now has persistent memory across sessions.
+### PyPI Install Options
+
+```bash
+# Default runtime (Gemini + local Qdrant + MemoryClient deps)
+pip install engram-memory
+
+# Full stack extras (MCP server + REST API + async + all providers)
+pip install "engram-memory[all]"
+
+# OpenAI provider add-on
+pip install "engram-memory[openai]"
+
+# Ollama provider add-on
+pip install "engram-memory[ollama]"
+```
+
+### API Key: When and How to Provide It
+
+Engram reads provider credentials when a process initializes `Memory()` (for example: `engram`, `engram-api`, `engram-mcp`, or your Python app).
+
+1. Set env vars **before** starting those processes.
+2. If you change keys, restart the process.
+3. Default provider is Gemini, so set `GEMINI_API_KEY` or `GOOGLE_API_KEY` unless you override provider config.
+
+```bash
+# Default (Gemini)
+export GEMINI_API_KEY="your-key"
+engram-api
+```
+
+```bash
+# OpenAI provider
+export OPENAI_API_KEY="your-key"
+engram-api
+```
+
+```bash
+# Ollama (local; no cloud key)
+export OLLAMA_HOST="http://localhost:11434"
+engram-api
+```
+
+For remote usage via `MemoryClient`, provider API keys are needed on the **server** running Engram.
+The client only needs:
+- `ENGRAM_ADMIN_KEY` (or `admin_key=...`) when minting sessions via `/v1/sessions`
+- Bearer session token for normal read/write API calls
+
**Or with Docker:**
```bash
@@ -565,6 +613,70 @@ Biological inspirations: Ebbinghaus Forgetting Curve → exponential decay, Spac
---
+## LongMemEval on Colab (GPU)
+
+Use this flow to benchmark Engram on LongMemEval in Google Colab with GPU acceleration.
+
+```bash
+# 1) In Colab: Runtime -> Change runtime type -> GPU
+
+# 2) Install Engram + GPU reader dependencies
+pip install -U engram-memory transformers accelerate
+
+# 3) Download LongMemEval data
+mkdir -p /content/longmemeval
+cd /content/longmemeval
+curl -L -o longmemeval_s_cleaned.json \
+ https://huggingface.co/datasets/xiaowu0162/longmemeval-cleaned/resolve/main/longmemeval_s_cleaned.json
+
+# 4) Run Engram benchmark (HF reader on GPU)
+python -m engram.benchmarks.longmemeval \
+ --dataset-path /content/longmemeval/longmemeval_s_cleaned.json \
+ --output-jsonl /content/longmemeval/engram_hypotheses.jsonl \
+ --retrieval-jsonl /content/longmemeval/engram_retrieval.jsonl \
+ --answer-backend hf \
+ --hf-model Qwen/Qwen2.5-1.5B-Instruct \
+ --embedder-provider simple \
+ --llm-provider mock \
+ --vector-store-provider memory \
+ --history-db-path /content/engram-longmemeval.db \
+ --top-k 8 \
+ --max-questions 100 \
+ --skip-abstention
+```
+
+Notes:
+- The output file is evaluator-compatible (`question_id`, `hypothesis` per line).
+- `--include-debug-fields` adds retrieval diagnostics into each output row.
+- The command above uses `simple` embedder + `mock` LLM for memory operations, so **no Gemini/OpenAI key is required**.
+
+If you want to run with Gemini only (no extra reader packages), use base install and set key **before** starting the run:
+
+```bash
+pip install -U engram-memory
+export GEMINI_API_KEY="your-key"
+
+python -m engram.benchmarks.longmemeval \
+ --dataset-path /content/longmemeval/longmemeval_s_cleaned.json \
+ --output-jsonl /content/longmemeval/engram_hypotheses.jsonl \
+ --answer-backend engram-llm \
+ --llm-provider gemini \
+ --embedder-provider gemini \
+ --vector-store-provider memory
+```
+
+Optional official QA scoring from the LongMemEval repo:
+
+```bash
+cd /content
+git clone https://github.com/xiaowu0162/LongMemEval.git
+cd /content/LongMemEval/src/evaluation
+export OPENAI_API_KEY="your-key"
+python evaluate_qa.py gpt-4o /content/longmemeval/engram_hypotheses.jsonl /content/longmemeval/longmemeval_s_cleaned.json
+```
+
+---
+
## Docker
```bash
diff --git a/pyproject.toml b/pyproject.toml
index 7cc6649..c45344b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,6 +26,9 @@ classifiers = [
dependencies = [
"pydantic>=2.0",
+ "google-generativeai>=0.3.0",
+ "qdrant-client>=1.7.0",
+ "requests>=2.28.0",
]
[project.optional-dependencies]
@@ -41,11 +44,8 @@ api = [
"uvicorn[standard]>=0.27.0",
]
all = [
- "google-generativeai>=0.3.0",
"openai>=1.0.0",
"ollama>=0.4.0",
- "qdrant-client>=1.7.0",
- "requests>=2.28.0",
"mcp>=1.0.0",
"fastapi>=0.109.0",
"uvicorn[standard]>=0.27.0",
@@ -67,6 +67,7 @@ engram = "engram.main_cli:main"
engram-mcp = "engram.mcp_server:run"
engram-install = "engram.cli:install"
engram-api = "engram.api.server:run"
+engram-longmemeval = "engram.benchmarks.longmemeval:main"
[project.urls]
Homepage = "https://github.com/Ashish-dwi99/Engram"