Use this file to discover all available pages before exploring further.
This guide provides detailed guidelines for contributing to the Memori Python SDK. Follow these standards to ensure your contributions can be reviewed and merged efficiently.
Memori requires Python 3.10+ and uses modern Python features:
# Good: Use modern type hints
from typing import Optional

def process_memory(
    entity_id: str,
    memories: list[dict],
    threshold: float = 0.1
) -> list[dict]:
    return [m for m in memories if m["score"] >= threshold]

# Avoid: Old-style type hints
from typing import List, Dict

def process_memory(entity_id, memories, threshold=0.1):
    # type: (str, List[Dict], float) -> List[Dict]
    return [m for m in memories if m["score"] >= threshold]
Maximum line length: 88 characters (Black-compatible)
# Good: Line is 88 characters or less
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": message}]
)

# Avoid: Line too long
response = client.chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": message}])
# Good: Complete type hints
from typing import Optional

def recall_memories(
    entity_id: str,
    process_id: str,
    limit: int = 10,
    threshold: Optional[float] = None
) -> list[dict]:
    """Recall memories for entity and process.

    Args:
        entity_id: Unique identifier for the entity
        process_id: Unique identifier for the process
        limit: Maximum number of memories to recall
        threshold: Minimum similarity threshold (optional)

    Returns:
        List of memory dictionaries
    """
    ...

# Avoid: Missing type hints
def recall_memories(entity_id, process_id, limit=10, threshold=None):
    ...
def calculate_similarity(embedding_a: np.ndarray, embedding_b: np.ndarray) -> float:
    """Calculate cosine similarity between two embeddings.

    Args:
        embedding_a: First embedding vector
        embedding_b: Second embedding vector

    Returns:
        Cosine similarity score between 0 and 1

    Raises:
        ValueError: If embeddings have different dimensions
    """
    if embedding_a.shape != embedding_b.shape:
        raise ValueError("Embeddings must have same dimensions")
    return np.dot(embedding_a, embedding_b) / (
        np.linalg.norm(embedding_a) * np.linalg.norm(embedding_b)
    )
Internal functions can omit docstrings if the code is self-documenting:
import pytest
import os

from memori import Memori
from openai import OpenAI


@pytest.mark.integration
def test_openai_memory_persistence():
    """Test memory persistence with real OpenAI calls."""
    # Skip if no API key
    if not os.getenv("OPENAI_API_KEY"):
        pytest.skip("OPENAI_API_KEY not set")

    client = OpenAI()
    mem = Memori().llm.register(client)
    mem.attribution(entity_id="test_user", process_id="test_agent")

    # First interaction
    response1 = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "My favorite color is blue."}]
    )

    # Second interaction should recall first
    response2 = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "What's my favorite color?"}]
    )

    assert "blue" in response2.choices[0].message.content.lower()
## Description

[Concise description of the changes]

## Motivation

[Why is this change needed? What problem does it solve?]

## Changes

- [Bullet point list of changes]
- [Include what was added, modified, or removed]

## Related Issues

Closes #123
Related to #456

## Testing

- [ ] Unit tests added/updated
- [ ] Integration tests added/updated (if applicable)
- [ ] All tests passing locally
- [ ] Manual testing performed

## Checklist

- [ ] Code follows project style guidelines
- [ ] Tests added for new functionality
- [ ] Documentation updated (if needed)
- [ ] CHANGELOG.md updated
- [ ] Pre-commit hooks pass
- [ ] No breaking changes (or clearly documented)

## Screenshots/Examples

[If applicable, add screenshots or code examples]
## [Unreleased]

### Changed

- **BREAKING**: `Memori.attribution()` now requires `entity_id` parameter (#130)

  **Migration:**

  ```python
  # Before
  mem.attribution(process_id="agent")

  # After
  mem.attribution(entity_id="user_123", process_id="agent")
<Warning>
**Breaking changes should be rare and well-justified.** Discuss with maintainers before introducing breaking changes.
</Warning>

## Deprecation Policy

When deprecating features:

1. **Add deprecation warning:**

   ```python
   import warnings

   def old_function():
       warnings.warn(
           "old_function() is deprecated and will be removed in v4.0. "
           "Use new_function() instead.",
           DeprecationWarning,
           stacklevel=2
       )
       return new_function()
# Good: Use environment variables
import os

api_key = os.getenv("MEMORI_API_KEY")
if not api_key:
    raise ValueError("MEMORI_API_KEY environment variable not set")

# Avoid: Hardcoded secrets
api_key = "sk-1234567890"  # NEVER DO THIS
class Memori:
    """Main Memori SDK class for memory-augmented LLM interactions.

    The Memori class provides methods to register LLM clients,
    configure storage backends, and manage memory attribution.

    Example:
        ```python
        from memori import Memori
        from openai import OpenAI

        client = OpenAI()
        mem = Memori().llm.register(client)
        mem.attribution(entity_id="user_123", process_id="agent")
        ```
    """

    def attribution(self, entity_id: str, process_id: str) -> "Memori":
        """Set attribution for memory storage.

        Args:
            entity_id: Unique identifier for the entity (user, org, etc.)
            process_id: Unique identifier for the process (agent, workflow, etc.)

        Returns:
            Self for method chaining

        Raises:
            ValueError: If entity_id or process_id is empty

        Example:
## Performance Considerations

### Optimization Guidelines

**When to optimize:**

1. **Profile first**: Don't optimize without measuring
2. **Focus on hot paths**: Memory recall, embeddings, database queries
3. **Maintain readability**: Don't sacrifice clarity for minor gains

```python
# Good: Readable and efficient
def batch_process(items: list[str], batch_size: int = 100) -> list[Result]:
    results = []
    for i in range(0, len(items), batch_size):
        batch = items[i:i+batch_size]
        results.extend(process_batch(batch))
    return results

# Avoid: Premature optimization that hurts readability
def batch_process(items, batch_size=100):
    batches = [items[i:i+batch_size] for i in range(0, len(items), batch_size)]
    return [r for batch in batches for r in process_batch(batch)]
import pytest


@pytest.mark.benchmark
def test_embeddings_generation_performance(benchmark):
    """Benchmark embeddings generation speed."""
    from memori.embeddings import generate_embedding

    text = "This is a test sentence for benchmarking."
    result = benchmark(generate_embedding, text)

    # Assert reasonable performance
    assert benchmark.stats['mean'] < 0.1  # <100ms average