AgenticAI Framework provides 7 specialized memory managers designed for different use cases, from general-purpose storage to specialized domain-specific memory systems.
Enterprise Storage
The framework also includes 14 enterprise storage and caching modules for production deployments, including Redis, distributed cache, and multi-tier caching.
The MemoryManager is the general-purpose memory solution for any agent. It provides semantic search, automatic compression, and flexible storage backends.
# Save to disk
memory.save("./checkpoints/memory_backup.pkl")

# Load from disk
memory = MemoryManager.load("./checkpoints/memory_backup.pkl")

# Export to JSON
memory.export_json("./exports/memory.json")
from agenticaiframework import AgentMemoryManager

# Initialize for specific agent
agent_memory = AgentMemoryManager(agent_id="researcher_01")

# Store agent context
agent_memory.store_context(
    task_type="research",
    context={
        "topic": "AI safety",
        "depth": "comprehensive",
        "sources_used": ["arxiv", "google_scholar"],
    },
)

# Store learned preferences
agent_memory.store_preference(
    key="response_style",
    value="detailed_with_citations",
    confidence=0.9,
)

# Retrieve agent context
context = agent_memory.get_context(task_type="research")
import logging

from agenticaiframework import WorkflowMemoryManager

logger = logging.getLogger(__name__)

# Initialize for workflow
workflow_memory = WorkflowMemoryManager(workflow_id="content_pipeline")

# Record workflow step
workflow_memory.record_step(
    step_name="research",
    status="completed",
    input_data={"topic": "AI trends"},
    output_data=research_results,
    duration_ms=5000,
)

# Get workflow status
status = workflow_memory.get_status()
logger.info(f"Current step: {status.current_step}")
logger.info(f"Completed: {status.completed_steps}")
logger.info(f"Remaining: {status.remaining_steps}")
# Create checkpoint
checkpoint_id = workflow_memory.create_checkpoint(
    name="after_research",
    data={
        "research_results": research_data,
        "next_step": "writing",
    },
)

# Restore from checkpoint
workflow_memory.restore_checkpoint(checkpoint_id)

# List all checkpoints
checkpoints = workflow_memory.list_checkpoints()
# Record branch decision
workflow_memory.record_branch(
    decision_point="content_type",
    selected_branch="technical_article",
    conditions={"audience": "developers"},
    alternatives=["blog_post", "whitepaper"],
)

# Get branch history
branches = workflow_memory.get_branch_history()
# Record error
workflow_memory.record_error(
    step_name="api_call",
    error_type="RateLimitError",
    error_message="API rate limit exceeded",
    retry_count=3,
)

# Get recovery suggestions
suggestions = workflow_memory.get_recovery_suggestions("api_call")
# Send message between agents
orch_memory.send_message(
    from_agent="leader",
    to_agent="researcher",
    message_type="task_assignment",
    content={
        "task": "Research competitor analysis",
        "priority": "high",
        "deadline": "2024-01-15",
    },
)

# Get pending messages
messages = orch_memory.get_messages(agent_id="researcher")
import logging

logger = logging.getLogger(__name__)

# Record agent vote/opinion
orch_memory.record_vote(
    topic="approach_selection",
    agent_id="researcher",
    vote="option_a",
    confidence=0.8,
    reasoning="Based on data accuracy requirements",
)

# Get consensus
consensus = orch_memory.get_consensus("approach_selection")
logger.info(f"Selected: {consensus.selected_option}")
logger.info(f"Agreement: {consensus.agreement_level}")
# Configure RAG settings
knowledge = KnowledgeMemoryManager(
    embedding_model="text-embedding-3-large",
    retrieval_strategy="hybrid",  # Options: vector, keyword, hybrid
    rerank_model="cross-encoder",
)

# RAG query with context
context = knowledge.retrieve_context(
    query="How do I configure authentication?",
    top_k=5,
    include_metadata=True,
)

# Generate answer with context
answer = agent.execute(
    prompt=f"Based on this context: {context}\n\nAnswer: {query}"
)
# Create categories
knowledge.create_category("technical_docs", parent=None)
knowledge.create_category("api_reference", parent="technical_docs")

# Add to category
knowledge.add_document(content=api_doc, category="api_reference")

# Query specific category
results = knowledge.query(
    question="API authentication",
    category="api_reference",
)
# Get tool recommendations
recommendations = tool_memory.get_recommendations(
    task_type="information_retrieval",
    based_on="success_rate",  # Options: success_rate, latency, cost
)

# Record tool failures for learning
tool_memory.record_failure(
    tool_name="api_call",
    error_type="RateLimitError",
    input_params=params,
    recovery_action="retry_with_backoff",
)
from agenticaiframework import SpeechMemoryManager

# Initialize speech memory
speech_memory = SpeechMemoryManager()

# Store transcript
speech_memory.store_transcript(
    session_id="voice_001",
    speaker="user",
    text="What's the weather like today?",
    timestamp="2024-01-15T10:30:00Z",
    audio_metadata={
        "duration_ms": 2500,
        "sample_rate": 16000,
        "format": "wav",
    },
)

# Get conversation history
history = speech_memory.get_conversation(session_id="voice_001")
# Store voice profile
speech_memory.store_voice_profile(
    user_id="user_123",
    profile={
        "voice_embedding": voice_embedding,
        "language": "en-US",
        "speaking_rate": 1.2,
        "preferred_tts_voice": "alloy",
    },
)

# Get voice profile
profile = speech_memory.get_voice_profile(user_id="user_123")
# Record multi-speaker conversation
speech_memory.store_transcript(
    session_id="meeting_001",
    speaker="speaker_1",
    speaker_label="Alice",
    text="Let's discuss the project timeline.",
    timestamp="2024-01-15T14:00:00Z",
)
speech_memory.store_transcript(
    session_id="meeting_001",
    speaker="speaker_2",
    speaker_label="Bob",
    text="I think we need two more weeks.",
    timestamp="2024-01-15T14:00:05Z",
)

# Get speaker-segmented transcript
transcript = speech_memory.get_conversation(
    session_id="meeting_001",
    include_speaker_labels=True,
)
# Regular cleanup of old memories
memory.cleanup(older_than_days=30, keep_important=True)

# Compress old memories
memory.compress_old(older_than_days=7, strategy="summarize")