
Commit d8775f5

v31.7.0 (#138)
1 parent fcb6013 commit d8775f5

File tree: 6 files changed (+165, -13 lines)

README.md

Lines changed: 35 additions & 0 deletions
````diff
@@ -88,6 +88,7 @@ Smart workflows are as easy as combining your tools and prompts.
 * [OpenAI](https://openai.com) - AI Model Provider
 * [Grok](https://x.ai) - Alternative AI Model Provider (optional)
 * [Groq](https://groq.com) - Alternative AI Model Provider (optional)
+* [Cerebras](https://cerebras.ai) - Alternative AI Model Provider (optional)
 * [MongoDB](https://mongodb.com) - Conversational History (optional)
 * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
 * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
@@ -607,6 +608,40 @@ config = {
 - ✅ Structured outputs (via Instructor TOOLS_STRICT and JSON modes)
 - ✅ Native JSON mode
 
+### Cerebras
+
+Solana Agent supports using Cerebras as an alternative to OpenAI. When Cerebras is configured, it will be used for all LLM operations except embeddings, TTS, and STT (which still require OpenAI).
+
+**Note:** Grok takes priority over Groq, Groq takes priority over Cerebras, and Cerebras takes priority over OpenAI. If multiple are present, the highest priority provider will be used.
+
+```python
+config = {
+    "cerebras": {
+        "api_key": "your-cerebras-api-key",
+        "base_url": "https://api.cerebras.ai/v1",  # Optional, defaults to https://api.cerebras.ai/v1
+        "model": "gpt-oss-120b",  # Optional, defaults to gpt-oss-120b
+    },
+    # You can still include OpenAI for embeddings, TTS, and STT
+    "openai": {
+        "api_key": "your-openai-api-key"
+    },
+    "agents": [
+        {
+            "name": "research_specialist",
+            "instructions": "You are an expert researcher.",
+            "specialization": "Research",
+        }
+    ],
+}
+```
+
+**Verified Capabilities:**
+- ✅ Chat completions
+- ✅ Streaming responses
+- ✅ Function calling/Tool usage
+- ✅ Structured outputs (via Instructor TOOLS_STRICT and JSON modes)
+- ✅ Native JSON mode
+
 ### Business Alignment
 
 ```python
````
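For context, here is a minimal end-to-end sketch of how a Cerebras-backed config like the one above might be consumed. The `SolanaAgent` client class and its streaming `process(user_id, message)` call are taken from the project's existing README usage examples rather than from this diff, so treat the exact entry point as an assumption.

```python
# Hedged usage sketch (not part of this commit): wiring the Cerebras config
# from the README into the library's client. `SolanaAgent` and `process()`
# are assumed from the project's existing usage docs, not from this diff.
import asyncio

from solana_agent import SolanaAgent

config = {
    "cerebras": {"api_key": "your-cerebras-api-key"},  # base_url and model fall back to defaults
    "openai": {"api_key": "your-openai-api-key"},  # still needed for embeddings, TTS, and STT
    "agents": [
        {
            "name": "research_specialist",
            "instructions": "You are an expert researcher.",
            "specialization": "Research",
        }
    ],
}


async def main() -> None:
    agent = SolanaAgent(config=config)
    # Responses stream back as text chunks
    async for chunk in agent.process("user123", "Summarize what Cerebras support adds."):
        print(chunk, end="")


asyncio.run(main())
```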

pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "31.6.0"
+version = "31.7.0"
 description = "AI Agents for Solana"
 authors = ["Bevan Hunt <[email protected]>"]
 license = "MIT"
```

solana_agent/adapters/openai_adapter.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -65,7 +65,7 @@ def __init__(
         self.api_key = api_key
         self.base_url = base_url
 
-        # Create client with base_url if provided (for Grok/Groq support)
+        # Create client with base_url if provided (for Grok/Groq/Cerebras support)
         if base_url:
             self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
         else:
@@ -85,9 +85,9 @@ def __init__(
             logger.error(f"Failed to configure Logfire: {e}")
             self.logfire = False
 
-        # Use provided model or defaults (for Grok/Groq or OpenAI)
+        # Use provided model or defaults (for Grok/Groq/Cerebras or OpenAI)
         if model:
-            # Custom model provided (e.g., from Grok or Groq config)
+            # Custom model provided (e.g., from Grok, Groq, or Cerebras config)
             self.parse_model = model
             self.text_model = model
             self.vision_model = model
```
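The adapter change works because Cerebras, like Grok and Groq, exposes an OpenAI-compatible API, so the same `AsyncOpenAI` client is reused with a different `base_url` and model. A minimal sketch of that pattern, using a placeholder API key and the base URL and model defaults added in this commit:

```python
# Minimal sketch of the pattern the adapter relies on: point the stock
# AsyncOpenAI client at Cerebras's OpenAI-compatible endpoint. The API key is
# a placeholder; base_url and model match the defaults added in this commit.
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI(
    api_key="your-cerebras-api-key",
    base_url="https://api.cerebras.ai/v1",
)


async def main() -> None:
    response = await client.chat.completions.create(
        model="gpt-oss-120b",
        messages=[{"role": "user", "content": "Reply with one word: ready?"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```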

solana_agent/factories/agent_factory.py

Lines changed: 12 additions & 5 deletions
```diff
@@ -104,8 +104,8 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no co
         else:
             db_adapter = None
 
-        # Determine which LLM provider to use (Grok, Groq, or OpenAI)
-        # Priority: grok > groq > openai
+        # Determine which LLM provider to use (Grok, Groq, Cerebras, or OpenAI)
+        # Priority: grok > groq > cerebras > openai
         llm_api_key = None
         llm_base_url = None
         llm_model = None
@@ -122,6 +122,13 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no co
             )
             llm_model = config["groq"].get("model", "openai/gpt-oss-120b")
             logger.info(f"Using Groq as LLM provider with model: {llm_model}")
+        elif "cerebras" in config and "api_key" in config["cerebras"]:
+            llm_api_key = config["cerebras"]["api_key"]
+            llm_base_url = config["cerebras"].get(
+                "base_url", "https://api.cerebras.ai/v1"
+            )
+            llm_model = config["cerebras"].get("model", "gpt-oss-120b")
+            logger.info(f"Using Cerebras as LLM provider with model: {llm_model}")
         elif "openai" in config and "api_key" in config["openai"]:
             llm_api_key = config["openai"]["api_key"]
             llm_base_url = None  # Use default OpenAI endpoint
@@ -132,7 +139,7 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no co
             logger.info("Using OpenAI as LLM provider")
         else:
             raise ValueError(
-                "Either OpenAI, Grok, or Groq API key is required in config."
+                "Either OpenAI, Grok, Groq, or Cerebras API key is required in config."
             )
 
         if "logfire" in config:
@@ -210,10 +217,10 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no co
             )
 
         # Create routing service
-        # Use Grok/Groq model if configured, otherwise check for OpenAI routing_model override
+        # Use Grok/Groq/Cerebras model if configured, otherwise check for OpenAI routing_model override
         routing_model = llm_model  # Use the same model as the main LLM by default
         if not routing_model:
-            # Fall back to OpenAI routing_model config if no Grok/Groq model
+            # Fall back to OpenAI routing_model config if no Grok/Groq/Cerebras model
             routing_model = (
                 config.get("openai", {}).get("routing_model")
                 if isinstance(config.get("openai"), dict)
```
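To make the new selection order concrete, the sketch below restates the priority rule the factory now implements (grok > groq > cerebras > openai) as a standalone function. It is illustrative only, not the factory's actual code; the Grok defaults and the Groq base URL shown are assumptions, while the Cerebras defaults and the Groq model default come from this commit.

```python
# Illustrative sketch of the provider-priority rule (grok > groq > cerebras > openai),
# not the factory's actual code. Grok defaults and the Groq base URL are assumptions;
# the Cerebras defaults and the Groq model default match this commit.
from typing import Any, Dict, Optional, Tuple


def select_llm_provider(config: Dict[str, Any]) -> Tuple[str, str, Optional[str], Optional[str]]:
    """Return (provider, api_key, base_url, model) for the highest-priority configured provider."""
    priority = [
        ("grok", "https://api.x.ai/v1", "grok-4"),                          # assumed defaults
        ("groq", "https://api.groq.com/openai/v1", "openai/gpt-oss-120b"),  # base_url assumed
        ("cerebras", "https://api.cerebras.ai/v1", "gpt-oss-120b"),         # defaults from this commit
        ("openai", None, None),                                             # None -> library defaults
    ]
    for name, default_url, default_model in priority:
        section = config.get(name)
        if isinstance(section, dict) and "api_key" in section:
            return (
                name,
                section["api_key"],
                section.get("base_url", default_url),
                section.get("model", default_model),
            )
    raise ValueError("Either OpenAI, Grok, Groq, or Cerebras API key is required in config.")


# Cerebras wins over OpenAI when both are present, mirroring the factory's elif chain.
print(select_llm_provider({"cerebras": {"api_key": "k1"}, "openai": {"api_key": "k2"}})[0])  # cerebras
```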

solana_agent/services/routing.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -39,7 +39,7 @@ def __init__(
         if model:
             self.model = model
         elif base_url:
-            # Using custom provider (e.g., Grok or Groq) but no model specified - use provider's default
+            # Using custom provider (e.g., Grok, Groq, or Cerebras) but no model specified - use provider's default
             self.model = None  # Will use adapter's default
         else:
             # Using OpenAI - default to small, cheap model for routing
```
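The routing service applies the same kind of fallback for its own model choice: an explicit `model` argument wins, a custom `base_url` (Grok, Groq, or Cerebras) defers to the adapter's default, and plain OpenAI falls back to a small routing model. A tiny sketch of that decision, using a placeholder for the OpenAI default since the actual string sits outside this hunk:

```python
# Hedged sketch of the routing-model fallback shown above; "gpt-4.1-mini" is a
# placeholder for the OpenAI routing default, which this hunk does not show.
from typing import Optional


def pick_routing_model(model: Optional[str], base_url: Optional[str]) -> Optional[str]:
    if model:
        return model  # explicit override always wins
    if base_url:
        return None  # custom provider (Grok/Groq/Cerebras): defer to the adapter's default
    return "gpt-4.1-mini"  # placeholder for OpenAI's small, cheap routing default


assert pick_routing_model("gpt-oss-120b", "https://api.cerebras.ai/v1") == "gpt-oss-120b"
assert pick_routing_model(None, "https://api.cerebras.ai/v1") is None
```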

tests/unit/factories/test_agent_factory.py

Lines changed: 113 additions & 3 deletions
```diff
@@ -250,6 +250,33 @@ def groq_with_logfire_config(groq_config):
     return config
 
 
+@pytest.fixture
+def cerebras_config():
+    """Config with Cerebras as the LLM provider."""
+    return {
+        "cerebras": {
+            "api_key": "test-cerebras-key",
+            "base_url": "https://api.cerebras.ai/v1",
+            "model": "gpt-oss-120b",
+        },
+        "agents": [
+            {
+                "name": "test_agent",
+                "instructions": "You are a test agent.",
+                "specialization": "Testing",
+            }
+        ],
+    }
+
+
+@pytest.fixture
+def cerebras_with_logfire_config(cerebras_config):
+    """Config with Cerebras and Logfire enabled."""
+    config = deepcopy(cerebras_config)
+    config["logfire"] = {"api_key": "test-logfire-key"}
+    return config
+
+
 @pytest.fixture
 def openai_with_model_config():
     """Config with OpenAI and a custom model specified."""
@@ -339,10 +366,10 @@ def test_create_from_config_minimal(
 
     def test_missing_openai_section(self, config_missing_openai_section):
         """Test factory creation when the entire openai section is missing."""
-        # This should raise ValueError since OpenAI, Grok, or Groq API key is required
+        # This should raise ValueError since OpenAI, Grok, Groq, or Cerebras API key is required
         with pytest.raises(
             ValueError,
-            match="Either OpenAI, Grok, or Groq API key is required in config.",
+            match="Either OpenAI, Grok, Groq, or Cerebras API key is required in config.",
         ):
             SolanaAgentFactory.create_from_config(config_missing_openai_section)
 
@@ -1665,7 +1692,7 @@ def test_logfire_config_missing_openai_key(self, logfire_config_missing_openai):
         # Based on the current factory code, this should raise a ValueError
         with pytest.raises(
             ValueError,
-            match="Either OpenAI, Grok, or Groq API key is required in config.",
+            match="Either OpenAI, Grok, Groq, or Cerebras API key is required in config.",
         ):
             SolanaAgentFactory.create_from_config(logfire_config_missing_openai)
 
@@ -1874,6 +1901,89 @@ def test_create_groq_without_logfire(
         mock_query_service.assert_called_once()
         assert result == mock_query_instance
 
+    @patch("solana_agent.factories.agent_factory.MongoDBAdapter")
+    @patch("solana_agent.factories.agent_factory.OpenAIAdapter")
+    @patch("solana_agent.factories.agent_factory.AgentService")
+    @patch("solana_agent.factories.agent_factory.RoutingService")
+    @patch("solana_agent.factories.agent_factory.QueryService")
+    def test_create_cerebras_with_logfire(
+        self,
+        mock_query_service,
+        mock_routing_service,
+        mock_agent_service,
+        mock_openai_adapter,
+        mock_mongo_adapter,
+        cerebras_with_logfire_config,
+    ):
+        """Test creating services with Cerebras and Logfire configuration."""
+        # Setup mocks
+        mock_openai_instance = MagicMock()
+        mock_openai_adapter.return_value = mock_openai_instance
+        mock_agent_instance = MagicMock()
+        mock_agent_service.return_value = mock_agent_instance
+        mock_agent_instance.tool_registry.list_all_tools.return_value = []
+        mock_routing_instance = MagicMock()
+        mock_routing_service.return_value = mock_routing_instance
+        mock_query_instance = MagicMock()
+        mock_query_service.return_value = mock_query_instance
+
+        # Call the factory
+        result = SolanaAgentFactory.create_from_config(cerebras_with_logfire_config)
+
+        # Verify OpenAIAdapter was called with Cerebras config and logfire key
+        mock_openai_adapter.assert_called_once_with(
+            api_key="test-cerebras-key",
+            base_url="https://api.cerebras.ai/v1",
+            model="gpt-oss-120b",
+            logfire_api_key="test-logfire-key",
+        )
+        # Verify other services were called
+        mock_agent_service.assert_called_once()
+        mock_routing_service.assert_called_once()
+        mock_query_service.assert_called_once()
+        assert result == mock_query_instance
+
+    @patch("solana_agent.factories.agent_factory.MongoDBAdapter")
+    @patch("solana_agent.factories.agent_factory.OpenAIAdapter")
+    @patch("solana_agent.factories.agent_factory.AgentService")
+    @patch("solana_agent.factories.agent_factory.RoutingService")
+    @patch("solana_agent.factories.agent_factory.QueryService")
+    def test_create_cerebras_without_logfire(
+        self,
+        mock_query_service,
+        mock_routing_service,
+        mock_agent_service,
+        mock_openai_adapter,
+        mock_mongo_adapter,
+        cerebras_config,
+    ):
+        """Test creating services with Cerebras but no Logfire configuration."""
+        # Setup mocks
+        mock_openai_instance = MagicMock()
+        mock_openai_adapter.return_value = mock_openai_instance
+        mock_agent_instance = MagicMock()
+        mock_agent_service.return_value = mock_agent_instance
+        mock_agent_instance.tool_registry.list_all_tools.return_value = []
+        mock_routing_instance = MagicMock()
+        mock_routing_service.return_value = mock_routing_instance
+        mock_query_instance = MagicMock()
+        mock_query_service.return_value = mock_query_instance
+
+        # Call the factory
+        result = SolanaAgentFactory.create_from_config(cerebras_config)
+
+        # Verify OpenAIAdapter was called with Cerebras config but no logfire key
+        mock_openai_adapter.assert_called_once_with(
+            api_key="test-cerebras-key",
+            base_url="https://api.cerebras.ai/v1",
+            model="gpt-oss-120b",
+        )
+        # Verify other services were called
+        mock_agent_service.assert_called_once()
+        mock_routing_service.assert_called_once()
+        mock_query_service.assert_called_once()
+        assert result == mock_query_instance
+
     @patch("solana_agent.factories.agent_factory.MongoDBAdapter")
     @patch("solana_agent.factories.agent_factory.OpenAIAdapter")
     @patch("solana_agent.factories.agent_factory.AgentService")
```