
Commit ac49375: v31.4.0 (#135)
Parent: 61d1907

12 files changed: +1128, −340 lines

README.md

Lines changed: 35 additions & 0 deletions
````diff
@@ -86,6 +86,7 @@ Smart workflows are as easy as combining your tools and prompts.
 
 * [Python](https://python.org) - Programming Language
 * [OpenAI](https://openai.com) - AI Model Provider
+* [Grok](https://x.ai) - Alternative AI Model Provider (optional)
 * [MongoDB](https://mongodb.com) - Conversational History (optional)
 * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
 * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
@@ -536,6 +537,40 @@ uvx solana-agent --user-id my_cli_session --config ./my_agent_config.json
 
 ## Optional Feature Configs
 
+### Grok
+
+Solana Agent supports using Grok from xAI as an alternative to OpenAI. When Grok is configured, it will be used for all LLM operations except embeddings, TTS, and STT (which still require OpenAI).
+
+**Note:** Grok configuration takes priority over OpenAI. If both are present, Grok will be used.
+
+```python
+config = {
+    "grok": {
+        "api_key": "your-grok-api-key",
+        "base_url": "https://api.x.ai/v1",  # Optional, defaults to https://api.x.ai/v1
+        "model": "grok-4-1-fast-non-reasoning"  # Optional, defaults to grok-4-1-fast-non-reasoning
+    },
+    # You can still include OpenAI for embeddings, TTS, and STT
+    "openai": {
+        "api_key": "your-openai-api-key"
+    },
+    "agents": [
+        {
+            "name": "research_specialist",
+            "instructions": "You are an expert researcher.",
+            "specialization": "Research",
+        }
+    ],
+}
+```
+
+**Verified Capabilities:**
+- ✅ Chat completions
+- ✅ Streaming responses
+- ✅ Function calling/Tool usage
+- ✅ Structured outputs (via Instructor TOOLS_STRICT and JSON modes)
+- ✅ Native JSON mode
+
 ### Business Alignment
 
 ```python
````
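For orientation, here is how the new `grok` block plugs into the library's entry point. This is a minimal sketch based on the quickstart elsewhere in the README; the `SolanaAgent` class and streaming `process()` call are assumed from there, not shown in this diff:

```python
import asyncio

from solana_agent import SolanaAgent  # entry point assumed from the README quickstart

config = {
    "grok": {"api_key": "your-grok-api-key"},      # handles chat, tools, routing
    "openai": {"api_key": "your-openai-api-key"},  # embeddings, TTS, STT only
    "agents": [
        {
            "name": "research_specialist",
            "instructions": "You are an expert researcher.",
            "specialization": "Research",
        }
    ],
}

async def main():
    agent = SolanaAgent(config=config)
    # Responses stream back as text chunks
    async for chunk in agent.process("user123", "Summarize zk-rollups briefly."):
        print(chunk, end="", flush=True)

asyncio.run(main())
```

Because `grok` takes priority, this config with both keys present routes all chat traffic through x.ai while keeping OpenAI available for the audio and embedding paths.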

config.json

Lines changed: 3 additions & 0 deletions
```diff
@@ -2,6 +2,9 @@
   "openai": {
     "api_key": "your-openai-api-key"
   },
+  "grok": {
+    "api_key": "your-grok-api-key"
+  },
   "agents": [
     {
       "name": "default_agent",
```

poetry.lock

Lines changed: 488 additions & 262 deletions
Generated lockfile; diff not rendered by default.

pyproject.toml

Lines changed: 15 additions & 15 deletions
```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "31.3.0"
+version = "31.4.0"
 description = "AI Agents for Solana"
 authors = ["Bevan Hunt <[email protected]>"]
 license = "MIT"
@@ -24,34 +24,34 @@ testpaths = ["tests"]
 
 [tool.poetry.dependencies]
 python = ">=3.12,<4.0"
-openai = "1.107.2"
+openai = "2.8.1"
 pydantic = ">=2"
-pymongo = "4.15.0"
-zep-cloud = "3.4.3"
-instructor = "1.11.3"
-pinecone = { version = "7.3.0", extras = ["asyncio"] }
-llama-index-core = "0.14.1"
+pymongo = "4.15.4"
+zep-cloud = "3.13.0"
+instructor = "1.13.0"
+pinecone = { version = "8.0.0", extras = ["asyncio"] }
+llama-index-core = "0.14.8"
 llama-index-embeddings-openai = "0.5.1"
-pypdf = "6.0.0"
+pypdf = "6.4.0"
 scrubadub = "2.0.1"
-logfire = "4.7.0"
-typer = "0.17.4"
+logfire = "4.15.1"
+typer = "0.20.0"
 rich = ">=13,<14.0"
-pillow = "11.3.0"
+pillow = "12.0.0"
 websockets = ">=13,<16"
 
 [tool.poetry.group.dev.dependencies]
-pytest = "^8.4.2"
+pytest = "^9.0.1"
 pytest-cov = "^7.0.0"
-pytest-asyncio = "^1.2.0"
-pytest-mock = "^3.15.0"
+pytest-asyncio = "^1.3.0"
+pytest-mock = "^3.15.1"
 pytest-github-actions-annotate-failures = "^0.3.0"
 sphinx = "^8.2.3"
 sphinx-rtd-theme = "^3.0.2"
 myst-parser = "^4.0.1"
 sphinx-autobuild = "^2025.08.25"
 mongomock = "^4.3.0"
-ruff = "^0.13.0"
+ruff = "^0.14.6"
 
 [tool.poetry.scripts]
 solana-agent = "solana_agent.cli:app"
```

solana_agent/adapters/openai_adapter.py

Lines changed: 40 additions & 7 deletions
```diff
@@ -55,23 +55,49 @@
 class OpenAIAdapter(LLMProvider):
     """OpenAI implementation of LLMProvider with web search capabilities."""
 
-    def __init__(self, api_key: str, logfire_api_key: Optional[str] = None):
+    def __init__(
+        self,
+        api_key: str,
+        base_url: Optional[str] = None,
+        model: Optional[str] = None,
+        logfire_api_key: Optional[str] = None,
+    ):
         self.api_key = api_key
-        self.client = AsyncOpenAI(api_key=api_key)
+        self.base_url = base_url
+
+        # Create client with base_url if provided (for Grok support)
+        if base_url:
+            self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+        else:
+            self.client = AsyncOpenAI(api_key=api_key)
 
         self.logfire = False
         if logfire_api_key:
             try:
                 logfire.configure(token=logfire_api_key)
                 self.logfire = True
-                logger.info("Logfire configured successfully.")
+                # Instrument the main client immediately after configuring logfire
+                logfire.instrument_openai(self.client)
+                logger.info(
+                    "Logfire configured and OpenAI client instrumented successfully."
+                )
             except Exception as e:
                 logger.error(f"Failed to configure Logfire: {e}")
                 self.logfire = False
 
-        self.parse_model = DEFAULT_PARSE_MODEL
-        self.text_model = DEFAULT_CHAT_MODEL
-        self.vision_model = DEFAULT_VISION_MODEL  # Add vision model attribute
+        # Use provided model or defaults (for Grok or OpenAI)
+        if model:
+            # Custom model provided (e.g., from Grok config)
+            self.parse_model = model
+            self.text_model = model
+            self.vision_model = model
+        else:
+            # Use OpenAI defaults
+            self.parse_model = DEFAULT_PARSE_MODEL
+            self.text_model = DEFAULT_CHAT_MODEL
+            self.vision_model = DEFAULT_VISION_MODEL
+
+        # These remain OpenAI-specific
         self.transcription_model = DEFAULT_TRANSCRIPTION_MODEL
         self.tts_model = DEFAULT_TTS_MODEL
         self.embedding_model = DEFAULT_EMBEDDING_MODEL
@@ -409,6 +435,8 @@ async def chat_stream(
         messages: List[Dict[str, Any]],
         model: Optional[str] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
     ) -> AsyncGenerator[Dict[str, Any], None]:  # pragma: no cover
         """Stream chat completions with optional tool calls, yielding normalized events."""
         try:
@@ -420,7 +448,12 @@
             if tools:
                 request_params["tools"] = tools
 
-            client = self.client
+            # Use custom client if api_key and base_url provided, otherwise use default
+            if api_key and base_url:
+                client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+            else:
+                client = self.client
+
             if self.logfire:
                 logfire.instrument_openai(client)
 
```
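To make the constructor change concrete, a short sketch of both instantiation paths (the module path is inferred from the file name above; treat it as an assumption):

```python
from solana_agent.adapters.openai_adapter import OpenAIAdapter

# Grok path: a custom base_url reroutes the AsyncOpenAI client to x.ai,
# and the single `model` string backs parse, text, and vision alike.
grok_adapter = OpenAIAdapter(
    api_key="your-grok-api-key",
    base_url="https://api.x.ai/v1",
    model="grok-4-1-fast-non-reasoning",
)

# OpenAI path: behaviour unchanged, per-capability default models.
openai_adapter = OpenAIAdapter(api_key="your-openai-api-key")

# Either way, transcription, TTS, and embedding models keep their OpenAI
# defaults, which is why those features still need an OpenAI key.
```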

solana_agent/factories/agent_factory.py

Lines changed: 39 additions & 12 deletions
```diff
@@ -104,20 +104,39 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no cover
     else:
         db_adapter = None
 
+    # Determine which LLM provider to use (Grok or OpenAI)
+    # Priority: grok > openai
+    llm_api_key = None
+    llm_base_url = None
+    llm_model = None
+
+    if "grok" in config and "api_key" in config["grok"]:
+        llm_api_key = config["grok"]["api_key"]
+        llm_base_url = config["grok"].get("base_url", "https://api.x.ai/v1")
+        llm_model = config["grok"].get("model", "grok-4-1-fast-non-reasoning")
+        logger.info(f"Using Grok as LLM provider with model: {llm_model}")
+    elif "openai" in config and "api_key" in config["openai"]:
+        llm_api_key = config["openai"]["api_key"]
+        llm_base_url = None  # Use default OpenAI endpoint
+        llm_model = None  # Will use OpenAI adapter defaults
+        logger.info("Using OpenAI as LLM provider")
+    else:
+        raise ValueError("Either OpenAI or Grok API key is required in config.")
+
     if "logfire" in config:
         if "api_key" not in config["logfire"]:
             raise ValueError("Pydantic Logfire API key is required.")
-        if "openai" not in config or "api_key" not in config["openai"]:
-            raise ValueError("OpenAI API key is required.")
         llm_adapter = OpenAIAdapter(
-            api_key=config["openai"]["api_key"],
+            api_key=llm_api_key,
+            base_url=llm_base_url,
+            model=llm_model,
             logfire_api_key=config["logfire"].get("api_key"),
         )
     else:
-        if "openai" not in config or "api_key" not in config["openai"]:
-            raise ValueError("OpenAI API key is required.")
         llm_adapter = OpenAIAdapter(
-            api_key=config["openai"].get("api_key"),
+            api_key=llm_api_key,
+            base_url=llm_base_url,
+            model=llm_model,
         )
 
     # Create business mission if specified in config
@@ -172,19 +191,27 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:  # pragma: no cover
         llm_provider=llm_adapter,
         business_mission=business_mission,
         config=config,
+        api_key=llm_api_key,
+        base_url=llm_base_url,
+        model=llm_model,
         output_guardrails=output_guardrails,
     )
 
     # Create routing service
-    # Optional routing model override (use small, cheap model by default in service)
-    routing_model = (
-        config.get("openai", {}).get("routing_model")
-        if isinstance(config.get("openai"), dict)
-        else None
-    )
+    # Use Grok model if configured, otherwise check for OpenAI routing_model override
+    routing_model = llm_model  # Use the same model as the main LLM by default
+    if not routing_model:
+        # Fall back to OpenAI routing_model config if no Grok model
+        routing_model = (
+            config.get("openai", {}).get("routing_model")
+            if isinstance(config.get("openai"), dict)
+            else None
+        )
     routing_service = RoutingService(
         llm_provider=llm_adapter,
         agent_service=agent_service,
+        api_key=llm_api_key,
+        base_url=llm_base_url,
         model=routing_model,
     )
 
```
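The selection logic boils down to one precedence rule. A standalone restatement (hypothetical helper, not part of the codebase) may be easier to scan than the diff:

```python
from typing import Any, Dict, Optional, Tuple

def resolve_llm_provider(
    config: Dict[str, Any],
) -> Tuple[str, Optional[str], Optional[str]]:
    """Return (api_key, base_url, model); grok wins over openai."""
    if "grok" in config and "api_key" in config["grok"]:
        return (
            config["grok"]["api_key"],
            config["grok"].get("base_url", "https://api.x.ai/v1"),
            config["grok"].get("model", "grok-4-1-fast-non-reasoning"),
        )
    if "openai" in config and "api_key" in config["openai"]:
        # None values let the adapter fall back to OpenAI defaults
        return config["openai"]["api_key"], None, None
    raise ValueError("Either OpenAI or Grok API key is required in config.")
```

Note the routing consequence: with Grok configured, the routing service reuses the Grok model, and the `openai.routing_model` override only applies when no Grok model is set.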

solana_agent/interfaces/providers/llm.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -39,6 +39,8 @@ async def chat_stream(
         messages: List[Dict[str, Any]],
         model: Optional[str] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
     ) -> AsyncGenerator[Dict[str, Any], None]:
         """Stream chat completion deltas and tool call deltas.
 
```
solana_agent/services/agent.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -360,6 +360,8 @@ async def generate_response(
             messages=messages,
             model=self.model,
             tools=tools if tools else None,
+            api_key=self.api_key,
+            base_url=self.base_url,
         ):
             etype = event.get("type")
             if etype == "content":
```

solana_agent/services/query.py

Lines changed: 40 additions & 35 deletions
```diff
@@ -1277,44 +1277,49 @@ async def _drain_out_tr_text():
         agents = self.agent_service.get_all_ai_agents() or {}
         available_agent_names = list(agents.keys())
 
-        # LLM detects switch intent
-        (
-            switch_requested,
-            requested_agent_raw,
-            start_new,
-        ) = await self._detect_switch_intent(user_text, available_agent_names)
-
-        # Normalize requested agent to an exact available key
-        requested_agent = None
-        if requested_agent_raw:
-            raw_lower = requested_agent_raw.lower()
-            for a in available_agent_names:
-                if a.lower() == raw_lower or raw_lower in a.lower():
-                    requested_agent = a
-                    break
-
-        sticky_agent = self._get_sticky_agent(user_id)
-
-        if sticky_agent and not switch_requested:
-            agent_name = sticky_agent
-        else:
-            try:
-                if start_new:
-                    # Start fresh
-                    self._clear_sticky_agent(user_id)
-                if requested_agent:
-                    agent_name = requested_agent
-                else:
-                    # Route if no explicit target
-                    if router:
-                        agent_name = await router.route_query(routing_input)
-                    else:
-                        agent_name = await self.routing_service.route_query(
-                            routing_input
-                        )
-            except Exception:
-                agent_name = next(iter(agents.keys())) if agents else "default"
-            self._set_sticky_agent(user_id, agent_name, required_complete=False)
+        # Fast path: if only one agent, skip all routing logic entirely
+        if len(available_agent_names) == 1:
+            agent_name = available_agent_names[0]
+            self._set_sticky_agent(user_id, agent_name, required_complete=False)
+        else:
+            # LLM detects switch intent (only needed with multiple agents)
+            (
+                switch_requested,
+                requested_agent_raw,
+                start_new,
+            ) = await self._detect_switch_intent(user_text, available_agent_names)
+
+            # Normalize requested agent to an exact available key
+            requested_agent = None
+            if requested_agent_raw:
+                raw_lower = requested_agent_raw.lower()
+                for a in available_agent_names:
+                    if a.lower() == raw_lower or raw_lower in a.lower():
+                        requested_agent = a
+                        break
+
+            sticky_agent = self._get_sticky_agent(user_id)
+
+            if sticky_agent and not switch_requested:
+                agent_name = sticky_agent
+            else:
+                try:
+                    if start_new:
+                        # Start fresh
+                        self._clear_sticky_agent(user_id)
+                    if requested_agent:
+                        agent_name = requested_agent
+                    else:
+                        # Route if no explicit target
+                        if router:
+                            agent_name = await router.route_query(routing_input)
+                        else:
+                            agent_name = await self.routing_service.route_query(
+                                routing_input
+                            )
+                except Exception:
+                    agent_name = next(iter(agents.keys())) if agents else "default"
+                self._set_sticky_agent(user_id, agent_name, required_complete=False)
 
         # 7) Captured data context + incremental save using previous assistant message
         capture_context = ""
```
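Stripped of surrounding context, the restructuring is a guard clause: with exactly one registered agent there is nothing to route, so the LLM-backed switch-intent call (and its latency) is skipped. A simplified schematic (hypothetical method name, condensed from the diff):

```python
async def _pick_agent(self, user_id: str, user_text: str) -> str:
    names = list((self.agent_service.get_all_ai_agents() or {}).keys())
    if len(names) == 1:
        # Fast path: no switch-intent detection, no router call
        self._set_sticky_agent(user_id, names[0], required_complete=False)
        return names[0]
    # Otherwise: detect switch intent, prefer the sticky agent,
    # and fall back to the routing service as before.
    ...
```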
