Skip to content

Commit b2363cc

Browse files
committed
fix a bunch of image uri stuff
1 parent d049adb commit b2363cc

File tree

5 files changed

+40
-27
lines changed

5 files changed

+40
-27
lines changed

oneping/curl.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ def prepare_request(
7171
payload = {**payload_model, **payload_message, **kwargs}
7272

7373
# add in max tokens
74+
max_tokens = max_tokens if max_tokens is not None else prov.get('max_tokens_default')
7475
if max_tokens is not None:
7576
payload[prov.max_tokens_name] = max_tokens
7677

oneping/native/google.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
from google.genai.types import Part, Content, GenerateContentConfig
77

88
from ..providers import CONFIG as C, PROVIDERS as P
9-
from ..utils import parse_image_uri
9+
from ..utils import ensure_image_uri, parse_image_uri
1010

1111
##
1212
## helper functions
@@ -15,7 +15,8 @@
1515
def make_content(text, image=None):
1616
parts = [Part(text=text)]
1717
if image is not None:
18-
mime_type, data = parse_image_uri(image)
18+
image_url = ensure_image_uri(image)
19+
mime_type, data = parse_image_uri(image_url)
1920
part = Part.from_bytes(data=data, mime_type=mime_type)
2021
parts.append(part)
2122
return parts

oneping/native/openai.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
CONFIG as C, PROVIDERS as P,
88
content_openai, convert_history, payload_openai,
99
response_openai_native, stream_openai_native,
10-
embed_response_openai, transcribe_response_openai
10+
embed_response_openai_native, transcribe_response_openai
1111
)
1212

1313
##
@@ -56,8 +56,8 @@ async def stream_async(query, image=None, history=None, prefill=None, prediction
5656

5757
def embed(query, model=P.openai.embed_model, api_key=None, base_url=None, **kwargs):
5858
client = make_client(base_url=base_url, api_key=api_key)
59-
response = client.embeddings.create(query, model=model, **kwargs)
60-
return embed_response_openai(response)
59+
response = client.embeddings.create(input=query, model=model, **kwargs)
60+
return embed_response_openai_native(response)
6161

6262
def transcribe(audio, model=P.openai.transcribe_model, api_key=None, base_url=None, **kwargs):
6363
client = make_client(base_url=base_url, api_key=api_key)

oneping/providers.py

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import toml
55
from pathlib import Path
66

7-
from .utils import split_image_uri, Config
7+
from .utils import split_image_uri, ensure_image_uri, Config
88

99
##
1010
## authorization headers
@@ -17,7 +17,7 @@ def authorize_openai(api_key):
1717

1818
def authorize_anthropic(api_key):
1919
return {
20-
'x-api-key': api_key,
20+
'X-Api-Key': api_key,
2121
}
2222

2323
##
@@ -27,7 +27,7 @@ def authorize_anthropic(api_key):
2727
def content_openai(text, image=None):
2828
if image is None:
2929
return text
30-
image_url = { 'url': image }
30+
image_url = { 'url': ensure_image_uri(image) }
3131
return [
3232
{ 'type': 'image_url', 'image_url': image_url },
3333
{ 'type': 'text', 'text': text },
@@ -36,7 +36,8 @@ def content_openai(text, image=None):
3636
def content_anthropic(text, image=None):
3737
if image is None:
3838
return text
39-
media_type, data = split_image_uri(image)
39+
image_url = ensure_image_uri(image)
40+
media_type, data = split_image_uri(image_url)
4041
source = {
4142
'type': 'base64', 'media_type': media_type, 'data': data
4243
}
@@ -174,6 +175,11 @@ def embed_response_openai(reply):
174175
item['embedding'] for item in reply['data']
175176
]
176177

178+
def embed_response_openai_native(reply):
179+
return [
180+
item.embedding for item in reply.data
181+
]
182+
177183
def embed_payload_tei(text):
178184
return {'inputs': text}
179185

@@ -264,22 +270,18 @@ def transcribe_response_openai(audio):
264270
## known llm providers
265271
##
266272

267-
# get config paths
268-
library_dir = Path(__file__).parent
269-
xdg_config_home = Path(os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')))
270-
user_config_dir = xdg_config_home / 'oneping'
271-
default_providers_file = library_dir / 'providers.toml'
272-
user_providers_file = user_config_dir / 'providers.toml'
273-
default_config_file = library_dir / 'config.toml'
274-
user_config_file = user_config_dir / 'config.toml'
275-
276273
# fault tolerant toml loader
277274
def load_toml(file):
278275
if os.path.exists(file):
279276
return toml.load(file)
280277
else:
281278
return {}
282279

280+
# get config paths
281+
XDG_LOC = os.path.expanduser('~/.config')
282+
LIB_DIR = Path(__file__).parent
283+
XDG_DIR = Path(os.environ.get('XDG_CONFIG_HOME', XDG_LOC))
284+
283285
# merge config layers
284286
global PROVIDERS
285287
global CONFIG
@@ -288,10 +290,10 @@ def reload():
288290
global CONFIG
289291

290292
# reload config from disk
291-
DEFAULT_CONFIG = load_toml(default_config_file)
292-
USER_CONFIG = load_toml(user_config_file)
293-
DEFAULT_PROVIDERS = load_toml(default_providers_file)
294-
USER_PROVIDERS = load_toml(user_providers_file)
293+
DEFAULT_CONFIG = load_toml(LIB_DIR / 'config.toml')
294+
DEFAULT_PROVIDERS = load_toml(LIB_DIR / 'providers.toml')
295+
USER_CONFIG = load_toml(XDG_DIR / 'oneping' / 'config.toml')
296+
USER_PROVIDERS = load_toml(XDG_DIR / 'oneping' / 'providers.toml')
295297

296298
# merge provider layers
297299
CONFIG = Config({ **DEFAULT_CONFIG, **USER_CONFIG })

oneping/providers.toml

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,44 +46,53 @@ embed_model = "text-embedding-3-large"
4646
[anthropic]
4747
base_url = "https://api.anthropic.com"
4848
authorize = "anthropic"
49-
chat_path = "messages"
49+
chat_path = "v1/messages"
5050
max_tokens_name = "max_tokens"
51+
max_tokens_default = 8192
5152
content = "anthropic"
5253
payload = "anthropic"
5354
response = "anthropic"
5455
stream = "anthropic"
5556
api_key_env = "ANTHROPIC_API_KEY"
56-
chat_model = "claude-opus-4-1-20250805"
57+
chat_model = "claude-sonnet-4-5-20250929"
5758

5859
[anthropic.headers]
5960
anthropic-version = "2023-06-01"
6061

6162
[google]
62-
base_url = "https://generativelanguage.googleapis.com/v1beta/openai"
63+
base_url = "https://generativelanguage.googleapis.com/v1beta"
64+
authorize = "openai"
65+
chat_path = "openai/chat/completions"
66+
embed_path = "openai/embeddings"
6367
api_key_env = "GEMINI_API_KEY"
64-
chat_model = "gemini-2.0-flash-exp"
65-
embed_model = "gemini-embedding-exp-03-07"
68+
chat_model = "gemini-2.5-flash"
69+
embed_model = "gemini-embedding-001"
6670

6771
[xai]
6872
base_url = "https://api.x.ai/v1"
73+
authorize = "openai"
6974
api_key_env = "XAI_API_KEY"
7075
chat_model = "grok-4"
7176

7277
[fireworks]
7378
base_url = "https://api.fireworks.ai/inference"
79+
authorize = "openai"
7480
api_key_env = "FIREWORKS_API_KEY"
7581
chat_model = "accounts/fireworks/models/llama-v3p3-70b-instruct"
7682

7783
[groq]
7884
base_url = "https://api.groq.com/openai"
85+
authorize = "openai"
7986
api_key_env = "GROQ_API_KEY"
8087
chat_model = "llama-3.3-70b-versatile"
8188

8289
[deepseek]
8390
base_url = "https://api.deepseek.com"
91+
authorize = "openai"
8492
api_key_env = "DEEPSEEK_API_KEY"
8593
chat_model = "deepseek-chat"
8694

8795
[azure]
8896
api_key_env = "AZURE_API_KEY"
97+
authorize = "openai"
8998
chat_model = "gpt-5"

0 commit comments

Comments (0)