Skip to content

Commit 053b87b

Browse files
committed
fix llama.cpp config
1 parent 79ef56c commit 053b87b

File tree

3 files changed

+3
-3
lines changed

3 files changed

+3
-3
lines changed

oneping/native/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
##
66

77
def has_native(provider):
    """Return True if *provider* has a native client implementation.

    The providers listed below (plus the ``None`` placeholder) are served
    only through the generic interface, so they report False.  The key
    'llama-cpp' is the canonical spelling used here (this commit renames
    it from 'llama.cpp' for consistency with the rest of the config).
    """
    return provider not in (None, 'llama-cpp', 'tei', 'vllm', 'oneping')
99

1010
##
1111
## dummy function

oneping/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ async def cumcat(stream):
4848
## image utils
4949
##
5050

51-
def make_image_uri(data, media_type='image/png'):
    """Encode raw image bytes as a base64 ``data:`` URI.

    Parameters
    ----------
    data : bytes
        Raw image bytes to embed.
    media_type : str
        MIME type placed in the URI header.  Defaults to 'image/png' —
        a full MIME type, not the bare 'png' extension, as required by
        the data-URI scheme (RFC 2397).

    Returns
    -------
    str
        A string of the form ``data:<media_type>;base64,<payload>``.
    """
    payload = base64.b64encode(data).decode('utf-8')
    return f'data:{media_type};base64,{payload}'
5454

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
[project]
name = 'oneping'
version = '0.6.2'
description = 'LLM provider abstraction layer.'
readme = { file = 'README.md' , content-type = 'text/markdown' }
authors = [{ name = 'Doug Hanley', email = '[email protected]' }]

0 commit comments

Comments
 (0)