
Commit e759f9e

SDK regeneration
1 parent 07ed37f commit e759f9e

File tree

7 files changed: +22 -151 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.0.0a0"
+version = "5.0.0a1"
 description = ""
 readme = "README.md"
 authors = []

src/cohere/__init__.py

Lines changed: 0 additions & 4 deletions

@@ -43,8 +43,6 @@
     DatasetType,
     DatasetValidationStatus,
     DeleteConnectorResponse,
-    DetectLanguageResponse,
-    DetectLanguageResponseResultsItem,
     DetokenizeResponse,
     EmbedByTypeResponse,
     EmbedByTypeResponseEmbeddings,
@@ -163,8 +161,6 @@
     "DatasetsGetUsageResponse",
     "DatasetsListResponse",
     "DeleteConnectorResponse",
-    "DetectLanguageResponse",
-    "DetectLanguageResponseResultsItem",
     "DetokenizeResponse",
     "EmbedByTypeResponse",
     "EmbedByTypeResponseEmbeddings",

src/cohere/client.py

Lines changed: 20 additions & 79 deletions

@@ -26,7 +26,6 @@
 from .types.classify_request_examples_item import ClassifyRequestExamplesItem
 from .types.classify_request_truncate import ClassifyRequestTruncate
 from .types.classify_response import ClassifyResponse
-from .types.detect_language_response import DetectLanguageResponse
 from .types.detokenize_response import DetokenizeResponse
 from .types.embed_input_type import EmbedInputType
 from .types.embed_request_truncate import EmbedRequestTruncate
@@ -332,6 +331,7 @@ def generate_stream(
         presence_penalty: typing.Optional[float] = OMIT,
         return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT,
         logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
     ) -> typing.Iterator[GenerateStreamedResponse]:
         """
         This endpoint generates realistic text conditioned on a given input.
@@ -385,6 +385,7 @@ def generate_stream(
           For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.

           Note: logit bias may not be supported for all custom models.
+        - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
         """
         _request: typing.Dict[str, typing.Any] = {"prompt": prompt}
         if model is not OMIT:
@@ -415,6 +416,8 @@ def generate_stream(
             _request["return_likelihoods"] = return_likelihoods
         if logit_bias is not OMIT:
             _request["logit_bias"] = logit_bias
+        if raw_prompting is not OMIT:
+            _request["raw_prompting"] = raw_prompting
         with self._client_wrapper.httpx_client.stream(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"),
@@ -457,6 +460,7 @@ def generate(
         presence_penalty: typing.Optional[float] = OMIT,
         return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT,
         logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
     ) -> Generation:
         """
         This endpoint generates realistic text conditioned on a given input.
@@ -509,7 +513,9 @@ def generate(
           For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.

-          Note: logit bias may not be supported for all custom models.---
+          Note: logit bias may not be supported for all custom models.
+        - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
+        ---
         from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate
         from cohere.client import Client

@@ -554,6 +560,8 @@ def generate(
             _request["return_likelihoods"] = return_likelihoods
         if logit_bias is not OMIT:
             _request["logit_bias"] = logit_bias
+        if raw_prompting is not OMIT:
+            _request["raw_prompting"] = raw_prompting
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"),
@@ -780,43 +788,6 @@ def classify(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def detect_language(self, *, texts: typing.List[str], model: typing.Optional[str] = OMIT) -> DetectLanguageResponse:
-        """
-        This endpoint identifies which language each of the provided texts is written in.
-
-        Parameters:
-        - texts: typing.List[str]. List of strings to run the detection on.
-
-        - model: typing.Optional[str]. The identifier of the model to generate with.
-        ---
-        from cohere.client import Client
-
-        client = Client(
-            client_name="YOUR_CLIENT_NAME",
-            token="YOUR_TOKEN",
-        )
-        client.detect_language(
-            texts=[],
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"texts": texts}
-        if model is not OMIT:
-            _request["model"] = model
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detect-language"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(DetectLanguageResponse, _response.json())  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def summarize(
         self,
         *,
@@ -1247,6 +1218,7 @@ async def generate_stream(
         presence_penalty: typing.Optional[float] = OMIT,
         return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT,
         logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
     ) -> typing.AsyncIterator[GenerateStreamedResponse]:
         """
         This endpoint generates realistic text conditioned on a given input.
@@ -1300,6 +1272,7 @@ async def generate_stream(
           For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.

           Note: logit bias may not be supported for all custom models.
+        - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
         """
         _request: typing.Dict[str, typing.Any] = {"prompt": prompt}
         if model is not OMIT:
@@ -1330,6 +1303,8 @@ async def generate_stream(
             _request["return_likelihoods"] = return_likelihoods
         if logit_bias is not OMIT:
             _request["logit_bias"] = logit_bias
+        if raw_prompting is not OMIT:
+            _request["raw_prompting"] = raw_prompting
         async with self._client_wrapper.httpx_client.stream(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"),
@@ -1372,6 +1347,7 @@ async def generate(
         presence_penalty: typing.Optional[float] = OMIT,
         return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT,
         logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
     ) -> Generation:
         """
         This endpoint generates realistic text conditioned on a given input.
@@ -1424,7 +1400,9 @@ async def generate(
           For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.

-          Note: logit bias may not be supported for all custom models.---
+          Note: logit bias may not be supported for all custom models.
+        - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
+        ---
         from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate
         from cohere.client import AsyncClient

@@ -1469,6 +1447,8 @@ async def generate(
             _request["return_likelihoods"] = return_likelihoods
         if logit_bias is not OMIT:
             _request["logit_bias"] = logit_bias
+        if raw_prompting is not OMIT:
+            _request["raw_prompting"] = raw_prompting
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"),
@@ -1695,45 +1675,6 @@ async def classify(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def detect_language(
-        self, *, texts: typing.List[str], model: typing.Optional[str] = OMIT
-    ) -> DetectLanguageResponse:
-        """
-        This endpoint identifies which language each of the provided texts is written in.
-
-        Parameters:
-        - texts: typing.List[str]. List of strings to run the detection on.
-
-        - model: typing.Optional[str]. The identifier of the model to generate with.
-        ---
-        from cohere.client import AsyncClient
-
-        client = AsyncClient(
-            client_name="YOUR_CLIENT_NAME",
-            token="YOUR_TOKEN",
-        )
-        await client.detect_language(
-            texts=[],
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"texts": texts}
-        if model is not OMIT:
-            _request["model"] = model
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detect-language"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(DetectLanguageResponse, _response.json())  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def summarize(
         self,
         *,
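
The new raw_prompting flag is wired identically through the sync and async generate and generate_stream methods above. A minimal sketch of exercising it through the sync client, reusing the placeholder constructor arguments from the SDK's own docstring examples (the prompt string is illustrative, not from this diff):

    from cohere.client import Client

    client = Client(
        client_name="YOUR_CLIENT_NAME",
        token="YOUR_TOKEN",
    )

    # raw_prompting=True sends the prompt to the model without any
    # pre-processing, per the parameter docstring added in this commit;
    # leaving it unset keeps the old behaviour, since OMIT values are
    # dropped from the request body.
    generation = client.generate(
        prompt="Write a haiku about regenerated SDKs.",
        raw_prompting=True,
    )
    print(generation)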

src/cohere/core/client_wrapper.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.0.0a0",
+            "X-Fern-SDK-Version": "5.0.0a1",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name

src/cohere/types/__init__.py

Lines changed: 0 additions & 4 deletions

@@ -44,8 +44,6 @@
 from .dataset_type import DatasetType
 from .dataset_validation_status import DatasetValidationStatus
 from .delete_connector_response import DeleteConnectorResponse
-from .detect_language_response import DetectLanguageResponse
-from .detect_language_response_results_item import DetectLanguageResponseResultsItem
 from .detokenize_response import DetokenizeResponse
 from .embed_by_type_response import EmbedByTypeResponse
 from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings
@@ -146,8 +144,6 @@
     "DatasetType",
     "DatasetValidationStatus",
     "DeleteConnectorResponse",
-    "DetectLanguageResponse",
-    "DetectLanguageResponseResultsItem",
     "DetokenizeResponse",
     "EmbedByTypeResponse",
     "EmbedByTypeResponseEmbeddings",

src/cohere/types/detect_language_response.py

Lines changed: 0 additions & 33 deletions
This file was deleted.

src/cohere/types/detect_language_response_results_item.py

Lines changed: 0 additions & 29 deletions
This file was deleted.
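
Taken together, the deletions are a breaking change for anyone still calling the language-detection endpoint: the detect_language method and its DetectLanguageResponse types are gone from both clients. A hypothetical 5.0.0a0-era caller, not from this diff, illustrating what now fails:

    from cohere.client import Client

    client = Client(
        client_name="YOUR_CLIENT_NAME",
        token="YOUR_TOKEN",
    )

    # Under 5.0.0a0 this POSTed to v1/detect-language; as of this commit
    # the method no longer exists, so the call raises AttributeError.
    client.detect_language(texts=["Hallo, Welt!"])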
