26 | 26 | from .types.classify_request_examples_item import ClassifyRequestExamplesItem |
27 | 27 | from .types.classify_request_truncate import ClassifyRequestTruncate |
28 | 28 | from .types.classify_response import ClassifyResponse |
29 | | -from .types.detect_language_response import DetectLanguageResponse |
30 | 29 | from .types.detokenize_response import DetokenizeResponse |
31 | 30 | from .types.embed_input_type import EmbedInputType |
32 | 31 | from .types.embed_request_truncate import EmbedRequestTruncate |
@@ -332,6 +331,7 @@ def generate_stream( |
332 | 331 | presence_penalty: typing.Optional[float] = OMIT, |
333 | 332 | return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT, |
334 | 333 | logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, |
| 334 | + raw_prompting: typing.Optional[bool] = OMIT, |
335 | 335 | ) -> typing.Iterator[GenerateStreamedResponse]: |
336 | 336 | """ |
337 | 337 | This endpoint generates realistic text conditioned on a given input. |
@@ -385,6 +385,7 @@ def generate_stream( |
385 | 385 | For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. |
386 | 386 |
387 | 387 | Note: logit bias may not be supported for all custom models. |
| 388 | + - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. |
388 | 389 | """ |
389 | 390 | _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
390 | 391 | if model is not OMIT: |
@@ -415,6 +416,8 @@ def generate_stream( |
415 | 416 | _request["return_likelihoods"] = return_likelihoods |
416 | 417 | if logit_bias is not OMIT: |
417 | 418 | _request["logit_bias"] = logit_bias |
| 419 | + if raw_prompting is not OMIT: |
| 420 | + _request["raw_prompting"] = raw_prompting |
418 | 421 | with self._client_wrapper.httpx_client.stream( |
419 | 422 | "POST", |
420 | 423 | urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), |
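For reference, a minimal sketch of how the new raw_prompting flag could be passed to the streaming call once this change lands; the client_name/token constructor arguments are taken from the examples elsewhere in this file, and the prompt text is purely illustrative:

from cohere.client import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
# With raw_prompting=True the prompt is sent to the model without any
# pre-processing, per the new docstring above.
for event in client.generate_stream(
    prompt="Write a haiku about the sea.",
    raw_prompting=True,
):
    print(event)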
@@ -457,6 +460,7 @@ def generate( |
457 | 460 | presence_penalty: typing.Optional[float] = OMIT, |
458 | 461 | return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT, |
459 | 462 | logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, |
| 463 | + raw_prompting: typing.Optional[bool] = OMIT, |
460 | 464 | ) -> Generation: |
461 | 465 | """ |
462 | 466 | This endpoint generates realistic text conditioned on a given input. |
@@ -509,7 +513,9 @@ def generate( |
509 | 513 |
510 | 514 | For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. |
511 | 515 |
512 | | - Note: logit bias may not be supported for all custom models.--- |
| 516 | + Note: logit bias may not be supported for all custom models. |
| 517 | + - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. |
| 518 | + --- |
513 | 519 | from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate |
514 | 520 | from cohere.client import Client |
515 | 521 |
@@ -554,6 +560,8 @@ def generate( |
554 | 560 | _request["return_likelihoods"] = return_likelihoods |
555 | 561 | if logit_bias is not OMIT: |
556 | 562 | _request["logit_bias"] = logit_bias |
| 563 | + if raw_prompting is not OMIT: |
| 564 | + _request["raw_prompting"] = raw_prompting |
557 | 565 | _response = self._client_wrapper.httpx_client.request( |
558 | 566 | "POST", |
559 | 567 | urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), |
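A similar sketch for the non-streaming endpoint, combining the new flag with the logit_bias parameter discussed in the docstring above (token id 11, the newline character, is the docstring's own example; the prompt is illustrative):

from cohere.client import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
# Discourage newlines (token 11) and skip prompt pre-processing.
generation = client.generate(
    prompt="Explain raw prompting in one sentence.",
    logit_bias={"11": -10},
    raw_prompting=True,
)
print(generation)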
@@ -780,43 +788,6 @@ def classify( |
780 | 788 | raise ApiError(status_code=_response.status_code, body=_response.text) |
781 | 789 | raise ApiError(status_code=_response.status_code, body=_response_json) |
782 | 790 |
783 | | - def detect_language(self, *, texts: typing.List[str], model: typing.Optional[str] = OMIT) -> DetectLanguageResponse: |
784 | | - """ |
785 | | - This endpoint identifies which language each of the provided texts is written in. |
786 | | -
787 | | - Parameters: |
788 | | - - texts: typing.List[str]. List of strings to run the detection on. |
789 | | -
790 | | - - model: typing.Optional[str]. The identifier of the model to generate with. |
791 | | - --- |
792 | | - from cohere.client import Client |
793 | | -
794 | | - client = Client( |
795 | | - client_name="YOUR_CLIENT_NAME", |
796 | | - token="YOUR_TOKEN", |
797 | | - ) |
798 | | - client.detect_language( |
799 | | - texts=[], |
800 | | - ) |
801 | | - """ |
802 | | - _request: typing.Dict[str, typing.Any] = {"texts": texts} |
803 | | - if model is not OMIT: |
804 | | - _request["model"] = model |
805 | | - _response = self._client_wrapper.httpx_client.request( |
806 | | - "POST", |
807 | | - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detect-language"), |
808 | | - json=jsonable_encoder(_request), |
809 | | - headers=self._client_wrapper.get_headers(), |
810 | | - timeout=60, |
811 | | - ) |
812 | | - if 200 <= _response.status_code < 300: |
813 | | - return pydantic.parse_obj_as(DetectLanguageResponse, _response.json()) # type: ignore |
814 | | - try: |
815 | | - _response_json = _response.json() |
816 | | - except JSONDecodeError: |
817 | | - raise ApiError(status_code=_response.status_code, body=_response.text) |
818 | | - raise ApiError(status_code=_response.status_code, body=_response_json) |
819 | | - |
820 | 791 | def summarize( |
821 | 792 | self, |
822 | 793 | *, |
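Since detect_language is dropped from the client here (and from the async client further down), existing callers will start failing with AttributeError. A hypothetical migration guard, not part of this diff, might look like:

from cohere.client import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
try:
    client.detect_language(texts=["Hello world"])  # removed in this commit
except AttributeError:
    print("detect_language is no longer available; drop the call or pin an older SDK release.")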
@@ -1247,6 +1218,7 @@ async def generate_stream( |
1247 | 1218 | presence_penalty: typing.Optional[float] = OMIT, |
1248 | 1219 | return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT, |
1249 | 1220 | logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, |
| 1221 | + raw_prompting: typing.Optional[bool] = OMIT, |
1250 | 1222 | ) -> typing.AsyncIterator[GenerateStreamedResponse]: |
1251 | 1223 | """ |
1252 | 1224 | This endpoint generates realistic text conditioned on a given input. |
@@ -1300,6 +1272,7 @@ async def generate_stream( |
1300 | 1272 | For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. |
1301 | 1273 |
1302 | 1274 | Note: logit bias may not be supported for all custom models. |
| 1275 | + - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. |
1303 | 1276 | """ |
1304 | 1277 | _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
1305 | 1278 | if model is not OMIT: |
@@ -1330,6 +1303,8 @@ async def generate_stream( |
1330 | 1303 | _request["return_likelihoods"] = return_likelihoods |
1331 | 1304 | if logit_bias is not OMIT: |
1332 | 1305 | _request["logit_bias"] = logit_bias |
| 1306 | + if raw_prompting is not OMIT: |
| 1307 | + _request["raw_prompting"] = raw_prompting |
1333 | 1308 | async with self._client_wrapper.httpx_client.stream( |
1334 | 1309 | "POST", |
1335 | 1310 | urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), |
@@ -1372,6 +1347,7 @@ async def generate( |
1372 | 1347 | presence_penalty: typing.Optional[float] = OMIT, |
1373 | 1348 | return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT, |
1374 | 1349 | logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, |
| 1350 | + raw_prompting: typing.Optional[bool] = OMIT, |
1375 | 1351 | ) -> Generation: |
1376 | 1352 | """ |
1377 | 1353 | This endpoint generates realistic text conditioned on a given input. |
@@ -1424,7 +1400,9 @@ async def generate( |
1424 | 1400 |
1425 | 1401 | For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. |
1426 | 1402 |
1427 | | - Note: logit bias may not be supported for all custom models.--- |
| 1403 | + Note: logit bias may not be supported for all custom models. |
| 1404 | + - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. |
| 1405 | + --- |
1428 | 1406 | from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate |
1429 | 1407 | from cohere.client import AsyncClient |
1430 | 1408 |
@@ -1469,6 +1447,8 @@ async def generate( |
1469 | 1447 | _request["return_likelihoods"] = return_likelihoods |
1470 | 1448 | if logit_bias is not OMIT: |
1471 | 1449 | _request["logit_bias"] = logit_bias |
| 1450 | + if raw_prompting is not OMIT: |
| 1451 | + _request["raw_prompting"] = raw_prompting |
1472 | 1452 | _response = await self._client_wrapper.httpx_client.request( |
1473 | 1453 | "POST", |
1474 | 1454 | urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), |
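The async client mirrors the same change; a hedged sketch of passing raw_prompting through AsyncClient (constructor arguments as in the examples above, prompt illustrative):

import asyncio

from cohere.client import AsyncClient

async def main() -> None:
    client = AsyncClient(
        client_name="YOUR_CLIENT_NAME",
        token="YOUR_TOKEN",
    )
    # Same semantics as the sync client: the prompt is forwarded unprocessed.
    generation = await client.generate(
        prompt="Explain raw prompting in one sentence.",
        raw_prompting=True,
    )
    print(generation)

asyncio.run(main())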
@@ -1695,45 +1675,6 @@ async def classify( |
1695 | 1675 | raise ApiError(status_code=_response.status_code, body=_response.text) |
1696 | 1676 | raise ApiError(status_code=_response.status_code, body=_response_json) |
1697 | 1677 |
1698 | | - async def detect_language( |
1699 | | - self, *, texts: typing.List[str], model: typing.Optional[str] = OMIT |
1700 | | - ) -> DetectLanguageResponse: |
1701 | | - """ |
1702 | | - This endpoint identifies which language each of the provided texts is written in. |
1703 | | -
1704 | | - Parameters: |
1705 | | - - texts: typing.List[str]. List of strings to run the detection on. |
1706 | | -
1707 | | - - model: typing.Optional[str]. The identifier of the model to generate with. |
1708 | | - --- |
1709 | | - from cohere.client import AsyncClient |
1710 | | -
1711 | | - client = AsyncClient( |
1712 | | - client_name="YOUR_CLIENT_NAME", |
1713 | | - token="YOUR_TOKEN", |
1714 | | - ) |
1715 | | - await client.detect_language( |
1716 | | - texts=[], |
1717 | | - ) |
1718 | | - """ |
1719 | | - _request: typing.Dict[str, typing.Any] = {"texts": texts} |
1720 | | - if model is not OMIT: |
1721 | | - _request["model"] = model |
1722 | | - _response = await self._client_wrapper.httpx_client.request( |
1723 | | - "POST", |
1724 | | - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detect-language"), |
1725 | | - json=jsonable_encoder(_request), |
1726 | | - headers=self._client_wrapper.get_headers(), |
1727 | | - timeout=60, |
1728 | | - ) |
1729 | | - if 200 <= _response.status_code < 300: |
1730 | | - return pydantic.parse_obj_as(DetectLanguageResponse, _response.json()) # type: ignore |
1731 | | - try: |
1732 | | - _response_json = _response.json() |
1733 | | - except JSONDecodeError: |
1734 | | - raise ApiError(status_code=_response.status_code, body=_response.text) |
1735 | | - raise ApiError(status_code=_response.status_code, body=_response_json) |
1736 | | - |
1737 | 1678 | async def summarize( |
1738 | 1679 | self, |
1739 | 1680 | *, |