Showing preview only (2,063K chars total). Download the full file or copy to clipboard to get everything.
Repository: cohere-ai/cohere-python
Branch: main
Commit: 756b1d8ec0e4
Files: 367
Total size: 1.9 MB
Directory structure:
gitextract_zznh6amy/
├── .fern/
│ └── metadata.json
├── .fernignore
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ └── improvement_request.md
│ └── workflows/
│ └── ci.yml
├── .gitignore
├── 4.0.0-5.0.0-migration-guide.md
├── LICENSE
├── README.md
├── mypy.ini
├── pyproject.toml
├── reference.md
├── requirements.txt
├── src/
│ └── cohere/
│ ├── __init__.py
│ ├── _default_clients.py
│ ├── aliases.py
│ ├── audio/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── transcriptions/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ └── audio_transcriptions_create_response.py
│ ├── aws_client.py
│ ├── base_client.py
│ ├── batches/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── batch.py
│ │ ├── batch_status.py
│ │ ├── cancel_batch_response.py
│ │ ├── create_batch_response.py
│ │ ├── get_batch_response.py
│ │ └── list_batches_response.py
│ ├── bedrock_client.py
│ ├── client.py
│ ├── client_v2.py
│ ├── config.py
│ ├── connectors/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── raw_client.py
│ ├── core/
│ │ ├── __init__.py
│ │ ├── api_error.py
│ │ ├── client_wrapper.py
│ │ ├── datetime_utils.py
│ │ ├── file.py
│ │ ├── force_multipart.py
│ │ ├── http_client.py
│ │ ├── http_response.py
│ │ ├── http_sse/
│ │ │ ├── __init__.py
│ │ │ ├── _api.py
│ │ │ ├── _decoders.py
│ │ │ ├── _exceptions.py
│ │ │ └── _models.py
│ │ ├── jsonable_encoder.py
│ │ ├── logging.py
│ │ ├── parse_error.py
│ │ ├── pydantic_utilities.py
│ │ ├── query_encoder.py
│ │ ├── remove_none_from_dict.py
│ │ ├── request_options.py
│ │ ├── serialization.py
│ │ └── unchecked_base_model.py
│ ├── datasets/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── datasets_create_response.py
│ │ ├── datasets_get_response.py
│ │ ├── datasets_get_usage_response.py
│ │ └── datasets_list_response.py
│ ├── embed_jobs/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ └── create_embed_job_request_truncate.py
│ ├── environment.py
│ ├── errors/
│ │ ├── __init__.py
│ │ ├── bad_request_error.py
│ │ ├── client_closed_request_error.py
│ │ ├── forbidden_error.py
│ │ ├── gateway_timeout_error.py
│ │ ├── internal_server_error.py
│ │ ├── invalid_token_error.py
│ │ ├── not_found_error.py
│ │ ├── not_implemented_error.py
│ │ ├── service_unavailable_error.py
│ │ ├── too_many_requests_error.py
│ │ ├── unauthorized_error.py
│ │ └── unprocessable_entity_error.py
│ ├── finetuning/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── finetuning/
│ │ │ ├── __init__.py
│ │ │ └── types/
│ │ │ ├── __init__.py
│ │ │ ├── base_model.py
│ │ │ ├── base_type.py
│ │ │ ├── create_finetuned_model_response.py
│ │ │ ├── delete_finetuned_model_response.py
│ │ │ ├── event.py
│ │ │ ├── finetuned_model.py
│ │ │ ├── get_finetuned_model_response.py
│ │ │ ├── hyperparameters.py
│ │ │ ├── list_events_response.py
│ │ │ ├── list_finetuned_models_response.py
│ │ │ ├── list_training_step_metrics_response.py
│ │ │ ├── lora_target_modules.py
│ │ │ ├── settings.py
│ │ │ ├── status.py
│ │ │ ├── strategy.py
│ │ │ ├── training_step_metrics.py
│ │ │ ├── update_finetuned_model_response.py
│ │ │ └── wandb_config.py
│ │ └── raw_client.py
│ ├── manually_maintained/
│ │ ├── __init__.py
│ │ ├── cache.py
│ │ ├── cohere_aws/
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── classification.py
│ │ │ ├── client.py
│ │ │ ├── embeddings.py
│ │ │ ├── error.py
│ │ │ ├── generation.py
│ │ │ ├── mode.py
│ │ │ ├── rerank.py
│ │ │ ├── response.py
│ │ │ └── summary.py
│ │ ├── lazy_aws_deps.py
│ │ ├── lazy_oci_deps.py
│ │ ├── streaming_embed.py
│ │ └── tokenizers.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── raw_client.py
│ ├── oci_client.py
│ ├── overrides.py
│ ├── py.typed
│ ├── raw_base_client.py
│ ├── sagemaker_client.py
│ ├── types/
│ │ ├── __init__.py
│ │ ├── api_meta.py
│ │ ├── api_meta_api_version.py
│ │ ├── api_meta_billed_units.py
│ │ ├── api_meta_tokens.py
│ │ ├── assistant_message.py
│ │ ├── assistant_message_response.py
│ │ ├── assistant_message_response_content_item.py
│ │ ├── assistant_message_v2content.py
│ │ ├── assistant_message_v2content_one_item.py
│ │ ├── auth_token_type.py
│ │ ├── chat_citation.py
│ │ ├── chat_citation_generation_event.py
│ │ ├── chat_citation_type.py
│ │ ├── chat_connector.py
│ │ ├── chat_content_delta_event.py
│ │ ├── chat_content_delta_event_delta.py
│ │ ├── chat_content_delta_event_delta_message.py
│ │ ├── chat_content_delta_event_delta_message_content.py
│ │ ├── chat_content_end_event.py
│ │ ├── chat_content_start_event.py
│ │ ├── chat_content_start_event_delta.py
│ │ ├── chat_content_start_event_delta_message.py
│ │ ├── chat_content_start_event_delta_message_content.py
│ │ ├── chat_content_start_event_delta_message_content_type.py
│ │ ├── chat_data_metrics.py
│ │ ├── chat_debug_event.py
│ │ ├── chat_document.py
│ │ ├── chat_document_source.py
│ │ ├── chat_finish_reason.py
│ │ ├── chat_message.py
│ │ ├── chat_message_end_event.py
│ │ ├── chat_message_end_event_delta.py
│ │ ├── chat_message_start_event.py
│ │ ├── chat_message_start_event_delta.py
│ │ ├── chat_message_start_event_delta_message.py
│ │ ├── chat_message_v2.py
│ │ ├── chat_messages.py
│ │ ├── chat_request_citation_quality.py
│ │ ├── chat_request_prompt_truncation.py
│ │ ├── chat_request_safety_mode.py
│ │ ├── chat_search_queries_generation_event.py
│ │ ├── chat_search_query.py
│ │ ├── chat_search_result.py
│ │ ├── chat_search_result_connector.py
│ │ ├── chat_search_results_event.py
│ │ ├── chat_stream_end_event.py
│ │ ├── chat_stream_end_event_finish_reason.py
│ │ ├── chat_stream_event.py
│ │ ├── chat_stream_event_type.py
│ │ ├── chat_stream_request_citation_quality.py
│ │ ├── chat_stream_request_prompt_truncation.py
│ │ ├── chat_stream_request_safety_mode.py
│ │ ├── chat_stream_start_event.py
│ │ ├── chat_text_content.py
│ │ ├── chat_text_generation_event.py
│ │ ├── chat_text_response_format.py
│ │ ├── chat_text_response_format_v2.py
│ │ ├── chat_thinking_content.py
│ │ ├── chat_tool_call_delta_event.py
│ │ ├── chat_tool_call_delta_event_delta.py
│ │ ├── chat_tool_call_delta_event_delta_message.py
│ │ ├── chat_tool_call_delta_event_delta_message_tool_calls.py
│ │ ├── chat_tool_call_delta_event_delta_message_tool_calls_function.py
│ │ ├── chat_tool_call_end_event.py
│ │ ├── chat_tool_call_start_event.py
│ │ ├── chat_tool_call_start_event_delta.py
│ │ ├── chat_tool_call_start_event_delta_message.py
│ │ ├── chat_tool_calls_chunk_event.py
│ │ ├── chat_tool_calls_generation_event.py
│ │ ├── chat_tool_message.py
│ │ ├── chat_tool_plan_delta_event.py
│ │ ├── chat_tool_plan_delta_event_delta.py
│ │ ├── chat_tool_plan_delta_event_delta_message.py
│ │ ├── chat_tool_source.py
│ │ ├── check_api_key_response.py
│ │ ├── citation.py
│ │ ├── citation_end_event.py
│ │ ├── citation_options.py
│ │ ├── citation_options_mode.py
│ │ ├── citation_start_event.py
│ │ ├── citation_start_event_delta.py
│ │ ├── citation_start_event_delta_message.py
│ │ ├── citation_type.py
│ │ ├── classify_data_metrics.py
│ │ ├── classify_example.py
│ │ ├── classify_request_truncate.py
│ │ ├── classify_response.py
│ │ ├── classify_response_classifications_item.py
│ │ ├── classify_response_classifications_item_classification_type.py
│ │ ├── classify_response_classifications_item_labels_value.py
│ │ ├── compatible_endpoint.py
│ │ ├── connector.py
│ │ ├── connector_auth_status.py
│ │ ├── connector_o_auth.py
│ │ ├── content.py
│ │ ├── create_connector_o_auth.py
│ │ ├── create_connector_response.py
│ │ ├── create_connector_service_auth.py
│ │ ├── create_embed_job_response.py
│ │ ├── dataset.py
│ │ ├── dataset_part.py
│ │ ├── dataset_type.py
│ │ ├── dataset_validation_status.py
│ │ ├── delete_connector_response.py
│ │ ├── detokenize_response.py
│ │ ├── document.py
│ │ ├── document_content.py
│ │ ├── embed_by_type_response.py
│ │ ├── embed_by_type_response_embeddings.py
│ │ ├── embed_by_type_response_response_type.py
│ │ ├── embed_content.py
│ │ ├── embed_floats_response.py
│ │ ├── embed_image.py
│ │ ├── embed_image_url.py
│ │ ├── embed_input.py
│ │ ├── embed_input_type.py
│ │ ├── embed_job.py
│ │ ├── embed_job_status.py
│ │ ├── embed_job_truncate.py
│ │ ├── embed_request_truncate.py
│ │ ├── embed_response.py
│ │ ├── embed_text.py
│ │ ├── embedding_type.py
│ │ ├── finetune_dataset_metrics.py
│ │ ├── finish_reason.py
│ │ ├── generate_request_return_likelihoods.py
│ │ ├── generate_request_truncate.py
│ │ ├── generate_stream_end.py
│ │ ├── generate_stream_end_response.py
│ │ ├── generate_stream_error.py
│ │ ├── generate_stream_event.py
│ │ ├── generate_stream_request_return_likelihoods.py
│ │ ├── generate_stream_request_truncate.py
│ │ ├── generate_stream_text.py
│ │ ├── generate_streamed_response.py
│ │ ├── generation.py
│ │ ├── get_connector_response.py
│ │ ├── get_model_response.py
│ │ ├── get_model_response_sampling_defaults.py
│ │ ├── image.py
│ │ ├── image_content.py
│ │ ├── image_url.py
│ │ ├── image_url_detail.py
│ │ ├── json_response_format.py
│ │ ├── json_response_format_v2.py
│ │ ├── label_metric.py
│ │ ├── list_connectors_response.py
│ │ ├── list_embed_job_response.py
│ │ ├── list_models_response.py
│ │ ├── logprob_item.py
│ │ ├── message.py
│ │ ├── metrics.py
│ │ ├── non_streamed_chat_response.py
│ │ ├── o_auth_authorize_response.py
│ │ ├── parse_info.py
│ │ ├── rerank_document.py
│ │ ├── rerank_request_documents_item.py
│ │ ├── rerank_response.py
│ │ ├── rerank_response_results_item.py
│ │ ├── rerank_response_results_item_document.py
│ │ ├── reranker_data_metrics.py
│ │ ├── response_format.py
│ │ ├── response_format_v2.py
│ │ ├── single_generation.py
│ │ ├── single_generation_in_stream.py
│ │ ├── single_generation_token_likelihoods_item.py
│ │ ├── source.py
│ │ ├── streamed_chat_response.py
│ │ ├── summarize_request_extractiveness.py
│ │ ├── summarize_request_format.py
│ │ ├── summarize_request_length.py
│ │ ├── summarize_response.py
│ │ ├── system_message_v2.py
│ │ ├── system_message_v2content.py
│ │ ├── system_message_v2content_one_item.py
│ │ ├── thinking.py
│ │ ├── thinking_type.py
│ │ ├── tokenize_response.py
│ │ ├── tool.py
│ │ ├── tool_call.py
│ │ ├── tool_call_delta.py
│ │ ├── tool_call_v2.py
│ │ ├── tool_call_v2function.py
│ │ ├── tool_content.py
│ │ ├── tool_message_v2.py
│ │ ├── tool_message_v2content.py
│ │ ├── tool_parameter_definitions_value.py
│ │ ├── tool_result.py
│ │ ├── tool_v2.py
│ │ ├── tool_v2function.py
│ │ ├── update_connector_response.py
│ │ ├── usage.py
│ │ ├── usage_billed_units.py
│ │ ├── usage_tokens.py
│ │ ├── user_message_v2.py
│ │ └── user_message_v2content.py
│ ├── utils.py
│ ├── v2/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── v2chat_request_documents_item.py
│ │ ├── v2chat_request_safety_mode.py
│ │ ├── v2chat_request_tool_choice.py
│ │ ├── v2chat_response.py
│ │ ├── v2chat_stream_request_documents_item.py
│ │ ├── v2chat_stream_request_safety_mode.py
│ │ ├── v2chat_stream_request_tool_choice.py
│ │ ├── v2chat_stream_response.py
│ │ ├── v2embed_request_truncate.py
│ │ ├── v2rerank_response.py
│ │ └── v2rerank_response_results_item.py
│ └── version.py
└── tests/
├── __init__.py
├── embed_job.jsonl
├── test_async_client.py
├── test_aws_client_unit.py
├── test_bedrock_client.py
├── test_client.py
├── test_client_init.py
├── test_client_v2.py
├── test_embed_streaming.py
├── test_embed_utils.py
├── test_oci_client.py
├── test_oci_mypy.py
└── test_overrides.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .fern/metadata.json
================================================
{
"cliVersion": "4.63.2",
"generatorName": "fernapi/fern-python-sdk",
"generatorVersion": "5.3.3",
"generatorConfig": {
"inline_request_params": false,
"extras": {
"oci": [
"oci"
]
},
"extra_dependencies": {
"fastavro": "^1.9.4",
"requests": "^2.0.0",
"types-requests": "^2.0.0",
"tokenizers": ">=0.15,<1",
"oci": {
"version": "^2.165.0",
"optional": true
}
},
"improved_imports": true,
"pydantic_config": {
"frozen": false,
"union_naming": "v1",
"require_optional_fields": false,
"extra_fields": "allow",
"use_str_enums": true,
"skip_validation": true
},
"timeout_in_seconds": 300,
"client": {
"class_name": "BaseCohere",
"filename": "base_client.py",
"exported_class_name": "Client",
"exported_filename": "client.py"
},
"additional_init_exports": [
{
"from": "client",
"imports": [
"Client",
"AsyncClient"
]
},
{
"from": "bedrock_client",
"imports": [
"BedrockClient",
"BedrockClientV2"
]
},
{
"from": "sagemaker_client",
"imports": [
"SagemakerClient",
"SagemakerClientV2"
]
},
{
"from": "aws_client",
"imports": [
"AwsClient"
]
},
{
"from": "oci_client",
"imports": [
"OciClient",
"OciClientV2"
]
},
{
"from": "client_v2",
"imports": [
"AsyncClientV2",
"ClientV2"
]
},
{
"from": "aliases",
"imports": [
"StreamedChatResponseV2",
"MessageStartStreamedChatResponseV2",
"MessageEndStreamedChatResponseV2",
"ContentStartStreamedChatResponseV2",
"ContentDeltaStreamedChatResponseV2",
"ContentEndStreamedChatResponseV2",
"ToolCallStartStreamedChatResponseV2",
"ToolCallDeltaStreamedChatResponseV2",
"ToolCallEndStreamedChatResponseV2",
"ChatResponse"
]
}
]
},
"originGitCommit": "8dfb5e03f14a05967c4cdeeb44429eb4c1dca198",
"sdkVersion": "6.1.0"
}
================================================
FILE: .fernignore
================================================
4.0.0-5.0.0-migration-guide.md
banner.png
README.md
src/cohere/client.py
tests
.github/workflows/ci.yml
.github/ISSUE_TEMPLATE
LICENSE
.github/workflows/tests.yml
src/cohere/utils.py
src/cohere/overrides.py
src/cohere/config.py
src/cohere/manually_maintained
src/cohere/manually_maintained/__init__.py
src/cohere/bedrock_client.py
src/cohere/aws_client.py
src/cohere/sagemaker_client.py
src/cohere/oci_client.py
src/cohere/client_v2.py
mypy.ini
src/cohere/aliases.py
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report related to an SDK error
about: Create a report to help us improve
title: ''
labels: ''
---
**SDK Version (required)**
Provide the version you are using. To get the version, run the following python snippet
```python
import cohere
print(cohere.__version__) # 5.6.1
```
**Describe the bug**
A clear and concise description of what the bug is.
**Screenshots**
If applicable, add screenshots to help explain your problem.
================================================
FILE: .github/ISSUE_TEMPLATE/improvement_request.md
================================================
---
name: Improvement request, or additional features
about: Create a request to help us improve
title: ""
labels: ""
---
**Describe the improvement**
A clear and concise description of what the new improvement is.
**Code snippet of expected outcome**
If applicable, add a code snippet of how you'd like to see the feature implemented
================================================
FILE: .github/workflows/ci.yml
================================================
name: ci
on: [push]
jobs:
compile:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v3
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bootstrap poetry
uses: snok/install-poetry@v1
with:
version: 1.5.1
virtualenvs-in-project: false
- name: Install dependencies
run: poetry install
- name: Compile
run: poetry run mypy .
test:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v3
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bootstrap poetry
uses: snok/install-poetry@v1
with:
version: 1.5.1
virtualenvs-in-project: false
- name: Install dependencies
run: poetry install
- name: Install aws deps
run: poetry run pip install boto3 sagemaker botocore
- name: Test
run: poetry run pytest -rP -n auto .
env:
CO_API_KEY: ${{ secrets.COHERE_API_KEY }}
- name: Install aiohttp extra
run: poetry install --extras aiohttp
- name: Test (aiohttp)
run: poetry run pytest -rP -n auto -m aiohttp . || [ $? -eq 5 ]
env:
CO_API_KEY: ${{ secrets.COHERE_API_KEY }}
publish:
needs: [compile, test]
if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v3
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- name: Install dependencies
run: poetry install
- name: Publish to pypi
run: |
poetry config repositories.remote https://upload.pypi.org/legacy/
poetry --no-interaction -v publish --build --repository remote --username "$PYPI_USERNAME" --password "$PYPI_PASSWORD"
env:
PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
================================================
FILE: .gitignore
================================================
.mypy_cache/
.ruff_cache/
__pycache__/
dist/
poetry.toml
================================================
FILE: 4.0.0-5.0.0-migration-guide.md
================================================
## `cohere==4` to `cohere==5` migration guide
As we migrate from the handwritten, manually-maintained sdk to our auto-generated sdk, there are some breaking changes that must be accommodated during migration. These should mostly improve the developer experience but thank you for bearing with us as we make these changes.
### Installation
To install the latest version of the cohere sdk `pip3 install --upgrade cohere`.
### Migrating usages
#### Migrating function calls
[This diff view](https://github.com/cohere-ai/cohere-python/compare/old-usage...new-usage) enumerates all usages of the old sdk and how they map to the new sdk. Some fields are no longer supported in the new sdk.
#### Migrating streaming usage
The `streaming: boolean` parameter is no longer supported in the new sdk. Instead, you can replace the `chat` function with `chat_stream` and `generate` function with `generate_stream`. These will automatically inject the `streaming` parameter into the request. The following is an example usage for `chat_stream`:
```python
stream = co.chat_stream(
message="Tell me a short story"
)
for event in stream:
if event.event_type == "text-generation":
print(event.text, end='')
```
### Migrating deprecated `num_workers` Client constructor parameter
The Client constructor accepts an `httpx_client` which can be configured to limit the maximum number of connections.
```python
limits = httpx.Limits(max_connections=10)
cohere.Client(httpx_client=httpx.Client(limits=limits))
```
### Removed functionality (subject to change)
The following list names the functions that are not in the new SDK and what their ongoing support status is.
#### No longer supported
* check_api_key
* loglikelihood
* batch_generate
* codebook
* batch_tokenize
* batch_detokenize
* detect_language
* generate_feedback
* generate_preference_feedback
* create_cluster_job
* get_cluster_job
* list_cluster_jobs
* wait_for_cluster_job
* create_custom_model
* wait_for_custom_model
* get_custom_model
* get_custom_model_by_name
* get_custom_model_metrics
* list_custom_models
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2021 Cohere
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Cohere Python SDK

[](https://pypi.org/project/cohere/)

[](https://github.com/fern-api/fern)
The Cohere Python SDK allows access to Cohere models across many different platforms: the cohere platform, AWS (Bedrock, Sagemaker), Azure, GCP and Oracle OCI. For a full list of support and snippets, please take a look at the [SDK support docs page](https://docs.cohere.com/docs/cohere-works-everywhere).
## Documentation
Cohere documentation and API reference is available [here](https://docs.cohere.com/).
## Installation
```
pip install cohere
```
## Usage
```Python
import cohere
co = cohere.ClientV2()
response = co.chat(
model="command-r-plus-08-2024",
messages=[{"role": "user", "content": "hello world!"}],
)
print(response)
```
> [!TIP]
> You can set a system environment variable `CO_API_KEY` to avoid writing your api key within your code, e.g. add `export CO_API_KEY=theapikeyforyouraccount`
> in your ~/.zshrc or ~/.bashrc, open a new terminal, then code calling `cohere.Client()` will read this key.
## Streaming
The SDK supports streaming endpoints. To take advantage of this feature for chat,
use `chat_stream`.
```Python
import cohere
co = cohere.ClientV2()
response = co.chat_stream(
model="command-r-plus-08-2024",
messages=[{"role": "user", "content": "hello world!"}],
)
for event in response:
if event.type == "content-delta":
print(event.delta.message.content.text, end="")
```
## Oracle Cloud Infrastructure (OCI)
The SDK supports Oracle Cloud Infrastructure (OCI) Generative AI service. First, install the OCI SDK:
```
pip install 'cohere[oci]'
```
Then use the `OciClient` or `OciClientV2`:
```Python
import cohere
# Using OCI config file authentication (default: ~/.oci/config)
co = cohere.OciClient(
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
)
response = co.embed(
model="embed-english-v3.0",
texts=["Hello world"],
input_type="search_document",
)
print(response.embeddings)
```
### OCI Authentication Methods
**1. Config File (Default)**
```Python
co = cohere.OciClient(
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
# Uses ~/.oci/config with DEFAULT profile
)
```
**2. Custom Profile**
```Python
co = cohere.OciClient(
oci_profile="MY_PROFILE",
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
)
```
**3. Session-based Authentication (Security Token)**
```Python
# Works with OCI CLI session tokens
co = cohere.OciClient(
oci_profile="MY_SESSION_PROFILE", # Profile with security_token_file
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
)
```
**4. Direct Credentials**
```Python
co = cohere.OciClient(
oci_user_id="ocid1.user.oc1...",
oci_fingerprint="xx:xx:xx:...",
oci_tenancy_id="ocid1.tenancy.oc1...",
oci_private_key_path="~/.oci/key.pem",
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
)
```
**5. Instance Principal (for OCI Compute instances)**
```Python
co = cohere.OciClient(
auth_type="instance_principal",
oci_region="us-chicago-1",
oci_compartment_id="ocid1.compartment.oc1...",
)
```
### Supported OCI APIs
The OCI client supports the following Cohere APIs:
- **Embed**: Full support for all embedding models
- **Chat**: Full support with both V1 (`OciClient`) and V2 (`OciClientV2`) APIs
- Streaming available via `chat_stream()`
- Supports Command-R and Command-A model families
### OCI Model Availability and Limitations
**Available on OCI On-Demand Inference:**
- ✅ **Embed models**: available on OCI Generative AI
- ✅ **Chat models**: available via `OciClient` (V1) and `OciClientV2` (V2)
**Not Available on OCI On-Demand Inference:**
- ❌ **Generate API**: OCI TEXT_GENERATION models are base models that require fine-tuning before deployment
- ❌ **Rerank API**: OCI TEXT_RERANK models are base models that require fine-tuning before deployment
- ❌ **Multiple Embedding Types**: OCI on-demand models only support single embedding type per request (cannot request both `float` and `int8` simultaneously)
**Note**: To use Generate or Rerank models on OCI, you need to:
1. Fine-tune the base model using OCI's fine-tuning service
2. Deploy the fine-tuned model to a dedicated endpoint
3. Update your code to use the deployed model endpoint
For the latest model availability, see the [OCI Generative AI documentation](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm).
## Contributing
While we value open-source contributions to this SDK, the code is generated programmatically. Additions made directly would have to be moved over to our generation code, otherwise they would be overwritten upon the next generated release. Feel free to open a PR as a proof of concept, but know that we will not be able to merge it as-is. We suggest opening an issue first to discuss with us!
On the other hand, contributions to the README are always very welcome!
================================================
FILE: mypy.ini
================================================
[mypy]
exclude = src/cohere/manually_maintained/cohere_aws
================================================
FILE: pyproject.toml
================================================
[project]
name = "cohere"
dynamic = ["version"]
[tool.poetry]
name = "cohere"
version = "6.1.0"
description = ""
readme = "README.md"
authors = []
keywords = []
license = "MIT"
classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Programming Language :: Python :: 3.15",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
"License :: OSI Approved :: MIT License"
]
packages = [
{ include = "cohere", from = "src"}
]
[tool.poetry.urls]
Repository = 'https://github.com/cohere-ai/cohere-python'
[tool.poetry.dependencies]
python = "^3.10"
aiohttp = { version = ">=3.10.0,<4", optional = true}
fastavro = "^1.9.4"
httpx = ">=0.21.2"
httpx-aiohttp = { version = "0.1.8", optional = true}
oci = { version = "^2.165.0", optional = true}
pydantic = ">= 1.9.2"
pydantic-core = ">=2.18.2,<2.44.0"
requests = "^2.0.0"
tokenizers = ">=0.15,<1"
types-requests = "^2.0.0"
typing_extensions = ">= 4.0.0"
[tool.poetry.group.dev.dependencies]
mypy = "==1.13.0"
pytest = "^8.2.0"
pytest-asyncio = "^1.0.0"
pytest-xdist = "^3.6.1"
python-dateutil = "^2.9.0"
types-python-dateutil = "^2.9.0.20240316"
ruff = "==0.11.5"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
asyncio_mode = "auto"
markers = [
"aiohttp: tests that require httpx_aiohttp to be installed",
]
[tool.mypy]
plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
[tool.ruff.lint]
select = [
"E", # pycodestyle errors
"F", # pyflakes
"I", # isort
]
ignore = [
"E402", # Module level import not at top of file
"E501", # Line too long
"E711", # Comparison to `None` should be `cond is not None`
"E712", # Avoid equality comparisons to `True`; use `if ...:` checks
"E721", # Use `is` and `is not` for type comparisons, or `isinstance()` for insinstance checks
"E722", # Do not use bare `except`
"E731", # Do not assign a `lambda` expression, use a `def`
"F821", # Undefined name
"F841" # Local variable ... is assigned to but never used
]
[tool.ruff.lint.isort]
section-order = ["future", "standard-library", "third-party", "first-party"]
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.extras]
oci=["oci"]
aiohttp=["aiohttp", "httpx-aiohttp"]
================================================
FILE: reference.md
================================================
# Reference
<details><summary><code>client.<a href="src/cohere/client.py">chat_stream</a>(...) -> typing.Iterator[bytes]</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Generates a streamed text response to a user message.
To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.chat_stream(
model="command-a-03-2025",
message="hello!",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**message:** `str`
Text input for the model to respond to.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**stream:** `typing.Literal`
Defaults to `false`.
When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`.
Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**accepts:** `typing.Optional[typing.Literal]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]`
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
Compatible Deployments: Cohere Platform, Private Deployments
</dd>
</dl>
<dl>
<dd>
**preamble:** `typing.Optional[str]`
When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**chat_history:** `typing.Optional[typing.List[Message]]`
A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`.
Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**conversation_id:** `typing.Optional[str]`
An alternative to `chat_history`.
Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string.
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**prompt_truncation:** `typing.Optional[ChatStreamRequestPromptTruncation]`
Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
Dictates how the prompt will be constructed.
With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
Compatible Deployments:
- AUTO: Cohere Platform Only
- AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**connectors:** `typing.Optional[typing.List[ChatConnector]]`
Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one.
When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**search_queries_only:** `typing.Optional[bool]`
Defaults to `false`.
When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.Optional[typing.List[ChatDocument]]`
A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary.
Example:
```
[
{ "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
{ "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." },
]
```
Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents.
Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words.
An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model.
An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**citation_quality:** `typing.Optional[ChatStreamRequestCitationQuality]`
Defaults to `"enabled"`.
Citations are enabled by default for models that support it, but can be turned off by setting `"type": "disabled"`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
Defaults to `0.3`.
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Randomness can be further maximized by increasing the value of the `p` parameter.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**max_input_tokens:** `typing.Optional[int]`
The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.
Input will be truncated according to the `prompt_truncation` parameter.
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures only the top `k` most likely tokens are considered for generation at each step.
Defaults to `0`, min value of `0`, max value of `500`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]`
A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**raw_prompting:** `typing.Optional[bool]`
When enabled, the user's prompt will be sent to the model without
any pre-processing.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**tools:** `typing.Optional[typing.List[Tool]]`
A list of available tools (functions) that the model may suggest invoking before producing a text response.
When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**tool_results:** `typing.Optional[typing.List[ToolResult]]`
A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
**Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list.
```
tool_results = [
{
"call": {
"name": <tool name>,
"parameters": {
<param name>: <param value>
}
},
"outputs": [{
<key>: <value>
}]
},
...
]
```
**Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**force_single_step:** `typing.Optional[bool]` — Forces the chat to be single step. Defaults to `false`.
</dd>
</dl>
<dl>
<dd>
**response_format:** `typing.Optional[ResponseFormat]`
</dd>
</dl>
<dl>
<dd>
**safety_mode:** `typing.Optional[ChatStreamRequestSafetyMode]`
Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `NONE` is specified, the safety instruction will be omitted.
Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">chat</a>(...) -> NonStreamedChatResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Generates a text response to a user message.
To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.chat(
model="command-a-03-2025",
message="Tell me about LLMs",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**message:** `str`
Text input for the model to respond to.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**stream:** `typing.Literal`
Defaults to `false`.
When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`.
Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**accepts:** `typing.Optional[typing.Literal]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]`
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
Compatible Deployments: Cohere Platform, Private Deployments
</dd>
</dl>
<dl>
<dd>
**preamble:** `typing.Optional[str]`
When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**chat_history:** `typing.Optional[typing.List[Message]]`
A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`.
Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**conversation_id:** `typing.Optional[str]`
An alternative to `chat_history`.
Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string.
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**prompt_truncation:** `typing.Optional[ChatRequestPromptTruncation]`
Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
Dictates how the prompt will be constructed.
With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
Compatible Deployments:
- AUTO: Cohere Platform Only
- AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**connectors:** `typing.Optional[typing.List[ChatConnector]]`
Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one.
When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**search_queries_only:** `typing.Optional[bool]`
Defaults to `false`.
When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.Optional[typing.List[ChatDocument]]`
A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary.
Example:
```
[
{ "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
{ "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." },
]
```
Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents.
Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words.
An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model.
An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**citation_quality:** `typing.Optional[ChatRequestCitationQuality]`
Defaults to `"enabled"`.
Citations are enabled by default for models that support it, but can be turned off by setting `"type": "disabled"`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
Defaults to `0.3`.
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Randomness can be further maximized by increasing the value of the `p` parameter.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**max_input_tokens:** `typing.Optional[int]`
The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.
Input will be truncated according to the `prompt_truncation` parameter.
Compatible Deployments: Cohere Platform
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures only the top `k` most likely tokens are considered for generation at each step.
Defaults to `0`, min value of `0`, max value of `500`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]`
A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**raw_prompting:** `typing.Optional[bool]`
When enabled, the user's prompt will be sent to the model without
any pre-processing.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**tools:** `typing.Optional[typing.List[Tool]]`
A list of available tools (functions) that the model may suggest invoking before producing a text response.
When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**tool_results:** `typing.Optional[typing.List[ToolResult]]`
A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
**Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list.
```
tool_results = [
{
"call": {
"name": <tool name>,
"parameters": {
<param name>: <param value>
}
},
"outputs": [{
<key>: <value>
}]
},
...
]
```
**Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**force_single_step:** `typing.Optional[bool]` — Forces the chat to be single step. Defaults to `false`.
</dd>
</dl>
<dl>
<dd>
**response_format:** `typing.Optional[ResponseFormat]`
</dd>
</dl>
<dl>
<dd>
**safety_mode:** `typing.Optional[ChatRequestSafetyMode]`
Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `NONE` is specified, the safety instruction will be omitted.
Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">generate_stream</a>(...) -> typing.Iterator[bytes]</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
<Warning>
This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat with Streaming API.
</Warning>
Generates realistic text conditioned on a given input.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.generate_stream(
prompt="Please explain to me how LLMs work",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**prompt:** `str`
The input text that serves as the starting point for generating the response.
Note: The prompt will be pre-processed and modified before reaching the model.
</dd>
</dl>
<dl>
<dd>
**stream:** `typing.Literal`
When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following:
- `COMPLETE` - the model sent back a finished reply
- `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length
- `ERROR` - something went wrong when generating the reply
- `ERROR_TOXIC` - the model generated a reply that was deemed toxic
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]`
The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental).
Smaller, "light" models are faster, while larger models will perform better. [Custom models](https://docs.cohere.com/docs/training-custom-models) can also be supplied with their full ID.
</dd>
</dl>
<dl>
<dd>
**num_generations:** `typing.Optional[int]` — The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[GenerateStreamRequestTruncate]`
One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**preset:** `typing.Optional[str]`
Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.com/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
</dd>
</dl>
<dl>
<dd>
**end_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures only the top `k` most likely tokens are considered for generation at each step.
Defaults to `0`, min value of `0`, max value of `500`.
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
</dd>
</dl>
<dl>
<dd>
**return_likelihoods:** `typing.Optional[GenerateStreamRequestReturnLikelihoods]`
One of `GENERATION|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`.
If `GENERATION` is selected, the token likelihoods will only be provided for generated text.
WARNING: `ALL` is deprecated, and will be removed in a future release.
</dd>
</dl>
<dl>
<dd>
**raw_prompting:** `typing.Optional[bool]` — When enabled, the user's prompt will be sent to the model without any pre-processing.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">generate</a>(...) -> Generation</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
<Warning>
This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API.
</Warning>
Generates realistic text conditioned on a given input.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.generate(
prompt="Please explain to me how LLMs work",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**prompt:** `str`
The input text that serves as the starting point for generating the response.
Note: The prompt will be pre-processed and modified before reaching the model.
</dd>
</dl>
<dl>
<dd>
**stream:** `typing.Literal`
When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following:
- `COMPLETE` - the model sent back a finished reply
- `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length
- `ERROR` - something went wrong when generating the reply
- `ERROR_TOXIC` - the model generated a reply that was deemed toxic
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]`
The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental).
Smaller, "light" models are faster, while larger models will perform better. [Custom models](https://docs.cohere.com/docs/training-custom-models) can also be supplied with their full ID.
</dd>
</dl>
<dl>
<dd>
**num_generations:** `typing.Optional[int]` — The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[GenerateRequestTruncate]`
One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
</dd>
</dl>
<dl>
<dd>
**preset:** `typing.Optional[str]`
Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.com/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
</dd>
</dl>
<dl>
<dd>
**end_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures only the top `k` most likely tokens are considered for generation at each step.
Defaults to `0`, min value of `0`, max value of `500`.
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
</dd>
</dl>
<dl>
<dd>
**return_likelihoods:** `typing.Optional[GenerateRequestReturnLikelihoods]`
One of `GENERATION|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`.
If `GENERATION` is selected, the token likelihoods will only be provided for generated text.
WARNING: `ALL` is deprecated, and will be removed in a future release.
</dd>
</dl>
<dl>
<dd>
**raw_prompting:** `typing.Optional[bool]` — When enabled, the user's prompt will be sent to the model without any pre-processing.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">embed</a>(...) -> EmbedResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint returns text and image embeddings. An embedding is a list of floating point numbers that captures semantic information about the content that it represents.
Embeddings can be used to create classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.
If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.embed(
texts=[
"hello",
"goodbye"
],
model="embed-v4.0",
input_type="classification",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**texts:** `typing.Optional[typing.List[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`.
</dd>
</dl>
<dl>
<dd>
**images:** `typing.Optional[typing.List[str]]`
An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg`, `image/png`, `image/webp`, or `image/gif` format and have a maximum size of 5MB.
Images are only supported with Embed v3.0 and newer models.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]` — ID of one of the available [Embedding models](https://docs.cohere.com/docs/cohere-embed).
</dd>
</dl>
<dl>
<dd>
**input_type:** `typing.Optional[EmbedInputType]`
</dd>
</dl>
<dl>
<dd>
**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]`
Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
* `"float"`: Use this when you want to get back the default float embeddings. Supported with all Embed models.
* `"int8"`: Use this when you want to get back signed int8 embeddings. Supported with Embed v3.0 and newer Embed models.
* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models.
* `"binary"`: Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models.
* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[EmbedRequestTruncate]`
One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">rerank</a>(...) -> RerankResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.rerank(
documents=[
{
"text": "Carson City is the capital city of the American state of Nevada."
},
{
"text": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan."
},
{
"text": "Capitalization or capitalisation in English grammar is the use of a capital letter at the start of a word. English usage varies from capitalization in other languages."
},
{
"text": "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district."
},
{
            "text": "Capital punishment has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states."
}
],
query="What is the capital of the United States?",
top_n=3,
model="rerank-v4.0-pro",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**query:** `str` — The search query
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.List[RerankRequestDocumentsItem]`
A list of document objects or strings to rerank.
If a document is provided the text field is required and all other fields will be preserved in the response.
The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000.
We recommend a maximum of 1,000 documents for optimal endpoint performance.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]` — The identifier of the model to use, eg `rerank-v3.5`.
</dd>
</dl>
<dl>
<dd>
**top_n:** `typing.Optional[int]` — The number of most relevant documents or indices to return, defaults to the length of the documents
</dd>
</dl>
<dl>
<dd>
**rank_fields:** `typing.Optional[typing.List[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking.
</dd>
</dl>
<dl>
<dd>
**return_documents:** `typing.Optional[bool]`
- If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request.
- If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request.
</dd>
</dl>
<dl>
<dd>
**max_chunks_per_doc:** `typing.Optional[int]` — The maximum number of chunks to produce internally from a document
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">classify</a>(...) -> ClassifyResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference.
Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client, ClassifyExample
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.classify(
examples=[
ClassifyExample(
text="Dermatologists don\'t like her!",
label="Spam",
),
ClassifyExample(
text="\'Hello, open to this?\'",
label="Spam",
),
ClassifyExample(
text="I need help please wire me $1000 right now",
label="Spam",
),
ClassifyExample(
text="Nice to know you ;)",
label="Spam",
),
ClassifyExample(
text="Please help me?",
label="Spam",
),
ClassifyExample(
text="Your parcel will be delivered today",
label="Not spam",
),
ClassifyExample(
text="Review changes to our Terms and Conditions",
label="Not spam",
),
ClassifyExample(
text="Weekly sync notes",
label="Not spam",
),
ClassifyExample(
text="\'Re: Follow up from today\'s meeting\'",
label="Not spam",
),
ClassifyExample(
text="Pre-read for tomorrow",
label="Not spam",
)
],
inputs=[
"Confirm your email address",
"hey i need u to send some $"
],
model="YOUR-FINE-TUNED-MODEL-ID",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**inputs:** `typing.List[str]`
A list of up to 96 texts to be classified. Each one must be a non-empty string.
There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).
Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.
</dd>
</dl>
<dl>
<dd>
**examples:** `typing.Optional[typing.List[ClassifyExample]]`
An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.
Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]` — ID of a [Fine-tuned](https://docs.cohere.com/v2/docs/classify-starting-the-training) Classify model
</dd>
</dl>
<dl>
<dd>
**preset:** `typing.Optional[str]` — The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.com/playground). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[ClassifyRequestTruncate]`
One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">summarize</a>(...) -> SummarizeResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
<Warning>
This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API.
</Warning>
Generates a summary in English for a given text.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.summarize(
    text="Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name \"ice cream\" varies from one country to another. In some countries, such as the United States, \"ice cream\" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled \"frozen dairy dessert\" instead. In other countries, such as Italy and Argentina, one word is used for all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**text:** `str` — The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English.
</dd>
</dl>
<dl>
<dd>
**length:** `typing.Optional[SummarizeRequestLength]` — One of `short`, `medium`, `long`, or `auto` defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text.
</dd>
</dl>
<dl>
<dd>
**format:** `typing.Optional[SummarizeRequestFormat]` — One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text.
</dd>
</dl>
<dl>
<dd>
**model:** `typing.Optional[str]` — The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better.
</dd>
</dl>
<dl>
<dd>
**extractiveness:** `typing.Optional[SummarizeRequestExtractiveness]` — One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]` — Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1.
</dd>
</dl>
<dl>
<dd>
**additional_command:** `typing.Optional[str]` — A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda"
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">tokenize</a>(...) -> TokenizeResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.tokenize(
text="tokenize me! :D",
model="command",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**text:** `str` — The string to be tokenized, the minimum text length is 1 character, and the maximum text length is 65536 characters.
</dd>
</dl>
<dl>
<dd>
**model:** `str` — The input will be tokenized by the tokenizer that is used by this model.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">detokenize</a>(...) -> DetokenizeResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.detokenize(
tokens=[
10002,
2261,
2012,
8,
2792,
43
],
model="command",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**tokens:** `typing.List[int]` — The list of tokens to be detokenized.
</dd>
</dl>
<dl>
<dd>
**model:** `str` — An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.<a href="src/cohere/client.py">check_api_key</a>() -> CheckApiKeyResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Checks that the api key in the Authorization header is valid and active
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.check_api_key()
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## V2
<details><summary><code>client.v2.<a href="src/cohere/v2/client.py">chat_stream</a>(...) -> typing.Iterator[bytes]</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client, ChatMessageV2_User
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.v2.chat_stream(
model="command-a-03-2025",
messages=[
ChatMessageV2_User(
content="Tell me about LLMs",
)
],
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**stream:** `typing.Literal`
Defaults to `false`.
When `true`, the response will be a SSE stream of events.
Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
</dd>
</dl>
<dl>
<dd>
**model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models).
</dd>
</dl>
<dl>
<dd>
**messages:** `ChatMessages`
</dd>
</dl>
<dl>
<dd>
**tools:** `typing.Optional[typing.List[ToolV2]]`
A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools.
Learn more in the [Tool Use guide](https://docs.cohere.com/docs/tools).
</dd>
</dl>
<dl>
<dd>
**strict_tools:** `typing.Optional[bool]`
When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
**Note**: The first few requests with a new set of tools will take longer to process.
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.Optional[typing.List[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
</dd>
</dl>
<dl>
<dd>
**citation_options:** `typing.Optional[CitationOptions]`
</dd>
</dl>
<dl>
<dd>
**response_format:** `typing.Optional[ResponseFormatV2]`
</dd>
</dl>
<dl>
<dd>
**safety_mode:** `typing.Optional[V2ChatStreamRequestSafetyMode]`
Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `OFF` is specified, the safety instruction will be omitted.
Safety modes are not yet configurable in combination with `tools` and `documents` parameters.
**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
**Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
**Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
Defaults to `0.3`.
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Randomness can be further maximized by increasing the value of the `p` parameter.
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
Defaults to `0`, min value of `0`, max value of `500`.
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
</dd>
</dl>
<dl>
<dd>
**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
</dd>
</dl>
<dl>
<dd>
**tool_choice:** `typing.Optional[V2ChatStreamRequestToolChoice]`
Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.
When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and give a direct response.
If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
**Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
</dd>
</dl>
<dl>
<dd>
**thinking:** `typing.Optional[Thinking]`
</dd>
</dl>
<dl>
<dd>
**priority:** `typing.Optional[int]` — Controls how early the request is handled. Lower numbers indicate higher priority (default: 0, the highest). When the system is under load, higher-priority requests are processed first and are the least likely to be dropped.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.v2.<a href="src/cohere/v2/client.py">chat</a>(...) -> V2ChatResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Generates a text response to a user message. To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client, ChatMessageV2_User
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.v2.chat(
model="command-a-03-2025",
messages=[
ChatMessageV2_User(
content="Tell me about LLMs",
)
],
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**stream:** `typing.Literal`
Defaults to `false`.
When `true`, the response will be an SSE stream of events.
Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
</dd>
</dl>
<dl>
<dd>
**model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models).
</dd>
</dl>
<dl>
<dd>
**messages:** `ChatMessages`
</dd>
</dl>
<dl>
<dd>
**tools:** `typing.Optional[typing.List[ToolV2]]`
A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools.
Learn more in the [Tool Use guide](https://docs.cohere.com/docs/tools).
</dd>
</dl>
<dl>
<dd>
**strict_tools:** `typing.Optional[bool]`
When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
**Note**: The first few requests with a new set of tools will take longer to process.
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.Optional[typing.List[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
</dd>
</dl>
<dl>
<dd>
**citation_options:** `typing.Optional[CitationOptions]`
</dd>
</dl>
<dl>
<dd>
**response_format:** `typing.Optional[ResponseFormatV2]`
</dd>
</dl>
<dl>
<dd>
**safety_mode:** `typing.Optional[V2ChatRequestSafetyMode]`
Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `OFF` is specified, the safety instruction will be omitted.
Safety modes are not yet configurable in combination with `tools` and `documents` parameters.
**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]`
The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
**Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
**Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
</dd>
</dl>
<dl>
<dd>
**stop_sequences:** `typing.Optional[typing.List[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]`
Defaults to `0.3`.
A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Randomness can be further maximized by increasing the value of the `p` parameter.
</dd>
</dl>
<dl>
<dd>
**seed:** `typing.Optional[int]`
If specified, the backend will make a best effort to sample tokens
deterministically, such that repeated requests with the same
seed and parameters should return the same result. However,
determinism cannot be totally guaranteed.
</dd>
</dl>
<dl>
<dd>
**frequency_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
</dd>
</dl>
<dl>
<dd>
**presence_penalty:** `typing.Optional[float]`
Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
</dd>
</dl>
<dl>
<dd>
**k:** `typing.Optional[int]`
Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
Defaults to `0`, min value of `0`, max value of `500`.
</dd>
</dl>
<dl>
<dd>
**p:** `typing.Optional[float]`
Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
</dd>
</dl>
<dl>
<dd>
**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
</dd>
</dl>
<dl>
<dd>
**tool_choice:** `typing.Optional[V2ChatRequestToolChoice]`
Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.
When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and give a direct response.
If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
**Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.
</dd>
</dl>
<dl>
<dd>
**thinking:** `typing.Optional[Thinking]`
</dd>
</dl>
<dl>
<dd>
**priority:** `typing.Optional[int]` — Controls how early the request is handled. Lower numbers indicate higher priority (default: 0, the highest). When the system is under load, higher-priority requests are processed first and are the least likely to be dropped.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.v2.<a href="src/cohere/v2/client.py">embed</a>(...) -> EmbedByTypeResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.
Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.
If you want to learn more about how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.v2.embed(
texts=[
"hello",
"goodbye"
],
model="embed-v4.0",
input_type="classification",
embedding_types=[
"float"
],
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**model:** `str` — ID of one of the available [Embedding models](https://docs.cohere.com/docs/cohere-embed).
</dd>
</dl>
<dl>
<dd>
**input_type:** `EmbedInputType`
</dd>
</dl>
<dl>
<dd>
**texts:** `typing.Optional[typing.List[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`.
</dd>
</dl>
<dl>
<dd>
**images:** `typing.Optional[typing.List[str]]`
An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg`, `image/png`, `image/webp`, or `image/gif` format and must have a maximum size of 5MB.
Image embeddings are supported with Embed v3.0 and newer models.
</dd>
</dl>
<dl>
<dd>
**inputs:** `typing.Optional[typing.List[EmbedInput]]` — An array of inputs for the model to embed. Maximum number of inputs per call is `96`. An input can contain a mix of text and image components.
</dd>
</dl>
<dl>
<dd>
**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to embed per input. If the input text is longer than this, it will be truncated according to the `truncate` parameter.
</dd>
</dl>
<dl>
<dd>
**output_dimension:** `typing.Optional[int]`
The number of dimensions of the output embedding. This is only available for `embed-v4` and newer models.
Possible values are `256`, `512`, `1024`, and `1536`. The default is `1536`.
</dd>
</dl>
<dl>
<dd>
**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]`
Specifies the types of embeddings you want to get back. Can be one or more of the following types.
* `"float"`: Use this when you want to get back the default float embeddings. Supported with all Embed models.
* `"int8"`: Use this when you want to get back signed int8 embeddings. Supported with Embed v3.0 and newer Embed models.
* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models.
* `"binary"`: Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models.
* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models.
* `"base64"`: Use this when you want to get back base64 embeddings. Supported with Embed v3.0 and newer Embed models.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[V2EmbedRequestTruncate]`
One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
</dd>
</dl>
<dl>
<dd>
**priority:** `typing.Optional[int]` — Controls how early the request is handled. Lower numbers indicate higher priority (default: 0, the highest). When the system is under load, higher-priority requests are processed first and are the least likely to be dropped.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.v2.<a href="src/cohere/v2/client.py">rerank</a>(...) -> V2RerankResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.v2.rerank(
documents=[
"Carson City is the capital city of the American state of Nevada.",
"The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.",
"Capitalization or capitalisation in English grammar is the use of a capital letter at the start of a word. English usage varies from capitalization in other languages.",
"Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.",
"Capital punishment has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states."
],
query="What is the capital of the United States?",
top_n=3,
model="rerank-v4.0-pro",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**model:** `str` — The identifier of the model to use, e.g. `rerank-v3.5`.
</dd>
</dl>
<dl>
<dd>
**query:** `str` — The search query
</dd>
</dl>
<dl>
<dd>
**documents:** `typing.List[str]`
A list of texts that will be compared to the `query`.
For optimal performance we recommend against sending more than 1,000 documents in a single request.
**Note**: long documents will automatically be truncated to the value of `max_tokens_per_doc`.
**Note**: structured data should be formatted as YAML strings for best performance.
</dd>
</dl>
<dl>
<dd>
**top_n:** `typing.Optional[int]` — Limits the number of returned rerank results to the specified value. If not passed, all the rerank results will be returned.
</dd>
</dl>
<dl>
<dd>
**max_tokens_per_doc:** `typing.Optional[int]` — Defaults to `4096`. Long documents will be automatically truncated to the specified number of tokens.
</dd>
</dl>
<dl>
<dd>
**priority:** `typing.Optional[int]` — Controls how early the request is handled. Lower numbers indicate higher priority (default: 0, the highest). When the system is under load, higher-priority requests are processed first and are the least likely to be dropped.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## Batches
<details><summary><code>client.batches.<a href="src/cohere/batches/client.py">list</a>(...) -> ListBatchesResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
List the batches for the current user
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.batches.list(
page_size=1,
page_token="page_token",
order_by="order_by",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**page_size:** `typing.Optional[int]`
The maximum number of batches to return. The service may return fewer than
this value.
If unspecified, at most 50 batches will be returned.
The maximum value is 1000; values above 1000 will be coerced to 1000.
</dd>
</dl>
<dl>
<dd>
**page_token:** `typing.Optional[str]`
A page token, received from a previous `ListBatches` call.
Provide this to retrieve the subsequent page.
</dd>
</dl>
<dl>
<dd>
**order_by:** `typing.Optional[str]`
Batches can be ordered by creation time or last updated time.
Use `created_at` for creation time or `updated_at` for last updated time.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.batches.<a href="src/cohere/batches/client.py">create</a>(...) -> CreateBatchResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Creates and executes a batch from an uploaded dataset of requests
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
from cohere.batches import Batch
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.batches.create(
request=Batch(
name="name",
input_dataset_id="input_dataset_id",
model="model",
),
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**request:** `Batch`
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.batches.<a href="src/cohere/batches/client.py">retrieve</a>(...) -> GetBatchResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Retrieves a batch
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.batches.retrieve(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The batch ID.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.batches.<a href="src/cohere/batches/client.py">cancel</a>(...) -> CancelBatchResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Cancels an in-progress batch
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.batches.cancel(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The batch ID.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## EmbedJobs
<details><summary><code>client.embed_jobs.<a href="src/cohere/embed_jobs/client.py">list</a>() -> ListEmbedJobResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
The list embed job endpoint allows users to view all embed jobs history for that specific user.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.embed_jobs.list()
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.embed_jobs.<a href="src/cohere/embed_jobs/client.py">create</a>(...) -> CreateEmbedJobResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. The result of a completed embed job is new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.embed_jobs.create(
model="model",
dataset_id="dataset_id",
input_type="search_document",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**model:** `str`
ID of the embedding model.
Available models and corresponding embedding dimensions:
- `embed-english-v3.0` : 1024
- `embed-multilingual-v3.0` : 1024
- `embed-english-light-v3.0` : 384
- `embed-multilingual-light-v3.0` : 384
</dd>
</dl>
<dl>
<dd>
**dataset_id:** `str` — ID of a [Dataset](https://docs.cohere.com/docs/datasets). The Dataset must be of type `embed-input` and must have a validation status `Validated`
</dd>
</dl>
<dl>
<dd>
**input_type:** `EmbedInputType`
</dd>
</dl>
<dl>
<dd>
**name:** `typing.Optional[str]` — The name of the embed job.
</dd>
</dl>
<dl>
<dd>
**embedding_types:** `typing.Optional[typing.List[EmbeddingType]]`
Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
* `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
* `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for v3 and newer model versions.
* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for v3 and newer model versions.
* `"binary"`: Use this when you want to get back signed binary embeddings. Valid for v3 and newer model versions.
* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for v3 and newer model versions.
</dd>
</dl>
<dl>
<dd>
**truncate:** `typing.Optional[CreateEmbedJobRequestTruncate]`
One of `START|END` to specify how the API will handle inputs longer than the maximum token length.
Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.embed_jobs.<a href="src/cohere/embed_jobs/client.py">get</a>(...) -> EmbedJob</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This API retrieves the details about an embed job started by the same user.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.embed_jobs.get(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the embed job to retrieve.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.embed_jobs.<a href="src/cohere/embed_jobs/client.py">cancel</a>(...)</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.embed_jobs.cancel(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the embed job to cancel.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## Datasets
<details><summary><code>client.datasets.<a href="src/cohere/datasets/client.py">list</a>(...) -> DatasetsListResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
List datasets that have been created.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
import datetime
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.datasets.list(
dataset_type="datasetType",
before=datetime.datetime.fromisoformat("2024-01-15T09:30:00+00:00"),
after=datetime.datetime.fromisoformat("2024-01-15T09:30:00+00:00"),
limit=1.1,
offset=1.1,
validation_status="unknown",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**dataset_type:** `typing.Optional[str]` — optional filter by dataset type
</dd>
</dl>
<dl>
<dd>
**before:** `typing.Optional[datetime.datetime]` — optional filter before a date
</dd>
</dl>
<dl>
<dd>
**after:** `typing.Optional[datetime.datetime]` — optional filter after a date
</dd>
</dl>
<dl>
<dd>
**limit:** `typing.Optional[float]` — optional limit to number of results
</dd>
</dl>
<dl>
<dd>
**offset:** `typing.Optional[float]` — optional offset to start of results
</dd>
</dl>
<dl>
<dd>
**validation_status:** `typing.Optional[DatasetValidationStatus]` — optional filter by validation status
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.datasets.<a href="src/cohere/datasets/client.py">create</a>(...) -> DatasetsCreateResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.datasets.create(
name="name",
type="embed-input",
keep_original_file=True,
skip_malformed_input=True,
text_separator="text_separator",
csv_delimiter="csv_delimiter",
data="example_data",
eval_data="example_eval_data",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**name:** `str` — The name of the uploaded dataset.
</dd>
</dl>
<dl>
<dd>
**type:** `DatasetType` — The dataset type, which is used to validate the data. The only valid type is `embed-input` used in conjunction with the Embed Jobs API.
</dd>
</dl>
<dl>
<dd>
**data:** `core.File` — The file to upload
</dd>
</dl>
<dl>
<dd>
**keep_original_file:** `typing.Optional[bool]` — Indicates if the original file should be stored.
</dd>
</dl>
<dl>
<dd>
**skip_malformed_input:** `typing.Optional[bool]` — Indicates whether rows with malformed input should be dropped (instead of failing the validation check). Dropped rows will be returned in the warnings field.
</dd>
</dl>
<dl>
<dd>
**keep_fields:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `keep_fields` are missing from the uploaded file, Dataset validation will fail.
</dd>
</dl>
<dl>
<dd>
**optional_fields:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, Datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `optional_fields` are missing from the uploaded file, Dataset validation will pass.
</dd>
</dl>
<dl>
<dd>
**text_separator:** `typing.Optional[str]` — Raw .txt uploads will be split into entries using the text_separator value.
</dd>
</dl>
<dl>
<dd>
**csv_delimiter:** `typing.Optional[str]` — The delimiter used for .csv uploads.
</dd>
</dl>
<dl>
<dd>
**eval_data:** `typing.Optional[core.File]` — An optional evaluation file to upload
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.datasets.<a href="src/cohere/datasets/client.py">get_usage</a>() -> DatasetsGetUsageResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all their users.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.datasets.get_usage()
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.datasets.<a href="src/cohere/datasets/client.py">get</a>(...) -> DatasetsGetResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.datasets.get(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str`
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.datasets.<a href="src/cohere/datasets/client.py">delete</a>(...) -> typing.Dict[str, typing.Any]</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.datasets.delete(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str`
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## Connectors
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">list</a>(...) -> ListConnectorsResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.list(
limit=1.1,
offset=1.1,
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**limit:** `typing.Optional[float]` — Maximum number of connectors to return [0, 100].
</dd>
</dl>
<dl>
<dd>
**offset:** `typing.Optional[float]` — Number of connectors to skip before returning results [0, inf].
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">create</a>(...) -> CreateConnectorResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Creates a new connector. The connector is tested during registration and will cancel registration when the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.create(
name="name",
url="url",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**name:** `str` — A human-readable name for the connector.
</dd>
</dl>
<dl>
<dd>
**url:** `str` — The URL of the connector that will be used to search for documents.
</dd>
</dl>
<dl>
<dd>
**description:** `typing.Optional[str]` — A description of the connector.
</dd>
</dl>
<dl>
<dd>
**excludes:** `typing.Optional[typing.List[str]]` — A list of fields to exclude from the prompt (fields remain in the document).
</dd>
</dl>
<dl>
<dd>
**oauth:** `typing.Optional[CreateConnectorOAuth]` — The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified.
</dd>
</dl>
<dl>
<dd>
**active:** `typing.Optional[bool]` — Whether the connector is active or not.
</dd>
</dl>
<dl>
<dd>
**continue_on_failure:** `typing.Optional[bool]` — Whether a chat request should continue or not if the request to this connector fails.
</dd>
</dl>
<dl>
<dd>
**service_auth:** `typing.Optional[CreateConnectorServiceAuth]` — The service to service authentication configuration for the connector. Cannot be specified if oauth is specified.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">get</a>(...) -> GetConnectorResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.get(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the connector to retrieve.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">delete</a>(...) -> DeleteConnectorResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.delete(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the connector to delete.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">update</a>(...) -> UpdateConnectorResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.update(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the connector to update.
</dd>
</dl>
<dl>
<dd>
**name:** `typing.Optional[str]` — A human-readable name for the connector.
</dd>
</dl>
<dl>
<dd>
**url:** `typing.Optional[str]` — The URL of the connector that will be used to search for documents.
</dd>
</dl>
<dl>
<dd>
**excludes:** `typing.Optional[typing.List[str]]` — A list of fields to exclude from the prompt (fields remain in the document).
</dd>
</dl>
<dl>
<dd>
**oauth:** `typing.Optional[CreateConnectorOAuth]` — The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified.
</dd>
</dl>
<dl>
<dd>
**active:** `typing.Optional[bool]` — Whether the connector is active or not.
</dd>
</dl>
<dl>
<dd>
**continue_on_failure:** `typing.Optional[bool]` — Whether a chat request should continue or not if the request to this connector fails.
</dd>
</dl>
<dl>
<dd>
**service_auth:** `typing.Optional[CreateConnectorServiceAuth]` — The service to service authentication configuration for the connector. Cannot be specified if oauth is specified.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.connectors.<a href="src/cohere/connectors/client.py">o_auth_authorize</a>(...) -> OAuthAuthorizeResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Authorize the connector with the given ID for the connector OAuth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.connectors.o_auth_authorize(
id="id",
after_token_redirect="after_token_redirect",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The ID of the connector to authorize.
</dd>
</dl>
<dl>
<dd>
**after_token_redirect:** `typing.Optional[str]` — The URL to redirect to after the connector has been authorized.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## Models
<details><summary><code>client.models.<a href="src/cohere/models/client.py">get</a>(...) -> GetModelResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns the details of a model, provided its name.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.models.get(
model="command-a-03-2025",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**model:** `str`
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.models.<a href="src/cohere/models/client.py">list</a>(...) -> ListModelsResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns a list of models available for use.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.models.list(
page_size=1.1,
page_token="page_token",
endpoint="chat",
default_only=True,
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**page_size:** `typing.Optional[float]`
Maximum number of models to include in a page
Defaults to `20`, min value of `1`, max value of `1000`.
</dd>
</dl>
<dl>
<dd>
**page_token:** `typing.Optional[str]` — Page token provided in the `next_page_token` field of a previous response.
</dd>
</dl>
<dl>
<dd>
**endpoint:** `typing.Optional[CompatibleEndpoint]` — When provided, filters the list of models to only those that are compatible with the specified endpoint.
</dd>
</dl>
<dl>
<dd>
**default_only:** `typing.Optional[bool]` — When provided, filters the list of models to only the default model to the endpoint. This parameter is only valid when `endpoint` is provided.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## /finetuning
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">list_finetuned_models</a>(...) -> ListFinetunedModelsResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns a list of fine-tuned models that the user has access to.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.list_finetuned_models(
page_size=1,
page_token="page_token",
order_by="order_by",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**page_size:** `typing.Optional[int]`
Maximum number of results to be returned by the server. If 0, defaults to
50.
</dd>
</dl>
<dl>
<dd>
**page_token:** `typing.Optional[str]` — Request a specific page of the list results.
</dd>
</dl>
<dl>
<dd>
**order_by:** `typing.Optional[str]`
Comma separated list of fields. For example: "created_at,name". The default
sorting order is ascending. To specify descending order for a field, append
" desc" to the field name. For example: "created_at desc,name".
Supported sorting fields:
- created_at (default)
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">create_finetuned_model</a>(...) -> CreateFinetunedModelResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Creates a new fine-tuned model. The model will be trained on the dataset specified in the request body. The training process may take some time, and the model will be available once the training is complete.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
from cohere.finetuning.finetuning import FinetunedModel, Settings, BaseModel
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.create_finetuned_model(
request=FinetunedModel(
name="name",
settings=Settings(
base_model=BaseModel(
base_type="BASE_TYPE_UNSPECIFIED",
),
dataset_id="dataset_id",
),
),
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**request:** `FinetunedModel`
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">get_finetuned_model</a>(...) -> GetFinetunedModelResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Retrieve a fine-tuned model by its ID.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.get_finetuned_model(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The fine-tuned model ID.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">delete_finetuned_model</a>(...) -> DeleteFinetunedModelResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Deletes a fine-tuned model. The model will be removed from the system and will no longer be available for use.
This operation is irreversible.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.delete_finetuned_model(
id="id",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — The fine-tuned model ID.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">update_finetuned_model</a>(...) -> UpdateFinetunedModelResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Updates the fine-tuned model with the given ID. The model will be updated with the new settings and name provided in the request body.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
from cohere.finetuning.finetuning import Settings, BaseModel
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.update_finetuned_model(
id="id",
name="name",
settings=Settings(
base_model=BaseModel(
base_type="BASE_TYPE_UNSPECIFIED",
),
dataset_id="dataset_id",
),
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**id:** `str` — FinetunedModel ID.
</dd>
</dl>
<dl>
<dd>
**name:** `str` — FinetunedModel name (e.g. `foobar`).
</dd>
</dl>
<dl>
<dd>
**settings:** `Settings` — FinetunedModel settings such as dataset, hyperparameters...
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">list_events</a>(...) -> ListEventsResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns a list of events that occurred during the life-cycle of the fine-tuned model.
The events are ordered by creation time, with the most recent event first.
The list can be paginated using `page_size` and `page_token` parameters.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.list_events(
finetuned_model_id="finetuned_model_id",
page_size=1,
page_token="page_token",
order_by="order_by",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**finetuned_model_id:** `str` — The parent fine-tuned model ID.
</dd>
</dl>
<dl>
<dd>
**page_size:** `typing.Optional[int]`
Maximum number of results to be returned by the server. If 0, defaults to
50.
</dd>
</dl>
<dl>
<dd>
**page_token:** `typing.Optional[str]` — Request a specific page of the list results.
</dd>
</dl>
<dl>
<dd>
**order_by:** `typing.Optional[str]`
Comma separated list of fields. For example: "created_at,name". The default
sorting order is ascending. To specify descending order for a field, append
" desc" to the field name. For example: "created_at desc,name".
Supported sorting fields:
- created_at (default)
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
<details><summary><code>client.finetuning.<a href="src/cohere/finetuning/client.py">list_training_step_metrics</a>(...) -> ListTrainingStepMetricsResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Returns a list of metrics measured during the training of a fine-tuned model.
The metrics are ordered by step number, with the most recent step first.
The list can be paginated using `page_size` and `page_token` parameters.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.finetuning.list_training_step_metrics(
finetuned_model_id="finetuned_model_id",
page_size=1,
page_token="page_token",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**finetuned_model_id:** `str` — The parent fine-tuned model ID.
</dd>
</dl>
<dl>
<dd>
**page_size:** `typing.Optional[int]`
Maximum number of results to be returned by the server. If 0, defaults to
50.
</dd>
</dl>
<dl>
<dd>
**page_token:** `typing.Optional[str]` — Request a specific page of the list results.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
## Audio Transcriptions
<details><summary><code>client.audio.transcriptions.<a href="src/cohere/audio/transcriptions/client.py">create</a>(...) -> AudioTranscriptionsCreateResponse</code></summary>
<dl>
<dd>
#### 📝 Description
<dl>
<dd>
<dl>
<dd>
Transcribe an audio file.
</dd>
</dl>
</dd>
</dl>
#### 🔌 Usage
<dl>
<dd>
<dl>
<dd>
```python
from cohere import Client
from cohere.environment import ClientEnvironment
client = Client(
token="<token>",
environment=ClientEnvironment.PRODUCTION,
)
client.audio.transcriptions.create(
file="example_file",
model="model",
language="language",
)
```
</dd>
</dl>
</dd>
</dl>
#### ⚙️ Parameters
<dl>
<dd>
<dl>
<dd>
**model:** `str` — ID of the model to use.
</dd>
</dl>
<dl>
<dd>
**language:** `str` — The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.
</dd>
</dl>
<dl>
<dd>
**file:** `core.File` — The audio file object to transcribe. Supported file extensions are flac, mp3, mpeg, mpga, ogg, and wav.
</dd>
</dl>
<dl>
<dd>
**temperature:** `typing.Optional[float]` — The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.
</dd>
</dl>
<dl>
<dd>
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
</dd>
</dl>
</dd>
</dl>
</dd>
</dl>
</details>
================================================
FILE: requirements.txt
================================================
fastavro==1.9.4
httpx>=0.21.2
pydantic>= 1.9.2
pydantic-core>=2.18.2,<2.44.0
requests==2.0.0
tokenizers>=0.15,<1
types-requests==2.0.0
typing_extensions>= 4.0.0
================================================
FILE: src/cohere/__init__.py
================================================
# This file was auto-generated by Fern from our API Definition.
# isort: skip_file
import typing
from importlib import import_module
if typing.TYPE_CHECKING:
from .types import (
ApiMeta,
ApiMetaApiVersion,
ApiMetaBilledUnits,
ApiMetaTokens,
AssistantChatMessageV2,
AssistantMessage,
AssistantMessageResponse,
AssistantMessageResponseContentItem,
AssistantMessageV2Content,
AssistantMessageV2ContentOneItem,
AuthTokenType,
ChatCitation,
ChatCitationGenerationEvent,
ChatCitationType,
ChatConnector,
ChatContentDeltaEvent,
ChatContentDeltaEventDelta,
ChatContentDeltaEventDeltaMessage,
ChatContentDeltaEventDeltaMessageContent,
ChatContentEndEvent,
ChatContentStartEvent,
ChatContentStartEventDelta,
ChatContentStartEventDeltaMessage,
ChatContentStartEventDeltaMessageContent,
ChatContentStartEventDeltaMessageContentType,
ChatDataMetrics,
ChatDebugEvent,
ChatDocument,
ChatDocumentSource,
ChatFinishReason,
ChatMessage,
ChatMessageEndEvent,
ChatMessageEndEventDelta,
ChatMessageStartEvent,
ChatMessageStartEventDelta,
ChatMessageStartEventDeltaMessage,
ChatMessageV2,
ChatMessages,
ChatRequestCitationQuality,
ChatRequestPromptTruncation,
ChatRequestSafetyMode,
ChatSearchQueriesGenerationEvent,
ChatSearchQuery,
ChatSearchResult,
ChatSearchResultConnector,
ChatSearchResultsEvent,
ChatStreamEndEvent,
ChatStreamEndEventFinishReason,
ChatStreamEvent,
ChatStreamEventType,
ChatStreamRequestCitationQuality,
ChatStreamRequestPromptTruncation,
ChatStreamRequestSafetyMode,
ChatStreamStartEvent,
ChatTextContent,
ChatTextGenerationEvent,
ChatTextResponseFormat,
ChatTextResponseFormatV2,
ChatThinkingContent,
ChatToolCallDeltaEvent,
ChatToolCallDeltaEventDelta,
ChatToolCallDeltaEventDeltaMessage,
ChatToolCallDeltaEventDeltaMessageToolCalls,
ChatToolCallDeltaEventDeltaMessageToolCallsFunction,
ChatToolCallEndEvent,
ChatToolCallStartEvent,
ChatToolCallStartEventDelta,
ChatToolCallStartEventDeltaMessage,
ChatToolCallsChunkEvent,
ChatToolCallsGenerationEvent,
ChatToolMessage,
ChatToolPlanDeltaEvent,
ChatToolPlanDeltaEventDelta,
ChatToolPlanDeltaEventDeltaMessage,
ChatToolSource,
ChatbotMessage,
CheckApiKeyResponse,
Citation,
CitationEndEvent,
CitationGenerationStreamedChatResponse,
CitationOptions,
CitationOptionsMode,
CitationStartEvent,
CitationStartEventDelta,
CitationStartEventDeltaMessage,
CitationType,
ClassifyDataMetrics,
ClassifyExample,
ClassifyRequestTruncate,
ClassifyResponse,
ClassifyResponseClassificationsItem,
ClassifyResponseClassificationsItemClassificationType,
ClassifyResponseClassificationsItemLabelsValue,
CompatibleEndpoint,
Connector,
ConnectorAuthStatus,
ConnectorOAuth,
Content,
CreateConnectorOAuth,
CreateConnectorResponse,
CreateConnectorServiceAuth,
CreateEmbedJobResponse,
Dataset,
DatasetPart,
DatasetType,
DatasetValidationStatus,
DebugStreamedChatResponse,
DeleteConnectorResponse,
DetokenizeResponse,
Document,
DocumentContent,
DocumentSource,
DocumentToolContent,
EmbedByTypeResponse,
EmbedByTypeResponseEmbeddings,
EmbedByTypeResponseResponseType,
EmbedContent,
EmbedFloatsResponse,
EmbedImage,
EmbedImageUrl,
EmbedInput,
EmbedInputType,
EmbedJob,
EmbedJobStatus,
EmbedJobTruncate,
EmbedRequestTruncate,
EmbedResponse,
EmbedText,
EmbeddingType,
EmbeddingsByTypeEmbedResponse,
EmbeddingsFloatsEmbedResponse,
FinetuneDatasetMetrics,
FinishReason,
GenerateRequestReturnLikelihoods,
GenerateRequestTruncate,
GenerateStreamEnd,
GenerateStreamEndResponse,
GenerateStreamError,
GenerateStreamEvent,
GenerateStreamRequestReturnLikelihoods,
GenerateStreamRequestTruncate,
GenerateStreamText,
GenerateStreamedResponse,
Generation,
GetConnectorResponse,
GetModelResponse,
GetModelResponseSamplingDefaults,
Image,
ImageContent,
ImageUrl,
ImageUrlContent,
ImageUrlDetail,
ImageUrlEmbedContent,
JsonObjectResponseFormat,
JsonObjectResponseFormatV2,
JsonResponseFormat,
JsonResponseFormatV2,
LabelMetric,
ListConnectorsResponse,
ListEmbedJobResponse,
ListModelsResponse,
LogprobItem,
Message,
Metrics,
NonStreamedChatResponse,
OAuthAuthorizeResponse,
ParseInfo,
RerankDocument,
RerankRequestDocumentsItem,
RerankResponse,
RerankResponseResultsItem,
RerankResponseResultsItemDocument,
RerankerDataMetrics,
ResponseFormat,
ResponseFormatV2,
SearchQueriesGenerationStreamedChatResponse,
SearchResultsStreamedChatResponse,
SingleGeneration,
SingleGenerationInStream,
SingleGenerationTokenLikelihoodsItem,
Source,
StreamEndGenerateStreamedResponse,
StreamEndStreamedChatResponse,
StreamErrorGenerateStreamedResponse,
StreamStartStreamedChatResponse,
StreamedChatResponse,
SummarizeRequestExtractiveness,
SummarizeRequestFormat,
SummarizeRequestLength,
SummarizeResponse,
SystemChatMessageV2,
SystemMessage,
SystemMessageV2,
SystemMessageV2Content,
SystemMessageV2ContentOneItem,
TextAssistantMessageResponseContentItem,
TextAssistantMessageV2ContentOneItem,
TextContent,
TextEmbedContent,
TextGenerationGenerateStreamedResponse,
TextGenerationStreamedChatResponse,
TextResponseFormat,
TextResponseFormatV2,
TextSystemMessageV2ContentOneItem,
TextToolContent,
Thinking,
ThinkingAssistantMessageResponseContentItem,
ThinkingAssistantMessageV2ContentOneItem,
ThinkingType,
TokenizeResponse,
Tool,
ToolCall,
ToolCallDelta,
ToolCallV2,
ToolCallV2Function,
ToolCallsChunkStreamedChatResponse,
ToolCallsGenerationStreamedChatResponse,
ToolChatMessageV2,
ToolContent,
ToolMessage,
ToolMessageV2,
ToolMessageV2Content,
ToolParameterDefinitionsValue,
ToolResult,
ToolSource,
ToolV2,
ToolV2Function,
UpdateConnectorResponse,
Usage,
UsageBilledUnits,
UsageTokens,
UserChatMessageV2,
UserMessage,
UserMessageV2,
UserMessageV2Content,
)
from .errors import (
BadRequestError,
ClientClosedRequestError,
ForbiddenError,
GatewayTimeoutError,
InternalServerError,
InvalidTokenError,
NotFoundError,
NotImplementedError,
ServiceUnavailableError,
TooManyRequestsError,
UnauthorizedError,
UnprocessableEntityError,
)
from . import audio, batches, connectors, datasets, embed_jobs, finetuning, models, v2
from ._default_clients import DefaultAioHttpClient, DefaultAsyncHttpxClient
from .aliases import (
ChatResponse,
ContentDeltaStreamedChatResponseV2,
ContentEndStreamedChatResponseV2,
ContentStartStreamedChatResponseV2,
MessageEndStreamedChatResponseV2,
MessageStartStreamedChatResponseV2,
StreamedChatResponseV2,
ToolCallDeltaStreamedChatResponseV2,
ToolCallEndStreamedChatResponseV2,
ToolCallStartStreamedChatResponseV2,
)
from .aws_client import AwsClient
from .batches import (
Batch,
BatchStatus,
CancelBatchResponse,
CreateBatchResponse,
GetBatchResponse,
ListBatchesResponse,
)
from .bedrock_client import BedrockClient, BedrockClientV2
from .client import AsyncClient, Client
from .client_v2 import AsyncClientV2, ClientV2
from .datasets import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse
from .embed_jobs import CreateEmbedJobRequestTruncate
from .environment import ClientEnvironment
from .oci_client import OciClient, OciClientV2
from .sagemaker_client import SagemakerClient, SagemakerClientV2
from .v2 import (
CitationEndV2ChatStreamResponse,
CitationStartV2ChatStreamResponse,
ContentDeltaV2ChatStreamResponse,
ContentEndV2ChatStreamResponse,
ContentStartV2ChatStreamResponse,
DebugV2ChatStreamResponse,
MessageEndV2ChatStreamResponse,
MessageStartV2ChatStreamResponse,
ToolCallDeltaV2ChatStreamResponse,
ToolCallEndV2ChatStreamResponse,
ToolCallStartV2ChatStreamResponse,
ToolPlanDeltaV2ChatStreamResponse,
V2ChatRequestDocumentsItem,
V2ChatRequestSafetyMode,
V2ChatRequestToolChoice,
V2ChatResponse,
V2ChatStreamRequestDocumentsItem,
V2ChatStreamRequestSafetyMode,
V2ChatStreamRequestToolChoice,
V2ChatStreamResponse,
V2EmbedRequestTruncate,
V2RerankResponse,
V2RerankResponseResultsItem,
)
from .version import __version__
_dynamic_imports: typing.Dict[str, str] = {
"ApiMeta": ".types",
"ApiMetaApiVersion": ".types",
"ApiMetaBilledUnits": ".types",
"ApiMetaTokens": ".types",
"AssistantChatMessageV2": ".types",
"AssistantMessage": ".types",
"AssistantMessageResponse": ".types",
"AssistantMessageResponseContentItem": ".types",
"AssistantMessageV2Content": ".types",
"AssistantMessageV2ContentOneItem": ".types",
"AsyncClient": ".client",
"AsyncClientV2": ".client_v2",
"AuthTokenType": ".types",
"AwsClient": ".aws_client",
"BadRequestError": ".errors",
"Batch": ".batches",
"BatchStatus": ".batches",
"BedrockClient": ".bedrock_client",
"BedrockClientV2": ".bedrock_client",
"CancelBatchResponse": ".batches",
"ChatCitation": ".types",
"ChatCitationGenerationEvent": ".types",
"ChatCitationType": ".types",
"ChatConnector": ".types",
"ChatContentDeltaEvent": ".types",
"ChatContentDeltaEventDelta": ".types",
"ChatContentDeltaEventDeltaMessage": ".types",
"ChatContentDeltaEventDeltaMessageContent": ".types",
"ChatContentEndEvent": ".types",
"ChatContentStartEvent": ".types",
"ChatContentStartEventDelta": ".types",
"ChatContentStartEventDeltaMessage": ".types",
"ChatContentStartEventDeltaMessageContent": ".types",
"ChatContentStartEventDeltaMessageContentType": ".types",
"ChatDataMetrics": ".types",
"ChatDebugEvent": ".types",
"ChatDocument": ".types",
"ChatDocumentSource": ".types",
"ChatFinishReason": ".types",
"ChatMessage": ".types",
"ChatMessageEndEvent": ".types",
"ChatMessageEndEventDelta": ".types",
"ChatMessageStartEvent": ".types",
"ChatMessageStartEventDelta": ".types",
"ChatMessageStartEventDeltaMessage": ".types",
"ChatMessageV2": ".types",
"ChatMessages": ".types",
"ChatRequestCitationQuality": ".types",
"ChatRequestPromptTruncation": ".types",
"ChatRequestSafetyMode": ".types",
"ChatResponse": ".aliases",
"ChatSearchQueriesGenerationEvent": ".types",
"ChatSearchQuery": ".types",
"ChatSearchResult": ".types",
"ChatSearchResultConnector": ".types",
"ChatSearchResultsEvent": ".types",
"ChatStreamEndEvent": ".types",
"ChatStreamEndEventFinishReason": ".types",
"ChatStreamEvent": ".types",
"ChatStreamEventType": ".types",
"ChatStreamRequestCitationQuality": ".types",
"ChatStreamRequestPromptTruncation": ".types",
"ChatStreamRequestSafetyMode": ".types",
"ChatStreamStartEvent": ".types",
"ChatTextContent": ".types",
"ChatTextGenerationEvent": ".types",
"ChatTextResponseFormat": ".types",
"ChatTextResponseFormatV2": ".types",
"ChatThinkingContent": ".types",
"ChatToolCallDeltaEvent": ".types",
"ChatToolCallDeltaEventDelta": ".types",
"ChatToolCallDeltaEventDeltaMessage": ".types",
"ChatToolCallDeltaEventDeltaMessageToolCalls": ".types",
"ChatToolCallDeltaEventDeltaMessageToolCallsFunction": ".types",
"ChatToolCallEndEvent": ".types",
"ChatToolCallStartEvent": ".types",
"ChatToolCallStartEventDelta": ".types",
"ChatToolCallStartEventDeltaMessage": ".types",
"ChatToolCallsChunkEvent": ".types",
"ChatToolCallsGenerationEvent": ".types",
"ChatToolMessage": ".types",
"ChatToolPlanDeltaEvent": ".types",
"ChatToolPlanDeltaEventDelta": ".types",
"ChatToolPlanDeltaEventDeltaMessage": ".types",
"ChatToolSource": ".types",
"ChatbotMessage": ".types",
"CheckApiKeyResponse": ".types",
"Citation": ".types",
"CitationEndEvent": ".types",
"CitationEndV2ChatStreamResponse": ".v2",
"CitationGenerationStreamedChatResponse": ".types",
"CitationOptions": ".types",
"CitationOptionsMode": ".types",
"CitationStartEvent": ".types",
"CitationStartEventDelta": ".types",
"CitationStartEventDeltaMessage": ".types",
"CitationStartV2ChatStreamResponse": ".v2",
"CitationType": ".types",
"ClassifyDataMetrics": ".types",
"ClassifyExample": ".types",
"ClassifyRequestTruncate": ".types",
"ClassifyResponse": ".types",
"ClassifyResponseClassificationsItem": ".types",
"ClassifyResponseClassificationsItemClassificationType": ".types",
"ClassifyResponseClassificationsItemLabelsValue": ".types",
"Client": ".client",
"ClientClosedRequestError": ".errors",
"ClientEnvironment": ".environment",
"ClientV2": ".client_v2",
"CompatibleEndpoint": ".types",
"Connector": ".types",
"ConnectorAuthStatus": ".types",
"ConnectorOAuth": ".types",
"Content": ".types",
"ContentDeltaStreamedChatResponseV2": ".aliases",
"ContentDeltaV2ChatStreamResponse": ".v2",
"ContentEndStreamedChatResponseV2": ".aliases",
"ContentEndV2ChatStreamResponse": ".v2",
"ContentStartStreamedChatResponseV2": ".aliases",
"ContentStartV2ChatStreamResponse": ".v2",
"CreateBatchResponse": ".batches",
"CreateConnectorOAuth": ".types",
"CreateConnectorResponse": ".types",
"CreateConnectorServiceAuth": ".types",
"CreateEmbedJobRequestTruncate": ".embed_jobs",
"CreateEmbedJobResponse": ".types",
"Dataset": ".types",
"DatasetPart": ".types",
"DatasetType": ".types",
"DatasetValidationStatus": ".types",
"DatasetsCreateResponse": ".datasets",
"DatasetsGetResponse": ".datasets",
"DatasetsGetUsageResponse": ".datasets",
"DatasetsListResponse": ".datasets",
"DebugStreamedChatResponse": ".types",
"DebugV2ChatStreamResponse": ".v2",
"DefaultAioHttpClient": "._default_clients",
"DefaultAsyncHttpxClient": "._default_clients",
"DeleteConnectorResponse": ".types",
"DetokenizeResponse": ".types",
"Document": ".types",
"DocumentContent": ".types",
"DocumentSource": ".types",
"DocumentToolContent": ".types",
"EmbedByTypeResponse": ".types",
"EmbedByTypeResponseEmbeddings": ".types",
"EmbedByTypeResponseResponseType": ".types",
"EmbedContent": ".types",
"EmbedFloatsResponse": ".types",
"EmbedImage": ".types",
"EmbedImageUrl": ".types",
"EmbedInput": ".types",
"EmbedInputType": ".types",
"EmbedJob": ".types",
"EmbedJobStatus": ".types",
"EmbedJobTruncate": ".types",
"EmbedRequestTruncate": ".types",
"EmbedResponse": ".types",
"EmbedText": ".types",
"EmbeddingType": ".types",
"EmbeddingsByTypeEmbedResponse": ".types",
"EmbeddingsFloatsEmbedResponse": ".types",
"FinetuneDatasetMetrics": ".types",
"FinishReason": ".types",
"ForbiddenError": ".errors",
"GatewayTimeoutError": ".errors",
"GenerateRequestReturnLikelihoods": ".types",
"GenerateRequestTruncate": ".types",
"GenerateStreamEnd": ".types",
"GenerateStreamEndResponse": ".types",
"GenerateStreamError": ".types",
"GenerateStreamEvent": ".types",
"GenerateStreamRequestReturnLikelihoods": ".types",
"GenerateStreamRequestTruncate": ".types",
"GenerateStreamText": ".types",
"GenerateStreamedResponse": ".types",
"Generation": ".types",
"GetBatchResponse": ".batches",
"GetConnectorResponse": ".types",
"GetModelResponse": ".types",
"GetModelResponseSamplingDefaults": ".types",
"Image": ".types",
"ImageContent": ".types",
"ImageUrl": ".types",
"ImageUrlContent": ".types",
"ImageUrlDetail": ".types",
"ImageUrlEmbedContent": ".types",
"InternalServerError": ".errors",
"InvalidTokenError": ".errors",
"JsonObjectResponseFormat": ".types",
"JsonObjectResponseFormatV2": ".types",
"JsonResponseFormat": ".types",
"JsonResponseFormatV2": ".types",
"LabelMetric": ".types",
"ListBatchesResponse": ".batches",
"ListConnectorsResponse": ".types",
"ListEmbedJobResponse": ".types",
"ListModelsResponse": ".types",
"LogprobItem": ".types",
"Message": ".types",
"MessageEndStreamedChatResponseV2": ".aliases",
"MessageEndV2ChatStreamResponse": ".v2",
"MessageStartStreamedChatResponseV2": ".aliases",
"MessageStartV2ChatStreamResponse": ".v2",
"Metrics": ".types",
"NonStreamedChatResponse": ".types",
"NotFoundError": ".errors",
"NotImplementedError": ".errors",
"OAuthAuthorizeResponse": ".types",
"OciClient": ".oci_client",
"OciClientV2": ".oci_client",
"ParseInfo": ".types",
"RerankDocument": ".types",
"RerankRequestDocumentsItem": ".types",
"RerankResponse": ".types",
"RerankResponseResultsItem": ".types",
"RerankResponseResultsItemDocument": ".types",
"RerankerDataMetrics": ".types",
"ResponseFormat": ".types",
"ResponseFormatV2": ".types",
"SagemakerClient": ".sagemaker_client",
"SagemakerClientV2": ".sagemaker_client",
"SearchQueriesGenerationStreamedChatResponse": ".types",
"SearchResultsStreamedChatResponse": ".types",
"ServiceUnavailableError": ".errors",
"SingleGeneration": ".types",
"SingleGenerationInStream": ".types",
"SingleGenerationTokenLikelihoodsItem": ".types",
"Source": ".types",
"StreamEndGenerateStreamedResponse": ".types",
"StreamEndStreamedChatResponse": ".types",
"StreamErrorGenerateStreamedResponse": ".types",
"StreamStartStreamedChatResponse": ".types",
"StreamedChatResponse": ".types",
"StreamedChatResponseV2": ".aliases",
"SummarizeRequestExtractiveness": ".types",
"SummarizeRequestFormat": ".types",
"SummarizeRequestLength": ".types",
"SummarizeResponse": ".types",
"SystemChatMessageV2": ".types",
"SystemMessage": ".types",
"SystemMessageV2": ".types",
"SystemMessageV2Content": ".types",
"SystemMessageV2ContentOneItem": ".types",
"TextAssistantMessageResponseContentItem": ".types",
"TextAssistantMessageV2ContentOneItem": ".types",
"TextContent": ".types",
"TextEmbedContent": ".types",
"TextGenerationGenerateStreamedResponse": ".types",
"TextGenerationStreamedChatResponse": ".types",
"TextResponseFormat": ".types",
"TextResponseFormatV2": ".types",
"TextSystemMessageV2ContentOneItem": ".types",
"TextToolContent": ".types",
"Thinking": ".types",
"ThinkingAssistantMessageResponseContentItem": ".types",
"ThinkingAssistantMessageV2ContentOneItem": ".types",
"ThinkingType": ".types",
"TokenizeResponse": ".types",
"TooManyRequestsError": ".errors",
"Tool": ".types",
"ToolCall": ".types",
"ToolCallDelta": ".types",
"ToolCallDeltaStreamedChatResponseV2": ".aliases",
"ToolCallDeltaV2ChatStreamResponse": ".v2",
"ToolCallEndStreamedChatResponseV2": ".aliases",
"ToolCallEndV2ChatStreamResponse": ".v2",
"ToolCallStartStreamedChatResponseV2": ".aliases",
"ToolCallStartV2ChatStreamResponse": ".v2",
"ToolCallV2": ".types",
"ToolCallV2Function": ".types",
"ToolCallsChunkStreamedChatResponse": ".types",
"ToolCallsGenerationStreamedChatResponse": ".types",
"ToolChatMessageV2": ".types",
"ToolContent": ".types",
"ToolMessage": ".types",
"ToolMessageV2": ".types",
"ToolMessageV2Content": ".types",
"ToolParameterDefinitionsValue": ".types",
"ToolPlanDeltaV2ChatStreamResponse": ".v2",
"ToolResult": ".types",
"ToolSource": ".types",
"ToolV2": ".types",
"ToolV2Function": ".types",
"UnauthorizedError": ".errors",
"UnprocessableEntityError": ".errors",
"UpdateConnectorResponse": ".types",
"Usage": ".types",
"UsageBilledUnits": ".types",
"UsageTokens": ".types",
"UserChatMessageV2": ".types",
"UserMessage": ".types",
"UserMessageV2": ".types",
"UserMessageV2Content": ".types",
"V2ChatRequestDocumentsItem": ".v2",
"V2ChatRequestSafetyMode": ".v2",
"V2ChatRequestToolChoice": ".v2",
"V2ChatResponse": ".v2",
"V2ChatStreamRequestDocumentsItem": ".v2",
"V2ChatStreamRequestSafetyMode": ".v2",
"V2ChatStreamRequestToolChoice": ".v2",
"V2ChatStreamResponse": ".v2",
"V2EmbedRequestTruncate": ".v2",
"V2RerankResponse": ".v2",
"V2RerankResponseResultsItem": ".v2",
"__version__": ".version",
"audio": ".audio",
"batches": ".batches",
"connectors": ".connectors",
"datasets": ".datasets",
"embed_jobs": ".embed_jobs",
"finetuning": ".finetuning",
"models": ".models",
"v2": ".v2",
}
def __getattr__(attr_name: str) -> typing.Any:
    """Resolve public attributes lazily via the ``_dynamic_imports`` table (PEP 562)."""
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
    try:
        module = import_module(module_name, __package__)
        # When the attribute maps onto ".<attr_name>" the attribute *is* the submodule itself.
        return module if module_name == f".{attr_name}" else getattr(module, attr_name)
    except ImportError as e:
        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
    except AttributeError as e:
        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
def __dir__():
    """Return the sorted names that ``__getattr__`` can resolve lazily."""
    return sorted(_dynamic_imports)
# Static export list for the `cohere` package. Kept in lockstep with the
# `_dynamic_imports` table above so lazy resolution (`__getattr__`/`__dir__`)
# and static tooling (type checkers, IDEs) agree on the public API.
__all__ = [
    "ApiMeta",
    "ApiMetaApiVersion",
    "ApiMetaBilledUnits",
    "ApiMetaTokens",
    "AssistantChatMessageV2",
    "AssistantMessage",
    "AssistantMessageResponse",
    "AssistantMessageResponseContentItem",
    "AssistantMessageV2Content",
    "AssistantMessageV2ContentOneItem",
    "AsyncClient",
    "AsyncClientV2",
    "AuthTokenType",
    "AwsClient",
    "BadRequestError",
    "Batch",
    "BatchStatus",
    "BedrockClient",
    "BedrockClientV2",
    "CancelBatchResponse",
    "ChatCitation",
    "ChatCitationGenerationEvent",
    "ChatCitationType",
    "ChatConnector",
    "ChatContentDeltaEvent",
    "ChatContentDeltaEventDelta",
    "ChatContentDeltaEventDeltaMessage",
    "ChatContentDeltaEventDeltaMessageContent",
    "ChatContentEndEvent",
    "ChatContentStartEvent",
    "ChatContentStartEventDelta",
    "ChatContentStartEventDeltaMessage",
    "ChatContentStartEventDeltaMessageContent",
    "ChatContentStartEventDeltaMessageContentType",
    "ChatDataMetrics",
    "ChatDebugEvent",
    "ChatDocument",
    "ChatDocumentSource",
    "ChatFinishReason",
    "ChatMessage",
    "ChatMessageEndEvent",
    "ChatMessageEndEventDelta",
    "ChatMessageStartEvent",
    "ChatMessageStartEventDelta",
    "ChatMessageStartEventDeltaMessage",
    "ChatMessageV2",
    "ChatMessages",
    "ChatRequestCitationQuality",
    "ChatRequestPromptTruncation",
    "ChatRequestSafetyMode",
    "ChatResponse",
    "ChatSearchQueriesGenerationEvent",
    "ChatSearchQuery",
    "ChatSearchResult",
    "ChatSearchResultConnector",
    "ChatSearchResultsEvent",
    "ChatStreamEndEvent",
    "ChatStreamEndEventFinishReason",
    "ChatStreamEvent",
    "ChatStreamEventType",
    "ChatStreamRequestCitationQuality",
    "ChatStreamRequestPromptTruncation",
    "ChatStreamRequestSafetyMode",
    "ChatStreamStartEvent",
    "ChatTextContent",
    "ChatTextGenerationEvent",
    "ChatTextResponseFormat",
    "ChatTextResponseFormatV2",
    "ChatThinkingContent",
    "ChatToolCallDeltaEvent",
    "ChatToolCallDeltaEventDelta",
    "ChatToolCallDeltaEventDeltaMessage",
    "ChatToolCallDeltaEventDeltaMessageToolCalls",
    "ChatToolCallDeltaEventDeltaMessageToolCallsFunction",
    "ChatToolCallEndEvent",
    "ChatToolCallStartEvent",
    "ChatToolCallStartEventDelta",
    "ChatToolCallStartEventDeltaMessage",
    "ChatToolCallsChunkEvent",
    "ChatToolCallsGenerationEvent",
    "ChatToolMessage",
    "ChatToolPlanDeltaEvent",
    "ChatToolPlanDeltaEventDelta",
    "ChatToolPlanDeltaEventDeltaMessage",
    "ChatToolSource",
    "ChatbotMessage",
    "CheckApiKeyResponse",
    "Citation",
    "CitationEndEvent",
    "CitationEndV2ChatStreamResponse",
    "CitationGenerationStreamedChatResponse",
    "CitationOptions",
    "CitationOptionsMode",
    "CitationStartEvent",
    "CitationStartEventDelta",
    "CitationStartEventDeltaMessage",
    "CitationStartV2ChatStreamResponse",
    "CitationType",
    "ClassifyDataMetrics",
    "ClassifyExample",
    "ClassifyRequestTruncate",
    "ClassifyResponse",
    "ClassifyResponseClassificationsItem",
    "ClassifyResponseClassificationsItemClassificationType",
    "ClassifyResponseClassificationsItemLabelsValue",
    "Client",
    "ClientClosedRequestError",
    "ClientEnvironment",
    "ClientV2",
    "CompatibleEndpoint",
    "Connector",
    "ConnectorAuthStatus",
    "ConnectorOAuth",
    "Content",
    "ContentDeltaStreamedChatResponseV2",
    "ContentDeltaV2ChatStreamResponse",
    "ContentEndStreamedChatResponseV2",
    "ContentEndV2ChatStreamResponse",
    "ContentStartStreamedChatResponseV2",
    "ContentStartV2ChatStreamResponse",
    "CreateBatchResponse",
    "CreateConnectorOAuth",
    "CreateConnectorResponse",
    "CreateConnectorServiceAuth",
    "CreateEmbedJobRequestTruncate",
    "CreateEmbedJobResponse",
    "Dataset",
    "DatasetPart",
    "DatasetType",
    "DatasetValidationStatus",
    "DatasetsCreateResponse",
    "DatasetsGetResponse",
    "DatasetsGetUsageResponse",
    "DatasetsListResponse",
    "DebugStreamedChatResponse",
    "DebugV2ChatStreamResponse",
    "DefaultAioHttpClient",
    "DefaultAsyncHttpxClient",
    "DeleteConnectorResponse",
    "DetokenizeResponse",
    "Document",
    "DocumentContent",
    "DocumentSource",
    "DocumentToolContent",
    "EmbedByTypeResponse",
    "EmbedByTypeResponseEmbeddings",
    "EmbedByTypeResponseResponseType",
    "EmbedContent",
    "EmbedFloatsResponse",
    "EmbedImage",
    "EmbedImageUrl",
    "EmbedInput",
    "EmbedInputType",
    "EmbedJob",
    "EmbedJobStatus",
    "EmbedJobTruncate",
    "EmbedRequestTruncate",
    "EmbedResponse",
    "EmbedText",
    "EmbeddingType",
    "EmbeddingsByTypeEmbedResponse",
    "EmbeddingsFloatsEmbedResponse",
    "FinetuneDatasetMetrics",
    "FinishReason",
    "ForbiddenError",
    "GatewayTimeoutError",
    "GenerateRequestReturnLikelihoods",
    "GenerateRequestTruncate",
    "GenerateStreamEnd",
    "GenerateStreamEndResponse",
    "GenerateStreamError",
    "GenerateStreamEvent",
    "GenerateStreamRequestReturnLikelihoods",
    "GenerateStreamRequestTruncate",
    "GenerateStreamText",
    "GenerateStreamedResponse",
    "Generation",
    "GetBatchResponse",
    "GetConnectorResponse",
    "GetModelResponse",
    "GetModelResponseSamplingDefaults",
    "Image",
    "ImageContent",
    "ImageUrl",
    "ImageUrlContent",
    "ImageUrlDetail",
    "ImageUrlEmbedContent",
    "InternalServerError",
    "InvalidTokenError",
    "JsonObjectResponseFormat",
    "JsonObjectResponseFormatV2",
    "JsonResponseFormat",
    "JsonResponseFormatV2",
    "LabelMetric",
    "ListBatchesResponse",
    "ListConnectorsResponse",
    "ListEmbedJobResponse",
    "ListModelsResponse",
    "LogprobItem",
    "Message",
    "MessageEndStreamedChatResponseV2",
    "MessageEndV2ChatStreamResponse",
    "MessageStartStreamedChatResponseV2",
    "MessageStartV2ChatStreamResponse",
    "Metrics",
    "NonStreamedChatResponse",
    "NotFoundError",
    "NotImplementedError",
    "OAuthAuthorizeResponse",
    "OciClient",
    "OciClientV2",
    "ParseInfo",
    "RerankDocument",
    "RerankRequestDocumentsItem",
    "RerankResponse",
    "RerankResponseResultsItem",
    "RerankResponseResultsItemDocument",
    "RerankerDataMetrics",
    "ResponseFormat",
    "ResponseFormatV2",
    "SagemakerClient",
    "SagemakerClientV2",
    "SearchQueriesGenerationStreamedChatResponse",
    "SearchResultsStreamedChatResponse",
    "ServiceUnavailableError",
    "SingleGeneration",
    "SingleGenerationInStream",
    "SingleGenerationTokenLikelihoodsItem",
    "Source",
    "StreamEndGenerateStreamedResponse",
    "StreamEndStreamedChatResponse",
    "StreamErrorGenerateStreamedResponse",
    "StreamStartStreamedChatResponse",
    "StreamedChatResponse",
    "StreamedChatResponseV2",
    "SummarizeRequestExtractiveness",
    "SummarizeRequestFormat",
    "SummarizeRequestLength",
    "SummarizeResponse",
    "SystemChatMessageV2",
    "SystemMessage",
    "SystemMessageV2",
    "SystemMessageV2Content",
    "SystemMessageV2ContentOneItem",
    "TextAssistantMessageResponseContentItem",
    "TextAssistantMessageV2ContentOneItem",
    "TextContent",
    "TextEmbedContent",
    "TextGenerationGenerateStreamedResponse",
    "TextGenerationStreamedChatResponse",
    "TextResponseFormat",
    "TextResponseFormatV2",
    "TextSystemMessageV2ContentOneItem",
    "TextToolContent",
    "Thinking",
    "ThinkingAssistantMessageResponseContentItem",
    "ThinkingAssistantMessageV2ContentOneItem",
    "ThinkingType",
    "TokenizeResponse",
    "TooManyRequestsError",
    "Tool",
    "ToolCall",
    "ToolCallDelta",
    "ToolCallDeltaStreamedChatResponseV2",
    "ToolCallDeltaV2ChatStreamResponse",
    "ToolCallEndStreamedChatResponseV2",
    "ToolCallEndV2ChatStreamResponse",
    "ToolCallStartStreamedChatResponseV2",
    "ToolCallStartV2ChatStreamResponse",
    "ToolCallV2",
    "ToolCallV2Function",
    "ToolCallsChunkStreamedChatResponse",
    "ToolCallsGenerationStreamedChatResponse",
    "ToolChatMessageV2",
    "ToolContent",
    "ToolMessage",
    "ToolMessageV2",
    "ToolMessageV2Content",
    "ToolParameterDefinitionsValue",
    "ToolPlanDeltaV2ChatStreamResponse",
    "ToolResult",
    "ToolSource",
    "ToolV2",
    "ToolV2Function",
    "UnauthorizedError",
    "UnprocessableEntityError",
    "UpdateConnectorResponse",
    "Usage",
    "UsageBilledUnits",
    "UsageTokens",
    "UserChatMessageV2",
    "UserMessage",
    "UserMessageV2",
    "UserMessageV2Content",
    "V2ChatRequestDocumentsItem",
    "V2ChatRequestSafetyMode",
    "V2ChatRequestToolChoice",
    "V2ChatResponse",
    "V2ChatStreamRequestDocumentsItem",
    "V2ChatStreamRequestSafetyMode",
    "V2ChatStreamRequestToolChoice",
    "V2ChatStreamResponse",
    "V2EmbedRequestTruncate",
    "V2RerankResponse",
    "V2RerankResponseResultsItem",
    "__version__",
    "audio",
    "batches",
    "connectors",
    "datasets",
    "embed_jobs",
    "finetuning",
    "models",
    "v2",
]
================================================
FILE: src/cohere/_default_clients.py
================================================
# This file was auto-generated by Fern from our API Definition.
import typing
import httpx
# Default request timeout (seconds) applied when the caller does not supply one.
SDK_DEFAULT_TIMEOUT = 60

try:
    # Optional dependency shipped with the `cohere[aiohttp]` extra; its presence
    # decides which flavour of `DefaultAioHttpClient` this module defines.
    import httpx_aiohttp  # type: ignore[import-not-found]
except ImportError:

    class DefaultAioHttpClient(httpx.AsyncClient):  # type: ignore
        # Stub used when the extra is missing: constructing it fails with an
        # actionable install hint instead of a later, obscure ImportError.
        def __init__(self, **kwargs: typing.Any) -> None:
            raise RuntimeError("To use the aiohttp client, install the aiohttp extra: pip install cohere[aiohttp]")

else:

    class DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient):  # type: ignore
        # aiohttp-backed async client; `setdefault` means caller-supplied
        # keyword arguments always win over the SDK defaults.
        def __init__(self, **kwargs: typing.Any) -> None:
            kwargs.setdefault("timeout", SDK_DEFAULT_TIMEOUT)
            kwargs.setdefault("follow_redirects", True)
            super().__init__(**kwargs)
class DefaultAsyncHttpxClient(httpx.AsyncClient):
    """``httpx.AsyncClient`` preconfigured with the SDK's default timeout and redirect policy."""

    def __init__(self, **kwargs: typing.Any) -> None:
        # Merge so that caller-supplied options always override the SDK defaults.
        merged = {"timeout": SDK_DEFAULT_TIMEOUT, "follow_redirects": True, **kwargs}
        super().__init__(**merged)
================================================
FILE: src/cohere/aliases.py
================================================
# Import overrides early to ensure they're applied before types are used
# This is necessary for backwards compatibility patches like ToolCallV2.id being optional
from . import overrides # noqa: F401
from .v2 import (
ContentDeltaV2ChatStreamResponse,
ContentEndV2ChatStreamResponse,
ContentStartV2ChatStreamResponse,
MessageEndV2ChatStreamResponse,
MessageStartV2ChatStreamResponse,
ToolCallDeltaV2ChatStreamResponse,
ToolCallEndV2ChatStreamResponse,
ToolCallStartV2ChatStreamResponse,
V2ChatStreamResponse,
V2ChatResponse
)
# Backwards-compatible aliases: older "*StreamedChatResponseV2" / "ChatResponse"
# names are kept pointing at the current v2 response types so existing imports
# keep working.
StreamedChatResponseV2 = V2ChatStreamResponse
MessageStartStreamedChatResponseV2 = MessageStartV2ChatStreamResponse
MessageEndStreamedChatResponseV2 = MessageEndV2ChatStreamResponse
ContentStartStreamedChatResponseV2 = ContentStartV2ChatStreamResponse
ContentDeltaStreamedChatResponseV2 = ContentDeltaV2ChatStreamResponse
ContentEndStreamedChatResponseV2 = ContentEndV2ChatStreamResponse
ToolCallStartStreamedChatResponseV2 = ToolCallStartV2ChatStreamResponse
ToolCallDeltaStreamedChatResponseV2 = ToolCallDeltaV2ChatStreamResponse
ToolCallEndStreamedChatResponseV2 = ToolCallEndV2ChatStreamResponse
ChatResponse = V2ChatResponse
================================================
FILE: src/cohere/audio/__init__.py
================================================
# This file was auto-generated by Fern from our API Definition.
# isort: skip_file
import typing
from importlib import import_module
if typing.TYPE_CHECKING:
from . import transcriptions
from .transcriptions import AudioTranscriptionsCreateResponse
# Maps each public attribute of this package to the relative module that
# defines it; consumed by `__getattr__` below for lazy imports.
_dynamic_imports: typing.Dict[str, str] = {
    "AudioTranscriptionsCreateResponse": ".transcriptions",
    "transcriptions": ".transcriptions",
}
def __getattr__(attr_name: str) -> typing.Any:
    """Resolve public attributes lazily via the ``_dynamic_imports`` table (PEP 562)."""
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
    try:
        module = import_module(module_name, __package__)
        # When the attribute maps onto ".<attr_name>" the attribute *is* the submodule itself.
        return module if module_name == f".{attr_name}" else getattr(module, attr_name)
    except ImportError as e:
        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
    except AttributeError as e:
        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
def __dir__():
    """Return the sorted names that ``__getattr__`` can resolve lazily."""
    return sorted(_dynamic_imports)
# Static export list; mirrors the keys of `_dynamic_imports` above.
__all__ = ["AudioTranscriptionsCreateResponse", "transcriptions"]
================================================
FILE: src/cohere/audio/client.py
================================================
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import typing
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from .raw_client import AsyncRawAudioClient, RawAudioClient
if typing.TYPE_CHECKING:
from .transcriptions.client import AsyncTranscriptionsClient, TranscriptionsClient
class AudioClient:
    """Synchronous entry point for the audio endpoint group."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper
        self._raw_client = RawAudioClient(client_wrapper=client_wrapper)
        # Built lazily on first access so the submodule is not imported eagerly.
        self._transcriptions: typing.Optional[TranscriptionsClient] = None

    @property
    def with_raw_response(self) -> RawAudioClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawAudioClient
        """
        return self._raw_client

    @property
    def transcriptions(self):
        # Return the cached sub-client when it already exists.
        if self._transcriptions is not None:
            return self._transcriptions
        from .transcriptions.client import TranscriptionsClient  # noqa: E402

        self._transcriptions = TranscriptionsClient(client_wrapper=self._client_wrapper)
        return self._transcriptions
class AsyncAudioClient:
    """Asynchronous entry point for the audio endpoint group."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper
        self._raw_client = AsyncRawAudioClient(client_wrapper=client_wrapper)
        # Built lazily on first access so the submodule is not imported eagerly.
        self._transcriptions: typing.Optional[AsyncTranscriptionsClient] = None

    @property
    def with_raw_response(self) -> AsyncRawAudioClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawAudioClient
        """
        return self._raw_client

    @property
    def transcriptions(self):
        # Return the cached sub-client when it already exists.
        if self._transcriptions is not None:
            return self._transcriptions
        from .transcriptions.client import AsyncTranscriptionsClient  # noqa: E402

        self._transcriptions = AsyncTranscriptionsClient(client_wrapper=self._client_wrapper)
        return self._transcriptions
================================================
FILE: src/cohere/audio/raw_client.py
================================================
# This file was auto-generated by Fern from our API Definition.
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
class RawAudioClient:
    """Raw-response audio client (synchronous); currently only holds the shared client wrapper."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Stored so request helpers added by the generator can issue HTTP calls.
        self._client_wrapper = client_wrapper
class AsyncRawAudioClient:
    """Raw-response audio client (asynchronous); currently only holds the shared client wrapper."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Stored so request helpers added by the generator can issue HTTP calls.
        self._client_wrapper = client_wrapper
================================================
FILE: src/cohere/audio/transcriptions/__init__.py
================================================
# This file was auto-generated by Fern from our API Definition.
# isort: skip_file
import typing
from importlib import import_module
if typing.TYPE_CHECKING:
from .types import AudioTranscriptionsCreateResponse
# Attribute name -> defining submodule; consumed by `__getattr__` below for lazy imports.
_dynamic_imports: typing.Dict[str, str] = {"AudioTranscriptionsCreateResponse": ".types"}
def __getattr__(attr_name: str) -> typing.Any:
    """Resolve public attributes lazily via the ``_dynamic_imports`` table (PEP 562)."""
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
    try:
        module = import_module(module_name, __package__)
        # When the attribute maps onto ".<attr_name>" the attribute *is* the submodule itself.
        return module if module_name == f".{attr_name}" else getattr(module, attr_name)
    except ImportError as e:
        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
    except AttributeError as e:
        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
def __dir__():
    """Return the sorted names that ``__getattr__`` can resolve lazily."""
    return sorted(_dynamic_imports)
# Static export list; mirrors the keys of `_dynamic_imports` above.
__all__ = ["AudioTranscriptionsCreateResponse"]
================================================
FILE: src/cohere/audio/transcriptions/client.py
================================================
# This file was auto-generated by Fern from our API Definition.
import typing
from ... import core
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.request_options import RequestOptions
from .raw_client import AsyncRawTranscriptionsClient, RawTranscriptionsClient
from .types.audio_transcriptions_create_response import AudioTranscriptionsCreateResponse
# Sentinel default for optional parameters: lets the raw client tell an
# omitted argument apart from an explicit value.
OMIT = typing.cast(typing.Any, ...)


class TranscriptionsClient:
    """High-level synchronous client for the audio transcription endpoint."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawTranscriptionsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawTranscriptionsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawTranscriptionsClient
        """
        return self._raw_client

    def create(
        self,
        *,
        model: str,
        language: str,
        file: core.File,
        temperature: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioTranscriptionsCreateResponse:
        """
        Transcribe an audio file.

        Parameters
        ----------
        model : str
            ID of the model to use.

        language : str
            The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.

        file : core.File
            See core.File for more documentation

        temperature : typing.Optional[float]
            The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioTranscriptionsCreateResponse
            A successful response.

        Examples
        --------
        from cohere import Client

        client = Client(
            client_name="YOUR_CLIENT_NAME",
            token="YOUR_TOKEN",
        )
        client.audio.transcriptions.create(
            model="model",
            language="language",
        )
        """
        # Delegate to the raw client and unwrap the parsed payload.
        raw_response = self._raw_client.create(
            model=model,
            language=language,
            file=file,
            temperature=temperature,
            request_options=request_options,
        )
        return raw_response.data
class AsyncTranscriptionsClient:
    """High-level asynchronous client for the audio transcription endpoint."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawTranscriptionsClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawTranscriptionsClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawTranscriptionsClient
        """
        return self._raw_client

    async def create(
        self,
        *,
        model: str,
        language: str,
        file: core.File,
        temperature: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioTranscriptionsCreateResponse:
        """
        Transcribe an audio file.

        Parameters
        ----------
        model : str
            ID of the model to use.

        language : str
            The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.

        file : core.File
            See core.File for more documentation

        temperature : typing.Optional[float]
            The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioTranscriptionsCreateResponse
            A successful response.

        Examples
        --------
        import asyncio

        from cohere import AsyncClient

        client = AsyncClient(
            client_name="YOUR_CLIENT_NAME",
            token="YOUR_TOKEN",
        )

        async def main() -> None:
            await client.audio.transcriptions.create(
                model="model",
                language="language",
            )

        asyncio.run(main())
        """
        # Delegate to the raw client and unwrap the parsed payload.
        raw_response = await self._raw_client.create(
            model=model,
            language=language,
            file=file,
            temperature=temperature,
            request_options=request_options,
        )
        return raw_response.data
================================================
FILE: src/cohere/audio/transcriptions/raw_client.py
================================================
# This file was auto-generated by Fern from our API Definition.
import typing
from json.decoder import JSONDecodeError
from ... import core
from ...core.api_error import ApiError
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.http_response import AsyncHttpResponse, HttpResponse
from ...core.parse_error import ParsingError
from ...core.request_options import RequestOptions
from ...core.unchecked_base_model import construct_type
from ...errors.bad_request_error import BadRequestError
from ...errors.client_closed_request_error import ClientClosedRequestError
from ...errors.forbidden_error import ForbiddenError
from ...errors.gateway_timeout_error import GatewayTimeoutError
from ...errors.internal_server_error import InternalServerError
from ...errors.invalid_token_error import InvalidTokenError
from ...errors.not_found_error import NotFoundError
from ...errors.not_implemented_error import NotImplementedError
from ...errors.service_unavailable_error import ServiceUnavailableError
from ...errors.too_many_requests_error import TooManyRequestsError
from ...errors.unauthorized_error import UnauthorizedError
from ...errors.unprocessable_entity_error import UnprocessableEntityError
from .types.audio_transcriptions_create_response import AudioTranscriptionsCreateResponse
from pydantic import ValidationError
# Sentinel default for optional parameters: lets the HTTP layer tell an
# omitted argument apart from an explicit value (passed as `omit=` below).
OMIT = typing.cast(typing.Any, ...)
class RawTranscriptionsClient:
    """Synchronous raw-response client for the audio transcription endpoint."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def create(
        self,
        *,
        model: str,
        language: str,
        file: core.File,
        temperature: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[AudioTranscriptionsCreateResponse]:
        """
        Transcribe an audio file.

        Parameters
        ----------
        model : str
            ID of the model to use.

        language : str
            The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.

        file : core.File
            See core.File for more documentation

        temperature : typing.Optional[float]
            The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[AudioTranscriptionsCreateResponse]
            A successful response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "v2/audio/transcriptions",
            method="POST",
            data={
                "model": model,
                "language": language,
                "temperature": temperature,
            },
            files={
                "file": file,
            },
            request_options=request_options,
            omit=OMIT,
            force_multipart=True,
        )
        # Documented error statuses mapped onto their dedicated exception types.
        # NOTE: `NotImplementedError` here is cohere's HTTP-501 error class
        # (imported above), not the builtin.
        error_types: typing.Dict[int, typing.Any] = {
            400: BadRequestError,
            401: UnauthorizedError,
            403: ForbiddenError,
            404: NotFoundError,
            422: UnprocessableEntityError,
            429: TooManyRequestsError,
            498: InvalidTokenError,
            499: ClientClosedRequestError,
            500: InternalServerError,
            501: NotImplementedError,
            503: ServiceUnavailableError,
            504: GatewayTimeoutError,
        }
        try:
            if 200 <= _response.status_code < 300:
                parsed = typing.cast(
                    AudioTranscriptionsCreateResponse,
                    construct_type(
                        type_=AudioTranscriptionsCreateResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=parsed)
            error_cls = error_types.get(_response.status_code)
            if error_cls is not None:
                raise error_cls(
                    headers=dict(_response.headers),
                    body=typing.cast(
                        typing.Any,
                        construct_type(
                            type_=typing.Any,  # type: ignore
                            object_=_response.json(),
                        ),
                    ),
                )
            # Unexpected status: fall through to a generic ApiError with the JSON body.
            _response_json = _response.json()
        except JSONDecodeError:
            # Body was not valid JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        except ValidationError as e:
            raise ParsingError(
                status_code=_response.status_code, headers=dict(_response.headers), body=_response.json(), cause=e
            )
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawTranscriptionsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def create(
self,
*,
model: str,
language: str,
file: core.File,
temperature: typing.Optional[float] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AudioTranscriptionsCreateResponse]:
"""
Transcribe an audio file.
Parameters
----------
model : str
ID of the model to use.
language : str
The language of the input audio, supplied in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) format.
file : core.File
See core.File for more documentation
temperature : typing.Optional[float]
The sampling temperature, between 0 and 1. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AudioTranscriptionsCreateResponse]
A successful response.
"""
_response = await self._client_wrapper.httpx_client.request(
"v2/audio/transcriptions",
method="POST",
data={
"model": model,
"language": language,
"temperature": temperature,
},
files={
"file": file,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AudioTranscriptionsCreateResponse,
construct_type(
type_=AudioTranscriptionsCreateResponse, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 400:
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 401:
raise UnauthorizedError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 403:
raise ForbiddenError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 404:
raise NotFoundError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 429:
raise TooManyRequestsError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 498:
raise InvalidTokenError(
headers=dict(_response.headers),
body=typing.cast(
typing.Any,
construct_type(
type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
)
if _response.status_code == 499:
raise ClientClosedR
gitextract_zznh6amy/
├── .fern/
│ └── metadata.json
├── .fernignore
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ └── improvement_request.md
│ └── workflows/
│ └── ci.yml
├── .gitignore
├── 4.0.0-5.0.0-migration-guide.md
├── LICENSE
├── README.md
├── mypy.ini
├── pyproject.toml
├── reference.md
├── requirements.txt
├── src/
│ └── cohere/
│ ├── __init__.py
│ ├── _default_clients.py
│ ├── aliases.py
│ ├── audio/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── transcriptions/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ └── audio_transcriptions_create_response.py
│ ├── aws_client.py
│ ├── base_client.py
│ ├── batches/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── batch.py
│ │ ├── batch_status.py
│ │ ├── cancel_batch_response.py
│ │ ├── create_batch_response.py
│ │ ├── get_batch_response.py
│ │ └── list_batches_response.py
│ ├── bedrock_client.py
│ ├── client.py
│ ├── client_v2.py
│ ├── config.py
│ ├── connectors/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── raw_client.py
│ ├── core/
│ │ ├── __init__.py
│ │ ├── api_error.py
│ │ ├── client_wrapper.py
│ │ ├── datetime_utils.py
│ │ ├── file.py
│ │ ├── force_multipart.py
│ │ ├── http_client.py
│ │ ├── http_response.py
│ │ ├── http_sse/
│ │ │ ├── __init__.py
│ │ │ ├── _api.py
│ │ │ ├── _decoders.py
│ │ │ ├── _exceptions.py
│ │ │ └── _models.py
│ │ ├── jsonable_encoder.py
│ │ ├── logging.py
│ │ ├── parse_error.py
│ │ ├── pydantic_utilities.py
│ │ ├── query_encoder.py
│ │ ├── remove_none_from_dict.py
│ │ ├── request_options.py
│ │ ├── serialization.py
│ │ └── unchecked_base_model.py
│ ├── datasets/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── datasets_create_response.py
│ │ ├── datasets_get_response.py
│ │ ├── datasets_get_usage_response.py
│ │ └── datasets_list_response.py
│ ├── embed_jobs/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ └── create_embed_job_request_truncate.py
│ ├── environment.py
│ ├── errors/
│ │ ├── __init__.py
│ │ ├── bad_request_error.py
│ │ ├── client_closed_request_error.py
│ │ ├── forbidden_error.py
│ │ ├── gateway_timeout_error.py
│ │ ├── internal_server_error.py
│ │ ├── invalid_token_error.py
│ │ ├── not_found_error.py
│ │ ├── not_implemented_error.py
│ │ ├── service_unavailable_error.py
│ │ ├── too_many_requests_error.py
│ │ ├── unauthorized_error.py
│ │ └── unprocessable_entity_error.py
│ ├── finetuning/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── finetuning/
│ │ │ ├── __init__.py
│ │ │ └── types/
│ │ │ ├── __init__.py
│ │ │ ├── base_model.py
│ │ │ ├── base_type.py
│ │ │ ├── create_finetuned_model_response.py
│ │ │ ├── delete_finetuned_model_response.py
│ │ │ ├── event.py
│ │ │ ├── finetuned_model.py
│ │ │ ├── get_finetuned_model_response.py
│ │ │ ├── hyperparameters.py
│ │ │ ├── list_events_response.py
│ │ │ ├── list_finetuned_models_response.py
│ │ │ ├── list_training_step_metrics_response.py
│ │ │ ├── lora_target_modules.py
│ │ │ ├── settings.py
│ │ │ ├── status.py
│ │ │ ├── strategy.py
│ │ │ ├── training_step_metrics.py
│ │ │ ├── update_finetuned_model_response.py
│ │ │ └── wandb_config.py
│ │ └── raw_client.py
│ ├── manually_maintained/
│ │ ├── __init__.py
│ │ ├── cache.py
│ │ ├── cohere_aws/
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── classification.py
│ │ │ ├── client.py
│ │ │ ├── embeddings.py
│ │ │ ├── error.py
│ │ │ ├── generation.py
│ │ │ ├── mode.py
│ │ │ ├── rerank.py
│ │ │ ├── response.py
│ │ │ └── summary.py
│ │ ├── lazy_aws_deps.py
│ │ ├── lazy_oci_deps.py
│ │ ├── streaming_embed.py
│ │ └── tokenizers.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── raw_client.py
│ ├── oci_client.py
│ ├── overrides.py
│ ├── py.typed
│ ├── raw_base_client.py
│ ├── sagemaker_client.py
│ ├── types/
│ │ ├── __init__.py
│ │ ├── api_meta.py
│ │ ├── api_meta_api_version.py
│ │ ├── api_meta_billed_units.py
│ │ ├── api_meta_tokens.py
│ │ ├── assistant_message.py
│ │ ├── assistant_message_response.py
│ │ ├── assistant_message_response_content_item.py
│ │ ├── assistant_message_v2content.py
│ │ ├── assistant_message_v2content_one_item.py
│ │ ├── auth_token_type.py
│ │ ├── chat_citation.py
│ │ ├── chat_citation_generation_event.py
│ │ ├── chat_citation_type.py
│ │ ├── chat_connector.py
│ │ ├── chat_content_delta_event.py
│ │ ├── chat_content_delta_event_delta.py
│ │ ├── chat_content_delta_event_delta_message.py
│ │ ├── chat_content_delta_event_delta_message_content.py
│ │ ├── chat_content_end_event.py
│ │ ├── chat_content_start_event.py
│ │ ├── chat_content_start_event_delta.py
│ │ ├── chat_content_start_event_delta_message.py
│ │ ├── chat_content_start_event_delta_message_content.py
│ │ ├── chat_content_start_event_delta_message_content_type.py
│ │ ├── chat_data_metrics.py
│ │ ├── chat_debug_event.py
│ │ ├── chat_document.py
│ │ ├── chat_document_source.py
│ │ ├── chat_finish_reason.py
│ │ ├── chat_message.py
│ │ ├── chat_message_end_event.py
│ │ ├── chat_message_end_event_delta.py
│ │ ├── chat_message_start_event.py
│ │ ├── chat_message_start_event_delta.py
│ │ ├── chat_message_start_event_delta_message.py
│ │ ├── chat_message_v2.py
│ │ ├── chat_messages.py
│ │ ├── chat_request_citation_quality.py
│ │ ├── chat_request_prompt_truncation.py
│ │ ├── chat_request_safety_mode.py
│ │ ├── chat_search_queries_generation_event.py
│ │ ├── chat_search_query.py
│ │ ├── chat_search_result.py
│ │ ├── chat_search_result_connector.py
│ │ ├── chat_search_results_event.py
│ │ ├── chat_stream_end_event.py
│ │ ├── chat_stream_end_event_finish_reason.py
│ │ ├── chat_stream_event.py
│ │ ├── chat_stream_event_type.py
│ │ ├── chat_stream_request_citation_quality.py
│ │ ├── chat_stream_request_prompt_truncation.py
│ │ ├── chat_stream_request_safety_mode.py
│ │ ├── chat_stream_start_event.py
│ │ ├── chat_text_content.py
│ │ ├── chat_text_generation_event.py
│ │ ├── chat_text_response_format.py
│ │ ├── chat_text_response_format_v2.py
│ │ ├── chat_thinking_content.py
│ │ ├── chat_tool_call_delta_event.py
│ │ ├── chat_tool_call_delta_event_delta.py
│ │ ├── chat_tool_call_delta_event_delta_message.py
│ │ ├── chat_tool_call_delta_event_delta_message_tool_calls.py
│ │ ├── chat_tool_call_delta_event_delta_message_tool_calls_function.py
│ │ ├── chat_tool_call_end_event.py
│ │ ├── chat_tool_call_start_event.py
│ │ ├── chat_tool_call_start_event_delta.py
│ │ ├── chat_tool_call_start_event_delta_message.py
│ │ ├── chat_tool_calls_chunk_event.py
│ │ ├── chat_tool_calls_generation_event.py
│ │ ├── chat_tool_message.py
│ │ ├── chat_tool_plan_delta_event.py
│ │ ├── chat_tool_plan_delta_event_delta.py
│ │ ├── chat_tool_plan_delta_event_delta_message.py
│ │ ├── chat_tool_source.py
│ │ ├── check_api_key_response.py
│ │ ├── citation.py
│ │ ├── citation_end_event.py
│ │ ├── citation_options.py
│ │ ├── citation_options_mode.py
│ │ ├── citation_start_event.py
│ │ ├── citation_start_event_delta.py
│ │ ├── citation_start_event_delta_message.py
│ │ ├── citation_type.py
│ │ ├── classify_data_metrics.py
│ │ ├── classify_example.py
│ │ ├── classify_request_truncate.py
│ │ ├── classify_response.py
│ │ ├── classify_response_classifications_item.py
│ │ ├── classify_response_classifications_item_classification_type.py
│ │ ├── classify_response_classifications_item_labels_value.py
│ │ ├── compatible_endpoint.py
│ │ ├── connector.py
│ │ ├── connector_auth_status.py
│ │ ├── connector_o_auth.py
│ │ ├── content.py
│ │ ├── create_connector_o_auth.py
│ │ ├── create_connector_response.py
│ │ ├── create_connector_service_auth.py
│ │ ├── create_embed_job_response.py
│ │ ├── dataset.py
│ │ ├── dataset_part.py
│ │ ├── dataset_type.py
│ │ ├── dataset_validation_status.py
│ │ ├── delete_connector_response.py
│ │ ├── detokenize_response.py
│ │ ├── document.py
│ │ ├── document_content.py
│ │ ├── embed_by_type_response.py
│ │ ├── embed_by_type_response_embeddings.py
│ │ ├── embed_by_type_response_response_type.py
│ │ ├── embed_content.py
│ │ ├── embed_floats_response.py
│ │ ├── embed_image.py
│ │ ├── embed_image_url.py
│ │ ├── embed_input.py
│ │ ├── embed_input_type.py
│ │ ├── embed_job.py
│ │ ├── embed_job_status.py
│ │ ├── embed_job_truncate.py
│ │ ├── embed_request_truncate.py
│ │ ├── embed_response.py
│ │ ├── embed_text.py
│ │ ├── embedding_type.py
│ │ ├── finetune_dataset_metrics.py
│ │ ├── finish_reason.py
│ │ ├── generate_request_return_likelihoods.py
│ │ ├── generate_request_truncate.py
│ │ ├── generate_stream_end.py
│ │ ├── generate_stream_end_response.py
│ │ ├── generate_stream_error.py
│ │ ├── generate_stream_event.py
│ │ ├── generate_stream_request_return_likelihoods.py
│ │ ├── generate_stream_request_truncate.py
│ │ ├── generate_stream_text.py
│ │ ├── generate_streamed_response.py
│ │ ├── generation.py
│ │ ├── get_connector_response.py
│ │ ├── get_model_response.py
│ │ ├── get_model_response_sampling_defaults.py
│ │ ├── image.py
│ │ ├── image_content.py
│ │ ├── image_url.py
│ │ ├── image_url_detail.py
│ │ ├── json_response_format.py
│ │ ├── json_response_format_v2.py
│ │ ├── label_metric.py
│ │ ├── list_connectors_response.py
│ │ ├── list_embed_job_response.py
│ │ ├── list_models_response.py
│ │ ├── logprob_item.py
│ │ ├── message.py
│ │ ├── metrics.py
│ │ ├── non_streamed_chat_response.py
│ │ ├── o_auth_authorize_response.py
│ │ ├── parse_info.py
│ │ ├── rerank_document.py
│ │ ├── rerank_request_documents_item.py
│ │ ├── rerank_response.py
│ │ ├── rerank_response_results_item.py
│ │ ├── rerank_response_results_item_document.py
│ │ ├── reranker_data_metrics.py
│ │ ├── response_format.py
│ │ ├── response_format_v2.py
│ │ ├── single_generation.py
│ │ ├── single_generation_in_stream.py
│ │ ├── single_generation_token_likelihoods_item.py
│ │ ├── source.py
│ │ ├── streamed_chat_response.py
│ │ ├── summarize_request_extractiveness.py
│ │ ├── summarize_request_format.py
│ │ ├── summarize_request_length.py
│ │ ├── summarize_response.py
│ │ ├── system_message_v2.py
│ │ ├── system_message_v2content.py
│ │ ├── system_message_v2content_one_item.py
│ │ ├── thinking.py
│ │ ├── thinking_type.py
│ │ ├── tokenize_response.py
│ │ ├── tool.py
│ │ ├── tool_call.py
│ │ ├── tool_call_delta.py
│ │ ├── tool_call_v2.py
│ │ ├── tool_call_v2function.py
│ │ ├── tool_content.py
│ │ ├── tool_message_v2.py
│ │ ├── tool_message_v2content.py
│ │ ├── tool_parameter_definitions_value.py
│ │ ├── tool_result.py
│ │ ├── tool_v2.py
│ │ ├── tool_v2function.py
│ │ ├── update_connector_response.py
│ │ ├── usage.py
│ │ ├── usage_billed_units.py
│ │ ├── usage_tokens.py
│ │ ├── user_message_v2.py
│ │ └── user_message_v2content.py
│ ├── utils.py
│ ├── v2/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── raw_client.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── v2chat_request_documents_item.py
│ │ ├── v2chat_request_safety_mode.py
│ │ ├── v2chat_request_tool_choice.py
│ │ ├── v2chat_response.py
│ │ ├── v2chat_stream_request_documents_item.py
│ │ ├── v2chat_stream_request_safety_mode.py
│ │ ├── v2chat_stream_request_tool_choice.py
│ │ ├── v2chat_stream_response.py
│ │ ├── v2embed_request_truncate.py
│ │ ├── v2rerank_response.py
│ │ └── v2rerank_response_results_item.py
│ └── version.py
└── tests/
├── __init__.py
├── embed_job.jsonl
├── test_async_client.py
├── test_aws_client_unit.py
├── test_bedrock_client.py
├── test_client.py
├── test_client_init.py
├── test_client_v2.py
├── test_embed_streaming.py
├── test_embed_utils.py
├── test_oci_client.py
├── test_oci_mypy.py
└── test_overrides.py
SYMBOL INDEX (1368 symbols across 285 files)
FILE: src/cohere/__init__.py
function __getattr__ (line 635) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 651) | def __dir__():
FILE: src/cohere/_default_clients.py
class DefaultAioHttpClient (line 13) | class DefaultAioHttpClient(httpx.AsyncClient): # type: ignore
method __init__ (line 14) | def __init__(self, **kwargs: typing.Any) -> None:
method __init__ (line 20) | def __init__(self, **kwargs: typing.Any) -> None:
class DefaultAioHttpClient (line 19) | class DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: i...
method __init__ (line 14) | def __init__(self, **kwargs: typing.Any) -> None:
method __init__ (line 20) | def __init__(self, **kwargs: typing.Any) -> None:
class DefaultAsyncHttpxClient (line 26) | class DefaultAsyncHttpxClient(httpx.AsyncClient):
method __init__ (line 27) | def __init__(self, **kwargs: typing.Any) -> None:
FILE: src/cohere/audio/__init__.py
function __getattr__ (line 17) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 33) | def __dir__():
FILE: src/cohere/audio/client.py
class AudioClient (line 14) | class AudioClient:
method __init__ (line 15) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 21) | def with_raw_response(self) -> RawAudioClient:
method transcriptions (line 32) | def transcriptions(self):
class AsyncAudioClient (line 40) | class AsyncAudioClient:
method __init__ (line 41) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 47) | def with_raw_response(self) -> AsyncRawAudioClient:
method transcriptions (line 58) | def transcriptions(self):
FILE: src/cohere/audio/raw_client.py
class RawAudioClient (line 6) | class RawAudioClient:
method __init__ (line 7) | def __init__(self, *, client_wrapper: SyncClientWrapper):
class AsyncRawAudioClient (line 11) | class AsyncRawAudioClient:
method __init__ (line 12) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
FILE: src/cohere/audio/transcriptions/__init__.py
function __getattr__ (line 13) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 29) | def __dir__():
FILE: src/cohere/audio/transcriptions/client.py
class TranscriptionsClient (line 15) | class TranscriptionsClient:
method __init__ (line 16) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 20) | def with_raw_response(self) -> RawTranscriptionsClient:
method create (line 30) | def create(
class AsyncTranscriptionsClient (line 83) | class AsyncTranscriptionsClient:
method __init__ (line 84) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 88) | def with_raw_response(self) -> AsyncRawTranscriptionsClient:
method create (line 98) | async def create(
FILE: src/cohere/audio/transcriptions/raw_client.py
class RawTranscriptionsClient (line 32) | class RawTranscriptionsClient:
method __init__ (line 33) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method create (line 36) | def create(
class AsyncRawTranscriptionsClient (line 237) | class AsyncRawTranscriptionsClient:
method __init__ (line 238) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method create (line 241) | async def create(
FILE: src/cohere/audio/transcriptions/types/__init__.py
function __getattr__ (line 13) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 29) | def __dir__():
FILE: src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py
class AudioTranscriptionsCreateResponse (line 10) | class AudioTranscriptionsCreateResponse(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/aws_client.py
class AwsClient (line 17) | class AwsClient(Client):
method __init__ (line 18) | def __init__(
class AwsClientV2 (line 48) | class AwsClientV2(ClientV2):
method __init__ (line 49) | def __init__(
function get_event_hooks (line 82) | def get_event_hooks(
class Streamer (line 115) | class Streamer(SyncByteStream):
method __init__ (line 118) | def __init__(self, lines: typing.Iterator[bytes]):
method __iter__ (line 121) | def __iter__(self) -> typing.Iterator[bytes]:
function stream_generator (line 138) | def stream_generator(response: httpx.Response, endpoint: str) -> typing....
function map_token_counts (line 155) | def map_token_counts(response: httpx.Response) -> ApiMeta:
function map_response_from_bedrock (line 164) | def map_response_from_bedrock():
function get_boto3_session (line 199) | def get_boto3_session(
function map_request_to_bedrock (line 207) | def map_request_to_bedrock(
function get_url (line 272) | def get_url(
function get_api_version (line 288) | def get_api_version(*, version: str):
FILE: src/cohere/base_client.py
class BaseCohere (line 65) | class BaseCohere:
method __init__ (line 110) | def __init__(
method with_raw_response (line 152) | def with_raw_response(self) -> RawBaseCohere:
method chat_stream (line 162) | def chat_stream(
method chat (line 463) | def chat(
method generate_stream (line 761) | def generate_stream(
method generate (line 909) | def generate(
method embed (line 1055) | def embed(
method rerank (line 1139) | def rerank(
method classify (line 1234) | def classify(
method summarize (line 1343) | def summarize(
method tokenize (line 1416) | def tokenize(
method detokenize (line 1454) | def detokenize(
method check_api_key (line 1492) | def check_api_key(self, *, request_options: typing.Optional[RequestOpt...
method v2 (line 1520) | def v2(self):
method batches (line 1528) | def batches(self):
method embed_jobs (line 1536) | def embed_jobs(self):
method datasets (line 1544) | def datasets(self):
method connectors (line 1552) | def connectors(self):
method models (line 1560) | def models(self):
method finetuning (line 1568) | def finetuning(self):
method audio (line 1576) | def audio(self):
function _make_default_async_client (line 1584) | def _make_default_async_client(
class AsyncBaseCohere (line 1602) | class AsyncBaseCohere:
method __init__ (line 1650) | def __init__(
method with_raw_response (line 1692) | def with_raw_response(self) -> AsyncRawBaseCohere:
method chat_stream (line 1702) | async def chat_stream(
method chat (line 2012) | async def chat(
method generate_stream (line 2318) | async def generate_stream(
method generate (line 2475) | async def generate(
method embed (line 2629) | async def embed(
method rerank (line 2721) | async def rerank(
method classify (line 2824) | async def classify(
method summarize (line 2941) | async def summarize(
method tokenize (line 3022) | async def tokenize(
method detokenize (line 3068) | async def detokenize(
method check_api_key (line 3114) | async def check_api_key(self, *, request_options: typing.Optional[Requ...
method v2 (line 3150) | def v2(self):
method batches (line 3158) | def batches(self):
method embed_jobs (line 3166) | def embed_jobs(self):
method datasets (line 3174) | def datasets(self):
method connectors (line 3182) | def connectors(self):
method models (line 3190) | def models(self):
method finetuning (line 3198) | def finetuning(self):
method audio (line 3206) | def audio(self):
function _get_base_url (line 3214) | def _get_base_url(*, base_url: typing.Optional[str] = None, environment:...
FILE: src/cohere/batches/__init__.py
function __getattr__ (line 27) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 43) | def __dir__():
FILE: src/cohere/batches/client.py
class BatchesClient (line 18) | class BatchesClient:
method __init__ (line 19) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 23) | def with_raw_response(self) -> RawBatchesClient:
method list (line 33) | def list(
method create (line 87) | def create(self, *, request: Batch, request_options: typing.Optional[R...
method retrieve (line 123) | def retrieve(self, id: str, *, request_options: typing.Optional[Reques...
method cancel (line 155) | def cancel(self, id: str, *, request_options: typing.Optional[RequestO...
class AsyncBatchesClient (line 188) | class AsyncBatchesClient:
method __init__ (line 189) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 193) | def with_raw_response(self) -> AsyncRawBatchesClient:
method list (line 203) | async def list(
method create (line 265) | async def create(
method retrieve (line 311) | async def retrieve(self, id: str, *, request_options: typing.Optional[...
method cancel (line 351) | async def cancel(self, id: str, *, request_options: typing.Optional[Re...
FILE: src/cohere/batches/raw_client.py
class RawBatchesClient (line 31) | class RawBatchesClient:
method __init__ (line 32) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method list (line 35) | def list(
method create (line 165) | def create(
method retrieve (line 278) | def retrieve(
method cancel (line 387) | def cancel(
class AsyncRawBatchesClient (line 497) | class AsyncRawBatchesClient:
method __init__ (line 498) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method list (line 501) | async def list(
method create (line 631) | async def create(
method retrieve (line 744) | async def retrieve(
method cancel (line 853) | async def cancel(
FILE: src/cohere/batches/types/__init__.py
function __getattr__ (line 25) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 41) | def __dir__():
FILE: src/cohere/batches/types/batch.py
class Batch (line 12) | class Batch(UncheckedBaseModel):
class Config (line 97) | class Config:
FILE: src/cohere/batches/types/create_batch_response.py
class CreateBatchResponse (line 11) | class CreateBatchResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/batches/types/get_batch_response.py
class GetBatchResponse (line 11) | class GetBatchResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/batches/types/list_batches_response.py
class ListBatchesResponse (line 11) | class ListBatchesResponse(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/bedrock_client.py
class BedrockClient (line 8) | class BedrockClient(AwsClient):
method __init__ (line 9) | def __init__(
method rerank (line 28) | def rerank(self, *, query, documents, model = ..., top_n = ..., rank_f...
class BedrockClientV2 (line 31) | class BedrockClientV2(AwsClientV2):
method __init__ (line 32) | def __init__(
FILE: src/cohere/client.py
function validate_args (line 30) | def validate_args(obj: typing.Any, method_name: str, check_fn: typing.Ca...
function throw_if_stream_is_true (line 51) | def throw_if_stream_is_true(*args, **kwargs) -> None:
function moved_function (line 58) | def moved_function(fn_name: str, new_fn_name: str) -> typing.Any:
function deprecated_function (line 72) | def deprecated_function(fn_name: str) -> typing.Any:
function experimental_kwarg_decorator (line 88) | def experimental_kwarg_decorator(func, deprecated_kwarg):
function fix_base_url (line 123) | def fix_base_url(base_url: typing.Optional[str]) -> typing.Optional[str]:
class Client (line 131) | class Client(BaseCohere, CacheMixin):
method __init__ (line 134) | def __init__(
method __enter__ (line 172) | def __enter__(self):
method __exit__ (line 175) | def __exit__(self, exc_type, exc_value, traceback):
method embed (line 180) | def embed(
method embed_stream (line 226) | def embed_stream(
method tokenize (line 326) | def tokenize(
method detokenize (line 348) | def detokenize(
method fetch_tokenizer (line 371) | def fetch_tokenizer(self, *, model: str) -> Tokenizer:
class AsyncClient (line 378) | class AsyncClient(AsyncBaseCohere, CacheMixin):
method __init__ (line 381) | def __init__(
method __aenter__ (line 419) | async def __aenter__(self):
method __aexit__ (line 422) | async def __aexit__(self, exc_type, exc_value, traceback):
method embed (line 427) | async def embed(
method tokenize (line 520) | async def tokenize(
method detokenize (line 541) | async def detokenize(
method fetch_tokenizer (line 562) | async def fetch_tokenizer(self, *, model: str) -> Tokenizer:
function _get_api_key_from_environment (line 569) | def _get_api_key_from_environment() -> typing.Optional[str]:
FILE: src/cohere/client_v2.py
class _CombinedRawClient (line 11) | class _CombinedRawClient:
method __init__ (line 20) | def __init__(self, v1_raw_client: typing.Any, v2_raw_client: typing.Any):
method __getattr__ (line 24) | def __getattr__(self, name: str) -> typing.Any:
class ClientV2 (line 31) | class ClientV2(V2Client, Client): # type: ignore
method __init__ (line 32) | def __init__(
class AsyncClientV2 (line 64) | class AsyncClientV2(AsyncV2Client, AsyncClient): # type: ignore
method __init__ (line 65) | def __init__(
FILE: src/cohere/connectors/client.py
class ConnectorsClient (line 21) | class ConnectorsClient:
method __init__ (line 22) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 26) | def with_raw_response(self) -> RawConnectorsClient:
method list (line 36) | def list(
method create (line 78) | def create(
method get (line 154) | def get(self, id: str, *, request_options: typing.Optional[RequestOpti...
method delete (line 186) | def delete(self, id: str, *, request_options: typing.Optional[RequestO...
method update (line 218) | def update(
method o_auth_authorize (line 291) | def o_auth_authorize(
class AsyncConnectorsClient (line 336) | class AsyncConnectorsClient:
method __init__ (line 337) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 341) | def with_raw_response(self) -> AsyncRawConnectorsClient:
method list (line 351) | async def list(
method create (line 401) | async def create(
method get (line 485) | async def get(self, id: str, *, request_options: typing.Optional[Reque...
method delete (line 525) | async def delete(
method update (line 567) | async def update(
method o_auth_authorize (line 648) | async def o_auth_authorize(
FILE: src/cohere/connectors/raw_client.py
class RawConnectorsClient (line 40) | class RawConnectorsClient:
method __init__ (line 41) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method list (line 44) | def list(
method create (line 230) | def create(
method get (line 454) | def get(
method delete (line 629) | def delete(
method update (line 804) | def update(
method o_auth_authorize (line 1025) | def o_auth_authorize(
class AsyncRawConnectorsClient (line 1211) | class AsyncRawConnectorsClient:
method __init__ (line 1212) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method list (line 1215) | async def list(
method create (line 1401) | async def create(
method get (line 1625) | async def get(
method delete (line 1800) | async def delete(
method update (line 1975) | async def update(
method o_auth_authorize (line 2196) | async def o_auth_authorize(
FILE: src/cohere/core/__init__.py
function __getattr__ (line 73) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 89) | def __dir__():
FILE: src/cohere/core/api_error.py
class ApiError (line 6) | class ApiError(Exception):
method __init__ (line 11) | def __init__(
method __str__ (line 22) | def __str__(self) -> str:
FILE: src/cohere/core/client_wrapper.py
class BaseClientWrapper (line 10) | class BaseClientWrapper:
method __init__ (line 11) | def __init__(
method get_headers (line 28) | def get_headers(self) -> typing.Dict[str, str]:
method _get_token (line 45) | def _get_token(self) -> str:
method get_custom_headers (line 51) | def get_custom_headers(self) -> typing.Optional[typing.Dict[str, str]]:
method get_base_url (line 54) | def get_base_url(self) -> str:
method get_timeout (line 57) | def get_timeout(self) -> typing.Optional[float]:
class SyncClientWrapper (line 61) | class SyncClientWrapper(BaseClientWrapper):
method __init__ (line 62) | def __init__(
class AsyncClientWrapper (line 85) | class AsyncClientWrapper(BaseClientWrapper):
method __init__ (line 86) | def __init__(
method async_get_headers (line 111) | async def async_get_headers(self) -> typing.Dict[str, str]:
FILE: src/cohere/core/datetime_utils.py
function parse_rfc2822_datetime (line 12) | def parse_rfc2822_datetime(v: Any) -> dt.datetime:
class Rfc2822DateTime (line 30) | class Rfc2822DateTime(dt.datetime):
method __get_validators__ (line 38) | def __get_validators__(cls): # type: ignore[no-untyped-def]
method __get_pydantic_core_schema__ (line 42) | def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any...
function serialize_datetime (line 48) | def serialize_datetime(v: dt.datetime) -> str:
FILE: src/cohere/core/file.py
function convert_file_dict_to_httpx_tuples (line 25) | def convert_file_dict_to_httpx_tuples(
function with_content_type (line 46) | def with_content_type(*, file: File, default_content_type: str) -> File:
FILE: src/cohere/core/force_multipart.py
class ForceMultipartDict (line 6) | class ForceMultipartDict(Dict[str, Any]):
method __bool__ (line 14) | def __bool__(self) -> bool:
FILE: src/cohere/core/http_client.py
function _parse_retry_after (line 26) | def _parse_retry_after(response_headers: httpx.Headers) -> typing.Option...
function _add_positive_jitter (line 67) | def _add_positive_jitter(delay: float) -> float:
function _add_symmetric_jitter (line 73) | def _add_symmetric_jitter(delay: float) -> float:
function _parse_x_ratelimit_reset (line 79) | def _parse_x_ratelimit_reset(response_headers: httpx.Headers) -> typing....
function _retry_timeout (line 99) | def _retry_timeout(response: httpx.Response, retries: int) -> float:
function _retry_timeout_from_retries (line 121) | def _retry_timeout_from_retries(retries: int) -> float:
function _should_retry (line 127) | def _should_retry(response: httpx.Response) -> bool:
function _redact_headers (line 154) | def _redact_headers(headers: typing.Dict[str, str]) -> typing.Dict[str, ...
function _build_url (line 158) | def _build_url(base_url: str, path: typing.Optional[str]) -> str:
function _maybe_filter_none_from_multipart_data (line 182) | def _maybe_filter_none_from_multipart_data(
function remove_omit_from_dict (line 197) | def remove_omit_from_dict(
function maybe_filter_request_body (line 210) | def maybe_filter_request_body(
function get_request_body (line 236) | def get_request_body(
class HttpClient (line 266) | class HttpClient:
method __init__ (line 267) | def __init__(
method get_base_url (line 284) | def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
method request (line 293) | def request(
method stream (line 453) | def stream(
class AsyncHttpClient (line 548) | class AsyncHttpClient:
method __init__ (line 549) | def __init__(
method _get_headers (line 568) | async def _get_headers(self) -> typing.Dict[str, str]:
method get_base_url (line 573) | def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
method request (line 582) | async def request(
method stream (line 745) | async def stream(
FILE: src/cohere/core/http_response.py
class BaseHttpResponse (line 11) | class BaseHttpResponse:
method __init__ (line 16) | def __init__(self, response: httpx.Response):
method headers (line 20) | def headers(self) -> Dict[str, str]:
method status_code (line 24) | def status_code(self) -> int:
class HttpResponse (line 28) | class HttpResponse(Generic[T], BaseHttpResponse):
method __init__ (line 33) | def __init__(self, response: httpx.Response, data: T):
method data (line 38) | def data(self) -> T:
method close (line 41) | def close(self) -> None:
class AsyncHttpResponse (line 45) | class AsyncHttpResponse(Generic[T], BaseHttpResponse):
method __init__ (line 50) | def __init__(self, response: httpx.Response, data: T):
method data (line 55) | def data(self) -> T:
method close (line 58) | async def close(self) -> None:
FILE: src/cohere/core/http_sse/__init__.py
function __getattr__ (line 21) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 37) | def __dir__():
FILE: src/cohere/core/http_sse/_api.py
class EventSource (line 13) | class EventSource:
method __init__ (line 14) | def __init__(self, response: httpx.Response) -> None:
method _check_content_type (line 17) | def _check_content_type(self) -> None:
method _get_charset (line 24) | def _get_charset(self) -> str:
method response (line 45) | def response(self) -> httpx.Response:
method iter_sse (line 48) | def iter_sse(self) -> Iterator[ServerSentEvent]:
method aiter_sse (line 76) | async def aiter_sse(self) -> AsyncGenerator[ServerSentEvent, None]:
function connect_sse (line 91) | def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: A...
function aconnect_sse (line 101) | async def aconnect_sse(
FILE: src/cohere/core/http_sse/_decoders.py
class SSEDecoder (line 8) | class SSEDecoder:
method __init__ (line 9) | def __init__(self) -> None:
method decode (line 15) | def decode(self, line: str) -> Optional[ServerSentEvent]:
FILE: src/cohere/core/http_sse/_exceptions.py
class SSEError (line 6) | class SSEError(httpx.TransportError):
FILE: src/cohere/core/http_sse/_models.py
class ServerSentEvent (line 9) | class ServerSentEvent:
method json (line 15) | def json(self) -> Any:
FILE: src/cohere/core/jsonable_encoder.py
function jsonable_encoder (line 31) | def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callab...
FILE: src/cohere/core/logging.py
class ILogger (line 16) | class ILogger(typing.Protocol):
method debug (line 17) | def debug(self, message: str, **kwargs: typing.Any) -> None: ...
method info (line 18) | def info(self, message: str, **kwargs: typing.Any) -> None: ...
method warn (line 19) | def warn(self, message: str, **kwargs: typing.Any) -> None: ...
method error (line 20) | def error(self, message: str, **kwargs: typing.Any) -> None: ...
class ConsoleLogger (line 23) | class ConsoleLogger:
method __init__ (line 26) | def __init__(self) -> None:
method debug (line 34) | def debug(self, message: str, **kwargs: typing.Any) -> None:
method info (line 37) | def info(self, message: str, **kwargs: typing.Any) -> None:
method warn (line 40) | def warn(self, message: str, **kwargs: typing.Any) -> None:
method error (line 43) | def error(self, message: str, **kwargs: typing.Any) -> None:
class LogConfig (line 47) | class LogConfig(typing.TypedDict, total=False):
class Logger (line 53) | class Logger:
method __init__ (line 58) | def __init__(self, *, level: LogLevel, logger: ILogger, silent: bool) ...
method _should_log (line 63) | def _should_log(self, level: LogLevel) -> bool:
method is_debug (line 66) | def is_debug(self) -> bool:
method is_info (line 69) | def is_info(self) -> bool:
method is_warn (line 72) | def is_warn(self) -> bool:
method is_error (line 75) | def is_error(self) -> bool:
method debug (line 78) | def debug(self, message: str, **kwargs: typing.Any) -> None:
method info (line 82) | def info(self, message: str, **kwargs: typing.Any) -> None:
method warn (line 86) | def warn(self, message: str, **kwargs: typing.Any) -> None:
method error (line 90) | def error(self, message: str, **kwargs: typing.Any) -> None:
function create_logger (line 98) | def create_logger(config: typing.Optional[typing.Union[LogConfig, Logger...
FILE: src/cohere/core/parse_error.py
class ParsingError (line 6) | class ParsingError(Exception):
method __init__ (line 18) | def __init__(
method __str__ (line 34) | def __str__(self) -> str:
FILE: src/cohere/core/pydantic_utilities.py
function parse_datetime (line 42) | def parse_datetime(value: Any) -> dt.datetime: # type: ignore[misc]
function parse_date (line 47) | def parse_date(value: Any) -> dt.date: # type: ignore[misc]
function is_literal_type (line 58) | def is_literal_type(tp: Optional[Type[Any]]) -> bool: # type: ignore[misc]
function is_union (line 61) | def is_union(tp: Optional[Type[Any]]) -> bool: # type: ignore[misc]
function _decimal_encoder (line 93) | def _decimal_encoder(dec_value: Any) -> Any:
function _get_discriminator_and_variants (line 138) | def _get_discriminator_and_variants(type_: Type[Any]) -> Tuple[Optional[...
function _get_field_annotation (line 165) | def _get_field_annotation(model: Type[Any], field_name: str) -> Optional...
function _find_variant_by_discriminator (line 180) | def _find_variant_by_discriminator(
function _is_string_type (line 198) | def _is_string_type(type_: Type[Any]) -> bool:
function parse_sse_obj (line 214) | def parse_sse_obj(sse: "ServerSentEvent", type_: Type[T]) -> T:
function parse_obj_as (line 313) | def parse_obj_as(type_: Type[T], object_: Any) -> T:
function to_jsonable_with_fallback (line 350) | def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[A...
class UniversalBaseModel (line 358) | class UniversalBaseModel(pydantic.BaseModel):
method _coerce_field_names_to_aliases (line 367) | def _coerce_field_names_to_aliases(cls, data: Any) -> Any:
method serialize_model (line 403) | def serialize_model(self) -> Any: # type: ignore[name-defined]
class Config (line 410) | class Config:
method _coerce_field_names_to_aliases (line 415) | def _coerce_field_names_to_aliases(cls, values: Any) -> Any:
method model_construct (line 449) | def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]...
method construct (line 454) | def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = No...
method json (line 460) | def json(self, **kwargs: Any) -> str:
method dict (line 470) | def dict(self, **kwargs: Any) -> Dict[str, Any]:
function _union_list_of_pydantic_dicts (line 530) | def _union_list_of_pydantic_dicts(source: List[Any], destination: List[A...
function deep_union_pydantic_dicts (line 543) | def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[...
class V2RootModel (line 561) | class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: igno...
function encode_by_type (line 569) | def encode_by_type(o: Any) -> Any:
function update_forward_refs (line 581) | def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
function universal_root_validator (line 592) | def universal_root_validator(
function universal_field_validator (line 605) | def universal_field_validator(field_name: str, pre: bool = False) -> Cal...
function _get_model_fields (line 617) | def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
function _get_field_default (line 623) | def _get_field_default(field: PydanticField) -> Any:
FILE: src/cohere/core/query_encoder.py
function traverse_query_dict (line 9) | def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[...
function single_query_encoder (line 26) | def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple...
function encode_query (line 51) | def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple...
FILE: src/cohere/core/remove_none_from_dict.py
function remove_none_from_dict (line 6) | def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict...
FILE: src/cohere/core/request_options.py
class RequestOptions (line 11) | class RequestOptions(typing.TypedDict, total=False):
FILE: src/cohere/core/serialization.py
class FieldMetadata (line 11) | class FieldMetadata:
method __init__ (line 25) | def __init__(self, *, alias: str) -> None:
function convert_and_respect_annotation_metadata (line 29) | def convert_and_respect_annotation_metadata(
function _convert_mapping (line 157) | def _convert_mapping(
function _get_annotation (line 194) | def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]:
function _remove_annotations (line 209) | def _remove_annotations(type_: typing.Any) -> typing.Any:
function get_alias_to_field_mapping (line 223) | def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]:
function get_field_to_alias_mapping (line 228) | def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]:
function _get_alias_to_field_name (line 233) | def _get_alias_to_field_name(
function _get_field_to_alias_name (line 244) | def _get_field_to_alias_name(
function _get_alias_from_type (line 255) | def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]:
function _alias_key (line 268) | def _alias_key(
FILE: src/cohere/core/unchecked_base_model.py
class UnionMetadata (line 28) | class UnionMetadata:
method __init__ (line 31) | def __init__(self, *, discriminant: str) -> None:
function _maybe_resolve_forward_ref (line 38) | def _maybe_resolve_forward_ref(
class UncheckedBaseModel (line 60) | class UncheckedBaseModel(UniversalBaseModel):
class Config (line 65) | class Config:
method model_construct (line 69) | def model_construct(
method construct (line 80) | def construct(
function _validate_collection_items_compatible (line 151) | def _validate_collection_items_compatible(collection: typing.Any, target...
function _get_literal_field_value (line 177) | def _get_literal_field_value(
function _literal_fields_match_strict (line 192) | def _literal_fields_match_strict(inner_type: typing.Type[typing.Any], ob...
function _convert_undiscriminated_union_type (line 216) | def _convert_undiscriminated_union_type(
function _convert_union_type (line 309) | def _convert_union_type(
function construct_type (line 336) | def construct_type(
function _get_is_populate_by_name (line 452) | def _get_is_populate_by_name(model: typing.Type["Model"]) -> bool:
function _get_model_fields (line 465) | def _get_model_fields(
function _get_field_default (line 474) | def _get_field_default(field: PydanticField) -> typing.Any:
FILE: src/cohere/datasets/__init__.py
function __getattr__ (line 18) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 34) | def __dir__():
FILE: src/cohere/datasets/client.py
class DatasetsClient (line 21) | class DatasetsClient:
method __init__ (line 22) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 26) | def with_raw_response(self) -> RawDatasetsClient:
method list (line 36) | def list(
method create (line 112) | def create(
method get_usage (line 202) | def get_usage(self, *, request_options: typing.Optional[RequestOptions...
method get (line 229) | def get(self, id: str, *, request_options: typing.Optional[RequestOpti...
method delete (line 260) | def delete(
class AsyncDatasetsClient (line 294) | class AsyncDatasetsClient:
method __init__ (line 295) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 299) | def with_raw_response(self) -> AsyncRawDatasetsClient:
method list (line 309) | async def list(
method create (line 392) | async def create(
method get_usage (line 490) | async def get_usage(self, *, request_options: typing.Optional[RequestO...
method get (line 525) | async def get(self, id: str, *, request_options: typing.Optional[Reque...
method delete (line 564) | async def delete(
FILE: src/cohere/datasets/raw_client.py
class RawDatasetsClient (line 40) | class RawDatasetsClient:
method __init__ (line 41) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method list (line 44) | def list(
method create (line 250) | def create(
method get_usage (line 481) | def get_usage(
method get (line 653) | def get(
method delete (line 827) | def delete(
class AsyncRawDatasetsClient (line 1002) | class AsyncRawDatasetsClient:
method __init__ (line 1003) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method list (line 1006) | async def list(
method create (line 1212) | async def create(
method get_usage (line 1443) | async def get_usage(
method get (line 1615) | async def get(
method delete (line 1789) | async def delete(
FILE: src/cohere/datasets/types/__init__.py
function __getattr__ (line 21) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 37) | def __dir__():
FILE: src/cohere/datasets/types/datasets_create_response.py
class DatasetsCreateResponse (line 10) | class DatasetsCreateResponse(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/datasets/types/datasets_get_response.py
class DatasetsGetResponse (line 11) | class DatasetsGetResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/datasets/types/datasets_get_usage_response.py
class DatasetsGetUsageResponse (line 10) | class DatasetsGetUsageResponse(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/datasets/types/datasets_list_response.py
class DatasetsListResponse (line 11) | class DatasetsListResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/embed_jobs/__init__.py
function __getattr__ (line 13) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 29) | def __dir__():
FILE: src/cohere/embed_jobs/client.py
class EmbedJobsClient (line 19) | class EmbedJobsClient:
method __init__ (line 20) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 24) | def with_raw_response(self) -> RawEmbedJobsClient:
method list (line 34) | def list(self, *, request_options: typing.Optional[RequestOptions] = N...
method create (line 61) | def create(
method get (line 142) | def get(self, id: str, *, request_options: typing.Optional[RequestOpti...
method cancel (line 174) | def cancel(self, id: str, *, request_options: typing.Optional[RequestO...
class AsyncEmbedJobsClient (line 206) | class AsyncEmbedJobsClient:
method __init__ (line 207) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 211) | def with_raw_response(self) -> AsyncRawEmbedJobsClient:
method list (line 221) | async def list(self, *, request_options: typing.Optional[RequestOption...
method create (line 256) | async def create(
method get (line 345) | async def get(self, id: str, *, request_options: typing.Optional[Reque...
method cancel (line 385) | async def cancel(self, id: str, *, request_options: typing.Optional[Re...
FILE: src/cohere/embed_jobs/raw_client.py
class RawEmbedJobsClient (line 37) | class RawEmbedJobsClient:
method __init__ (line 38) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method list (line 41) | def list(self, *, request_options: typing.Optional[RequestOptions] = N...
method create (line 211) | def create(
method get (line 435) | def get(self, id: str, *, request_options: typing.Optional[RequestOpti...
method cancel (line 608) | def cancel(self, id: str, *, request_options: typing.Optional[RequestO...
class AsyncRawEmbedJobsClient (line 774) | class AsyncRawEmbedJobsClient:
method __init__ (line 775) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method list (line 778) | async def list(
method create (line 950) | async def create(
method get (line 1174) | async def get(
method cancel (line 1349) | async def cancel(
FILE: src/cohere/embed_jobs/types/__init__.py
function __getattr__ (line 13) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 29) | def __dir__():
FILE: src/cohere/environment.py
class ClientEnvironment (line 6) | class ClientEnvironment(enum.Enum):
FILE: src/cohere/errors/__init__.py
function __getattr__ (line 37) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 53) | def __dir__():
FILE: src/cohere/errors/bad_request_error.py
class BadRequestError (line 8) | class BadRequestError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/client_closed_request_error.py
class ClientClosedRequestError (line 8) | class ClientClosedRequestError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/forbidden_error.py
class ForbiddenError (line 8) | class ForbiddenError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/gateway_timeout_error.py
class GatewayTimeoutError (line 8) | class GatewayTimeoutError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/internal_server_error.py
class InternalServerError (line 8) | class InternalServerError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/invalid_token_error.py
class InvalidTokenError (line 8) | class InvalidTokenError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/not_found_error.py
class NotFoundError (line 8) | class NotFoundError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/not_implemented_error.py
class NotImplementedError (line 8) | class NotImplementedError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/service_unavailable_error.py
class ServiceUnavailableError (line 8) | class ServiceUnavailableError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/too_many_requests_error.py
class TooManyRequestsError (line 8) | class TooManyRequestsError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/unauthorized_error.py
class UnauthorizedError (line 8) | class UnauthorizedError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/errors/unprocessable_entity_error.py
class UnprocessableEntityError (line 8) | class UnprocessableEntityError(ApiError):
method __init__ (line 9) | def __init__(self, body: typing.Any, headers: typing.Optional[typing.D...
FILE: src/cohere/finetuning/__init__.py
function __getattr__ (line 53) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 69) | def __dir__():
FILE: src/cohere/finetuning/client.py
class FinetuningClient (line 22) | class FinetuningClient:
method __init__ (line 23) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 27) | def with_raw_response(self) -> RawFinetuningClient:
method list_finetuned_models (line 37) | def list_finetuned_models(
method create_finetuned_model (line 92) | def create_finetuned_model(
method get_finetuned_model (line 134) | def get_finetuned_model(
method delete_finetuned_model (line 168) | def delete_finetuned_model(
method update_finetuned_model (line 203) | def update_finetuned_model(
method list_events (line 253) | def list_events(
method list_training_step_metrics (line 319) | def list_training_step_metrics(
class AsyncFinetuningClient (line 372) | class AsyncFinetuningClient:
method __init__ (line 373) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 377) | def with_raw_response(self) -> AsyncRawFinetuningClient:
method list_finetuned_models (line 387) | async def list_finetuned_models(
method create_finetuned_model (line 450) | async def create_finetuned_model(
method get_finetuned_model (line 500) | async def get_finetuned_model(
method delete_finetuned_model (line 542) | async def delete_finetuned_model(
method update_finetuned_model (line 585) | async def update_finetuned_model(
method list_events (line 643) | async def list_events(
method list_training_step_metrics (line 717) | async def list_training_step_metrics(
FILE: src/cohere/finetuning/finetuning/__init__.py
function __getattr__ (line 51) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 67) | def __dir__():
FILE: src/cohere/finetuning/finetuning/types/__init__.py
function __getattr__ (line 49) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 65) | def __dir__():
FILE: src/cohere/finetuning/finetuning/types/base_model.py
class BaseModel (line 12) | class BaseModel(UncheckedBaseModel):
class Config (line 41) | class Config:
FILE: src/cohere/finetuning/finetuning/types/create_finetuned_model_response.py
class CreateFinetunedModelResponse (line 11) | class CreateFinetunedModelResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/finetuning/finetuning/types/event.py
class Event (line 12) | class Event(UncheckedBaseModel):
class Config (line 36) | class Config:
FILE: src/cohere/finetuning/finetuning/types/finetuned_model.py
class FinetunedModel (line 13) | class FinetunedModel(UncheckedBaseModel):
class Config (line 72) | class Config:
FILE: src/cohere/finetuning/finetuning/types/get_finetuned_model_response.py
class GetFinetunedModelResponse (line 11) | class GetFinetunedModelResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/finetuning/finetuning/types/hyperparameters.py
class Hyperparameters (line 11) | class Hyperparameters(UncheckedBaseModel):
class Config (line 64) | class Config:
FILE: src/cohere/finetuning/finetuning/types/list_events_response.py
class ListEventsResponse (line 11) | class ListEventsResponse(UncheckedBaseModel):
class Config (line 36) | class Config:
FILE: src/cohere/finetuning/finetuning/types/list_finetuned_models_response.py
class ListFinetunedModelsResponse (line 11) | class ListFinetunedModelsResponse(UncheckedBaseModel):
class Config (line 36) | class Config:
FILE: src/cohere/finetuning/finetuning/types/list_training_step_metrics_response.py
class ListTrainingStepMetricsResponse (line 11) | class ListTrainingStepMetricsResponse(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/finetuning/finetuning/types/settings.py
class Settings (line 13) | class Settings(UncheckedBaseModel):
class Config (line 47) | class Config:
FILE: src/cohere/finetuning/finetuning/types/training_step_metrics.py
class TrainingStepMetrics (line 11) | class TrainingStepMetrics(UncheckedBaseModel):
class Config (line 35) | class Config:
FILE: src/cohere/finetuning/finetuning/types/update_finetuned_model_response.py
class UpdateFinetunedModelResponse (line 11) | class UpdateFinetunedModelResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/finetuning/finetuning/types/wandb_config.py
class WandbConfig (line 10) | class WandbConfig(UncheckedBaseModel):
class Config (line 34) | class Config:
FILE: src/cohere/finetuning/raw_client.py
class RawFinetuningClient (line 35) | class RawFinetuningClient:
method __init__ (line 36) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method list_finetuned_models (line 39) | def list_finetuned_models(
method create_finetuned_model (line 170) | def create_finetuned_model(
method get_finetuned_model (line 283) | def get_finetuned_model(
method delete_finetuned_model (line 392) | def delete_finetuned_model(
method update_finetuned_model (line 502) | def update_finetuned_model(
method list_events (line 627) | def list_events(
method list_training_step_metrics (line 764) | def list_training_step_metrics(
class AsyncRawFinetuningClient (line 892) | class AsyncRawFinetuningClient:
method __init__ (line 893) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method list_finetuned_models (line 896) | async def list_finetuned_models(
method create_finetuned_model (line 1027) | async def create_finetuned_model(
method get_finetuned_model (line 1140) | async def get_finetuned_model(
method delete_finetuned_model (line 1249) | async def delete_finetuned_model(
method update_finetuned_model (line 1359) | async def update_finetuned_model(
method list_events (line 1484) | async def list_events(
method list_training_step_metrics (line 1621) | async def list_training_step_metrics(
FILE: src/cohere/manually_maintained/cache.py
class CacheMixin (line 5) | class CacheMixin:
method _cache_get (line 9) | def _cache_get(self, key: str) -> typing.Any:
method _cache_set (line 19) | def _cache_set(self, key: str, value: typing.Any, ttl: int = 60 * 60) ...
FILE: src/cohere/manually_maintained/cohere_aws/chat.py
class ToolParameterDefinitionsValue (line 10) | class ToolParameterDefinitionsValue(CohereObject, dict):
method __init__ (line 11) | def __init__(
class Tool (line 26) | class Tool(CohereObject, dict):
method __init__ (line 27) | def __init__(
class ToolCall (line 42) | class ToolCall(CohereObject, dict):
method __init__ (line 43) | def __init__(
method from_dict (line 57) | def from_dict(cls, tool_call_res: Dict[str, Any]) -> "ToolCall":
method from_list (line 65) | def from_list(cls, tool_calls_res: Optional[List[Dict[str, Any]]]) -> ...
class Chat (line 73) | class Chat(CohereObject):
method __init__ (line 74) | def __init__(
method from_dict (line 105) | def from_dict(cls, response: Dict[str, Any]) -> "Chat":
class StreamEvent (line 126) | class StreamEvent(str, Enum):
class StreamResponse (line 135) | class StreamResponse(CohereObject):
method __init__ (line 136) | def __init__(
class StreamStart (line 149) | class StreamStart(StreamResponse):
method __init__ (line 150) | def __init__(
class StreamTextGeneration (line 161) | class StreamTextGeneration(StreamResponse):
method __init__ (line 162) | def __init__(
class StreamCitationGeneration (line 171) | class StreamCitationGeneration(StreamResponse):
method __init__ (line 172) | def __init__(
class StreamQueryGeneration (line 181) | class StreamQueryGeneration(StreamResponse):
method __init__ (line 182) | def __init__(
class StreamSearchResults (line 191) | class StreamSearchResults(StreamResponse):
method __init__ (line 192) | def __init__(
class StreamEnd (line 203) | class StreamEnd(StreamResponse):
method __init__ (line 204) | def __init__(
class ChatToolCallsGenerationEvent (line 213) | class ChatToolCallsGenerationEvent(StreamResponse):
method __init__ (line 214) | def __init__(
class StreamingChat (line 222) | class StreamingChat(CohereObject):
method __init__ (line 223) | def __init__(self, stream_response, mode):
method _make_response_item (line 248) | def _make_response_item(self, index, streaming_item) -> Any:
method __iter__ (line 312) | def __iter__(self) -> Generator[StreamResponse, None, None]:
FILE: src/cohere/manually_maintained/cohere_aws/classification.py
class Classification (line 8) | class Classification(CohereObject):
method __init__ (line 9) | def __init__(self, classification: Union[Prediction, ClassificationDic...
method is_multilabel (line 15) | def is_multilabel(self) -> bool:
method prediction (line 23) | def prediction(self) -> Prediction:
method confidence (line 29) | def confidence(self) -> List[float]:
method text (line 37) | def text(self) -> str:
class Classifications (line 45) | class Classifications(CohereObject):
method __init__ (line 46) | def __init__(self, classifications: List[Classification]) -> None:
method __iter__ (line 53) | def __iter__(self) -> Iterator:
method __len__ (line 56) | def __len__(self) -> int:
method is_multilabel (line 59) | def is_multilabel(self) -> bool:
FILE: src/cohere/manually_maintained/cohere_aws/client.py
class Client (line 19) | class Client:
method __init__ (line 20) | def __init__(
method _require_sagemaker (line 43) | def _require_sagemaker(self) -> None:
method _does_endpoint_exist (line 47) | def _does_endpoint_exist(self, endpoint_name: str) -> bool:
method connect_to_endpoint (line 54) | def connect_to_endpoint(self, endpoint_name: str) -> None:
method _s3_models_dir_to_tarfile (line 68) | def _s3_models_dir_to_tarfile(self, s3_models_dir: str) -> str:
method create_endpoint (line 127) | def create_endpoint(
method chat (line 262) | def chat(
method _sagemaker_chat (line 401) | def _sagemaker_chat(self, json_params: Dict[str, Any], variant: str) :
method _bedrock_chat (line 426) | def _bedrock_chat(self, json_params: Dict[str, Any], model_id: str) :
method generate (line 458) | def generate(
method _sagemaker_generations (line 507) | def _sagemaker_generations(self, json_params: Dict[str, Any], variant:...
method _bedrock_generations (line 533) | def _bedrock_generations(self, json_params: Dict[str, Any], model_id: ...
method embed (line 558) | def embed(
method _sagemaker_embed (line 586) | def _sagemaker_embed(self, json_params: Dict[str, Any], variant: str):
method _bedrock_embed (line 615) | def _bedrock_embed(self, json_params: Dict[str, Any], model_id: str):
method rerank (line 640) | def rerank(self,
method classify (line 703) | def classify(self, input: List[str], name: str) -> Classifications:
method create_finetune (line 730) | def create_finetune(
method export_finetune (line 808) | def export_finetune(
method wait_for_finetune_job (line 875) | def wait_for_finetune_job(self, job_id: str, timeout: int = 2*60*60) -...
method provision_throughput (line 895) | def provision_throughput(
method _bedrock_create_finetune (line 923) | def _bedrock_create_finetune(
method summarize (line 962) | def summarize(
method delete_endpoint (line 1015) | def delete_endpoint(self) -> None:
method close (line 1029) | def close(self) -> None:
FILE: src/cohere/manually_maintained/cohere_aws/embeddings.py
class Embedding (line 5) | class Embedding(CohereObject):
method __init__ (line 7) | def __init__(self, embedding: List[float]) -> None:
method __iter__ (line 10) | def __iter__(self) -> Iterator:
method __len__ (line 13) | def __len__(self) -> int:
class Embeddings (line 17) | class Embeddings(CohereObject):
method __init__ (line 19) | def __init__(self, embeddings: List[Embedding]) -> None:
method __iter__ (line 22) | def __iter__(self) -> Iterator:
method __len__ (line 25) | def __len__(self) -> int:
FILE: src/cohere/manually_maintained/cohere_aws/error.py
class CohereError (line 1) | class CohereError(Exception):
method __init__ (line 2) | def __init__(
method __str__ (line 14) | def __str__(self) -> str:
method __repr__ (line 18) | def __repr__(self) -> str:
FILE: src/cohere/manually_maintained/cohere_aws/generation.py
class TokenLikelihood (line 7) | class TokenLikelihood(CohereObject):
method __init__ (line 8) | def __init__(self, token: str, likelihood: float) -> None:
class Generation (line 13) | class Generation(CohereObject):
method __init__ (line 14) | def __init__(self,
class Generations (line 21) | class Generations(CohereObject):
method __init__ (line 22) | def __init__(self,
method from_dict (line 28) | def from_dict(cls, response: Dict[str, Any]) -> List[Generation]:
method __iter__ (line 45) | def __iter__(self) -> iter:
method __next__ (line 48) | def __next__(self) -> next:
class StreamingGenerations (line 58) | class StreamingGenerations(CohereObject):
method __init__ (line 59) | def __init__(self, stream, mode):
method _make_response_item (line 75) | def _make_response_item(self, streaming_item) -> Optional[StreamingText]:
method __iter__ (line 96) | def __iter__(self) -> Generator[StreamingText, None, None]:
FILE: src/cohere/manually_maintained/cohere_aws/mode.py
class Mode (line 4) | class Mode(Enum):
FILE: src/cohere/manually_maintained/cohere_aws/rerank.py
class RerankResult (line 12) | class RerankResult(CohereObject):
method __init__ (line 14) | def __init__(self,
method __repr__ (line 24) | def __repr__(self) -> str:
class Reranking (line 36) | class Reranking(CohereObject):
method __init__ (line 38) | def __init__(self,
method _results (line 45) | def _results(self, response: Dict[str, Any]) -> List[RerankResult]:
method __str__ (line 56) | def __str__(self) -> str:
method __repr__ (line 59) | def __repr__(self) -> str:
method __iter__ (line 62) | def __iter__(self) -> Iterator:
method __getitem__ (line 65) | def __getitem__(self, index) -> RerankResult:
FILE: src/cohere/manually_maintained/cohere_aws/response.py
class CohereObject (line 1) | class CohereObject():
method __repr__ (line 2) | def __repr__(self) -> str:
FILE: src/cohere/manually_maintained/cohere_aws/summary.py
class Summary (line 6) | class Summary(CohereObject):
method __init__ (line 7) | def __init__(self,
method __str__ (line 15) | def __str__(self) -> str:
FILE: src/cohere/manually_maintained/lazy_aws_deps.py
function lazy_sagemaker (line 4) | def lazy_sagemaker():
function lazy_boto3 (line 11) | def lazy_boto3():
function lazy_botocore (line 18) | def lazy_botocore():
FILE: src/cohere/manually_maintained/lazy_oci_deps.py
function lazy_oci (line 16) | def lazy_oci() -> Any:
FILE: src/cohere/manually_maintained/streaming_embed.py
class StreamedEmbedding (line 10) | class StreamedEmbedding:
function extract_embeddings_from_response (line 18) | def extract_embeddings_from_response(
FILE: src/cohere/manually_maintained/tokenizers.py
function tokenizer_cache_key (line 15) | def tokenizer_cache_key(model: str) -> str:
function get_hf_tokenizer (line 19) | def get_hf_tokenizer(co: "Client", model: str) -> Tokenizer:
function local_tokenize (line 43) | def local_tokenize(co: "Client", model: str, text: str) -> typing.List[i...
function local_detokenize (line 49) | def local_detokenize(co: "Client", model: str, tokens: typing.Sequence[i...
function async_get_hf_tokenizer (line 55) | async def async_get_hf_tokenizer(co: "AsyncClient", model: str) -> Token...
function async_local_tokenize (line 80) | async def async_local_tokenize(co: "AsyncClient", model: str, text: str)...
function async_local_detokenize (line 86) | async def async_local_detokenize(co: "AsyncClient", model: str, tokens: ...
function _get_tokenizer_config_size (line 92) | def _get_tokenizer_config_size(tokenizer_url: str) -> float:
FILE: src/cohere/models/client.py
class ModelsClient (line 13) | class ModelsClient:
method __init__ (line 14) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 18) | def with_raw_response(self) -> RawModelsClient:
method get (line 28) | def get(self, model: str, *, request_options: typing.Optional[RequestO...
method list (line 59) | def list(
class AsyncModelsClient (line 119) | class AsyncModelsClient:
method __init__ (line 120) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 124) | def with_raw_response(self) -> AsyncRawModelsClient:
method get (line 134) | async def get(self, model: str, *, request_options: typing.Optional[Re...
method list (line 173) | async def list(
FILE: src/cohere/models/raw_client.py
class RawModelsClient (line 31) | class RawModelsClient:
method __init__ (line 32) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method get (line 35) | def get(
method list (line 209) | def list(
class AsyncRawModelsClient (line 407) | class AsyncRawModelsClient:
method __init__ (line 408) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method get (line 411) | async def get(
method list (line 585) | async def list(
FILE: src/cohere/oci_client.py
class OciClient (line 19) | class OciClient(Client):
method __init__ (line 55) | def __init__(
class OciClientV2 (line 105) | class OciClientV2(ClientV2):
method __init__ (line 169) | def __init__(
function _load_oci_config (line 230) | def _load_oci_config(
function _remove_inherited_session_auth (line 291) | def _remove_inherited_session_auth(
function _usage_from_oci (line 329) | def _usage_from_oci(usage_data: typing.Optional[typing.Dict[str, typing....
function get_event_hooks (line 346) | def get_event_hooks(
function map_request_to_oci (line 377) | def map_request_to_oci(
function map_response_from_oci (line 511) | def map_response_from_oci() -> EventHook:
function get_oci_url (line 559) | def get_oci_url(
function normalize_model_for_oci (line 590) | def normalize_model_for_oci(model: str) -> str:
function transform_request_to_oci (line 625) | def transform_request_to_oci(
function transform_oci_response_to_cohere (line 867) | def transform_oci_response_to_cohere(
function transform_oci_stream_wrapper (line 981) | def transform_oci_stream_wrapper(
function transform_stream_event (line 1164) | def transform_stream_event(
FILE: src/cohere/overrides.py
function get_fields (line 10) | def get_fields(obj) -> typing.List[str]:
function get_aliases_or_field (line 14) | def get_aliases_or_field(obj) -> typing.List[str]:
function get_aliases_and_fields (line 22) | def get_aliases_and_fields(obj):
function allow_access_to_aliases (line 27) | def allow_access_to_aliases(self: typing.Type["Model"], name):
function make_tool_call_v2_id_optional (line 37) | def make_tool_call_v2_id_optional(cls):
function run_overrides (line 62) | def run_overrides():
FILE: src/cohere/raw_base_client.py
class RawBaseCohere (line 69) | class RawBaseCohere:
method __init__ (line 70) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method chat_stream (line 74) | def chat_stream(
method chat (line 548) | def chat(
method generate_stream (line 1002) | def generate_stream(
method generate (line 1314) | def generate(
method embed (line 1606) | def embed(
method rerank (line 1833) | def rerank(
method classify (line 2056) | def classify(
method summarize (line 2269) | def summarize(
method tokenize (line 2487) | def tokenize(
method detokenize (line 2673) | def detokenize(
method check_api_key (line 2859) | def check_api_key(
class AsyncRawBaseCohere (line 3032) | class AsyncRawBaseCohere:
method __init__ (line 3033) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method chat_stream (line 3037) | async def chat_stream(
method chat (line 3511) | async def chat(
method generate_stream (line 3965) | async def generate_stream(
method generate (line 4277) | async def generate(
method embed (line 4569) | async def embed(
method rerank (line 4796) | async def rerank(
method classify (line 5019) | async def classify(
method summarize (line 5232) | async def summarize(
method tokenize (line 5450) | async def tokenize(
method detokenize (line 5636) | async def detokenize(
method check_api_key (line 5822) | async def check_api_key(
FILE: src/cohere/sagemaker_client.py
class SagemakerClient (line 8) | class SagemakerClient(AwsClient):
method __init__ (line 11) | def __init__(
class SagemakerClientV2 (line 35) | class SagemakerClientV2(AwsClientV2):
method __init__ (line 38) | def __init__(
FILE: src/cohere/types/__init__.py
function __getattr__ (line 478) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 494) | def __dir__():
FILE: src/cohere/types/api_meta.py
class ApiMeta (line 13) | class ApiMeta(UncheckedBaseModel):
class Config (line 28) | class Config:
FILE: src/cohere/types/api_meta_api_version.py
class ApiMetaApiVersion (line 10) | class ApiMetaApiVersion(UncheckedBaseModel):
class Config (line 19) | class Config:
FILE: src/cohere/types/api_meta_billed_units.py
class ApiMetaBilledUnits (line 10) | class ApiMetaBilledUnits(UncheckedBaseModel):
class Config (line 45) | class Config:
FILE: src/cohere/types/api_meta_tokens.py
class ApiMetaTokens (line 10) | class ApiMetaTokens(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/types/assistant_message.py
class AssistantMessage (line 13) | class AssistantMessage(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/types/assistant_message_response.py
class AssistantMessageResponse (line 13) | class AssistantMessageResponse(UncheckedBaseModel):
class Config (line 32) | class Config:
FILE: src/cohere/types/assistant_message_response_content_item.py
class TextAssistantMessageResponseContentItem (line 13) | class TextAssistantMessageResponseContentItem(UncheckedBaseModel):
class Config (line 21) | class Config:
class ThinkingAssistantMessageResponseContentItem (line 26) | class ThinkingAssistantMessageResponseContentItem(UncheckedBaseModel):
class Config (line 34) | class Config:
FILE: src/cohere/types/assistant_message_v2content_one_item.py
class TextAssistantMessageV2ContentOneItem (line 13) | class TextAssistantMessageV2ContentOneItem(UncheckedBaseModel):
class Config (line 21) | class Config:
class ThinkingAssistantMessageV2ContentOneItem (line 26) | class ThinkingAssistantMessageV2ContentOneItem(UncheckedBaseModel):
class Config (line 34) | class Config:
FILE: src/cohere/types/chat_citation.py
class ChatCitation (line 11) | class ChatCitation(UncheckedBaseModel):
class Config (line 45) | class Config:
FILE: src/cohere/types/chat_citation_generation_event.py
class ChatCitationGenerationEvent (line 11) | class ChatCitationGenerationEvent(ChatStreamEvent):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_connector.py
class ChatConnector (line 10) | class ChatConnector(UncheckedBaseModel):
class Config (line 43) | class Config:
FILE: src/cohere/types/chat_content_delta_event.py
class ChatContentDeltaEvent (line 12) | class ChatContentDeltaEvent(ChatStreamEventType):
class Config (line 25) | class Config:
FILE: src/cohere/types/chat_content_delta_event_delta.py
class ChatContentDeltaEventDelta (line 11) | class ChatContentDeltaEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_content_delta_event_delta_message.py
class ChatContentDeltaEventDeltaMessage (line 11) | class ChatContentDeltaEventDeltaMessage(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_content_delta_event_delta_message_content.py
class ChatContentDeltaEventDeltaMessageContent (line 10) | class ChatContentDeltaEventDeltaMessageContent(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_content_end_event.py
class ChatContentEndEvent (line 10) | class ChatContentEndEvent(ChatStreamEventType):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_content_start_event.py
class ChatContentStartEvent (line 11) | class ChatContentStartEvent(ChatStreamEventType):
class Config (line 23) | class Config:
FILE: src/cohere/types/chat_content_start_event_delta.py
class ChatContentStartEventDelta (line 11) | class ChatContentStartEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_content_start_event_delta_message.py
class ChatContentStartEventDeltaMessage (line 11) | class ChatContentStartEventDeltaMessage(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_content_start_event_delta_message_content.py
class ChatContentStartEventDeltaMessageContent (line 11) | class ChatContentStartEventDeltaMessageContent(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/chat_data_metrics.py
class ChatDataMetrics (line 10) | class ChatDataMetrics(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/chat_debug_event.py
class ChatDebugEvent (line 10) | class ChatDebugEvent(ChatStreamEvent):
class Config (line 17) | class Config:
FILE: src/cohere/types/chat_document_source.py
class ChatDocumentSource (line 10) | class ChatDocumentSource(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/types/chat_message.py
class ChatMessage (line 11) | class ChatMessage(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/types/chat_message_end_event.py
class ChatMessageEndEvent (line 11) | class ChatMessageEndEvent(ChatStreamEventType):
class Config (line 23) | class Config:
FILE: src/cohere/types/chat_message_end_event_delta.py
class ChatMessageEndEventDelta (line 12) | class ChatMessageEndEventDelta(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/types/chat_message_start_event.py
class ChatMessageStartEvent (line 11) | class ChatMessageStartEvent(ChatStreamEventType):
class Config (line 27) | class Config:
FILE: src/cohere/types/chat_message_start_event_delta.py
class ChatMessageStartEventDelta (line 11) | class ChatMessageStartEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_message_start_event_delta_message.py
class ChatMessageStartEventDeltaMessage (line 10) | class ChatMessageStartEventDeltaMessage(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/chat_message_v2.py
class UserChatMessageV2 (line 19) | class UserChatMessageV2(UncheckedBaseModel):
class Config (line 31) | class Config:
class AssistantChatMessageV2 (line 36) | class AssistantChatMessageV2(UncheckedBaseModel):
class Config (line 51) | class Config:
class SystemChatMessageV2 (line 56) | class SystemChatMessageV2(UncheckedBaseModel):
class Config (line 68) | class Config:
class ToolChatMessageV2 (line 73) | class ToolChatMessageV2(UncheckedBaseModel):
class Config (line 86) | class Config:
FILE: src/cohere/types/chat_search_queries_generation_event.py
class ChatSearchQueriesGenerationEvent (line 11) | class ChatSearchQueriesGenerationEvent(ChatStreamEvent):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_search_query.py
class ChatSearchQuery (line 10) | class ChatSearchQuery(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/types/chat_search_result.py
class ChatSearchResult (line 12) | class ChatSearchResult(UncheckedBaseModel):
class Config (line 38) | class Config:
FILE: src/cohere/types/chat_search_result_connector.py
class ChatSearchResultConnector (line 10) | class ChatSearchResultConnector(UncheckedBaseModel):
class Config (line 24) | class Config:
FILE: src/cohere/types/chat_search_results_event.py
class ChatSearchResultsEvent (line 12) | class ChatSearchResultsEvent(ChatStreamEvent):
class Config (line 27) | class Config:
FILE: src/cohere/types/chat_stream_end_event.py
class ChatStreamEndEvent (line 12) | class ChatStreamEndEvent(ChatStreamEvent):
class Config (line 31) | class Config:
FILE: src/cohere/types/chat_stream_event.py
class ChatStreamEvent (line 10) | class ChatStreamEvent(UncheckedBaseModel):
class Config (line 15) | class Config:
FILE: src/cohere/types/chat_stream_event_type.py
class ChatStreamEventType (line 10) | class ChatStreamEventType(UncheckedBaseModel):
class Config (line 19) | class Config:
FILE: src/cohere/types/chat_stream_start_event.py
class ChatStreamStartEvent (line 10) | class ChatStreamStartEvent(ChatStreamEvent):
class Config (line 20) | class Config:
FILE: src/cohere/types/chat_text_content.py
class ChatTextContent (line 10) | class ChatTextContent(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_text_generation_event.py
class ChatTextGenerationEvent (line 10) | class ChatTextGenerationEvent(ChatStreamEvent):
class Config (line 20) | class Config:
FILE: src/cohere/types/chat_text_response_format.py
class ChatTextResponseFormat (line 10) | class ChatTextResponseFormat(UncheckedBaseModel):
class Config (line 15) | class Config:
FILE: src/cohere/types/chat_text_response_format_v2.py
class ChatTextResponseFormatV2 (line 10) | class ChatTextResponseFormatV2(UncheckedBaseModel):
class Config (line 15) | class Config:
FILE: src/cohere/types/chat_thinking_content.py
class ChatThinkingContent (line 10) | class ChatThinkingContent(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_tool_call_delta_event.py
class ChatToolCallDeltaEvent (line 11) | class ChatToolCallDeltaEvent(ChatStreamEventType):
class Config (line 23) | class Config:
FILE: src/cohere/types/chat_tool_call_delta_event_delta.py
class ChatToolCallDeltaEventDelta (line 11) | class ChatToolCallDeltaEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_tool_call_delta_event_delta_message.py
class ChatToolCallDeltaEventDeltaMessage (line 11) | class ChatToolCallDeltaEventDeltaMessage(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_tool_call_delta_event_delta_message_tool_calls.py
class ChatToolCallDeltaEventDeltaMessageToolCalls (line 13) | class ChatToolCallDeltaEventDeltaMessageToolCalls(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/chat_tool_call_delta_event_delta_message_tool_calls_function.py
class ChatToolCallDeltaEventDeltaMessageToolCallsFunction (line 10) | class ChatToolCallDeltaEventDeltaMessageToolCallsFunction(UncheckedBaseM...
class Config (line 17) | class Config:
FILE: src/cohere/types/chat_tool_call_end_event.py
class ChatToolCallEndEvent (line 10) | class ChatToolCallEndEvent(ChatStreamEventType):
class Config (line 21) | class Config:
FILE: src/cohere/types/chat_tool_call_start_event.py
class ChatToolCallStartEvent (line 11) | class ChatToolCallStartEvent(ChatStreamEventType):
class Config (line 23) | class Config:
FILE: src/cohere/types/chat_tool_call_start_event_delta.py
class ChatToolCallStartEventDelta (line 11) | class ChatToolCallStartEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_tool_call_start_event_delta_message.py
class ChatToolCallStartEventDeltaMessage (line 11) | class ChatToolCallStartEventDeltaMessage(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_tool_calls_chunk_event.py
class ChatToolCallsChunkEvent (line 11) | class ChatToolCallsChunkEvent(ChatStreamEvent):
class Config (line 19) | class Config:
FILE: src/cohere/types/chat_tool_calls_generation_event.py
class ChatToolCallsGenerationEvent (line 11) | class ChatToolCallsGenerationEvent(ChatStreamEvent):
class Config (line 23) | class Config:
FILE: src/cohere/types/chat_tool_message.py
class ChatToolMessage (line 11) | class ChatToolMessage(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/chat_tool_plan_delta_event.py
class ChatToolPlanDeltaEvent (line 11) | class ChatToolPlanDeltaEvent(ChatStreamEventType):
class Config (line 22) | class Config:
FILE: src/cohere/types/chat_tool_plan_delta_event_delta.py
class ChatToolPlanDeltaEventDelta (line 11) | class ChatToolPlanDeltaEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/chat_tool_plan_delta_event_delta_message.py
class ChatToolPlanDeltaEventDeltaMessage (line 10) | class ChatToolPlanDeltaEventDeltaMessage(UncheckedBaseModel):
class Config (line 17) | class Config:
FILE: src/cohere/types/chat_tool_source.py
class ChatToolSource (line 10) | class ChatToolSource(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/check_api_key_response.py
class CheckApiKeyResponse (line 10) | class CheckApiKeyResponse(UncheckedBaseModel):
class Config (line 19) | class Config:
FILE: src/cohere/types/citation.py
class Citation (line 12) | class Citation(UncheckedBaseModel):
class Config (line 44) | class Config:
FILE: src/cohere/types/citation_end_event.py
class CitationEndEvent (line 10) | class CitationEndEvent(ChatStreamEventType):
class Config (line 21) | class Config:
FILE: src/cohere/types/citation_options.py
class CitationOptions (line 11) | class CitationOptions(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/types/citation_start_event.py
class CitationStartEvent (line 11) | class CitationStartEvent(ChatStreamEventType):
class Config (line 23) | class Config:
FILE: src/cohere/types/citation_start_event_delta.py
class CitationStartEventDelta (line 11) | class CitationStartEventDelta(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/citation_start_event_delta_message.py
class CitationStartEventDeltaMessage (line 11) | class CitationStartEventDeltaMessage(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/classify_data_metrics.py
class ClassifyDataMetrics (line 11) | class ClassifyDataMetrics(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/classify_example.py
class ClassifyExample (line 10) | class ClassifyExample(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/classify_response.py
class ClassifyResponse (line 12) | class ClassifyResponse(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/classify_response_classifications_item.py
class ClassifyResponseClassificationsItem (line 14) | class ClassifyResponseClassificationsItem(UncheckedBaseModel):
class Config (line 55) | class Config:
FILE: src/cohere/types/classify_response_classifications_item_labels_value.py
class ClassifyResponseClassificationsItemLabelsValue (line 10) | class ClassifyResponseClassificationsItemLabelsValue(UncheckedBaseModel):
class Config (line 17) | class Config:
FILE: src/cohere/types/connector.py
class Connector (line 13) | class Connector(UncheckedBaseModel):
class Config (line 90) | class Config:
FILE: src/cohere/types/connector_o_auth.py
class ConnectorOAuth (line 10) | class ConnectorOAuth(UncheckedBaseModel):
class Config (line 40) | class Config:
FILE: src/cohere/types/content.py
class TextContent (line 14) | class TextContent(UncheckedBaseModel):
class Config (line 26) | class Config:
class ImageUrlContent (line 31) | class ImageUrlContent(UncheckedBaseModel):
class Config (line 43) | class Config:
FILE: src/cohere/types/create_connector_o_auth.py
class CreateConnectorOAuth (line 10) | class CreateConnectorOAuth(UncheckedBaseModel):
class Config (line 40) | class Config:
FILE: src/cohere/types/create_connector_response.py
class CreateConnectorResponse (line 11) | class CreateConnectorResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/create_connector_service_auth.py
class CreateConnectorServiceAuth (line 11) | class CreateConnectorServiceAuth(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/create_embed_job_response.py
class CreateEmbedJobResponse (line 11) | class CreateEmbedJobResponse(UncheckedBaseModel):
class Config (line 23) | class Config:
FILE: src/cohere/types/dataset.py
class Dataset (line 16) | class Dataset(UncheckedBaseModel):
class Config (line 65) | class Config:
FILE: src/cohere/types/dataset_part.py
class DatasetPart (line 10) | class DatasetPart(UncheckedBaseModel):
class Config (line 55) | class Config:
FILE: src/cohere/types/detokenize_response.py
class DetokenizeResponse (line 11) | class DetokenizeResponse(UncheckedBaseModel):
class Config (line 23) | class Config:
FILE: src/cohere/types/document.py
class Document (line 10) | class Document(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/types/document_content.py
class DocumentContent (line 11) | class DocumentContent(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/embed_by_type_response.py
class EmbedByTypeResponse (line 14) | class EmbedByTypeResponse(UncheckedBaseModel):
class Config (line 38) | class Config:
FILE: src/cohere/types/embed_by_type_response_embeddings.py
class EmbedByTypeResponseEmbeddings (line 12) | class EmbedByTypeResponseEmbeddings(UncheckedBaseModel):
class Config (line 51) | class Config:
FILE: src/cohere/types/embed_content.py
class ImageUrlEmbedContent (line 14) | class ImageUrlEmbedContent(UncheckedBaseModel):
class Config (line 22) | class Config:
class TextEmbedContent (line 27) | class TextEmbedContent(UncheckedBaseModel):
class Config (line 35) | class Config:
FILE: src/cohere/types/embed_floats_response.py
class EmbedFloatsResponse (line 12) | class EmbedFloatsResponse(UncheckedBaseModel):
class Config (line 35) | class Config:
FILE: src/cohere/types/embed_image.py
class EmbedImage (line 11) | class EmbedImage(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/embed_image_url.py
class EmbedImageUrl (line 10) | class EmbedImageUrl(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/embed_input.py
class EmbedInput (line 11) | class EmbedInput(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/embed_job.py
class EmbedJob (line 14) | class EmbedJob(UncheckedBaseModel):
class Config (line 61) | class Config:
FILE: src/cohere/types/embed_response.py
class EmbeddingsFloatsEmbedResponse (line 16) | class EmbeddingsFloatsEmbedResponse(UncheckedBaseModel):
class Config (line 28) | class Config:
class EmbeddingsByTypeEmbedResponse (line 33) | class EmbeddingsByTypeEmbedResponse(UncheckedBaseModel):
class Config (line 45) | class Config:
FILE: src/cohere/types/embed_text.py
class EmbedText (line 10) | class EmbedText(UncheckedBaseModel):
class Config (line 21) | class Config:
FILE: src/cohere/types/finetune_dataset_metrics.py
class FinetuneDatasetMetrics (line 10) | class FinetuneDatasetMetrics(UncheckedBaseModel):
class Config (line 45) | class Config:
FILE: src/cohere/types/generate_stream_end.py
class GenerateStreamEnd (line 12) | class GenerateStreamEnd(GenerateStreamEvent):
class Config (line 21) | class Config:
FILE: src/cohere/types/generate_stream_end_response.py
class GenerateStreamEndResponse (line 11) | class GenerateStreamEndResponse(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/generate_stream_error.py
class GenerateStreamError (line 11) | class GenerateStreamError(GenerateStreamEvent):
class Config (line 28) | class Config:
FILE: src/cohere/types/generate_stream_event.py
class GenerateStreamEvent (line 10) | class GenerateStreamEvent(UncheckedBaseModel):
class Config (line 15) | class Config:
FILE: src/cohere/types/generate_stream_text.py
class GenerateStreamText (line 10) | class GenerateStreamText(GenerateStreamEvent):
class Config (line 27) | class Config:
FILE: src/cohere/types/generate_streamed_response.py
class TextGenerationGenerateStreamedResponse (line 15) | class TextGenerationGenerateStreamedResponse(UncheckedBaseModel):
class Config (line 29) | class Config:
class StreamEndGenerateStreamedResponse (line 34) | class StreamEndGenerateStreamedResponse(UncheckedBaseModel):
class Config (line 48) | class Config:
class StreamErrorGenerateStreamedResponse (line 53) | class StreamErrorGenerateStreamedResponse(UncheckedBaseModel):
class Config (line 68) | class Config:
FILE: src/cohere/types/generation.py
class Generation (line 12) | class Generation(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/get_connector_response.py
class GetConnectorResponse (line 11) | class GetConnectorResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/get_model_response.py
class GetModelResponse (line 12) | class GetModelResponse(UncheckedBaseModel):
class Config (line 66) | class Config:
FILE: src/cohere/types/get_model_response_sampling_defaults.py
class GetModelResponseSamplingDefaults (line 10) | class GetModelResponseSamplingDefaults(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/types/image.py
class Image (line 10) | class Image(UncheckedBaseModel):
class Config (line 35) | class Config:
FILE: src/cohere/types/image_content.py
class ImageContent (line 11) | class ImageContent(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/image_url.py
class ImageUrl (line 11) | class ImageUrl(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/types/json_response_format.py
class JsonResponseFormat (line 12) | class JsonResponseFormat(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/types/json_response_format_v2.py
class JsonResponseFormatV2 (line 10) | class JsonResponseFormatV2(UncheckedBaseModel):
class Config (line 33) | class Config:
FILE: src/cohere/types/label_metric.py
class LabelMetric (line 10) | class LabelMetric(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/list_connectors_response.py
class ListConnectorsResponse (line 11) | class ListConnectorsResponse(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/list_embed_job_response.py
class ListEmbedJobResponse (line 11) | class ListEmbedJobResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/list_models_response.py
class ListModelsResponse (line 11) | class ListModelsResponse(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/logprob_item.py
class LogprobItem (line 10) | class LogprobItem(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/message.py
class ChatbotMessage (line 15) | class ChatbotMessage(UncheckedBaseModel):
class Config (line 24) | class Config:
class SystemMessage (line 29) | class SystemMessage(UncheckedBaseModel):
class Config (line 38) | class Config:
class UserMessage (line 43) | class UserMessage(UncheckedBaseModel):
class Config (line 52) | class Config:
class ToolMessage (line 57) | class ToolMessage(UncheckedBaseModel):
class Config (line 65) | class Config:
FILE: src/cohere/types/metrics.py
class Metrics (line 11) | class Metrics(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/non_streamed_chat_response.py
class NonStreamedChatResponse (line 18) | class NonStreamedChatResponse(UncheckedBaseModel):
class Config (line 72) | class Config:
FILE: src/cohere/types/o_auth_authorize_response.py
class OAuthAuthorizeResponse (line 10) | class OAuthAuthorizeResponse(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/parse_info.py
class ParseInfo (line 10) | class ParseInfo(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/rerank_response.py
class RerankResponse (line 12) | class RerankResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/types/rerank_response_results_item.py
class RerankResponseResultsItem (line 11) | class RerankResponseResultsItem(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/types/rerank_response_results_item_document.py
class RerankResponseResultsItemDocument (line 10) | class RerankResponseResultsItemDocument(UncheckedBaseModel):
class Config (line 24) | class Config:
FILE: src/cohere/types/reranker_data_metrics.py
class RerankerDataMetrics (line 10) | class RerankerDataMetrics(UncheckedBaseModel):
class Config (line 45) | class Config:
FILE: src/cohere/types/response_format.py
class TextResponseFormat (line 14) | class TextResponseFormat(UncheckedBaseModel):
class Config (line 32) | class Config:
class JsonObjectResponseFormat (line 37) | class JsonObjectResponseFormat(UncheckedBaseModel):
class Config (line 58) | class Config:
FILE: src/cohere/types/response_format_v2.py
class TextResponseFormatV2 (line 13) | class TextResponseFormatV2(UncheckedBaseModel):
class Config (line 34) | class Config:
class JsonObjectResponseFormatV2 (line 39) | class JsonObjectResponseFormatV2(UncheckedBaseModel):
class Config (line 61) | class Config:
FILE: src/cohere/types/single_generation.py
class SingleGeneration (line 11) | class SingleGeneration(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/types/single_generation_in_stream.py
class SingleGenerationInStream (line 11) | class SingleGenerationInStream(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/types/single_generation_token_likelihoods_item.py
class SingleGenerationTokenLikelihoodsItem (line 10) | class SingleGenerationTokenLikelihoodsItem(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/source.py
class ToolSource (line 13) | class ToolSource(UncheckedBaseModel):
class Config (line 26) | class Config:
class DocumentSource (line 31) | class DocumentSource(UncheckedBaseModel):
class Config (line 44) | class Config:
FILE: src/cohere/types/streamed_chat_response.py
class StreamStartStreamedChatResponse (line 21) | class StreamStartStreamedChatResponse(UncheckedBaseModel):
class Config (line 33) | class Config:
class SearchQueriesGenerationStreamedChatResponse (line 38) | class SearchQueriesGenerationStreamedChatResponse(UncheckedBaseModel):
class Config (line 50) | class Config:
class SearchResultsStreamedChatResponse (line 55) | class SearchResultsStreamedChatResponse(UncheckedBaseModel):
class Config (line 68) | class Config:
class TextGenerationStreamedChatResponse (line 73) | class TextGenerationStreamedChatResponse(UncheckedBaseModel):
class Config (line 85) | class Config:
class CitationGenerationStreamedChatResponse (line 90) | class CitationGenerationStreamedChatResponse(UncheckedBaseModel):
class Config (line 102) | class Config:
class ToolCallsGenerationStreamedChatResponse (line 107) | class ToolCallsGenerationStreamedChatResponse(UncheckedBaseModel):
class Config (line 120) | class Config:
class StreamEndStreamedChatResponse (line 125) | class StreamEndStreamedChatResponse(UncheckedBaseModel):
class Config (line 138) | class Config:
class ToolCallsChunkStreamedChatResponse (line 143) | class ToolCallsChunkStreamedChatResponse(UncheckedBaseModel):
class Config (line 156) | class Config:
class DebugStreamedChatResponse (line 161) | class DebugStreamedChatResponse(UncheckedBaseModel):
class Config (line 173) | class Config:
FILE: src/cohere/types/summarize_response.py
class SummarizeResponse (line 11) | class SummarizeResponse(UncheckedBaseModel):
class Config (line 28) | class Config:
FILE: src/cohere/types/system_message_v2.py
class SystemMessageV2 (line 11) | class SystemMessageV2(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/system_message_v2content_one_item.py
class TextSystemMessageV2ContentOneItem (line 12) | class TextSystemMessageV2ContentOneItem(UncheckedBaseModel):
class Config (line 20) | class Config:
FILE: src/cohere/types/thinking.py
class Thinking (line 11) | class Thinking(UncheckedBaseModel):
class Config (line 31) | class Config:
FILE: src/cohere/types/tokenize_response.py
class TokenizeResponse (line 11) | class TokenizeResponse(UncheckedBaseModel):
class Config (line 24) | class Config:
FILE: src/cohere/types/tool.py
class Tool (line 11) | class Tool(UncheckedBaseModel):
class Config (line 42) | class Config:
FILE: src/cohere/types/tool_call.py
class ToolCall (line 10) | class ToolCall(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/types/tool_call_delta.py
class ToolCallDelta (line 10) | class ToolCallDelta(UncheckedBaseModel):
class Config (line 39) | class Config:
FILE: src/cohere/types/tool_call_v2.py
class ToolCallV2 (line 11) | class ToolCallV2(UncheckedBaseModel):
class Config (line 24) | class Config:
FILE: src/cohere/types/tool_call_v2function.py
class ToolCallV2Function (line 10) | class ToolCallV2Function(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/tool_content.py
class TextToolContent (line 14) | class TextToolContent(UncheckedBaseModel):
class Config (line 26) | class Config:
class DocumentToolContent (line 31) | class DocumentToolContent(UncheckedBaseModel):
class Config (line 43) | class Config:
FILE: src/cohere/types/tool_message_v2.py
class ToolMessageV2 (line 11) | class ToolMessageV2(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/tool_parameter_definitions_value.py
class ToolParameterDefinitionsValue (line 10) | class ToolParameterDefinitionsValue(UncheckedBaseModel):
class Config (line 30) | class Config:
FILE: src/cohere/types/tool_result.py
class ToolResult (line 11) | class ToolResult(UncheckedBaseModel):
class Config (line 19) | class Config:
FILE: src/cohere/types/tool_v2.py
class ToolV2 (line 11) | class ToolV2(UncheckedBaseModel):
class Config (line 22) | class Config:
FILE: src/cohere/types/tool_v2function.py
class ToolV2Function (line 10) | class ToolV2Function(UncheckedBaseModel):
class Config (line 34) | class Config:
FILE: src/cohere/types/update_connector_response.py
class UpdateConnectorResponse (line 11) | class UpdateConnectorResponse(UncheckedBaseModel):
class Config (line 18) | class Config:
FILE: src/cohere/types/usage.py
class Usage (line 12) | class Usage(UncheckedBaseModel):
class Config (line 24) | class Config:
FILE: src/cohere/types/usage_billed_units.py
class UsageBilledUnits (line 10) | class UsageBilledUnits(UncheckedBaseModel):
class Config (line 35) | class Config:
FILE: src/cohere/types/usage_tokens.py
class UsageTokens (line 10) | class UsageTokens(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/types/user_message_v2.py
class UserMessageV2 (line 11) | class UserMessageV2(UncheckedBaseModel):
class Config (line 26) | class Config:
FILE: src/cohere/utils.py
function get_terminal_states (line 20) | def get_terminal_states():
function get_success_states (line 24) | def get_success_states():
function get_failed_states (line 28) | def get_failed_states():
function get_id (line 32) | def get_id(
function get_validation_status (line 38) | def get_validation_status(awaitable: typing.Union[EmbedJob, DatasetsGetR...
function get_job (line 42) | def get_job(cohere: typing.Any,
function async_get_job (line 54) | async def async_get_job(cohere: typing.Any, awaitable: typing.Union[Crea...
function get_failure_reason (line 65) | def get_failure_reason(job: typing.Union[EmbedJob, DatasetsGetResponse])...
function wait (line 74) | def wait(
function wait (line 84) | def wait(
function wait (line 93) | def wait(
function async_wait (line 120) | async def async_wait(
function async_wait (line 130) | async def async_wait(
function async_wait (line 139) | async def async_wait(
function sum_fields_if_not_none (line 165) | def sum_fields_if_not_none(obj: typing.Any, field: str) -> Optional[int]:
function merge_meta_field (line 170) | def merge_meta_field(metas: typing.List[ApiMeta]) -> ApiMeta:
function merge_embed_responses (line 190) | def merge_embed_responses(responses: typing.List[EmbedResponse]) -> Embe...
function save_avro (line 250) | def save_avro(dataset: Dataset, filepath: str):
function save_jsonl (line 258) | def save_jsonl(dataset: Dataset, filepath: str):
function save_csv (line 265) | def save_csv(dataset: Dataset, filepath: str):
function dataset_generator (line 274) | def dataset_generator(dataset: Dataset):
class SdkUtils (line 285) | class SdkUtils:
method save_dataset (line 288) | def save_dataset(dataset: Dataset, filepath: str, format: typing.Liter...
class SyncSdkUtils (line 298) | class SyncSdkUtils(SdkUtils):
class AsyncSdkUtils (line 302) | class AsyncSdkUtils(SdkUtils):
FILE: src/cohere/v2/__init__.py
function __getattr__ (line 61) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 77) | def __dir__():
FILE: src/cohere/v2/client.py
class V2Client (line 32) | class V2Client:
method __init__ (line 33) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method with_raw_response (line 37) | def with_raw_response(self) -> RawV2Client:
method chat_stream (line 47) | def chat_stream(
method chat (line 217) | def chat(
method embed (line 385) | def embed(
method rerank (line 492) | def rerank(
class AsyncV2Client (line 572) | class AsyncV2Client:
method __init__ (line 573) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method with_raw_response (line 577) | def with_raw_response(self) -> AsyncRawV2Client:
method chat_stream (line 587) | async def chat_stream(
method chat (line 766) | async def chat(
method embed (line 942) | async def embed(
method rerank (line 1057) | async def rerank(
FILE: src/cohere/v2/raw_client.py
class RawV2Client (line 54) | class RawV2Client:
method __init__ (line 55) | def __init__(self, *, client_wrapper: SyncClientWrapper):
method chat_stream (line 59) | def chat_stream(
method chat (line 409) | def chat(
method embed (line 730) | def embed(
method rerank (line 981) | def rerank(
class AsyncRawV2Client (line 1197) | class AsyncRawV2Client:
method __init__ (line 1198) | def __init__(self, *, client_wrapper: AsyncClientWrapper):
method chat_stream (line 1202) | async def chat_stream(
method chat (line 1552) | async def chat(
method embed (line 1873) | async def embed(
method rerank (line 2124) | async def rerank(
FILE: src/cohere/v2/types/__init__.py
function __getattr__ (line 61) | def __getattr__(attr_name: str) -> typing.Any:
function __dir__ (line 77) | def __dir__():
FILE: src/cohere/v2/types/v2chat_response.py
class V2ChatResponse (line 14) | class V2ChatResponse(UncheckedBaseModel):
class Config (line 29) | class Config:
FILE: src/cohere/v2/types/v2chat_stream_response.py
class MessageStartV2ChatStreamResponse (line 22) | class MessageStartV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 35) | class Config:
class ContentStartV2ChatStreamResponse (line 40) | class ContentStartV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 53) | class Config:
class ContentDeltaV2ChatStreamResponse (line 58) | class ContentDeltaV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 72) | class Config:
class ContentEndV2ChatStreamResponse (line 77) | class ContentEndV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 89) | class Config:
class ToolPlanDeltaV2ChatStreamResponse (line 94) | class ToolPlanDeltaV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 106) | class Config:
class ToolCallStartV2ChatStreamResponse (line 111) | class ToolCallStartV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 124) | class Config:
class ToolCallDeltaV2ChatStreamResponse (line 129) | class ToolCallDeltaV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 142) | class Config:
class ToolCallEndV2ChatStreamResponse (line 147) | class ToolCallEndV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 159) | class Config:
class CitationStartV2ChatStreamResponse (line 164) | class CitationStartV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 177) | class Config:
class CitationEndV2ChatStreamResponse (line 182) | class CitationEndV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 194) | class Config:
class MessageEndV2ChatStreamResponse (line 199) | class MessageEndV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 212) | class Config:
class DebugV2ChatStreamResponse (line 217) | class DebugV2ChatStreamResponse(UncheckedBaseModel):
class Config (line 229) | class Config:
FILE: src/cohere/v2/types/v2rerank_response.py
class V2RerankResponse (line 12) | class V2RerankResponse(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: src/cohere/v2/types/v2rerank_response_results_item.py
class V2RerankResponseResultsItem (line 10) | class V2RerankResponseResultsItem(UncheckedBaseModel):
class Config (line 25) | class Config:
FILE: tests/test_async_client.py
class TestClient (line 12) | class TestClient(unittest.IsolatedAsyncioTestCase):
method setUp (line 15) | def setUp(self) -> None:
method test_token_falls_back_on_env_variable (line 18) | async def test_token_falls_back_on_env_variable(self) -> None:
method test_context_manager (line 22) | async def test_context_manager(self) -> None:
method test_chat (line 26) | async def test_chat(self) -> None:
method test_chat_stream (line 40) | async def test_chat_stream(self) -> None:
method test_stream_equals_true (line 63) | async def test_stream_equals_true(self) -> None:
method test_deprecated_fn (line 70) | async def test_deprecated_fn(self) -> None:
method test_moved_fn (line 74) | async def test_moved_fn(self) -> None:
method test_embed (line 80) | async def test_embed(self) -> None:
method test_embed_batch_types (line 88) | async def test_embed_batch_types(self) -> None:
method test_embed_batch_v1 (line 109) | async def test_embed_batch_v1(self) -> None:
method test_embed_job_crud (line 125) | async def test_embed_job_crud(self) -> None:
method test_rerank (line 157) | async def test_rerank(self) -> None:
method test_datasets_crud (line 174) | async def test_datasets_crud(self) -> None:
method test_save_load (line 194) | async def test_save_load(self) -> None:
method test_tokenize (line 214) | async def test_tokenize(self) -> None:
method test_detokenize (line 222) | async def test_detokenize(self) -> None:
method test_tool_use (line 231) | async def test_tool_use(self) -> None:
method test_local_tokenize (line 303) | async def test_local_tokenize(self) -> None:
method test_local_detokenize (line 310) | async def test_local_detokenize(self) -> None:
method test_tokenize_async_context_with_sync_client (line 317) | async def test_tokenize_async_context_with_sync_client(self) -> None:
FILE: tests/test_aws_client_unit.py
class TestSigV4HostHeader (line 21) | class TestSigV4HostHeader(unittest.TestCase):
method test_sigv4_signs_with_correct_host (line 25) | def test_sigv4_signs_with_correct_host(self) -> None:
class TestModeConditionalInit (line 77) | class TestModeConditionalInit(unittest.TestCase):
method test_sagemaker_mode_creates_sagemaker_clients (line 81) | def test_sagemaker_mode_creates_sagemaker_clients(self) -> None:
method test_bedrock_mode_creates_bedrock_clients (line 103) | def test_bedrock_mode_creates_bedrock_clients(self) -> None:
method test_default_mode_is_sagemaker (line 125) | def test_default_mode_is_sagemaker(self) -> None:
class TestEmbedV4Params (line 132) | class TestEmbedV4Params(unittest.TestCase):
method _make_bedrock_client (line 137) | def _make_bedrock_client(): # type: ignore
method test_embed_accepts_new_params (line 159) | def test_embed_accepts_new_params(self) -> None:
method test_embed_passes_params_to_bedrock (line 168) | def test_embed_passes_params_to_bedrock(self) -> None:
method test_embed_omits_none_params (line 190) | def test_embed_omits_none_params(self) -> None:
method test_embed_with_embedding_types_returns_dict (line 210) | def test_embed_with_embedding_types_returns_dict(self) -> None:
FILE: tests/test_bedrock_client.py
function _setup_boto3_env (line 14) | def _setup_boto3_env():
class TestClient (line 25) | class TestClient(unittest.TestCase):
method setUp (line 33) | def setUp(self) -> None:
method test_rerank (line 41) | def test_rerank(self) -> None:
method test_embed (line 60) | def test_embed(self) -> None:
method test_generate (line 68) | def test_generate(self) -> None:
method test_generate_stream (line 75) | def test_generate_stream(self) -> None:
method test_chat (line 85) | def test_chat(self) -> None:
method test_chat_stream (line 108) | def test_chat_stream(self) -> None:
class TestBedrockClientV2 (line 128) | class TestBedrockClientV2(unittest.TestCase):
method setUp (line 135) | def setUp(self) -> None:
method test_embed (line 143) | def test_embed(self) -> None:
method test_embed_with_output_dimension (line 152) | def test_embed_with_output_dimension(self) -> None:
class TestCohereAwsBedrockClient (line 164) | class TestCohereAwsBedrockClient(unittest.TestCase):
method setUpClass (line 174) | def setUpClass(cls) -> None:
method test_client_is_bedrock_mode (line 180) | def test_client_is_bedrock_mode(self) -> None:
method test_embed (line 184) | def test_embed(self) -> None:
method test_embed_with_embedding_types (line 194) | def test_embed_with_embedding_types(self) -> None:
method test_embed_with_output_dimension (line 206) | def test_embed_with_output_dimension(self) -> None:
method test_embed_without_new_params (line 219) | def test_embed_without_new_params(self) -> None:
FILE: tests/test_client.py
class TestClient (line 15) | class TestClient(unittest.TestCase):
method test_token_falls_back_on_env_variable (line 17) | def test_token_falls_back_on_env_variable(self) -> None:
method test_context_manager (line 21) | def test_context_manager(self) -> None:
method test_chat (line 25) | def test_chat(self) -> None:
method test_chat_stream (line 38) | def test_chat_stream(self) -> None:
method test_stream_equals_true (line 60) | def test_stream_equals_true(self) -> None:
method test_deprecated_fn (line 67) | def test_deprecated_fn(self) -> None:
method test_moved_fn (line 71) | def test_moved_fn(self) -> None:
method test_embed (line 75) | def test_embed(self) -> None:
method test_image_embed (line 100) | def test_image_embed(self) -> None:
method test_embed_batch_types (line 125) | def test_embed_batch_types(self) -> None:
method test_embed_batch_v1 (line 146) | def test_embed_batch_v1(self) -> None:
method test_embed_job_crud (line 162) | def test_embed_job_crud(self) -> None:
method test_rerank (line 194) | def test_rerank(self) -> None:
method test_datasets_crud (line 211) | def test_datasets_crud(self) -> None:
method test_save_load (line 231) | def test_save_load(self) -> None:
method test_tokenize (line 251) | def test_tokenize(self) -> None:
method test_detokenize (line 259) | def test_detokenize(self) -> None:
method test_tool_use (line 269) | def test_tool_use(self) -> None:
method test_local_tokenize (line 341) | def test_local_tokenize(self) -> None:
method test_local_detokenize (line 348) | def test_local_detokenize(self) -> None:
FILE: tests/test_client_init.py
class TestClientInit (line 11) | class TestClientInit(unittest.TestCase):
method test_aws_inits (line 13) | def test_aws_inits(self) -> None:
method test_inits (line 19) | def test_inits(self) -> None:
FILE: tests/test_client_v2.py
class TestClientV2 (line 14) | class TestClientV2(unittest.TestCase):
method test_chat (line 16) | def test_chat(self) -> None:
method test_chat_stream (line 22) | def test_chat_stream(self) -> None:
method test_legacy_methods_available (line 40) | def test_legacy_methods_available(self) -> None:
method test_chat_documents (line 47) | def test_chat_documents(self) -> None:
method test_chat_tools (line 65) | def test_chat_tools(self) -> None:
FILE: tests/test_embed_streaming.py
class TestStreamedEmbedding (line 17) | class TestStreamedEmbedding(unittest.TestCase):
method test_creation (line 20) | def test_creation(self):
method test_text_optional (line 27) | def test_text_optional(self):
class TestExtractEmbeddings (line 32) | class TestExtractEmbeddings(unittest.TestCase):
method test_v1_embeddings_floats (line 35) | def test_v1_embeddings_floats(self):
method test_v1_embeddings_by_type (line 51) | def test_v1_embeddings_by_type(self):
method test_v2_response_format (line 69) | def test_v2_response_format(self):
method test_global_offset (line 82) | def test_global_offset(self):
method test_empty_embeddings (line 93) | def test_empty_embeddings(self):
method test_texts_shorter_than_embeddings (line 99) | def test_texts_shorter_than_embeddings(self):
class TestBatchSizeConstant (line 112) | class TestBatchSizeConstant(unittest.TestCase):
method test_default_batch_size_matches_api_limit (line 115) | def test_default_batch_size_matches_api_limit(self):
FILE: tests/test_embed_utils.py
class TestClient (line 130) | class TestClient(unittest.TestCase):
method test_merge_embeddings_by_type (line 132) | def test_merge_embeddings_by_type(self) -> None:
method test_merge_embeddings_floats (line 165) | def test_merge_embeddings_floats(self) -> None:
method test_merge_partial_embeddings_floats (line 192) | def test_merge_partial_embeddings_floats(self) -> None:
FILE: tests/test_oci_client.py
class TestOciClient (line 75) | class TestOciClient(unittest.TestCase):
method setUp (line 78) | def setUp(self):
method test_embed (line 92) | def test_embed(self):
method test_chat (line 105) | def test_chat(self):
method test_chat_stream (line 115) | def test_chat_stream(self):
class TestOciClientV2 (line 135) | class TestOciClientV2(unittest.TestCase):
method setUp (line 138) | def setUp(self):
method test_embed_v2 (line 153) | def test_embed_v2(self):
method test_embed_with_model_prefix_v2 (line 170) | def test_embed_with_model_prefix_v2(self):
method test_chat_v2 (line 183) | def test_chat_v2(self):
method test_chat_vision_v2 (line 193) | def test_chat_vision_v2(self):
method test_chat_tool_use_v2 (line 224) | def test_chat_tool_use_v2(self):
method test_chat_tool_use_response_type_lowered (line 254) | def test_chat_tool_use_response_type_lowered(self):
method test_chat_multi_turn_tool_use_v2 (line 281) | def test_chat_multi_turn_tool_use_v2(self):
method test_chat_safety_mode_v2 (line 343) | def test_chat_safety_mode_v2(self):
method test_chat_stream_v2 (line 353) | def test_chat_stream_v2(self):
class TestOciClientAuthentication (line 390) | class TestOciClientAuthentication(unittest.TestCase):
method test_config_file_auth (line 393) | def test_config_file_auth(self):
method test_custom_profile_auth (line 416) | def test_custom_profile_auth(self):
class TestOciClientErrors (line 440) | class TestOciClientErrors(unittest.TestCase):
method test_missing_compartment_id (line 443) | def test_missing_compartment_id(self):
method test_invalid_model (line 451) | def test_invalid_model(self):
class TestOciClientModels (line 474) | class TestOciClientModels(unittest.TestCase):
method setUp (line 477) | def setUp(self):
method test_embed_english_v3 (line 492) | def test_embed_english_v3(self):
method test_embed_multilingual_v3 (line 503) | def test_embed_multilingual_v3(self):
method test_command_a_chat (line 514) | def test_command_a_chat(self):
method test_embed_english_light_v3 (line 522) | def test_embed_english_light_v3(self):
method test_embed_multilingual_light_v3 (line 532) | def test_embed_multilingual_light_v3(self):
method test_embed_search_query_input_type (line 542) | def test_embed_search_query_input_type(self):
method test_embed_with_embedding_types (line 552) | def test_embed_with_embedding_types(self):
method test_embed_with_truncate (line 563) | def test_embed_with_truncate(self):
method test_command_r_plus_chat (line 576) | def test_command_r_plus_chat(self):
method test_v2_multi_turn_chat (line 590) | def test_v2_multi_turn_chat(self):
method test_v2_system_message (line 604) | def test_v2_system_message(self):
class TestOciClientTransformations (line 617) | class TestOciClientTransformations(unittest.TestCase):
method test_thinking_parameter_transformation (line 620) | def test_thinking_parameter_transformation(self):
method test_thinking_parameter_disabled (line 641) | def test_thinking_parameter_disabled(self):
method test_thinking_response_transformation (line 660) | def test_thinking_response_transformation(self):
method test_stream_event_thinking_transformation (line 685) | def test_stream_event_thinking_transformation(self):
method test_stream_event_text_transformation (line 702) | def test_stream_event_text_transformation(self):
method test_thinking_parameter_none (line 719) | def test_thinking_parameter_none(self):
method test_v2_response_role_lowercased (line 736) | def test_v2_response_role_lowercased(self):
method test_v2_response_finish_reason_uppercase (line 757) | def test_v2_response_finish_reason_uppercase(self):
method test_v2_response_tool_calls_conversion (line 778) | def test_v2_response_tool_calls_conversion(self):
method test_normalize_model_for_oci (line 809) | def test_normalize_model_for_oci(self):
method test_transform_embed_request (line 823) | def test_transform_embed_request(self):
method test_transform_embed_request_with_optional_params (line 843) | def test_transform_embed_request_with_optional_params(self):
method test_transform_embed_request_rejects_images (line 862) | def test_transform_embed_request_rejects_images(self):
method test_transform_chat_request_optional_params (line 879) | def test_transform_chat_request_optional_params(self):
method test_v2_client_rejects_v1_request (line 909) | def test_v2_client_rejects_v1_request(self):
method test_v1_client_rejects_v2_request (line 922) | def test_v1_client_rejects_v2_request(self):
method test_unsupported_endpoint_raises (line 935) | def test_unsupported_endpoint_raises(self):
method test_v1_chat_request_optional_params (line 944) | def test_v1_chat_request_optional_params(self):
method test_v1_stream_wrapper_preserves_finish_reason (line 980) | def test_v1_stream_wrapper_preserves_finish_reason(self):
method test_transform_chat_request_tool_message_fields (line 1004) | def test_transform_chat_request_tool_message_fields(self):
method test_get_oci_url_known_endpoints (line 1031) | def test_get_oci_url_known_endpoints(self):
method test_get_oci_url_unknown_endpoint_raises (line 1042) | def test_get_oci_url_unknown_endpoint_raises(self):
method test_load_oci_config_missing_private_key_raises (line 1050) | def test_load_oci_config_missing_private_key_raises(self):
method test_load_oci_config_ignores_inherited_session_auth (line 1067) | def test_load_oci_config_ignores_inherited_session_auth(self):
method test_session_auth_prefers_security_token_signer (line 1109) | def test_session_auth_prefers_security_token_signer(self):
method test_session_token_refreshed_on_subsequent_requests (line 1150) | def test_session_token_refreshed_on_subsequent_requests(self):
method test_embed_response_lowercases_embedding_keys (line 1206) | def test_embed_response_lowercases_embedding_keys(self):
method test_embed_response_includes_response_type_v1 (line 1225) | def test_embed_response_includes_response_type_v1(self):
method test_embed_response_includes_response_type_v2 (line 1241) | def test_embed_response_includes_response_type_v2(self):
method test_normalize_model_for_oci_rejects_empty_model (line 1257) | def test_normalize_model_for_oci_rejects_empty_model(self):
method test_stream_wrapper_emits_full_event_lifecycle (line 1265) | def test_stream_wrapper_emits_full_event_lifecycle(self):
method test_stream_wrapper_emits_new_content_block_on_thinking_transition (line 1299) | def test_stream_wrapper_emits_new_content_block_on_thinking_transition...
method test_stream_wrapper_no_spurious_block_on_finish_only_event (line 1327) | def test_stream_wrapper_no_spurious_block_on_finish_only_event(self):
method test_stream_wrapper_skips_malformed_json_with_warning (line 1353) | def test_stream_wrapper_skips_malformed_json_with_warning(self):
method test_stream_wrapper_skips_message_end_for_empty_stream (line 1366) | def test_stream_wrapper_skips_message_end_for_empty_stream(self):
method test_stream_wrapper_done_uses_current_content_index_after_transition (line 1374) | def test_stream_wrapper_done_uses_current_content_index_after_transiti...
method test_stream_wrapper_raises_on_transform_error (line 1394) | def test_stream_wrapper_raises_on_transform_error(self):
method test_stream_event_finish_reason_keeps_final_text (line 1407) | def test_stream_event_finish_reason_keeps_final_text(self):
FILE: tests/test_oci_mypy.py
function _run_mypy (line 35) | def _run_mypy(files: list[str], extra_env: dict[str, str] | None = None)...
class TestOciMypy (line 49) | class TestOciMypy(unittest.TestCase):
method test_oci_source_types (line 52) | def test_oci_source_types(self):
method test_oci_test_types (line 57) | def test_oci_test_types(self):
FILE: tests/test_overrides.py
class TestClient (line 10) | class TestClient(unittest.TestCase):
method test_float_alias (line 12) | def test_float_alias(self) -> None:
Condensed preview — 367 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,089K chars).
[
{
"path": ".fern/metadata.json",
"chars": 2304,
"preview": "{\n \"cliVersion\": \"4.63.2\",\n \"generatorName\": \"fernapi/fern-python-sdk\",\n \"generatorVersion\": \"5.3.3\",\n \"generatorCon"
},
{
"path": ".fernignore",
"chars": 466,
"preview": "4.0.0-5.0.0-migration-guide.md\nbanner.png\nREADME.md\nsrc/cohere/client.py\ntests\n.github/workflows/ci.yml\n.github/ISSUE_TE"
},
{
"path": ".github/ISSUE_TEMPLATE/bug_report.md",
"chars": 444,
"preview": "---\nname: Bug report related to an SDK error\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\n\n---\n\n**SDK "
},
{
"path": ".github/ISSUE_TEMPLATE/improvement_request.md",
"chars": 337,
"preview": "---\nname: Improvement request, or addition features\nabout: Create a request to help us improve\ntitle: \"\"\nlabels: \"\"\n---\n"
},
{
"path": ".github/workflows/ci.yml",
"chars": 2304,
"preview": "name: ci\n\non: [push]\njobs:\n compile:\n runs-on: ubuntu-latest\n steps:\n - name: Checkout repo\n uses: ac"
},
{
"path": ".gitignore",
"chars": 57,
"preview": ".mypy_cache/\n.ruff_cache/\n__pycache__/\ndist/\npoetry.toml\n"
},
{
"path": "4.0.0-5.0.0-migration-guide.md",
"chars": 2080,
"preview": "## `cohere==4` to `cohere==5` migration guide\n\nAs we migrate from the handwritten, manually-maintained sdk to our auto-g"
},
{
"path": "LICENSE",
"chars": 1062,
"preview": "MIT License\n\nCopyright (c) 2021 Cohere\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof "
},
{
"path": "README.md",
"chars": 5261,
"preview": "# Cohere Python SDK\n\n\n\n[](https://pypi.org/project/"
},
{
"path": "mypy.ini",
"chars": 59,
"preview": "[mypy]\nexclude = src/cohere/manually_maintained/cohere_aws\n"
},
{
"path": "pyproject.toml",
"chars": 2783,
"preview": "[project]\nname = \"cohere\"\ndynamic = [\"version\"]\n\n[tool.poetry]\nname = \"cohere\"\nversion = \"6.1.0\"\ndescription = \"\"\nreadme"
},
{
"path": "reference.md",
"chars": 121132,
"preview": "# Reference\n<details><summary><code>client.<a href=\"src/cohere/client.py\">chat_stream</a>(...) -> typing.Iterator[bytes]"
},
{
"path": "requirements.txt",
"chars": 161,
"preview": "fastavro==1.9.4\nhttpx>=0.21.2\npydantic>= 1.9.2\npydantic-core>=2.18.2,<2.44.0\nrequests==2.0.0\ntokenizers>=0.15,<1\ntypes-r"
},
{
"path": "src/cohere/__init__.py",
"chars": 32201,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/_default_clients.py",
"chars": 1002,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport httpx\n\nSDK_DEFAULT_TIMEOUT = 60\n\n"
},
{
"path": "src/cohere/aliases.py",
"chars": 1215,
"preview": "# Import overrides early to ensure they're applied before types are used\n# This is necessary for backwards compatibility"
},
{
"path": "src/cohere/audio/__init__.py",
"chars": 1236,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/audio/client.py",
"chars": 2067,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom __future__ import annotations\n\nimport typing\n\nfrom"
},
{
"path": "src/cohere/audio/raw_client.py",
"chars": 407,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom ..core.client_wrapper import AsyncClientWrapper, S"
},
{
"path": "src/cohere/audio/transcriptions/__init__.py",
"chars": 1119,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/audio/transcriptions/client.py",
"chars": 4725,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ... import core\nfrom ...core.client"
},
{
"path": "src/cohere/audio/transcriptions/raw_client.py",
"chars": 17327,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/audio/transcriptions/types/__init__.py",
"chars": 1181,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/audio/transcriptions/types/audio_transcriptions_create_response.py",
"chars": 605,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/aws_client.py",
"chars": 10170,
"preview": "import base64\nimport json\nimport re\nimport typing\n\nimport httpx\nfrom httpx import URL, SyncByteStream, ByteStream\n\nfrom "
},
{
"path": "src/cohere/base_client.py",
"chars": 161952,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom __future__ import annotations\n\nimport os\nimport ty"
},
{
"path": "src/cohere/batches/__init__.py",
"chars": 1498,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/batches/client.py",
"chars": 10857,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.client_wrapper import AsyncC"
},
{
"path": "src/cohere/batches/raw_client.py",
"chars": 37748,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/batches/types/__init__.py",
"chars": 1687,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/batches/types/batch.py",
"chars": 2640,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\n\nimport pydantic\nfr"
},
{
"path": "src/cohere/batches/types/batch_status.py",
"chars": 388,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nBatchStatus = typing.Union[\n typing.L"
},
{
"path": "src/cohere/batches/types/cancel_batch_response.py",
"chars": 180,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nCancelBatchResponse = typing.Dict[str, t"
},
{
"path": "src/cohere/batches/types/create_batch_response.py",
"chars": 684,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/batches/types/get_batch_response.py",
"chars": 680,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/batches/types/list_batches_response.py",
"chars": 975,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/bedrock_client.py",
"chars": 1755,
"preview": "import typing\n\nfrom tokenizers import Tokenizer # type: ignore\n\nfrom .aws_client import AwsClient, AwsClientV2\n\n\nclass "
},
{
"path": "src/cohere/client.py",
"chars": 25974,
"preview": "import asyncio\nimport os\nimport typing\nfrom concurrent.futures import ThreadPoolExecutor\nfrom tokenizers import Tokenize"
},
{
"path": "src/cohere/client_v2.py",
"chars": 3507,
"preview": "import os\nimport typing\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport httpx\nfrom .client import AsyncClient,"
},
{
"path": "src/cohere/config.py",
"chars": 92,
"preview": "embed_batch_size = 96\nembed_stream_batch_size = 96 # Max texts per API request (API limit)\n"
},
{
"path": "src/cohere/connectors/__init__.py",
"chars": 85,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\n"
},
{
"path": "src/cohere/connectors/client.py",
"chars": 21831,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.client_wrapper import AsyncC"
},
{
"path": "src/cohere/connectors/raw_client.py",
"chars": 96024,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/core/__init__.py",
"chars": 4580,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/core/api_error.py",
"chars": 614,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Any, Dict, Optional\n\n\nclass ApiError"
},
{
"path": "src/cohere/core/client_wrapper.py",
"chars": 4065,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport httpx\nfrom .http_client import As"
},
{
"path": "src/cohere/core/datetime_utils.py",
"chars": 2498,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nfrom email.utils import parsedate"
},
{
"path": "src/cohere/core/file.py",
"chars": 2663,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import IO, Dict, List, Mapping, Optional, T"
},
{
"path": "src/cohere/core/force_multipart.py",
"chars": 477,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Any, Dict\n\n\nclass ForceMultipartDict"
},
{
"path": "src/cohere/core/http_client.py",
"chars": 31590,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport asyncio\nimport email.utils\nimport re\nimport time"
},
{
"path": "src/cohere/core/http_response.py",
"chars": 1437,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Dict, Generic, TypeVar\n\nimport httpx"
},
{
"path": "src/cohere/core/http_sse/__init__.py",
"chars": 1350,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/core/http_sse/_api.py",
"chars": 3920,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport re\nfrom contextlib import asynccontextmanager, c"
},
{
"path": "src/cohere/core/http_sse/_decoders.py",
"chars": 1733,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import List, Optional\n\nfrom ._models import"
},
{
"path": "src/cohere/core/http_sse/_exceptions.py",
"chars": 127,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport httpx\n\n\nclass SSEError(httpx.TransportError):\n "
},
{
"path": "src/cohere/core/http_sse/_models.py",
"chars": 397,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport json\nfrom dataclasses import dataclass\nfrom typi"
},
{
"path": "src/cohere/core/jsonable_encoder.py",
"chars": 3963,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n\"\"\"\njsonable_encoder converts a Python object to a JSON"
},
{
"path": "src/cohere/core/logging.py",
"chars": 3244,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport logging\nimport typing\n\nLogLevel = typing.Literal"
},
{
"path": "src/cohere/core/parse_error.py",
"chars": 1111,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Any, Dict, Optional\n\n\nclass ParsingE"
},
{
"path": "src/cohere/core/pydantic_utilities.py",
"chars": 25915,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# nopycln: file\nimport datetime as dt\nimport inspect\nim"
},
{
"path": "src/cohere/core/query_encoder.py",
"chars": 2144,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nim"
},
{
"path": "src/cohere/core/remove_none_from_dict.py",
"chars": 342,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom typing import Any, Dict, Mapping, Optional\n\n\ndef r"
},
{
"path": "src/cohere/core/request_options.py",
"chars": 1681,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\ntry:\n from typing import NotRequired "
},
{
"path": "src/cohere/core/serialization.py",
"chars": 9818,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport collections\nimport inspect\nimport typing\n\nimport"
},
{
"path": "src/cohere/core/unchecked_base_model.py",
"chars": 18842,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport enum\nimport inspect\nimport"
},
{
"path": "src/cohere/datasets/__init__.py",
"chars": 1354,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/datasets/client.py",
"chars": 20221,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\n\nfrom .. import cor"
},
{
"path": "src/cohere/datasets/raw_client.py",
"chars": 80294,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\nfrom json.decoder i"
},
{
"path": "src/cohere/datasets/types/__init__.py",
"chars": 1568,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/datasets/types/datasets_create_response.py",
"chars": 612,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/datasets/types/datasets_get_response.py",
"chars": 572,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/datasets/types/datasets_get_usage_response.py",
"chars": 667,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/datasets/types/datasets_list_response.py",
"chars": 611,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ...core.pydantic_ut"
},
{
"path": "src/cohere/embed_jobs/__init__.py",
"chars": 1107,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/embed_jobs/client.py",
"chars": 14373,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.client_wrapper import AsyncC"
},
{
"path": "src/cohere/embed_jobs/raw_client.py",
"chars": 62333,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/embed_jobs/types/__init__.py",
"chars": 1163,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/embed_jobs/types/create_embed_job_request_truncate.py",
"chars": 169,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nCreateEmbedJobRequestTruncate = typing.U"
},
{
"path": "src/cohere/environment.py",
"chars": 157,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport enum\n\n\nclass ClientEnvironment(enum.Enum):\n P"
},
{
"path": "src/cohere/errors/__init__.py",
"chars": 2632,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/errors/bad_request_error.py",
"chars": 321,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/client_closed_request_error.py",
"chars": 330,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/forbidden_error.py",
"chars": 320,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/gateway_timeout_error.py",
"chars": 325,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/internal_server_error.py",
"chars": 325,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/invalid_token_error.py",
"chars": 323,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/not_found_error.py",
"chars": 319,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/not_implemented_error.py",
"chars": 325,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/service_unavailable_error.py",
"chars": 329,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/too_many_requests_error.py",
"chars": 326,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/unauthorized_error.py",
"chars": 323,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/errors/unprocessable_entity_error.py",
"chars": 330,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.api_error import ApiError\n\n\n"
},
{
"path": "src/cohere/finetuning/__init__.py",
"chars": 2753,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/finetuning/client.py",
"chars": 24232,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.client_wrapper import AsyncC"
},
{
"path": "src/cohere/finetuning/finetuning/__init__.py",
"chars": 2578,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/finetuning/finetuning/types/__init__.py",
"chars": 3287,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/finetuning/finetuning/types/base_model.py",
"chars": 1087,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/base_type.py",
"chars": 305,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nBaseType = typing.Union[\n typing.Lite"
},
{
"path": "src/cohere/finetuning/finetuning/types/create_finetuned_model_response.py",
"chars": 784,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/delete_finetuned_model_response.py",
"chars": 198,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nDeleteFinetunedModelResponse = typing.Di"
},
{
"path": "src/cohere/finetuning/finetuning/types/event.py",
"chars": 1025,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\n\nimport pydantic\nfr"
},
{
"path": "src/cohere/finetuning/finetuning/types/finetuned_model.py",
"chars": 1973,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\n\nimport pydantic\nfr"
},
{
"path": "src/cohere/finetuning/finetuning/types/get_finetuned_model_response.py",
"chars": 780,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/hyperparameters.py",
"chars": 2009,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/list_events_response.py",
"chars": 1092,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/list_finetuned_models_response.py",
"chars": 1135,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/list_training_step_metrics_response.py",
"chars": 1069,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/lora_target_modules.py",
"chars": 312,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nLoraTargetModules = typing.Union[\n ty"
},
{
"path": "src/cohere/finetuning/finetuning/types/settings.py",
"chars": 1367,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/status.py",
"chars": 402,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nStatus = typing.Union[\n typing.Litera"
},
{
"path": "src/cohere/finetuning/finetuning/types/strategy.py",
"chars": 193,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nStrategy = typing.Union[typing.Literal[\""
},
{
"path": "src/cohere/finetuning/finetuning/types/training_step_metrics.py",
"chars": 1015,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport datetime as dt\nimport typing\n\nimport pydantic\nfr"
},
{
"path": "src/cohere/finetuning/finetuning/types/update_finetuned_model_response.py",
"chars": 786,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/finetuning/types/wandb_config.py",
"chars": 910,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ....core.pydantic_u"
},
{
"path": "src/cohere/finetuning/raw_client.py",
"chars": 70476,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/manually_maintained/__init__.py",
"chars": 173,
"preview": "# This module ensures overrides are applied early in the import process\n# Import overrides to trigger backwards compatib"
},
{
"path": "src/cohere/manually_maintained/cache.py",
"chars": 811,
"preview": "import typing\nimport time\n\n\nclass CacheMixin:\n # A simple in-memory cache with TTL (thread safe). This is used to cac"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/__init__.py",
"chars": 81,
"preview": "from .client import Client\nfrom .error import CohereError\nfrom .mode import Mode\n"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/chat.py",
"chars": 11351,
"preview": "from .response import CohereObject\nfrom .error import CohereError\nfrom .mode import Mode\nfrom typing import List, Option"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/classification.py",
"chars": 2461,
"preview": "from .response import CohereObject\nfrom typing import Any, Dict, Iterator, List, Literal, Union\n\nPrediction = Union[str,"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/client.py",
"chars": 44935,
"preview": "import json\nimport os\nimport tarfile\nimport tempfile\nimport time\nfrom typing import Any, Dict, List, Optional, Union\n\nfr"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/embeddings.py",
"chars": 607,
"preview": "from .response import CohereObject\nfrom typing import Iterator, List\n\n\nclass Embedding(CohereObject):\n\n def __init__("
},
{
"path": "src/cohere/manually_maintained/cohere_aws/error.py",
"chars": 591,
"preview": "class CohereError(Exception):\n def __init__(\n self,\n message=None,\n http_status=None,\n he"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/generation.py",
"chars": 3611,
"preview": "from .response import CohereObject\nfrom .mode import Mode\nfrom typing import List, Optional, NamedTuple, Generator, Dict"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/mode.py",
"chars": 76,
"preview": "from enum import Enum\n\n\nclass Mode(Enum):\n SAGEMAKER = 1\n BEDROCK = 2\n"
},
{
"path": "src/cohere/manually_maintained/cohere_aws/rerank.py",
"chars": 2195,
"preview": "from typing import Any, Dict, Iterator, List, NamedTuple, Optional\n\nfrom .response import CohereObject\n\nRerankDocument ="
},
{
"path": "src/cohere/manually_maintained/cohere_aws/response.py",
"chars": 337,
"preview": "class CohereObject():\n def __repr__(self) -> str:\n contents = ''\n exclude_list = ['iterator']\n\n "
},
{
"path": "src/cohere/manually_maintained/cohere_aws/summary.py",
"chars": 459,
"preview": "from .error import CohereError\nfrom .response import CohereObject\nfrom typing import Any, Dict, Optional\n\n\nclass Summary"
},
{
"path": "src/cohere/manually_maintained/lazy_aws_deps.py",
"chars": 557,
"preview": "\nwarning = \"AWS dependencies are not installed. Please install boto3, botocore, and sagemaker.\"\n\ndef lazy_sagemaker():\n "
},
{
"path": "src/cohere/manually_maintained/lazy_oci_deps.py",
"chars": 623,
"preview": "\"\"\"Lazy loading for optional OCI SDK dependency.\"\"\"\n\nfrom typing import Any\n\nOCI_INSTALLATION_MESSAGE = \"\"\"\nThe OCI SDK "
},
{
"path": "src/cohere/manually_maintained/streaming_embed.py",
"chars": 2843,
"preview": "\"\"\"Utilities for streaming embed responses without loading all embeddings into memory.\"\"\"\n\nfrom __future__ import annota"
},
{
"path": "src/cohere/manually_maintained/tokenizers.py",
"chars": 3834,
"preview": "import asyncio\nimport logging\nimport typing\n\nimport requests\nfrom tokenizers import Tokenizer # type: ignore\n\nif typing"
},
{
"path": "src/cohere/models/__init__.py",
"chars": 85,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\n"
},
{
"path": "src/cohere/models/client.py",
"chars": 7014,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom ..core.client_wrapper import AsyncC"
},
{
"path": "src/cohere/models/raw_client.py",
"chars": 31218,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\nfrom json.decoder import JSONDecodeError\n"
},
{
"path": "src/cohere/oci_client.py",
"chars": 47086,
"preview": "\"\"\"Oracle Cloud Infrastructure (OCI) client for Cohere API.\"\"\"\n\nimport configparser\nimport email.utils\nimport json\nimpor"
},
{
"path": "src/cohere/overrides.py",
"chars": 2796,
"preview": "import typing\nimport uuid\n\nfrom . import EmbedByTypeResponseEmbeddings\nfrom .core.pydantic_utilities import _get_model_f"
},
{
"path": "src/cohere/py.typed",
"chars": 0,
"preview": ""
},
{
"path": "src/cohere/raw_base_client.py",
"chars": 286502,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport contextlib\nimport json\nimport typing\nfrom json.d"
},
{
"path": "src/cohere/sagemaker_client.py",
"chars": 1821,
"preview": "import typing\n\nfrom .aws_client import AwsClient, AwsClientV2\nfrom .manually_maintained.cohere_aws.client import Client\n"
},
{
"path": "src/cohere/types/__init__.py",
"chars": 34163,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\n# isort: skip_file\n\nimport typing\nfrom importlib import"
},
{
"path": "src/cohere/types/api_meta.py",
"chars": 1026,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/api_meta_api_version.py",
"chars": 625,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/api_meta_billed_units.py",
"chars": 1280,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/api_meta_tokens.py",
"chars": 785,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/assistant_message.py",
"chars": 1114,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/assistant_message_response.py",
"chars": 1219,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/assistant_message_response_content_item.py",
"chars": 1275,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom __future__ import annotations\n\nimport typing\n\nimpo"
},
{
"path": "src/cohere/types/assistant_message_v2content.py",
"chars": 257,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom .assistant_message_v2content_one_it"
},
{
"path": "src/cohere/types/assistant_message_v2content_one_item.py",
"chars": 1260,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom __future__ import annotations\n\nimport typing\n\nimpo"
},
{
"path": "src/cohere/types/auth_token_type.py",
"chars": 168,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nAuthTokenType = typing.Union[typing.Lite"
},
{
"path": "src/cohere/types/chat_citation.py",
"chars": 1760,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_citation_generation_event.py",
"chars": 660,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_citation_type.py",
"chars": 164,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatCitationType = typing.Union[typing.L"
},
{
"path": "src/cohere/types/chat_connector.py",
"chars": 1463,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_delta_event.py",
"chars": 861,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_delta_event_delta.py",
"chars": 676,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_delta_event_delta_message.py",
"chars": 705,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_delta_event_delta_message_content.py",
"chars": 613,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_end_event.py",
"chars": 643,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_start_event.py",
"chars": 782,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_start_event_delta.py",
"chars": 676,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_start_event_delta_message.py",
"chars": 705,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_start_event_delta_message_content.py",
"chars": 802,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_content_start_event_delta_message_content_type.py",
"chars": 188,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatContentStartEventDeltaMessageContent"
},
{
"path": "src/cohere/types/chat_data_metrics.py",
"chars": 902,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_debug_event.py",
"chars": 532,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_document.py",
"chars": 448,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatDocument = typing.Dict[str, str]\n\"\"\""
},
{
"path": "src/cohere/types/chat_document_source.py",
"chars": 817,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_finish_reason.py",
"chars": 222,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatFinishReason = typing.Union[\n typ"
},
{
"path": "src/cohere/types/chat_message.py",
"chars": 1152,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_end_event.py",
"chars": 760,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_end_event_delta.py",
"chars": 837,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_start_event.py",
"chars": 850,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_start_event_delta.py",
"chars": 676,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_start_event_delta_message.py",
"chars": 657,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_message_v2.py",
"chars": 2890,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nfrom __future__ import annotations\n\nimport typing\n\nimpo"
},
{
"path": "src/cohere/types/chat_messages.py",
"chars": 455,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nfrom .chat_message_v2 import ChatMessage"
},
{
"path": "src/cohere/types/chat_request_citation_quality.py",
"chars": 200,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatRequestCitationQuality = typing.Unio"
},
{
"path": "src/cohere/types/chat_request_prompt_truncation.py",
"chars": 189,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatRequestPromptTruncation = typing.Uni"
},
{
"path": "src/cohere/types/chat_request_safety_mode.py",
"chars": 177,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatRequestSafetyMode = typing.Union[typ"
},
{
"path": "src/cohere/types/chat_search_queries_generation_event.py",
"chars": 713,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_search_query.py",
"chars": 855,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_search_result.py",
"chars": 1255,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_search_result_connector.py",
"chars": 666,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_search_results_event.py",
"chars": 947,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_stream_end_event.py",
"chars": 1391,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_stream_end_event_finish_reason.py",
"chars": 225,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatStreamEndEventFinishReason = typing."
},
{
"path": "src/cohere/types/chat_stream_event.py",
"chars": 507,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_stream_event_type.py",
"chars": 557,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_stream_request_citation_quality.py",
"chars": 212,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatStreamRequestCitationQuality = typin"
},
{
"path": "src/cohere/types/chat_stream_request_prompt_truncation.py",
"chars": 195,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatStreamRequestPromptTruncation = typi"
},
{
"path": "src/cohere/types/chat_stream_request_safety_mode.py",
"chars": 183,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nChatStreamRequestSafetyMode = typing.Uni"
},
{
"path": "src/cohere/types/chat_stream_start_event.py",
"chars": 635,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_text_content.py",
"chars": 572,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_text_generation_event.py",
"chars": 601,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_text_response_format.py",
"chars": 514,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_text_response_format_v2.py",
"chars": 516,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_thinking_content.py",
"chars": 681,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
},
{
"path": "src/cohere/types/chat_tool_call_delta_event.py",
"chars": 781,
"preview": "# This file was auto-generated by Fern from our API Definition.\n\nimport typing\n\nimport pydantic\nfrom ..core.pydantic_uti"
}
]
// ... and 167 more files (download for full content)
About this extraction
This page contains the full source code of the cohere-ai/cohere-python GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 367 files (1.9 MB), approximately 411.8k tokens, and a symbol index with 1368 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.