diff --git a/examples/langgraph-checkpointer/agent.py b/examples/langgraph-checkpointer/agent.py index 8ea98ae8..9f4c9000 100644 --- a/examples/langgraph-checkpointer/agent.py +++ b/examples/langgraph-checkpointer/agent.py @@ -52,6 +52,7 @@ def assistant(state: MessagesState): memory = DaprCheckpointer(store_name='statestore', key_prefix='dapr') react_graph_memory = builder.compile(checkpointer=memory) +memory.set_agent(react_graph_memory) config = {'configurable': {'thread_id': '1'}} diff --git a/examples/langgraph-checkpointer/components/agent-registry.yaml b/examples/langgraph-checkpointer/components/agent-registry.yaml new file mode 100644 index 00000000..b6c1fe4d --- /dev/null +++ b/examples/langgraph-checkpointer/components/agent-registry.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: agent-registry +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: keyPrefix + value: none \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/LICENSE b/ext/dapr-ext-agent-core/LICENSE new file mode 100644 index 00000000..be033a7f --- /dev/null +++ b/ext/dapr-ext-agent-core/LICENSE @@ -0,0 +1,203 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2021 The Dapr Authors. + + and others that have contributed code to the public domain. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/README.rst b/ext/dapr-ext-agent-core/README.rst new file mode 100644 index 00000000..16f73bb1 --- /dev/null +++ b/ext/dapr-ext-agent-core/README.rst @@ -0,0 +1,22 @@ +dapr-ext-agent_core extension +======================= + +|pypi| + +.. |pypi| image:: https://badge.fury.io/py/dapr-ext-agent-core.svg + :target: https://pypi.org/project/dapr-ext-agent-core/ + +This is the Dapr Agent Core extension for Dapr for Agents in the Dapr Python SDK. 
+ +Installation +------------ + +:: + + pip install dapr-ext-agent_core + +References +---------- + +* `Dapr `_ +* `Dapr Python-SDK `_ diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py new file mode 100644 index 00000000..e2039e29 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py @@ -0,0 +1,14 @@ +from .types import SupportedFrameworks, AgentMetadataSchema, AgentMetadata, LLMMetadata, PubSubMetadata, ToolMetadata, RegistryMetadata, MemoryMetadata +from .metadata import AgentRegistryAdapter + +__all__ = [ + "SupportedFrameworks", + "AgentMetadataSchema", + "AgentMetadata", + "LLMMetadata", + "PubSubMetadata", + "ToolMetadata", + "RegistryMetadata", + "MemoryMetadata", + "AgentRegistryAdapter", +] \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py new file mode 100644 index 00000000..6aee51ba --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py @@ -0,0 +1,7 @@ +from .dapr_agents import DaprAgentsMapper +from .langgraph import LangGraphMapper + +__all__ = [ + "DaprAgentsMapper", + "LangGraphMapper", +] \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py new file mode 100644 index 00000000..0e4c5f78 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -0,0 +1,74 @@ + +from datetime import datetime, timezone +import json +import logging +from typing import Any +from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata + +logger = logging.getLogger(__name__) + + +class DaprAgentsMapper: + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, 
schema_version: str) -> AgentMetadataSchema: + profile = getattr(agent, "profile", None) + memory = getattr(agent, "memory", None) + pubsub = getattr(agent, "pubsub", None) + llm = getattr(agent, "llm", None) + registry = getattr(agent, "_registry", None) + execution = getattr(agent, "execution", None) + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid=getattr(agent, "appid", ""), + type=type(agent).__name__, + orchestrator=False, + role=getattr(profile, "role", "") if profile else "", + goal=getattr(profile, "goal", "") if profile else "", + instructions=getattr(profile, "instructions", None) if profile else [], + statestore=getattr(memory, "store_name", "") if memory else "", + system_prompt=getattr(profile, "system_prompt", "") if profile else "", + ), + name=getattr(agent, "name", ""), + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name=getattr(pubsub, "pubsub_name", "") if pubsub else "", + broadcast_topic=getattr(pubsub, "broadcast_topic", None) if pubsub else None, + agent_topic=getattr(pubsub, "agent_topic", None) if pubsub else None, + ), + memory=MemoryMetadata( + type=type(memory).__name__ if memory else "", + session_id=getattr(memory, "session_id", None) if memory else None, + statestore=getattr(memory, "store_name", None) if memory else None, + ), + llm=LLMMetadata( + client=type(llm).__name__ if llm else "", + provider=getattr(llm, "provider", "unknown") if llm else "unknown", + api=getattr(llm, "api", "unknown") if llm else "unknown", + model=getattr(llm, "model", "unknown") if llm else "unknown", + component_name=getattr(llm, "component_name", None) if llm else None, + base_url=getattr(llm, "base_url", None) if llm else None, + azure_endpoint=getattr(llm, "azure_endpoint", None) if llm else None, + azure_deployment=getattr(llm, "azure_deployment", None) if llm else None, + prompt_template=type(getattr(llm, "prompt_template", None)).__name__ if llm and getattr(llm, 
"prompt_template", None) else None, + ), + registry=RegistryMetadata( + statestore=getattr(getattr(registry, "store", None), "store_name", None) if registry else None, + name=getattr(registry, "team_name", None) if registry else None, + ), + tools=[ + ToolMetadata( + tool_name=getattr(tool, "name", ""), + tool_description=getattr(tool, "description", ""), + tool_args=json.dumps(getattr(tool, "args_schema", {})) + if hasattr(tool, "args_schema") else "{}", + ) + for tool in getattr(agent, "tools", []) + ], + max_iterations=getattr(execution, "max_iterations", None) if execution else None, + tool_choice=getattr(execution, "tool_choice", None) if execution else None, + agent_metadata=getattr(agent, "agent_metadata", None), + ) diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py new file mode 100644 index 00000000..55d34f9c --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py @@ -0,0 +1,82 @@ + +from datetime import datetime, timezone +import json +import logging +from typing import TYPE_CHECKING, Any, Dict, Optional +from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata + +if TYPE_CHECKING: + from dapr.ext.langgraph import DaprCheckpointer + +logger = logging.getLogger(__name__) + +class LangGraphMapper: + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + + logger.info(f"LangGraph log vars: {vars(agent)}") + print(f"LangGraph print vars: {vars(agent)}") + logger.info(f"LangGraph log dir: {dir(agent)}") + print(f"LangGraph print dir: {dir(agent)}") + + introspected_vars: Dict[str, Any] = vars(agent) # type: ignore + introspected_dir = dir(agent) + + checkpointer: Optional["DaprCheckpointer"] = introspected_vars.get("checkpointer", None) # type: ignore + tools = 
introspected_vars.get("tools", []) # type: ignore + print(f"LangGraph tools: {tools}") + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid="", + type=type(agent).__name__, + orchestrator=False, + role="", + goal="", + instructions=[], + statestore=checkpointer.store_name if checkpointer else None, + system_prompt="", + ), + name=agent.get_name() if hasattr(agent, "get_name") else "", + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name="", + broadcast_topic=None, + agent_topic=None, + ), + memory=MemoryMetadata( + type="DaprCheckpointer", + session_id=None, + statestore=checkpointer.store_name if checkpointer else None, + ), + llm=LLMMetadata( + client="", + provider="unknown", + api="unknown", + model="unknown", + component_name=None, + base_url=None, + azure_endpoint=None, + azure_deployment=None, + prompt_template=None, + ), + registry=RegistryMetadata( + statestore=None, + name=None, + ), + tools=[ + ToolMetadata( + tool_name="", + tool_description="", + tool_args=json.dumps({}) + if hasattr(tool, "args_schema") else "{}", + ) + for tool in getattr(agent, "tools", []) + ], + max_iterations=None, + tool_choice=None, + agent_metadata=None, + ) diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py new file mode 100644 index 00000000..c2df9e88 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py @@ -0,0 +1,277 @@ +from __future__ import annotations + +from importlib.metadata import PackageNotFoundError, version +import logging +import random +import time +from typing import Any, Callable, Dict, Optional, Sequence + +from dapr.clients import DaprClient +from dapr.clients.grpc._response import ( + GetMetadataResponse, + RegisteredComponents, +) +from dapr.clients.grpc._state import Concurrency, Consistency + +from dapr_agents.agents.configs import ( + AgentRegistryConfig, +) +from 
dapr_agents.storage.daprstores.stateservice import ( + StateStoreError, + StateStoreService +) + +from dapr.ext.agent_core import SupportedFrameworks, AgentMetadataSchema +import dapr.ext.agent_core.mapping + +logger = logging.getLogger(__name__) + + +class AgentRegistryAdapter: + def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agent: Any) -> None: + self._registry = registry + + try: + with DaprClient(http_timeout_seconds=10) as _client: + resp: GetMetadataResponse = _client.get_metadata() + self.appid = resp.application_id + if self._registry is None: + components: Sequence[RegisteredComponents] = resp.registered_components + for component in components: + if ( + "state" in component.type + and component.name == "agent-registry" + ): + self._registry = AgentRegistryConfig( + store=StateStoreService(store_name=component.name), + team_name="default", + ) + except TimeoutError: + logger.warning( + "Dapr sidecar not responding; proceeding without auto-configuration." 
+ ) + + if self._registry is None: + return + + self.registry_state: StateStoreService = self._registry.store + self._registry_prefix: str = "agents:" + self._meta: Dict[str, str] = {"contentType": "application/json"} + self._max_etag_attempts: int = 10 + self._save_options: Dict[str, Any] = { + "concurrency": Concurrency.first_write, + "consistency": Consistency.strong, + } + + if not self._can_handle(framework): + raise ValueError(f"Adapter cannot handle framework '{framework}'") + + _metadata = self._extract_metadata(agent) + + # We need to handle some null values here to avoid issues during registration + if _metadata.agent.appid == "": + _metadata.agent.appid = self.appid or "" + + if _metadata.registry: + if _metadata.registry.name is None: + _metadata.registry.name = self._registry.team_name + if _metadata.registry.statestore is None: + _metadata.registry.statestore = self.registry_state.store_name + + self._register(_metadata) + + def _can_handle(self, framework: str) -> bool: + """Check if this adapter can handle the given Agent.""" + + for fw in SupportedFrameworks: + if framework.lower() == fw.value.lower(): + self._framework = fw + return True + return False + + + def _extract_metadata(self, agent: Any) -> AgentMetadataSchema: + """Extract metadata from the given Agent.""" + + try: + schema_version = version("dapr-ext-agent_core") + except PackageNotFoundError: + schema_version = "edge" + + framework_mappers = { + SupportedFrameworks.DAPR_AGENTS: dapr.ext.agent_core.mapping.DaprAgentsMapper().map_agent_metadata, + SupportedFrameworks.LANGGRAPH: dapr.ext.agent_core.mapping.LangGraphMapper().map_agent_metadata, + } + + mapper = framework_mappers.get(self._framework) + if not mapper: + raise ValueError(f"Adapter cannot handle framework '{self._framework}'") + + return mapper(agent=agent, schema_version=schema_version) + + def _register(self, metadata: AgentMetadataSchema) -> None: + """Register the adapter with the given Agent.""" + """ + Upsert this 
agent's metadata in the team registry. + + Args: + metadata: Additional metadata to store for this agent. + team: Team override; falls back to configured default team. + """ + if not metadata.registry: + raise ValueError("Registry metadata is required for registration") + + self._upsert_agent_entry( + team=metadata.registry.name, + agent_name=metadata.name, + agent_metadata=metadata.model_dump(), + ) + + def _mutate_registry_entry( + self, + *, + team: Optional[str], + mutator: Callable[[Dict[str, Any]], Optional[Dict[str, Any]]], + max_attempts: Optional[int] = None, + ) -> None: + """ + Apply a mutation to the team registry with optimistic concurrency. + + Args: + team: Team identifier. + mutator: Function that returns the updated registry dict (or None for no-op). + max_attempts: Override for concurrency retries; defaults to init value. + + Raises: + StateStoreError: If the mutation fails after retries due to contention. + """ + if not self.registry_state: + raise RuntimeError( + "registry_state must be provided to mutate the agent registry" + ) + + key = f"agents:{team or 'default'}" + self._meta["partitionKey"] = key + attempts = max_attempts or self._max_etag_attempts + + self._ensure_registry_initialized(key=key, meta=self._meta) + + for attempt in range(1, attempts + 1): + logger.debug(f"Mutating registry entry '{key}', attempt {attempt}/{attempts}") + try: + current, etag = self.registry_state.load_with_etag( + key=key, + default={}, + state_metadata=self._meta, + ) + if not isinstance(current, dict): + current = {} + + updated = mutator(dict(current)) + if updated is None: + return + + self.registry_state.save( + key=key, + value=updated, + etag=etag, + state_metadata=self._meta, + state_options=self._save_options, + ) + logger.debug(f"Successfully mutated registry entry '{key}'") + return + except Exception as exc: # noqa: BLE001 + logger.warning( + "Conflict during registry mutation (attempt %d/%d) for '%s': %s", + attempt, + attempts, + key, + exc, + ) 
+ if attempt == attempts: + raise StateStoreError( + f"Failed to mutate agent registry key '{key}' after {attempts} attempts." + ) from exc + # Jittered backoff to reduce thundering herd during contention. + time.sleep(min(1.0 * attempt, 3.0) * (1 + random.uniform(0, 0.25))) + + def _upsert_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + agent_metadata: Dict[str, Any], + max_attempts: Optional[int] = None, + ) -> None: + """ + Insert/update a single agent record in the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + agent_metadata: Metadata value to write. + max_attempts: Override retry attempts. + """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if current.get(agent_name) == agent_metadata: + return None + current[agent_name] = agent_metadata + return current + + logger.debug("Upserting agent '%s' in team '%s' registry", agent_name, team or "default") + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _remove_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + max_attempts: Optional[int] = None, + ) -> None: + """ + Delete a single agent record from the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + max_attempts: Override retry attempts. + """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if agent_name not in current: + return None + del current[agent_name] + return current + + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _ensure_registry_initialized(self, *, key: str, meta: Dict[str, str]) -> None: + """ + Ensure a registry document exists to create an ETag for concurrency control. + + Args: + key: Registry document key. + meta: Dapr state metadata to use for the operation. 
+ """ + current, etag = self.registry_state.load_with_etag( # type: ignore[union-attr] + key=key, + default={}, + state_metadata=meta, + ) + if etag is None: + self.registry_state.save( # type: ignore[union-attr] + key=key, + value={}, + etag=None, + state_metadata=meta, + state_options=self._save_options, + ) diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed b/ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py new file mode 100644 index 00000000..33d0b777 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py @@ -0,0 +1,143 @@ +from enum import StrEnum +from typing import Any, Dict, List, Optional +from pydantic import BaseModel, Field + +class SupportedFrameworks(StrEnum): + DAPR_AGENTS = "dapr-agents" + LANGGRAPH = "langgraph" + + +class AgentMetadata(BaseModel): + """Metadata about an agent's configuration and capabilities.""" + + appid: str = Field(..., description="Dapr application ID of the agent") + type: str = Field(..., description="Type of the agent (e.g., standalone, durable)") + orchestrator: bool = Field( + False, description="Indicates if the agent is an orchestrator" + ) + role: str = Field(default="", description="Role of the agent") + goal: str = Field(default="", description="High-level objective of the agent") + instructions: Optional[List[str]] = Field( + default=None, description="Instructions for the agent" + ) + statestore: Optional[str] = Field( + default=None, description="Dapr state store component name used by the agent" + ) + system_prompt: Optional[str] = Field( + default=None, description="System prompt guiding the agent's behavior" + ) + + +class PubSubMetadata(BaseModel): + """Pub/Sub configuration information.""" + + name: str = Field(..., description="Pub/Sub component name") + broadcast_topic: Optional[str] = Field( + 
default=None, description="Pub/Sub topic for broadcasting messages" + ) + agent_topic: Optional[str] = Field( + default=None, description="Pub/Sub topic for direct agent messages" + ) + + +class MemoryMetadata(BaseModel): + """Memory configuration information.""" + + type: str = Field(..., description="Type of memory used by the agent") + statestore: Optional[str] = Field( + default=None, description="Dapr state store component name for memory" + ) + session_id: Optional[str] = Field( + default=None, description="Default session ID for the agent's memory" + ) + + +class LLMMetadata(BaseModel): + """LLM configuration information.""" + + client: str = Field(..., description="LLM client used by the agent") + provider: str = Field(..., description="LLM provider used by the agent") + api: str = Field(default="unknown", description="API type used by the LLM client") + model: str = Field(default="unknown", description="Model name or identifier") + component_name: Optional[str] = Field( + default=None, description="Dapr component name for the LLM client" + ) + base_url: Optional[str] = Field( + default=None, description="Base URL for the LLM API if applicable" + ) + azure_endpoint: Optional[str] = Field( + default=None, description="Azure endpoint if using Azure OpenAI" + ) + azure_deployment: Optional[str] = Field( + default=None, description="Azure deployment name if using Azure OpenAI" + ) + prompt_template: Optional[str] = Field( + default=None, description="Prompt template used by the agent" + ) + + +class ToolMetadata(BaseModel): + """Metadata about a tool available to the agent.""" + + tool_name: str = Field(..., description="Name of the tool") + tool_description: str = Field( + ..., description="Description of the tool's functionality" + ) + tool_args: str = Field(..., description="Arguments for the tool") + + +class RegistryMetadata(BaseModel): + """Registry configuration information.""" + + statestore: Optional[str] = Field( + None, description="Name of the 
statestore component for the registry" + ) + name: Optional[str] = Field(default=None, description="Name of the team registry") + + +class AgentMetadataSchema(BaseModel): + """Schema for agent metadata including schema version.""" + + schema_version: str = Field( + ..., + description="Version of the schema used for the agent metadata.", + ) + agent: AgentMetadata = Field( + ..., description="Agent configuration and capabilities" + ) + name: str = Field(..., description="Name of the agent") + registered_at: str = Field(..., description="ISO 8601 timestamp of registration") + pubsub: Optional[PubSubMetadata] = Field( + None, description="Pub/sub configuration if enabled" + ) + memory: Optional[MemoryMetadata] = Field( + None, description="Memory configuration if enabled" + ) + llm: Optional[LLMMetadata] = Field(None, description="LLM configuration") + registry: Optional[RegistryMetadata] = Field( + None, description="Registry configuration" + ) + tools: Optional[List[ToolMetadata]] = Field(None, description="Available tools") + max_iterations: Optional[int] = Field( + None, description="Maximum iterations for agent execution" + ) + tool_choice: Optional[str] = Field(None, description="Tool choice strategy") + agent_metadata: Optional[Dict[str, Any]] = Field( + None, description="Additional metadata about the agent" + ) + + @classmethod + def export_json_schema(cls, version: str) -> Dict[str, Any]: + """ + Export the JSON schema with version information. 
+ + Args: + version: The dapr-agents version for this schema + + Returns: + JSON schema dictionary with metadata + """ + schema = cls.model_json_schema() + schema["$schema"] = "https://json-schema.org/draft/2020-12/schema" + schema["version"] = version + return schema \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py new file mode 100644 index 00000000..b81f0d98 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +__version__ = '1.17.0.dev' diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json new file mode 100644 index 00000000..394fae28 --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json @@ -0,0 +1,8 @@ +{ + "current_version": "0.10.7", + "schema_url": "https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json", + "available_versions": [ + "v0.10.7", + "v0.10.6" + ] +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json new file mode 100644 index 00000000..7dea0262 --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Namememory of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": 
"string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. 
Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the 
most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. 
Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. 
Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + 
"title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to 
generate", + "title": "N" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": "object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + 
"description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. 
Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast 
Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, 
+ "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.7" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json new file mode 100644 index 00000000..b9cbb0bb --- /dev/null +++ 
b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Namememory of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. 
Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. 
Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. 
Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. 
Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + 
"title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to 
generate", + "title": "N" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": "object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + 
"description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. 
Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast 
Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, 
+ "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.6" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json new file mode 100644 index 00000000..7dea0262 --- /dev/null +++ 
b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. 
Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. 
Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. 
Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. 
Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + 
"title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to 
generate", + "title": "N" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": 
"null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": "object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + 
"description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. 
Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast 
Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, 
+ "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.7" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/scripts/generate_schema.py b/ext/dapr-ext-agent-core/scripts/generate_schema.py new file mode 100644 index 00000000..12759576 --- /dev/null +++ b/ext/dapr-ext-agent-core/scripts/generate_schema.py @@ 
import argparse
import json
from pathlib import Path
from importlib.metadata import version, PackageNotFoundError
from typing import Any, Optional


def get_auto_version() -> str:
    """Return the installed ``dapr-agents`` version as a string.

    Falls back to ``'0.0.0.dev0'`` when the distribution is not installed
    (e.g. a fresh checkout or a CI environment without the package).
    """
    try:
        return version("dapr-agents")
    except PackageNotFoundError:
        return "0.0.0.dev0"


def generate_schema(output_dir: Path, schema_version: Optional[str] = None) -> None:
    """
    Generate versioned schema files.

    Writes three artifacts under ``<output_dir>/agent-metadata``:
      * ``v<version>.json`` — the schema pinned to this version
      * ``latest.json``     — a copy of the current schema
      * ``index.json``      — current version, canonical URL, and the sorted
        list of every version file present on disk

    Args:
        output_dir: Directory to output schema files.
        schema_version: Specific version to use. If None, auto-detects from package.

    Raises:
        OSError: If the schema files cannot be written.
    """
    # Imported lazily so ``--help``/argument errors work even when the
    # extension package is not importable in the current environment.
    from dapr.ext.agent_core import AgentMetadataSchema

    # Use provided version or auto-detect
    current_version = schema_version or get_auto_version()
    print(f"Generating schema for version: {current_version}")

    schema_dir = output_dir / "agent-metadata"
    # BUG FIX: the output directory may not exist yet (fresh clone or a
    # custom --output path); create it instead of failing on the first open().
    schema_dir.mkdir(parents=True, exist_ok=True)

    # Export schema
    schema: dict[Any, Any] = AgentMetadataSchema.export_json_schema(current_version)

    # Write versioned file
    version_file = schema_dir / f"v{current_version}.json"
    with open(version_file, "w") as f:
        json.dump(schema, f, indent=2)
    print(f"✓ Generated {version_file}")

    # Write latest.json
    latest_file = schema_dir / "latest.json"
    with open(latest_file, "w") as f:
        json.dump(schema, f, indent=2)
    print(f"✓ Generated {latest_file}")

    # Write index with all versions (newest first; glob runs after the
    # versioned file above is written, so the new version is included).
    index: dict[Any, Any] = {
        "current_version": current_version,
        "schema_url": f"https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v{current_version}.json",
        "available_versions": sorted(
            [f.stem for f in schema_dir.glob("v*.json")], reverse=True
        ),
    }

    index_file = schema_dir / "index.json"
    with open(index_file, "w") as f:
        json.dump(index, f, indent=2)
    print(f"✓ Generated {index_file}")
    print(f"\nSchema generation complete for version {current_version}")


def main() -> None:
    """Main entry point with CLI argument parsing."""
    parser = argparse.ArgumentParser(
        description="Generate JSON schema files for agent metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Auto-detect version from installed package
  python scripts/generate_schema.py

  # Generate schema for specific version
  python scripts/generate_schema.py --version 1.0.0

  # Generate for pre-release
  python scripts/generate_schema.py --version 1.1.0-rc1

  # Custom output directory
  python scripts/generate_schema.py --version 1.0.0 --output ./custom-schemas
""",
    )

    parser.add_argument(
        "--version",
        "-v",
        type=str,
        default=None,
        help="Specific version to use for schema generation. If not provided, auto-detects from installed package.",
    )

    parser.add_argument(
        "--output",
        "-o",
        type=Path,
        default=None,
        help="Output directory for schemas. Defaults to 'schemas' in repo root.",
    )

    args = parser.parse_args()

    # Determine output directory: default to <repo_root>/schemas, where the
    # repo root is the parent of the scripts/ directory containing this file.
    if args.output:
        schemas_dir = args.output
    else:
        repo_root = Path(__file__).parent.parent
        schemas_dir = repo_root / "schemas"

    # Generate schemas
    generate_schema(schemas_dir, schema_version=args.version)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-

"""
Copyright 2025 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import os

from setuptools import setup

# Load version in dapr package.
version_info = {}
with open('dapr/ext/agent_core/version.py') as fp:
    exec(fp.read(), version_info)
__version__ = version_info['__version__']


def is_release():
    """Return True when __version__ denotes a final (non-'.dev') release."""
    return '.dev' not in __version__


# BUG FIX: was 'dapr-ext-agent_core' (mixed '-' and '_'); use the canonical
# all-dashed spelling to match the other dapr-ext-* packages. PEP 503
# normalization treats both forms as the same project, so this is
# backward compatible for installs.
name = 'dapr-ext-agent-core'
version = __version__
description = 'The official release of Dapr Python SDK Agent Core Extension.'
long_description = """
This is the core extension for Dapr Python SDK Agent integrations.
Dapr is a portable, serverless, event-driven runtime that makes it easy for developers to
build resilient, stateless and stateful microservices that run on the cloud and edge and
embraces the diversity of languages and developer frameworks.

Dapr codifies the best practices for building microservice applications into open,
independent, building blocks that enable you to build portable applications with the language
and framework of your choice. Each building block is independent and you can use one, some,
or all of them in your application.
""".lstrip()

# Get build number from GITHUB_RUN_NUMBER environment variable
build_number = os.environ.get('GITHUB_RUN_NUMBER', '0')

if not is_release():
    name += '-dev'
    version = f'{__version__}{build_number}'
    # BUG FIX: the dev-release descriptions were copy-pasted from the Strands
    # session-manager extension and misdescribed this package; describe the
    # Agent Core extension instead.
    description = (
        'The developmental release of the Dapr Python SDK Agent Core Extension.'
    )
    long_description = (
        'This is the developmental release of the Dapr Python SDK Agent Core Extension.'
    )

print(f'package name: {name}, version: {version}', flush=True)


setup(
    name=name,
    version=version,
    description=description,
    long_description=long_description,
)
a/ext/dapr-ext-langgraph/setup.cfg +++ b/ext/dapr-ext-langgraph/setup.cfg @@ -25,6 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev + dapr.ext.agent_core >= 1.17.0.dev langgraph >= 0.3.6 langchain >= 0.1.17 python-ulid >= 3.0.0