| |
- builtins.object
-
- BaseLLMResult
-
- LLMResult
- LLMResultStreaming
- BaseModuleResults
-
- ModuleResults
- ModuleResultsStreaming
- ChatDelta
- GenericModuleResult
- LLMChoice
- LLMChoiceStreaming
- LLMUsage
- OrchestrationResponse
- OrchestrationResponseStreaming
class BaseLLMResult(builtins.object) |
|
BaseLLMResult(id: str, object: str, created: int, model: str) -> None
Base class for LLM results containing common attributes.
Attributes:
id: Unique identifier for the LLM operation.
object: Type of object returned (e.g., "chat.completion").
created: Timestamp when this result was created.
model: Name or identifier of the model used. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, id: str, object: str, created: int, model: str) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'created': <class 'int'>, 'id': <class 'str'>, 'model': <class 'str'>, 'object': <class 'str'>}
- __dataclass_fields__ = {'created': Field(name='created',type=<class 'int'>,default=...appingproxy({}),kw_only=False,_field_type=_FIELD), 'id': Field(name='id',type=<class 'str'>,default=<data...appingproxy({}),kw_only=False,_field_type=_FIELD), 'model': Field(name='model',type=<class 'str'>,default=<d...appingproxy({}),kw_only=False,_field_type=_FIELD), 'object': Field(name='object',type=<class 'str'>,default=<...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('id', 'object', 'created', 'model')
|
class BaseModuleResults(builtins.object) |
|
BaseModuleResults(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None) -> None
Base class for module results containing the common grounding, filtering, and masking attributes.
Attributes:
input_filtering: Results from the input filtering module.
output_filtering: Results from the output filtering module.
input_masking: Results from the input masking module.
- grounding: Results from the grounding module, supplying extracted text as grounding context. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'grounding': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'input_filtering': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'input_masking': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'output_filtering': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult]}
- __dataclass_fields__ = {'grounding': Field(name='grounding',type=typing.Optional[gen_...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_filtering': Field(name='input_filtering',type=typing.Optiona...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_masking': Field(name='input_masking',type=typing.Optional[...appingproxy({}),kw_only=False,_field_type=_FIELD), 'output_filtering': Field(name='output_filtering',type=typing.Option...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding')
- grounding = None
- input_filtering = None
- input_masking = None
- output_filtering = None
|
class ChatDelta(builtins.object) |
|
ChatDelta(content: str, role: Optional[str] = None) -> None
Represents a partial update in a streaming chat response.
Attributes:
content: The text content of the chat delta.
role: Optional role identifier (e.g., 'assistant', 'user') for the message delta. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, content: str, role: Optional[str] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'content': <class 'str'>, 'role': typing.Optional[str]}
- __dataclass_fields__ = {'content': Field(name='content',type=<class 'str'>,default=...appingproxy({}),kw_only=False,_field_type=_FIELD), 'role': Field(name='role',type=typing.Optional[str],defa...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('content', 'role')
- role = None
|
class GenericModuleResult(builtins.object) |
|
GenericModuleResult(message: str, data: Optional[Dict[str, Any]] = None) -> None
Represents a generic module result in the orchestration process.
Attributes:
message: A message or description generated by the module.
data: Additional data relevant to the module result. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, message: str, data: Optional[Dict[str, Any]] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'data': typing.Optional[typing.Dict[str, typing.Any]], 'message': <class 'str'>}
- __dataclass_fields__ = {'data': Field(name='data',type=typing.Optional[typing.Di...appingproxy({}),kw_only=False,_field_type=_FIELD), 'message': Field(name='message',type=<class 'str'>,default=...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('message', 'data')
- data = None
|
class LLMChoice(builtins.object) |
|
LLMChoice(index: int, message: gen_ai_hub.orchestration.models.message.Message, finish_reason: str, logprobs: Optional[Dict[str, float]] = None) -> None
Represents an individual choice or response generated by the LLM.
Attributes:
index: The index of this particular choice in the list of possible choices.
message: The message object containing the role and content of the response.
finish_reason: The reason why the model stopped generating tokens.
logprobs: Optional dictionary containing token log probabilities. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, index: int, message: gen_ai_hub.orchestration.models.message.Message, finish_reason: str, logprobs: Optional[Dict[str, float]] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'finish_reason': <class 'str'>, 'index': <class 'int'>, 'logprobs': typing.Optional[typing.Dict[str, float]], 'message': <class 'gen_ai_hub.orchestration.models.message.Message'>}
- __dataclass_fields__ = {'finish_reason': Field(name='finish_reason',type=<class 'str'>,de...appingproxy({}),kw_only=False,_field_type=_FIELD), 'index': Field(name='index',type=<class 'int'>,default=<d...appingproxy({}),kw_only=False,_field_type=_FIELD), 'logprobs': Field(name='logprobs',type=typing.Optional[typin...appingproxy({}),kw_only=False,_field_type=_FIELD), 'message': Field(name='message',type=<class 'gen_ai_hub.orc...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('index', 'message', 'finish_reason', 'logprobs')
- logprobs = None
|
class LLMChoiceStreaming(builtins.object) |
|
LLMChoiceStreaming(index: int, delta: gen_ai_hub.orchestration.models.response.ChatDelta, finish_reason: Optional[str] = None, logprobs: Optional[Dict[str, float]] = None) -> None
Represents a streaming choice or partial response generated by the LLM.
Attributes:
index: The index of this particular choice in the list of possible choices.
delta: The partial update (ChatDelta) for this choice.
finish_reason: Optional reason for why the generation stopped, may be None during streaming.
logprobs: Optional dictionary containing token log probabilities. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, index: int, delta: gen_ai_hub.orchestration.models.response.ChatDelta, finish_reason: Optional[str] = None, logprobs: Optional[Dict[str, float]] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'delta': <class 'gen_ai_hub.orchestration.models.response.ChatDelta'>, 'finish_reason': typing.Optional[str], 'index': <class 'int'>, 'logprobs': typing.Optional[typing.Dict[str, float]]}
- __dataclass_fields__ = {'delta': Field(name='delta',type=<class 'gen_ai_hub.orche...appingproxy({}),kw_only=False,_field_type=_FIELD), 'finish_reason': Field(name='finish_reason',type=typing.Optional[...appingproxy({}),kw_only=False,_field_type=_FIELD), 'index': Field(name='index',type=<class 'int'>,default=<d...appingproxy({}),kw_only=False,_field_type=_FIELD), 'logprobs': Field(name='logprobs',type=typing.Optional[typin...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('index', 'delta', 'finish_reason', 'logprobs')
- finish_reason = None
- logprobs = None
|
class LLMResult(BaseLLMResult) |
|
LLMResult(id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoice], usage: gen_ai_hub.orchestration.models.response.LLMUsage, system_fingerprint: Optional[str] = None) -> None
Represents the complete result from an LLM operation.
Attributes:
id: The unique identifier for this LLM operation.
object: The type of object returned (typically "chat.completion").
created: The timestamp when this result was created.
model: The name or identifier of the model used for generating the result.
choices: A list of possible choices generated by the LLM.
usage: The token usage statistics for this operation.
system_fingerprint: An optional system fingerprint for tracking the model used. |
|
- Method resolution order:
- LLMResult
- BaseLLMResult
- builtins.object
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoice], usage: gen_ai_hub.orchestration.models.response.LLMUsage, system_fingerprint: Optional[str] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data and other attributes defined here:
- __annotations__ = {'choices': typing.List[gen_ai_hub.orchestration.models.response.LLMChoice], 'system_fingerprint': typing.Optional[str], 'usage': <class 'gen_ai_hub.orchestration.models.response.LLMUsage'>}
- __dataclass_fields__ = {'choices': Field(name='choices',type=typing.List[gen_ai_hub...appingproxy({}),kw_only=False,_field_type=_FIELD), 'created': Field(name='created',type=<class 'int'>,default=...appingproxy({}),kw_only=False,_field_type=_FIELD), 'id': Field(name='id',type=<class 'str'>,default=<data...appingproxy({}),kw_only=False,_field_type=_FIELD), 'model': Field(name='model',type=<class 'str'>,default=<d...appingproxy({}),kw_only=False,_field_type=_FIELD), 'object': Field(name='object',type=<class 'str'>,default=<...appingproxy({}),kw_only=False,_field_type=_FIELD), 'system_fingerprint': Field(name='system_fingerprint',type=typing.Opti...appingproxy({}),kw_only=False,_field_type=_FIELD), 'usage': Field(name='usage',type=<class 'gen_ai_hub.orche...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('id', 'object', 'created', 'model', 'choices', 'usage', 'system_fingerprint')
- system_fingerprint = None
Data descriptors inherited from BaseLLMResult:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
|
class LLMResultStreaming(BaseLLMResult) |
|
LLMResultStreaming(id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], system_fingerprint: Optional[str] = None) -> None
Represents a streaming result from an LLM operation.
Attributes:
id: The unique identifier for this LLM operation.
object: The type of object returned (typically "chat.completion.chunk").
created: The timestamp when this result was created.
model: The name or identifier of the model used.
choices: A list of streaming choices generated by the LLM.
system_fingerprint: An optional system fingerprint for tracking the model used. |
|
- Method resolution order:
- LLMResultStreaming
- BaseLLMResult
- builtins.object
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], system_fingerprint: Optional[str] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data and other attributes defined here:
- __annotations__ = {'choices': typing.List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], 'system_fingerprint': typing.Optional[str]}
- __dataclass_fields__ = {'choices': Field(name='choices',type=typing.List[gen_ai_hub...appingproxy({}),kw_only=False,_field_type=_FIELD), 'created': Field(name='created',type=<class 'int'>,default=...appingproxy({}),kw_only=False,_field_type=_FIELD), 'id': Field(name='id',type=<class 'str'>,default=<data...appingproxy({}),kw_only=False,_field_type=_FIELD), 'model': Field(name='model',type=<class 'str'>,default=<d...appingproxy({}),kw_only=False,_field_type=_FIELD), 'object': Field(name='object',type=<class 'str'>,default=<...appingproxy({}),kw_only=False,_field_type=_FIELD), 'system_fingerprint': Field(name='system_fingerprint',type=typing.Opti...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('id', 'object', 'created', 'model', 'choices', 'system_fingerprint')
- system_fingerprint = None
Data descriptors inherited from BaseLLMResult:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
|
class LLMUsage(builtins.object) |
|
LLMUsage(completion_tokens: int, prompt_tokens: int, total_tokens: int) -> None
Represents the token usage statistics for an LLM (Large Language Model) operation.
Attributes:
completion_tokens: The number of tokens generated by the model in the response.
prompt_tokens: The number of tokens in the input prompt.
total_tokens: The total number of tokens used, including both prompt and completion tokens. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, completion_tokens: int, prompt_tokens: int, total_tokens: int) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'completion_tokens': <class 'int'>, 'prompt_tokens': <class 'int'>, 'total_tokens': <class 'int'>}
- __dataclass_fields__ = {'completion_tokens': Field(name='completion_tokens',type=<class 'int'...appingproxy({}),kw_only=False,_field_type=_FIELD), 'prompt_tokens': Field(name='prompt_tokens',type=<class 'int'>,de...appingproxy({}),kw_only=False,_field_type=_FIELD), 'total_tokens': Field(name='total_tokens',type=<class 'int'>,def...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('completion_tokens', 'prompt_tokens', 'total_tokens')
|
class ModuleResults(BaseModuleResults) |
|
ModuleResults(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResult] = None, templating: Optional[List[gen_ai_hub.orchestration.models.message.Message]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoice]] = None) -> None
Represents the results of various modules used in processing an orchestration request.
Attributes:
templating: A list of messages that define the conversation's context or template.
llm: The result from the LLM operation.
input_filtering: The result of any input filtering, if applicable.
output_filtering: The result of any output filtering, if applicable.
input_masking: The result of input masking, if applicable.
output_unmasking: The result of output unmasking, if applicable. |
|
- Method resolution order:
- ModuleResults
- BaseModuleResults
- builtins.object
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResult] = None, templating: Optional[List[gen_ai_hub.orchestration.models.message.Message]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoice]] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data and other attributes defined here:
- __annotations__ = {'llm': typing.Optional[gen_ai_hub.orchestration.models.response.LLMResult], 'output_unmasking': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.LLMChoice]], 'templating': typing.Optional[typing.List[gen_ai_hub.orchestration.models.message.Message]]}
- __dataclass_fields__ = {'grounding': Field(name='grounding',type=typing.Optional[gen_...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_filtering': Field(name='input_filtering',type=typing.Optiona...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_masking': Field(name='input_masking',type=typing.Optional[...appingproxy({}),kw_only=False,_field_type=_FIELD), 'llm': Field(name='llm',type=typing.Optional[gen_ai_hub...appingproxy({}),kw_only=False,_field_type=_FIELD), 'output_filtering': Field(name='output_filtering',type=typing.Option...appingproxy({}),kw_only=False,_field_type=_FIELD), 'output_unmasking': Field(name='output_unmasking',type=typing.Option...appingproxy({}),kw_only=False,_field_type=_FIELD), 'templating': Field(name='templating',type=typing.Optional[typ...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding', 'llm', 'templating', 'output_unmasking')
- llm = None
- output_unmasking = None
- templating = None
Data descriptors inherited from BaseModuleResults:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes inherited from BaseModuleResults:
- grounding = None
- input_filtering = None
- input_masking = None
- output_filtering = None
|
class ModuleResultsStreaming(BaseModuleResults) |
|
ModuleResultsStreaming(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ChatDelta]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]] = None) -> None
Represents the streaming results of various modules used in processing an orchestration request.
Attributes:
llm: The streaming result from the LLM operation.
templating: A list of chat deltas that define the conversation's context or template.
input_filtering: The result of any input filtering, if applicable.
output_filtering: The result of any output filtering, if applicable.
input_masking: The result of input masking, if applicable.
output_unmasking: The result of output unmasking for streaming responses. |
|
- Method resolution order:
- ModuleResultsStreaming
- BaseModuleResults
- builtins.object
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ChatDelta]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]] = None) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data and other attributes defined here:
- __annotations__ = {'llm': typing.Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming], 'output_unmasking': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]], 'templating': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.ChatDelta]]}
- __dataclass_fields__ = {'grounding': Field(name='grounding',type=typing.Optional[gen_...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_filtering': Field(name='input_filtering',type=typing.Optiona...appingproxy({}),kw_only=False,_field_type=_FIELD), 'input_masking': Field(name='input_masking',type=typing.Optional[...appingproxy({}),kw_only=False,_field_type=_FIELD), 'llm': Field(name='llm',type=typing.Optional[gen_ai_hub...appingproxy({}),kw_only=False,_field_type=_FIELD), 'output_filtering': Field(name='output_filtering',type=typing.Option...appingproxy({}),kw_only=False,_field_type=_FIELD), 'output_unmasking': Field(name='output_unmasking',type=typing.Option...appingproxy({}),kw_only=False,_field_type=_FIELD), 'templating': Field(name='templating',type=typing.Optional[typ...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding', 'llm', 'templating', 'output_unmasking')
- llm = None
- output_unmasking = None
- templating = None
Data descriptors inherited from BaseModuleResults:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes inherited from BaseModuleResults:
- grounding = None
- input_filtering = None
- input_masking = None
- output_filtering = None
|
class OrchestrationResponse(builtins.object) |
|
OrchestrationResponse(request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResults, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResult) -> None
Represents the complete response from an orchestration process.
Attributes:
request_id: The unique identifier for the request being processed.
module_results: The results from the various modules involved in processing the request.
orchestration_result: The final result from the orchestration, typically mirroring the LLM result. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResults, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResult) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'module_results': <class 'gen_ai_hub.orchestration.models.response.ModuleResults'>, 'orchestration_result': <class 'gen_ai_hub.orchestration.models.response.LLMResult'>, 'request_id': <class 'str'>}
- __dataclass_fields__ = {'module_results': Field(name='module_results',type=<class 'gen_ai_...appingproxy({}),kw_only=False,_field_type=_FIELD), 'orchestration_result': Field(name='orchestration_result',type=<class 'g...appingproxy({}),kw_only=False,_field_type=_FIELD), 'request_id': Field(name='request_id',type=<class 'str'>,defau...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('request_id', 'module_results', 'orchestration_result')
|
class OrchestrationResponseStreaming(builtins.object) |
|
OrchestrationResponseStreaming(request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResultsStreaming, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResultStreaming) -> None
Represents the streaming response from an orchestration process.
Attributes:
request_id: The unique identifier for the request being processed.
module_results: The streaming results from the various modules involved in processing the request.
orchestration_result: The streaming result from the orchestration. |
|
Methods defined here:
- __eq__(self, other)
- Return self==value.
- __init__(self, request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResultsStreaming, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResultStreaming) -> None
- Initialize self. See help(type(self)) for accurate signature.
- __repr__(self)
- Return repr(self).
Data descriptors defined here:
- __dict__
- dictionary for instance variables (if defined)
- __weakref__
- list of weak references to the object (if defined)
Data and other attributes defined here:
- __annotations__ = {'module_results': <class 'gen_ai_hub.orchestration.models.response.ModuleResultsStreaming'>, 'orchestration_result': <class 'gen_ai_hub.orchestration.models.response.LLMResultStreaming'>, 'request_id': <class 'str'>}
- __dataclass_fields__ = {'module_results': Field(name='module_results',type=<class 'gen_ai_...appingproxy({}),kw_only=False,_field_type=_FIELD), 'orchestration_result': Field(name='orchestration_result',type=<class 'g...appingproxy({}),kw_only=False,_field_type=_FIELD), 'request_id': Field(name='request_id',type=<class 'str'>,defau...appingproxy({}),kw_only=False,_field_type=_FIELD)}
- __dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
- __hash__ = None
- __match_args__ = ('request_id', 'module_results', 'orchestration_result')
| |