{
"operation-name" : "add",
"description" : "This operation adds a chat model",
"request-properties" : {
"base-url" : {
"type" : {
"TYPE_MODEL_VALUE" : "STRING"
},
"description" : "Endpoint to connect to an Ollama chat model.",
"expressions-allowed" : true,
"required" : true,
"nillable" : false,
"min-length" : 1,
"max-length" : 2147483647,
"stability" : "default"
},
"connect-timeout" : {
"type" : {
"TYPE_MODEL_VALUE" : "LONG"
},
"description" : "Timeout for the Ollama chat model.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"default" : 0,
"unit" : "MILLISECONDS",
"stability" : "default"
},
"log-requests" : {
"type" : {
"TYPE_MODEL_VALUE" : "BOOLEAN"
},
"description" : "Enabling the tracing of requests going to Ollama.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"log-responses" : {
"type" : {
"TYPE_MODEL_VALUE" : "BOOLEAN"
},
"description" : "Enabling the tracing of responses from Ollama.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"max-retries" : {
"type" : {
"TYPE_MODEL_VALUE" : "INT"
},
      "description" : "The maximum number of retries for API requests.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"model-name" : {
"type" : {
"TYPE_MODEL_VALUE" : "STRING"
},
"description" : "Name of the chat model served by Ollama.",
"expressions-allowed" : true,
"required" : true,
"nillable" : false,
"min-length" : 1,
"max-length" : 2147483647,
"stability" : "default"
},
"num-predict" : {
"type" : {
"TYPE_MODEL_VALUE" : "INT"
},
"description" : "The maximum number of tokens to predict (Ollama-specific parameter).",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"repeat-penalty" : {
"type" : {
"TYPE_MODEL_VALUE" : "DOUBLE"
},
"description" : "Penalty for token repetition to reduce repetitive text generation.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"response-format" : {
"type" : {
"TYPE_MODEL_VALUE" : "STRING"
},
"description" : "The format of the response from Ollama.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"allowed" : [
"JSON",
"TEXT"
],
"stability" : "default"
},
"seed" : {
"type" : {
"TYPE_MODEL_VALUE" : "INT"
},
"description" : "Random seed for reproducible outputs from the Ollama model.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"stop-sequences" : {
"type" : {
"TYPE_MODEL_VALUE" : "LIST"
},
"description" : "List of stop sequences to tell the model to stop generating content.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"min-length" : 0,
"max-length" : 2147483647,
"stability" : "default",
"value-type" : {
"TYPE_MODEL_VALUE" : "STRING"
}
},
"streaming" : {
"type" : {
"TYPE_MODEL_VALUE" : "BOOLEAN"
},
"description" : "Whether to create a token streaming chat language model or not.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"default" : false,
"stability" : "default"
},
"temperature" : {
"type" : {
"TYPE_MODEL_VALUE" : "DOUBLE"
},
"description" : "Temperature of the Ollama chat model.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"top-k" : {
"type" : {
"TYPE_MODEL_VALUE" : "INT"
},
"description" : "The number of highest probability tokens to consider during generation.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
},
"top-p" : {
"type" : {
"TYPE_MODEL_VALUE" : "DOUBLE"
},
"description" : "Controls token diversity using nucleus sampling.",
"expressions-allowed" : true,
"required" : false,
"nillable" : true,
"stability" : "default"
}
},
"reply-properties" : {},
"stability" : "default",
"read-only" : false,
"runtime-only" : false
}