LLM reel caption and video description + Refactor in services
app/Services/AIPrompt/OpenAPIPrompt.php  (Normal file, 137 additions)
@@ -0,0 +1,137 @@
<?php

namespace App\Services\AIPrompt;

/**
 * Use an LLM HTTP API (Ollama's /api/generate endpoint) to get answers from a model.
 */
class OpenAPIPrompt implements IAIPrompt
{
    private string $host;
    private ?string $token = null;

    public function __construct(?string $host = null)
    {
        $this->host = $host ?? config('llm.api.host');
        if (config('llm.api.token')) {
            $this->token = config('llm.api.token');
        }
    }
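
    // The 'llm.api.*' config keys read above are not defined in this commit;
    // a plausible config/llm.php shape (the env variable names and the default
    // host are assumptions; 11434 is Ollama's default port) would be:
    //
    // return [
    //     'api' => [
    //         'host'  => env('LLM_API_HOST', 'http://localhost:11434'),
    //         'token' => env('LLM_API_TOKEN'),
    //     ],
    // ];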

    private function getHeaders(): array
    {
        $headers = ['Content-Type: application/json'];

        // Only send an Authorization header when a token is configured,
        // instead of always sending an empty "Authorization:" header.
        if ($this->token) {
            $headers[] = 'Authorization: Bearer ' . $this->token;
        }

        return $headers;
    }

    /**
     * POST a JSON body to the given API endpoint.
     *
     * @param string $endpoint
     * @param string $body JSON-encoded request body
     * @throws \Exception on transport failure or a non-200 response
     * @return string the raw response body
     */
    private function callAPI(string $endpoint, string $body): string
    {
        $url = $this->host . $endpoint;

        $ch = curl_init($url);
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_HTTPHEADER, $this->getHeaders());
        curl_setopt($ch, CURLOPT_POST, true);
        curl_setopt($ch, CURLOPT_POSTFIELDS, $body);
        $response = curl_exec($ch);
        $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);
        $curlError = curl_error($ch);
        curl_close($ch);

        // curl_exec() returns false on transport failure; report that
        // separately from HTTP-level errors.
        if ($response === false) {
            throw new \Exception("Error calling LLM API: $curlError");
        }
        if ($httpCode !== 200) {
            throw new \Exception("Error calling LLM API: HTTP $httpCode - $response");
        }

        return $response;
    }

    /**
     * Call the generate endpoint to produce a response to a prompt.
     *
     * @param string $model
     * @param string $prompt
     * @param array $images image file paths, base64-encoded before sending
     * @param string|null $outputFormat "json" or a JSON schema, as a JSON string
     * @param string|null $systemMessage overrides the Modelfile system message
     * @param bool $keepAlive keep the model loaded in memory after the request
     * @param bool $shouldThink for thinking models: think before responding
     * @return string the model's response text
     */
    public function generate(string $model, string $prompt, array $images = [], ?string $outputFormat = null, ?string $systemMessage = null, bool $keepAlive = true, bool $shouldThink = false): string
    {
        /*
        Generate a completion

        POST /api/generate

        Generate a response for a given prompt with a provided model. This is
        a streaming endpoint, so there will be a series of responses. The
        final response object will include statistics and additional data
        from the request.

        Parameters:
            model: (required) the model name
            prompt: the prompt to generate a response for
            suffix: the text after the model response
            images: (optional) a list of base64-encoded images (for multimodal models such as llava)
            think: (for thinking models) should the model think before responding?

        Advanced parameters (optional):
            format: the format to return a response in; can be "json" or a JSON schema
            options: additional model parameters listed in the Modelfile documentation, such as temperature
            system: system message (overrides what is defined in the Modelfile)
            template: the prompt template to use (overrides what is defined in the Modelfile)
            stream: if false, the response is returned as a single response object rather than a stream of objects
            raw: if true, no formatting is applied to the prompt; useful when specifying a full templated prompt in the request
            keep_alive: controls how long the model stays loaded in memory following the request (default: 5m)
            context (deprecated): the context parameter returned from a previous request to /generate; can be used to keep a short conversational memory

        Structured outputs: supported by providing a JSON schema in the format
        parameter. The model will generate a response that matches the schema.

        JSON mode: enable it by setting the format parameter to "json". This
        structures the response as a valid JSON object.

        Important: instruct the model to use JSON in the prompt. Otherwise,
        the model may generate large amounts of whitespace.
        */
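
        // Illustrative request body for the endpoint documented above (a
        // sketch, not part of this commit; model name and schema invented):
        //
        // {
        //     "model": "llava",
        //     "prompt": "Describe this image. Respond as JSON.",
        //     "images": ["<base64 data>"],
        //     "stream": false,
        //     "format": {
        //         "type": "object",
        //         "properties": { "caption": { "type": "string" } },
        //         "required": ["caption"]
        //     }
        // }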

        // Replace image file paths with base64-encoded file contents
        foreach ($images as &$image) {
            if (file_exists($image)) {
                $image = base64_encode(file_get_contents($image));
            }
        }
        unset($image); // break the reference left by the foreach

        $body = [
            'model' => $model,
            'prompt' => $prompt,
            'images' => $images,
            'think' => $shouldThink,
            'stream' => false,
        ];

        if ($systemMessage !== null) {
            $body['system'] = $systemMessage;
        }
        if ($outputFormat !== null) {
            // Decode the JSON schema string so it is embedded in the body
            // as an object, not as a double-encoded string.
            $body['format'] = json_decode($outputFormat);
        }
        if (!$keepAlive) {
            $body['keep_alive'] = '0m';
        }

        $body = json_encode($body);

        $response = $this->callAPI('/api/generate', $body);
        $decodedResponse = json_decode($response, true);
        if (json_last_error() !== JSON_ERROR_NONE) {
            throw new \Exception("Error decoding JSON response: " . json_last_error_msg());
        }

        return $decodedResponse['response'] ?? '';
    }
}
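
A minimal usage sketch (not part of this commit; the model name, prompt, and
image path are placeholders chosen to match the commit's reel-caption use case):

    $llm = new \App\Services\AIPrompt\OpenAPIPrompt();
    $caption = $llm->generate(
        model: 'llava',
        prompt: 'Write a short reel caption for this video frame. Respond as JSON.',
        images: [storage_path('app/frames/frame-001.jpg')],
        outputFormat: json_encode([
            'type' => 'object',
            'properties' => ['caption' => ['type' => 'string']],
            'required' => ['caption'],
        ]),
    );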