# Ollama LLM platform (LLM, embedding, and vision models)

## LLM model:
```python
import requests

def ollama_platform(prompt, model, mode):
    OLLAMA_PLATFORM_URL = 'http://<lab 4090 IP>:6666/ollama_platform/'
    response = requests.post(OLLAMA_PLATFORM_URL, json={"prompt": prompt, "model": model, "mode": mode})
    return response.json()

# example usage
response = ollama_platform('France is good', 'llama3.2:3b', mode='LLM')
response['output']['message']['content']
```
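If the 4090 host is busy or the model is still loading, the plain `requests.post` call above can hang or bubble up a 500 from the wrapper. Below is a minimal, more defensive sketch; the `timeout`, `retries`, and backoff values are illustrative assumptions, not part of the platform.

```python
import time
import requests

OLLAMA_PLATFORM_URL = 'http://<lab 4090 IP>:6666/ollama_platform/'  # same endpoint as above

def ollama_platform_safe(prompt, model, mode, retries=3, timeout=120):
    """Call the platform with a timeout and a simple retry loop (illustrative only)."""
    for attempt in range(retries):
        try:
            response = requests.post(
                OLLAMA_PLATFORM_URL,
                json={"prompt": prompt, "model": model, "mode": mode},
                timeout=timeout,
            )
            response.raise_for_status()  # surface 500s returned by the FastAPI wrapper
            return response.json()
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            time.sleep(2 ** attempt)  # back off before retrying

# usage
# answer = ollama_platform_safe('France is good', 'llama3.2:3b', mode='LLM')
# print(answer['output']['message']['content'])
```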
---
## Embedding model:
```python
import requests

def ollama_platform(prompt, model, mode):
    OLLAMA_PLATFORM_URL = 'http://<lab 4090 IP>:6666/ollama_platform/'
    response = requests.post(OLLAMA_PLATFORM_URL, json={"prompt": prompt, "model": model, "mode": mode})
    return response.json()

# generic call: ollama_platform(text, '<embedding model name>', mode='embedding')
# example usage
response = ollama_platform('France is good', 'bge-m3', mode='embedding')
response['output']['embedding']
```
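The returned vector is typically used for similarity search. A small sketch, assuming `numpy` is installed and reusing the `ollama_platform` helper above, that compares two texts with cosine similarity:

```python
import numpy as np

def embed(text):
    # call the platform in embedding mode and return the vector as a numpy array
    return np.array(ollama_platform(text, 'bge-m3', mode='embedding')['output']['embedding'])

def cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

v1 = embed('France is good')
v2 = embed('France is a nice country')
print(cosine_similarity(v1, v2))  # closer to 1.0 means more similar
```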
---
## Vision LLM:
```python
import requests

def ollama_platform(prompt, model, mode, images):
    OLLAMA_PLATFORM_URL = 'http://140.115.54.162:6666/ollama_platform/'
    response = requests.post(OLLAMA_PLATFORM_URL, json={"prompt": prompt,
                                                        "model": model,
                                                        "mode": mode,
                                                        "images": images})
    return response.json()

# example usage
response = ollama_platform('What is in this image?', 'llama3.2-vision', mode='vision', images=['<absolute path to the image>'])
response['output']['message']['content']
```
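Note that the `images` list is passed straight through to the Ollama client running on the server, so a file path must exist on the 4090 host, not on the caller's machine. If the image lives on the caller's side, one possible workaround sketch is to send a base64-encoded string instead; this assumes the server-side Ollama client version accepts base64 image strings, which the wrapper does not explicitly guarantee.

```python
import base64

def encode_image(path):
    """Read a local image file and return it as a base64 string."""
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

img_b64 = encode_image('local_photo.jpg')  # hypothetical local file
response = ollama_platform('What is in this image?', 'llama3.2-vision',
                           mode='vision', images=[img_b64])
print(response['output']['message']['content'])
```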
## Maintenance
The platform endpoint is a FastAPI wrapper around the local Ollama instance on the GPU host:
```python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional
from ollama import Client
import uvicorn

OLLAMA_URL = "http://localhost:11434"

app = FastAPI()

class OllamaRequest(BaseModel):
    prompt: str
    model: str
    mode: str
    images: Optional[List[str]] = None

def ollama_platform(prompt, model, mode, images=None):
    client = Client(host=OLLAMA_URL)
    try:
        if mode == 'LLM':
            response = client.chat(model=model, messages=[
                {
                    'role': 'user',
                    'content': prompt
                }
            ])
        elif mode == 'embedding':
            response = client.embeddings(
                model=model,
                prompt=prompt,
            )
        elif mode == 'vision' and images:
            response = client.chat(model=model, messages=[
                {
                    'role': 'user',
                    'content': prompt,
                    'images': images
                }
            ])
        else:
            raise ValueError("Unsupported mode or missing images for vision mode.")
        return response
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/ollama_platform/")
async def run_ollama(request: OllamaRequest):
    try:
        response = ollama_platform(request.prompt, request.model, request.mode, request.images)
        return {"output": response}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# run the FastAPI app
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```
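After (re)starting the service, a quick smoke test can confirm the wrapper responds in each mode. The sketch below assumes the listed models are already pulled on the Ollama host and that `BASE_URL` points at whatever port the deployment actually exposes (the client snippets above use 6666, while `uvicorn` here listens on 8000).

```python
import requests

BASE_URL = 'http://localhost:8000/ollama_platform/'  # adjust host/port to the actual deployment

def smoke_test():
    cases = [
        {"prompt": "ping", "model": "llama3.2:3b", "mode": "LLM"},
        {"prompt": "ping", "model": "bge-m3", "mode": "embedding"},
    ]
    for payload in cases:
        r = requests.post(BASE_URL, json=payload, timeout=120)
        print(payload["mode"], r.status_code, "ok" if r.ok else r.text[:200])

if __name__ == "__main__":
    smoke_test()
```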