5 changes: 4 additions & 1 deletion README.md
@@ -56,13 +56,16 @@ MYTHIC__SERVER_PORT=7443
MYTHIC__TIMEOUT=-1
MYTHIC__PAYLOAD_PORT_HTTP=1337
```
If you need local LLM via [ollama](https://github.com/ollama/ollama) add to env also
If you need a local LLM via [ollama](https://github.com/ollama/ollama), also add the following to your env. Depending on your resources, we recommend [Mistral](https://ollama.com/library/mistral), [Qwen3-ab](https://ollama.com/jaahas/qwen3-abliterated), [llama3.1:8b](https://ollama.com/library/llama3.1), [llama3.1-ab:8b](https://ollama.com/mannix/llama3.1-8b-abliterated) (less restricted), or [Qwen2.5-coder:32b](https://ollama.com/library/qwen2.5-coder).
```env
LLMSERVICE__LOCAL=TRUE
LLMSERVICE__API_URL=http://localhost:69228
LLMSERVICE__API_KEY=super_secret_key
LLMSERVICE__TIMEOUT=120
LLMSERVICE__DEFAULT_MODEL=mistral
```
Otherwise, the values are taken from the [config](https://github.com/eogod/EAGLE/blob/main/backend/app/core/config.py), where you can also see how they are validated.
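
As a quick sanity check, a minimal sketch like the one below (not part of the repository) can confirm that the endpoint and model from the variables above respond. It assumes the model has already been pulled (e.g. `ollama pull mistral`) and uses the same `ollama` Python client as the backend.
```python
# Sanity-check sketch: verify the local ollama endpoint responds.
# Assumes the .env values shown above and an already-pulled model.
import os

from dotenv import load_dotenv
from ollama import Client

load_dotenv()
client = Client(host=os.getenv("LLMSERVICE__API_URL", "http://localhost:11434"))
reply = client.generate(
    model=os.getenv("LLMSERVICE__DEFAULT_MODEL", "mistral"),
    prompt="ping",
)
print(reply.response)  # any short completion means the local setup works
```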

Install dependencies
```bash
# Poetry install (python3.13)
96 changes: 63 additions & 33 deletions backend/app/cmd/llm_analysis.py
@@ -1,10 +1,23 @@
""" Module for unified interface to LLM services """
import os
from dotenv import load_dotenv

import g4f
from ollama import Client
from fastapi import HTTPException
# TODO: openrouter, yandexgpt support

from app.core.llm_templ import LLMTemplates

# g4f configuration
g4f.debug.logging = True

load_dotenv()
# os.getenv returns a string (or None), so compare explicitly instead of relying
# on truthiness: any non-empty value, including "FALSE", would otherwise be truthy.
IS_LOCAL_LLM: bool = os.getenv('LLMSERVICE__LOCAL', '').upper() == 'TRUE'
if IS_LOCAL_LLM:
    client_ollama = Client(host=os.getenv('LLMSERVICE__API_URL'))
else:
    client_ollama = None


class LLMService:
    def __init__(self):
@@ -20,43 +33,60 @@ async def query_llm(self, prompt: str, provider_name: str = None) -> str:
"""
# TODO: support custom system prompt
try:
# Если указан провайдер, используем его
if provider_name and provider_name in self.providers:
provider = self.providers[provider_name]
try:
response = await g4f.ChatCompletion.create_async(
model=g4f.models.default,
messages=[{"role": "user", "content": prompt}],
provider=provider
)
return response
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"Provider {provider_name} failed: {str(e)}"
) from e

# Если провайдер не указан или не найден, пробуем разные
for name, provider in self.providers.items():
try:
response = await g4f.ChatCompletion.create_async(
model=g4f.models.default,
messages=[{"role": "user", "content": prompt}],
provider=provider
)
if response and len(response) > 0:
return response
except Exception as e:
print(f"Provider {name} failed: {e}")
continue

return "Не удалось получить ответ от ни одного провайдера"

if IS_LOCAL_LLM:
res: str = self._local_llm(prompt)
else:
res: str = await self._g4f_llm(prompt, provider_name)
return res
except Exception as e:
raise HTTPException(
status_code=500, detail=f"Ошибка при запросе к LLM: {str(e)}"
) from e

    def _local_llm(self, prompt: str) -> str:
        """ Query the local LLM via ollama """
        res = client_ollama.generate(
            model=os.getenv('LLMSERVICE__DEFAULT_MODEL'),
            prompt=prompt, system=LLMTemplates.SYSTEM_PROMT
        )
        # strip the <think> reasoning text emitted by deepseek-r1, qwen, and qwq models
        parts_th = res.response.rsplit('</think>', 1)
        return parts_th[-1] if len(parts_th) > 1 else res.response

    async def _g4f_llm(self, prompt: str, provider_name: str) -> str:
        """ Query via g4f proxy providers such as DDG """
        # If a provider is specified, use it
        if provider_name and provider_name in self.providers:
            provider = self.providers[provider_name]
            try:
                response = await g4f.ChatCompletion.create_async(
                    model=g4f.models.default,
                    messages=[{"role": "user", "content": prompt}],
                    provider=provider
                )
                return response
            except Exception as e:
                raise HTTPException(
                    status_code=500,
                    detail=f"Provider {provider_name} failed: {str(e)}"
                ) from e

        # If no provider is specified or it is not found, try them all
        for name, provider in self.providers.items():
            try:
                response = await g4f.ChatCompletion.create_async(
                    model=g4f.models.default,
                    messages=[{"role": "user", "content": prompt}],
                    provider=provider
                )
                if response and len(response) > 0:
                    return response
            except Exception as e:
                print(f"Provider {name} failed: {e}")
                continue

        return "Failed to get a response from any provider"


# Service initialization
llm_service = LLMService()
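
For context, the refactor keeps the caller-facing interface unchanged: `query_llm` now dispatches either to the local ollama client or to the g4f providers, depending on `LLMSERVICE__LOCAL`. A hypothetical usage sketch (the route path and prompt below are illustrative, not part of this PR):

```python
# Hypothetical usage sketch; the route path and prompt are illustrative only.
from fastapi import FastAPI

from app.cmd.llm_analysis import llm_service

app = FastAPI()


@app.post("/analyze")
async def analyze(prompt: str) -> dict:
    # Dispatches to ollama or the g4f providers based on LLMSERVICE__LOCAL.
    answer = await llm_service.query_llm(prompt)
    return {"result": answer}
```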
9 changes: 5 additions & 4 deletions backend/app/core/config.py
@@ -63,10 +63,11 @@ class Mythic(BaseModel):

class LLMservice(BaseModel):
""" env format like LLMSERVICE__API_URL=http... """
API_URL: HttpUrl = "http://localhost:69228" # Для локального Ollama
API_KEY: str = None
TIMEOUT: int = 120
DEFAULT_MODEL: str = "mistral"
local: bool = False
api_url: HttpUrl = "http://localhost:69228" # Для локального Ollama
api_key: str = None
timeout: int = 120
default_model: str = "mistral"


class Settings(BaseSettings):
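
Since only the field names and the new `local` flag change here, it may help to see how the `LLMSERVICE__*` variables from the README reach these lowercase fields. The sketch below assumes pydantic v2 with pydantic-settings and a `__` nested delimiter on `Settings` (not visible in this diff), so treat it as illustrative rather than the project's exact wiring.

```python
# Illustrative sketch of nested env loading; assumes pydantic v2 +
# pydantic-settings with env_nested_delimiter="__" (not shown in this diff).
import os

from pydantic import BaseModel, HttpUrl
from pydantic_settings import BaseSettings, SettingsConfigDict


class LLMservice(BaseModel):
    local: bool = False
    api_url: HttpUrl = "http://localhost:69228"  # default from the diff; defaults are not validated
    api_key: str | None = None
    timeout: int = 120
    default_model: str = "mistral"


class Settings(BaseSettings):
    model_config = SettingsConfigDict(env_nested_delimiter="__")
    LLMSERVICE: LLMservice = LLMservice()


os.environ["LLMSERVICE__LOCAL"] = "TRUE"
os.environ["LLMSERVICE__DEFAULT_MODEL"] = "qwen2.5-coder:32b"
print(Settings().LLMSERVICE.local, Settings().LLMSERVICE.default_model)
# True qwen2.5-coder:32b
```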
24 changes: 20 additions & 4 deletions backend/poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions backend/pyproject.toml
@@ -22,6 +22,7 @@ gql = "^3.5.3"
dotenv = "^0.9.9"
autopep8 = "^2.3.2"
g4f = "^0.5.8.1"
ollama = "^0.5.3"

[tool.poetry.group.dev.dependencies]
coverage = "^7.9.1"