khoj/pyproject.toml
Debanjum Singh Solanky 8ca39a436c Use llama.cpp for offline chat models
- Benefits of moving to llama-cpp-python from gpt4all:
  - Support for all GGUF format chat models
  - Support for AMD, Nvidia, Mac, Vulkan GPU machines (instead of just Vulkan, Mac)
  - Supports models with more capabilities like tools, schema
    enforcement, speculative decoding, image generation, etc.
- Upgrade default chat model, prompt size, tokenizer for new supported
  chat models

- Load offline chat model when present on disk without requiring internet
  - Load model onto GPU if not disabled and device has GPU
  - Load model onto CPU if loading model onto GPU fails
  - Create helper function to check for and load the model from disk when a
    model glob is present on disk (see the sketch after the commit message).

    `Llama.from_pretrained` needs internet to get repo info from
    HuggingFace. This isn't required if the model is already downloaded.

    Didn't find an existing HF or llama.cpp method that looks for a model
    glob on disk without internet.
2024-03-26 22:33:01 +05:30
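
For context, here is a minimal sketch (not khoj's actual implementation) of the disk-first loading flow the commit describes, using the llama-cpp-python package pinned below. The cache path, function name, and repo-id-to-path mapping are assumptions for illustration.

    import glob
    import os

    from llama_cpp import Llama

    # Assumed HuggingFace hub cache location; khoj may resolve this differently.
    HF_CACHE = os.path.expanduser("~/.cache/huggingface/hub")

    def load_offline_chat_model(repo_id: str, filename: str, use_gpu: bool = True) -> Llama:
        """Load a GGUF chat model from disk if present, downloading only as a fallback."""
        # -1 offloads all layers to the GPU, 0 keeps everything on the CPU.
        n_gpu_layers = -1 if use_gpu else 0
        # The HuggingFace hub caches repos under models--<org>--<name>.
        pattern = os.path.join(HF_CACHE, f"models--{repo_id.replace('/', '--')}", "**", filename)
        cached = glob.glob(pattern, recursive=True)
        try:
            if cached:
                # Model file already on disk: load it directly, no internet needed.
                return Llama(model_path=cached[0], n_gpu_layers=n_gpu_layers)
            # Not downloaded yet: from_pretrained fetches repo info and the file from HuggingFace.
            return Llama.from_pretrained(repo_id=repo_id, filename=filename, n_gpu_layers=n_gpu_layers)
        except Exception:
            if n_gpu_layers != 0:
                # Loading onto the GPU failed (e.g. no usable backend, not enough VRAM): retry on CPU.
                return load_offline_chat_model(repo_id, filename, use_gpu=False)
            raise

A caller would pass a GGUF repo id and a quantization filename glob, e.g. load_offline_chat_model("SomeOrg/SomeModel-GGUF", "*Q4_K_M.gguf"); both values here are placeholders, not khoj's defaults.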

[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "khoj-assistant"
description = "An AI copilot for your Second Brain"
readme = "README.md"
license = "AGPL-3.0-or-later"
requires-python = ">=3.8"
authors = [
{ name = "Debanjum Singh Solanky, Saba Imran" },
]
keywords = [
"search",
"semantic-search",
"productivity",
"NLP",
"AI",
"org-mode",
"markdown",
"images",
"pdf",
]
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Text Processing :: Linguistic",
]
dependencies = [
"beautifulsoup4 ~= 4.12.3",
"dateparser >= 1.1.1",
"defusedxml == 0.7.1",
"fastapi >= 0.104.1",
"python-multipart >= 0.0.7",
"jinja2 == 3.1.3",
"openai >= 1.0.0",
"tiktoken >= 0.3.2",
"tenacity >= 8.2.2",
"pillow ~= 9.5.0",
"pydantic >= 2.0.0",
"pyyaml == 6.0",
"rich >= 13.3.1",
"schedule == 1.1.0",
"sentence-transformers == 2.5.1",
"transformers >= 4.28.0",
"torch == 2.0.1",
"uvicorn == 0.17.6",
"aiohttp ~= 3.9.0",
"langchain <= 0.2.0",
"langchain-openai >= 0.0.5",
"requests >= 2.26.0",
"anyio == 3.7.1",
"pymupdf >= 1.23.5",
"django == 4.2.10",
"authlib == 1.2.1",
"llama-cpp-python == 0.2.56",
"itsdangerous == 2.1.2",
"httpx == 0.25.0",
"pgvector == 0.2.4",
"psycopg2-binary == 2.9.9",
"gunicorn == 21.2.0",
"lxml == 4.9.3",
"tzdata == 2023.3",
"rapidocr-onnxruntime == 1.3.11",
"openai-whisper >= 20231117",
"django-phonenumber-field == 7.3.0",
"phonenumbers == 8.13.27",
"markdownify ~= 0.11.6",
]
dynamic = ["version"]
[project.urls]
Homepage = "https://khoj.dev"
Documentation = "https://docs.khoj.dev"
Code = "https://github.com/khoj-ai/khoj"
[project.scripts]
khoj = "khoj.main:run"
[project.optional-dependencies]
prod = [
"google-auth == 2.23.3",
"stripe == 7.3.0",
"twilio == 8.11",
"boto3 >= 1.34.57",
]
dev = [
"khoj-assistant[prod]",
"pytest >= 7.1.2",
"pytest-xdist[psutil]",
"pytest-django == 4.5.2",
"pytest-asyncio == 0.21.1",
"freezegun >= 1.2.0",
"factory-boy >= 3.2.1",
"psutil >= 5.8.0",
"mypy >= 1.0.1",
"black >= 23.1.0",
"pre-commit >= 3.0.4",
]
[tool.hatch.version]
source = "vcs"
raw-options.local_scheme = "no-local-version" # PEP440 compliant version for PyPi
[tool.hatch.build.targets.sdist]
include = ["src/khoj"]
[tool.hatch.build.targets.wheel]
packages = ["src/khoj"]
[tool.mypy]
files = "src/khoj"
pretty = true
strict_optional = false
install_types = true
ignore_missing_imports = true
non_interactive = true
show_error_codes = true
warn_unused_ignores = false
[tool.black]
line-length = 120
[tool.isort]
profile = "black"
[tool.pytest.ini_options]
addopts = "--strict-markers"
markers = [
"chatquality: Evaluate chatbot capabilities and quality",
]
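
For reference, a test opting into the chatquality marker registered above might look like the sketch below; the test name and body are hypothetical, not taken from khoj's test suite.

    import pytest

    @pytest.mark.chatquality
    def test_chat_quality_placeholder():
        # Selected with `pytest -m chatquality`, deselected with `pytest -m "not chatquality"`.
        # Because addopts sets --strict-markers, any marker not registered in
        # [tool.pytest.ini_options] errors out instead of passing silently.
        assert True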