git: 730828c62763 - main - misc/py-litellm: add port: Call all LLM APIs using the OpenAI format
Date: Mon, 12 Feb 2024 08:34:26 UTC
The branch main has been updated by tagattie:

URL: https://cgit.FreeBSD.org/ports/commit/?id=730828c627631142966c84d4c2943defaad86e4e

commit 730828c627631142966c84d4c2943defaad86e4e
Author:     Hiroki Tagato <tagattie@FreeBSD.org>
AuthorDate: 2024-02-12 08:30:35 +0000
Commit:     Hiroki Tagato <tagattie@FreeBSD.org>
CommitDate: 2024-02-12 08:34:14 +0000

    misc/py-litellm: add port: Call all LLM APIs using the OpenAI format

    Call all LLM APIs using the OpenAI format [Bedrock, Huggingface,
    VertexAI, TogetherAI, Azure, OpenAI, etc.]

    LiteLLM manages:
    - Translate inputs to provider's completion, embedding, and
      image_generation endpoints
    - Consistent output, text responses will always be available at
      ['choices'][0]['message']['content']
    - Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI)
      - Router
    - Track spend & set budgets per project OpenAI Proxy Server

    WWW: https://github.com/BerriAI/litellm
---
 misc/Makefile                                      |  1 +
 misc/py-litellm/Makefile                           | 46 ++++++++++++++++++++++
 misc/py-litellm/distinfo                           |  3 ++
 misc/py-litellm/files/patch-litellm_proxy_start.sh |  8 ++++
 misc/py-litellm/pkg-descr                          | 11 ++++++
 5 files changed, 69 insertions(+)

diff --git a/misc/Makefile b/misc/Makefile
index 20142149858b..8f664ef1613b 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -435,6 +435,7 @@
     SUBDIR += py-lazrs
     SUBDIR += py-lightgbm
     SUBDIR += py-lightning-utilities
+    SUBDIR += py-litellm
     SUBDIR += py-log_symbols
     SUBDIR += py-mffpy
     SUBDIR += py-mmcv
diff --git a/misc/py-litellm/Makefile b/misc/py-litellm/Makefile
new file mode 100644
index 000000000000..5702d6b60bf2
--- /dev/null
+++ b/misc/py-litellm/Makefile
@@ -0,0 +1,46 @@
+PORTNAME=	litellm
+DISTVERSION=	1.23.9
+CATEGORIES=	misc python
+MASTER_SITES=	PYPI
+PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
+
+MAINTAINER=	tagattie@FreeBSD.org
+COMMENT=	Call all LLM APIs using the OpenAI format
+WWW=		https://github.com/BerriAI/litellm
+
+LICENSE=	MIT
+LICENSE_FILE=	${WRKSRC}/LICENSE
+
+BUILD_DEPENDS=	${PYTHON_PKGNAMEPREFIX}poetry-core>0:devel/py-poetry-core@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}wheel>0:devel/py-wheel@${PY_FLAVOR}
+RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}openai>=1.0.0:misc/py-openai@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}python-dotenv>=0.2.0:www/py-python-dotenv@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}tiktoken>=0.4.0:textproc/py-tiktoken@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}importlib-metadata>=6.8.0:devel/py-importlib-metadata@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}tokenizers>0:textproc/py-tokenizers@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}click>0:devel/py-click@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}Jinja2>=3.1.2<4.0.0:devel/py-Jinja2@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}aiohttp>0:www/py-aiohttp@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}requests>=2.31.0<3.0.0:www/py-requests@${PY_FLAVOR}
+
+USES=		python shebangfix
+USE_PYTHON=	autoplist pep517
+
+REINPLACE_ARGS=	-i ''
+NO_ARCH=	yes
+
+PORTDOCS=	README.md
+
+OPTIONS_DEFINE=	DOCS
+
+post-patch:
+	@${REINPLACE_CMD} -e 's|%%PYTHON_CMD%%|${PYTHON_CMD}|' \
+		${WRKSRC}/litellm/proxy/start.sh
+	@${FIND} ${WRKSRC}/litellm/proxy -type f \
+		\( -name '*.orig' -o -name '*.bak' \) -delete
+
+post-install-DOCS-on:
+	@${MKDIR} ${STAGEDIR}${DOCSDIR}
+	${INSTALL_MAN} ${PORTDOCS:S|^|${WRKSRC}/|} ${STAGEDIR}${DOCSDIR}
+
+.include <bsd.port.mk>
diff --git a/misc/py-litellm/distinfo b/misc/py-litellm/distinfo
new file mode 100644
index 000000000000..d494a3abcdc4
--- /dev/null
+++ b/misc/py-litellm/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1707722656
+SHA256 (litellm-1.23.9.tar.gz) = 0c1e0e56f4d1d9c8a33da09d6736bde9b21a8ea324db8c05cc3de65c6b4fad7d
+SIZE (litellm-1.23.9.tar.gz) = 3139242
diff --git a/misc/py-litellm/files/patch-litellm_proxy_start.sh b/misc/py-litellm/files/patch-litellm_proxy_start.sh
new file mode 100644
index 000000000000..f1ce771fdaeb
--- /dev/null
+++ b/misc/py-litellm/files/patch-litellm_proxy_start.sh
@@ -0,0 +1,8 @@
+--- litellm/proxy/start.sh.orig	2024-02-11 03:13:21 UTC
++++ litellm/proxy/start.sh
+@@ -1,2 +1,2 @@
+-#!/bin/bash
+-python3 proxy_cli.py
+\ No newline at end of file
++#!/bin/sh
++%%PYTHON_CMD%% proxy_cli.py
diff --git a/misc/py-litellm/pkg-descr b/misc/py-litellm/pkg-descr
new file mode 100644
index 000000000000..bf534cc9750c
--- /dev/null
+++ b/misc/py-litellm/pkg-descr
@@ -0,0 +1,11 @@
+Call all LLM APIs using the OpenAI format [Bedrock, Huggingface,
+VertexAI, TogetherAI, Azure, OpenAI, etc.]
+
+LiteLLM manages:
+- Translate inputs to provider's completion, embedding, and
+  image_generation endpoints
+- Consistent output, text responses will always be available at
+  ['choices'][0]['message']['content']
+- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI)
+  - Router
+- Track spend & set budgets per project OpenAI Proxy Server
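
For context, a minimal sketch of the call shape the pkg-descr describes,
assuming an installed misc/py-litellm; the model string, prompt, and API
key below are placeholders, not part of this commit:

    # Minimal sketch: one call shape for every provider. The model name
    # and credential are illustrative placeholders.
    import os
    from litellm import completion

    os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder credential

    # LiteLLM translates this into the provider's completion endpoint.
    response = completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello from the new port!"}],
    )

    # Consistent OpenAI-format output, available at the path the
    # description quotes:
    print(response["choices"][0]["message"]["content"])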
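
Likewise, a hedged sketch of the retry/fallback "Router" the description
mentions: two deployments registered under one model alias, with LiteLLM
retrying or falling back between them. The deployment names, keys, and
endpoint are invented placeholders, and the litellm_params accepted vary
by provider:

    # Sketch of litellm.Router with an Azure deployment and a plain
    # OpenAI fallback; all values below are placeholders.
    from litellm import Router

    router = Router(model_list=[
        {
            "model_name": "gpt-3.5-turbo",      # alias callers use
            "litellm_params": {
                "model": "azure/my-deployment",  # hypothetical deployment
                "api_key": "azure-key...",
                "api_base": "https://example.openai.azure.com",
            },
        },
        {
            "model_name": "gpt-3.5-turbo",      # same alias, OpenAI fallback
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "sk-...",
            },
        },
    ])

    # The router picks a deployment and falls back on failure.
    response = router.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )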