-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathpyproject.toml
128 lines (113 loc) · 2.97 KB
/
pyproject.toml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# Build backend configuration (PEP 517/518).
# setuptools_scm[toml] is required at build time so the VCS-based
# versioning configured under [tool.setuptools_scm] can run.
[build-system]
requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel"]
build-backend = "setuptools.build_meta"
# Core package metadata (PEP 621).
[project]
name = "flow-judge"
# NOTE(review): a static `version` together with the [tool.setuptools_scm]
# table later in this file is contradictory — setuptools-scm normally
# requires `dynamic = ["version"]` and no static version key. Confirm
# which versioning scheme is intended before releasing.
version = "0.1.2"
description = "A small yet powerful LM Judge"
readme = "README.md"
authors = [
    {name = "Bernardo Garcia", email = "bernardo@flow-ai.com"},
    {name = "Karolus Sariola", email = "karolus@flow-ai.com"},
    {name = "Minaam Shahid", email = "minaam@flow-ai.com"},
    {name = "Tiina Vaahtio", email = "tiina@flow-ai.com"},
    {name = "Alex Wegrzyn", email = "alex.wegrzyn@flow-ai.com"},
]
license = {file = "LICENSE"}
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
]
keywords = ["LM-judge", "evaluation", "LLMs", "AI", "benchmarking"]
requires-python = ">=3.10"
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "hf-transfer>=0.1.1",
    "ipykernel>=6.29.0",
    "ipywidgets>=8.1.0",
    "pydantic>=2.9.1",
    "requests>=2.32.3",
    "structlog",
    "tqdm>=4.66.1",
]
# Optional dependency groups, installable as extras
# (e.g. `pip install flow-judge[dev]`). Each list sorted alphabetically.
[project.optional-dependencies]
dev = [
    "black",
    "codecov",
    "hypothesis",
    "isort",
    "memray>=1.14.0",
    "mypy>=1.11.2",
    "pre-commit",
    "pytest",
    # <0.24.0 cap: pytest-asyncio 0.24 changed event-loop handling;
    # keep the pin until asyncio_mode="auto" usage is re-validated.
    "pytest-asyncio>=0.23.6, <0.24.0",
    "pytest-cov",
    "pytest-memray>=1.7.0",
    "ruff",
    "types-requests",
    "types-tqdm",
]
integrations-test = [
    "llama-index",
    "llama-index-embeddings-huggingface",
]
# Local Hugging Face transformers inference backend.
hf = [
    "accelerate>=0.34.2",
    "bitsandbytes>=0.41.0,<=0.42.0",
    "torch>=2.3.0",
    "transformers>=4.45.0",
]
# vLLM is pinned exactly; it is sensitive to torch/CUDA combinations.
vllm = ["vllm==0.6.2"]
llamafile = [
    "openai>=1.51.0",
    "torch>=2.3.0",
]
baseten = [
    "aiohttp>=3.10.5",
    "openai>=1.51.0",
    "truss>=0.9.44",
]
# Project links shown on PyPI.
[project.urls]
Homepage = "https://github.com/flowaicom/flow-judge"
# Explicit package list — no automatic discovery; new sub-packages must
# be added here or they will be omitted from the wheel.
[tool.setuptools]
packages = ["flow_judge", "flow_judge.integrations", "flow_judge.metrics", "flow_judge.models", "flow_judge.utils"]
# Ship the Baseten adapter YAML configs alongside the models package.
[tool.setuptools.package-data]
"flow_judge.models" = ["adapters/baseten/**/*.yaml"]
# NOTE(review): setuptools-scm derives the version from VCS tags, but
# [project] above also declares a static `version` without listing it in
# `dynamic` — confirm which one actually wins at build time.
[tool.setuptools_scm]
version_scheme = "python-simplified-semver"
# Ruff linter configuration.
[tool.ruff]
line-length = 100
include = ["flow_judge/**/*.py", "tests/**/*.py", "setup.py"]

[tool.ruff.lint]
# E/W pycodestyle, F pyflakes, I isort, N naming, B bugbear,
# C complexity, D pydocstyle.
select = ["E", "F", "I", "N", "W", "B", "C", "D"]
# D100/D104: missing docstrings in public module/package are tolerated.
ignore = ["D100", "D104"]

[tool.ruff.lint.per-file-ignores]
# Re-exports in package __init__ files are intentional (F401).
"__init__.py" = ["F401"]

[tool.ruff.lint.pydocstyle]
convention = "google"

[tool.black]
line-length = 100
# Cover every runtime allowed by requires-python = ">=3.10"; the previous
# value (['py311'] only) let Black assume 3.11+ when formatting, which is
# inconsistent with the declared 3.10 support floor.
target-version = ['py310', 'py311', 'py312']
include = '(flow_judge/.*\.py$|tests/.*\.py$|setup\.py)'

[tool.isort]
profile = "black"
line_length = 100
src_paths = ["flow_judge", "tests"]
# Mypy static-type-checking configuration. Strictness is adopted
# incrementally: warning flags and untyped-def checking are on, but
# untyped/incomplete function definitions are still permitted.
[tool.mypy]
warn_unused_configs = true
warn_redundant_casts = true
warn_unused_ignores = true
strict_equality = true
check_untyped_defs = true
disallow_any_generics = true
# Deliberately false: the codebase is not fully annotated yet.
disallow_untyped_defs = false
disallow_incomplete_defs = false
# NOTE(review): `universal = true` tags a wheel as py2.py3-compatible,
# which contradicts requires-python = ">=3.10" above; set to false so the
# wheel is tagged py3-only. Also confirm this table is honored at all —
# setuptools has historically read bdist_wheel options from setup.cfg,
# not from pyproject.toml; if it is ignored, delete the table entirely.
[tool.bdist_wheel]
universal = false
# Pytest configuration.
[tool.pytest.ini_options]
# Auto mode: every `async def` test is collected as an asyncio coroutine
# (provided by pytest-asyncio, pinned under the `dev` extra).
asyncio_mode = "auto"
# Registered markers; unknown markers would otherwise raise warnings.
markers = [
"asyncio: mark test as an asyncio coroutine",
"memray: marks tests to be run with memray profiling",
"e2e: marks end-to-end tests",
]