forked from fla-org/flash-linear-attention
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpyproject.toml
More file actions
46 lines (41 loc) · 1.09 KB
/
pyproject.toml
File metadata and controls
46 lines (41 loc) · 1.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
[project]
name = "flash-linear-attention"
dynamic = ["version"]
description = "Fast Triton-based implementations of causal linear attention"
readme = "README.md"
authors = [
    { name = "Songlin Yang", email = "yangsl66@mit.edu" },
    { name = "Yu Zhang", email = "yzhang.cs@outlook.com" },
]
license = { file = "LICENSE" }
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
requires-python = ">=3.10"
# PEP 508 requirement strings, sorted alphabetically per PyPA convention.
dependencies = [
    "datasets>=3.3.0",
    "einops",
    # NOTE(review): pytest is normally a dev/test-only dependency — confirm
    # it is genuinely required at runtime before moving it to an extra.
    "pytest",
    "torch>=2.5",
    "transformers>=4.53.0",
]

# Optional extras, alphabetized: `pip install flash-linear-attention[benchmark]` etc.
[project.optional-dependencies]
benchmark = ["matplotlib"]
conv1d = ["causal-conv1d>=1.4.0"]

[project.urls]
Homepage = "https://github.com/fla-org/flash-linear-attention"
Repository = "https://github.com/fla-org/flash-linear-attention"

[build-system]
requires = ["setuptools>=45", "wheel"]
# Explicit backend: when this key is absent, PEP 517 frontends fall back to
# the legacy shim (setuptools.build_meta:__legacy__), which has subtly
# different sys.path behavior during builds.
# NOTE(review): confirm the project's setup.py builds cleanly under the
# non-legacy backend.
build-backend = "setuptools.build_meta"

[tool.isort]
line_length = 127
multi_line_output = 3  # 3 = vertical hanging indent (isort's mode enum)

[tool.pytest.ini_options]
# Stream live log records to the terminal during test runs.
log_cli = true
log_cli_level = "INFO"
# Make the repository root importable so tests can import the package in-place.
pythonpath = [
    ".",
]