in_source_id: string
before_files: list
after_files: list
pr_diff: string
feast-dev__feast-3501
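The fields above pair with one row of data below: the record id, the repository files before and after the PR, and the PR diff. A minimal loading sketch, assuming the before_files/after_files values are lists of {"content", "path"} objects (as they appear in this record) that may arrive either pre-parsed or as JSON strings; the `load_record` helper name is hypothetical:

```python
import json

# Hypothetical helper: normalize one raw row into the four fields listed in the schema.
# Assumes before_files/after_files are lists of {"content": "<file text>", "path": "<repo path>"}
# entries, possibly serialized as JSON strings.
def load_record(raw: dict) -> dict:
    def as_file_list(value):
        return json.loads(value) if isinstance(value, str) else value

    return {
        "in_source_id": raw["in_source_id"],          # e.g. "feast-dev__feast-3501"
        "before_files": as_file_list(raw["before_files"]),
        "after_files": as_file_list(raw["after_files"]),
        "pr_diff": raw["pr_diff"],                     # unified diff as a plain string
    }

# Example usage (assumed row variable):
# record = load_record(raw_row)
# print([f["path"] for f in record["before_files"]])   # -> ["setup.py"]
```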
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport glob\nimport json\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom distutils.dir_util import copy_tree\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\n\nfrom setuptools import Extension, find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_ext import build_ext as _build_ext\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.47.0,<2\",\n \"grpcio-reflection>=1.47.0,<2\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<3\",\n \"pandas>=1.4.3,<2\",\n \"pandavro~=1.5.0\", # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"protobuf<5,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4,<9\",\n \"pydantic>=1,<2\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard\",\n \"fastapi>=0.68.0,<1\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n \"httpx>=0.23.3\", # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<4\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n]\n\nREDIS_REQUIRED = [\n \"redis==4.2.2\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<=1.20.23\", \"docker>=5.0.2\", \"s3fs>=0.4.0,<=2022.01.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.13.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=2.7.3,<3\",\n # `pyOpenSSL==22.1.0` requires `cryptography<39,>=38.0.0`, which is incompatible\n # with `snowflake-connector-python[pandas]==2.8.0`, which depends on\n # `cryptography<37.0.0,>=3.1.0`.\n \"pyOpenSSL<22.1.0\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\n \"trino>=0.305.0,<0.400.0\", \"regex\"\n]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"mysqlclient\", 
\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.14.0,<0.15.0\"]\n\nGO_REQUIRED = [\n \"cffi~=1.15.0\",\n]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"cryptography>=35.0,<36\",\n \"flake8\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-tools>=1.47.0\",\n \"grpcio-testing>=1.47.0\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<4\",\n \"mypy>=0.981,<0.990\",\n \"mypy-protobuf==3.1\",\n \"avro==1.10.0\",\n \"gcsfs>=0.4.0,<=2022.01.0\",\n \"urllib3>=1.25.4,<2\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"adlfs==0.5.9\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests\",\n \"types-setuptools\",\n \"types-tabulate\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. 
This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\ndef _generate_path_with_gopath():\n go_path = subprocess.check_output([\"go\", \"env\", \"GOPATH\"]).decode(\"utf-8\")\n go_path = go_path.strip()\n path_val = 
os.getenv(\"PATH\")\n path_val = f\"{path_val}:{go_path}/bin\"\n\n return path_val\n\n\ndef _ensure_go_and_proto_toolchain():\n try:\n version = subprocess.check_output([\"go\", \"version\"])\n except Exception as e:\n raise RuntimeError(\"Unable to find go toolchain\") from e\n\n semver_string = re.search(r\"go[\\S]+\", str(version)).group().lstrip(\"go\")\n parts = semver_string.split(\".\")\n if not (int(parts[0]) >= 1 and int(parts[1]) >= 16):\n raise RuntimeError(f\"Go compiler too old; expected 1.16+ found {semver_string}\")\n\n path_val = _generate_path_with_gopath()\n\n try:\n subprocess.check_call([\"protoc-gen-go\", \"--version\"], env={\"PATH\": path_val})\n subprocess.check_call(\n [\"protoc-gen-go-grpc\", \"--version\"], env={\"PATH\": path_val}\n )\n except Exception as e:\n raise RuntimeError(\"Unable to find go/grpc extensions for protoc\") from e\n\n\nclass BuildGoProtosCommand(Command):\n description = \"Builds the proto files into Go files.\"\n user_options = []\n\n def initialize_options(self):\n self.go_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.go_folder = os.path.join(repo_root, \"go/protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.path_val = _generate_path_with_gopath()\n\n def finalize_options(self):\n pass\n\n def _generate_go_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n try:\n subprocess.check_call(\n self.go_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--go_out\",\n self.go_folder,\n \"--go_opt=module=github.com/feast-dev/feast/go/protos\",\n \"--go-grpc_out\",\n self.go_folder,\n \"--go-grpc_opt=module=github.com/feast-dev/feast/go/protos\",\n ]\n + proto_files,\n env={\"PATH\": self.path_val},\n )\n except CalledProcessError as e:\n print(f\"Stderr: {e.stderr}\")\n print(f\"Stdout: {e.stdout}\")\n\n def run(self):\n go_dir = Path(repo_root) / \"go\" / \"protos\"\n go_dir.mkdir(exist_ok=True)\n for sub_folder in self.sub_folders:\n self._generate_go_protos(f\"feast/{sub_folder}/*.proto\")\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"true\":\n _ensure_go_and_proto_toolchain()\n self.run_command(\"build_go_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"true\":\n _ensure_go_and_proto_toolchain()\n self.run_command(\"build_go_protos\")\n\n develop.run(self)\n\n\nclass build_ext(_build_ext):\n def finalize_options(self) -> None:\n super().finalize_options()\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"false\":\n self.extensions = [e for e in self.extensions if not self._is_go_ext(e)]\n\n def _is_go_ext(self, ext: Extension):\n return any(\n source.endswith(\".go\") or source.startswith(\"github\")\n for source in ext.sources\n )\n\n def build_extension(self, ext: Extension):\n print(f\"Building extension {ext}\")\n if not self._is_go_ext(ext):\n # the base class may mutate `self.compiler`\n compiler = copy.deepcopy(self.compiler)\n self.compiler, compiler = compiler, self.compiler\n try:\n return _build_ext.build_extension(self, ext)\n finally:\n self.compiler, compiler = 
compiler, self.compiler\n\n bin_path = _generate_path_with_gopath()\n go_env = json.loads(\n subprocess.check_output([\"go\", \"env\", \"-json\"]).decode(\"utf-8\").strip()\n )\n\n print(f\"Go env: {go_env}\")\n print(f\"CWD: {os.getcwd()}\")\n\n destination = os.path.dirname(os.path.abspath(self.get_ext_fullpath(ext.name)))\n subprocess.check_call(\n [\"go\", \"install\", \"golang.org/x/tools/cmd/goimports\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\"go\", \"get\", \"github.com/go-python/gopy@v0.4.4\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\"go\", \"install\", \"github.com/go-python/gopy\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\n \"gopy\",\n \"build\",\n \"-output\",\n destination,\n \"-vm\",\n sys.executable,\n \"--build-tags\",\n \"cgo,ccalloc\",\n \"--dynamic-link=True\",\n \"-no-make\",\n *ext.sources,\n ],\n env={\n \"PATH\": bin_path,\n \"CGO_LDFLAGS_ALLOW\": \".*\",\n **go_env,\n },\n )\n\n def copy_extensions_to_source(self):\n build_py = self.get_finalized_command(\"build_py\")\n for ext in self.extensions:\n fullname = self.get_ext_fullname(ext.name)\n modpath = fullname.split(\".\")\n package = \".\".join(modpath[:-1])\n package_dir = build_py.get_package_dir(package)\n\n src_dir = dest_dir = package_dir\n\n if src_dir.startswith(PYTHON_CODE_PREFIX):\n src_dir = package_dir[len(PYTHON_CODE_PREFIX) :]\n src_dir = src_dir.lstrip(\"/\")\n\n src_dir = os.path.join(self.build_lib, src_dir)\n\n # copy whole directory\n print(f\"Copying from {src_dir} to {dest_dir}\")\n copy_tree(src_dir, dest_dir)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"go\": GO_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.47.0\",\n \"grpcio-tools>=1.47.0\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_go_protos\": BuildGoProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n \"build_ext\": build_ext,\n },\n ext_modules=[\n Extension(\n \"feast.embedded_go.lib._embedded\",\n 
[\"github.com/feast-dev/feast/go/embedded\"],\n )\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport glob\nimport json\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom distutils.dir_util import copy_tree\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\n\nfrom setuptools import Extension, find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_ext import build_ext as _build_ext\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.47.0,<2\",\n \"grpcio-reflection>=1.47.0,<2\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<3\",\n \"pandas>=1.4.3,<2\",\n \"pandavro~=1.5.0\", # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"protobuf<5,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4,<9\",\n \"pydantic>=1,<2\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard\",\n \"fastapi>=0.68.0,<1\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n \"httpx>=0.23.3\", # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<4\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n]\n\nREDIS_REQUIRED = [\n \"redis==4.2.2\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<=1.20.23\", \"docker>=5.0.2\", \"s3fs>=0.4.0,<=2022.01.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.13.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=2.7.3,<3\",\n # `pyOpenSSL==22.1.0` requires `cryptography<39,>=38.0.0`, which is incompatible\n # with `snowflake-connector-python[pandas]==2.8.0`, which depends on\n # `cryptography<37.0.0,>=3.1.0`.\n \"pyOpenSSL<22.1.0\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\n \"trino>=0.305.0,<0.400.0\", \"regex\"\n]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"mysqlclient\", 
\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.15.41,<0.16.0\"]\n\nGO_REQUIRED = [\n \"cffi~=1.15.0\",\n]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"cryptography>=35.0,<36\",\n \"flake8\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-tools>=1.47.0\",\n \"grpcio-testing>=1.47.0\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<4\",\n \"mypy>=0.981,<0.990\",\n \"mypy-protobuf==3.1\",\n \"avro==1.10.0\",\n \"gcsfs>=0.4.0,<=2022.01.0\",\n \"urllib3>=1.25.4,<2\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"adlfs==0.5.9\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests\",\n \"types-setuptools\",\n \"types-tabulate\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. 
This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\ndef _generate_path_with_gopath():\n go_path = subprocess.check_output([\"go\", \"env\", \"GOPATH\"]).decode(\"utf-8\")\n go_path = go_path.strip()\n path_val = 
os.getenv(\"PATH\")\n path_val = f\"{path_val}:{go_path}/bin\"\n\n return path_val\n\n\ndef _ensure_go_and_proto_toolchain():\n try:\n version = subprocess.check_output([\"go\", \"version\"])\n except Exception as e:\n raise RuntimeError(\"Unable to find go toolchain\") from e\n\n semver_string = re.search(r\"go[\\S]+\", str(version)).group().lstrip(\"go\")\n parts = semver_string.split(\".\")\n if not (int(parts[0]) >= 1 and int(parts[1]) >= 16):\n raise RuntimeError(f\"Go compiler too old; expected 1.16+ found {semver_string}\")\n\n path_val = _generate_path_with_gopath()\n\n try:\n subprocess.check_call([\"protoc-gen-go\", \"--version\"], env={\"PATH\": path_val})\n subprocess.check_call(\n [\"protoc-gen-go-grpc\", \"--version\"], env={\"PATH\": path_val}\n )\n except Exception as e:\n raise RuntimeError(\"Unable to find go/grpc extensions for protoc\") from e\n\n\nclass BuildGoProtosCommand(Command):\n description = \"Builds the proto files into Go files.\"\n user_options = []\n\n def initialize_options(self):\n self.go_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.go_folder = os.path.join(repo_root, \"go/protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.path_val = _generate_path_with_gopath()\n\n def finalize_options(self):\n pass\n\n def _generate_go_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n try:\n subprocess.check_call(\n self.go_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--go_out\",\n self.go_folder,\n \"--go_opt=module=github.com/feast-dev/feast/go/protos\",\n \"--go-grpc_out\",\n self.go_folder,\n \"--go-grpc_opt=module=github.com/feast-dev/feast/go/protos\",\n ]\n + proto_files,\n env={\"PATH\": self.path_val},\n )\n except CalledProcessError as e:\n print(f\"Stderr: {e.stderr}\")\n print(f\"Stdout: {e.stdout}\")\n\n def run(self):\n go_dir = Path(repo_root) / \"go\" / \"protos\"\n go_dir.mkdir(exist_ok=True)\n for sub_folder in self.sub_folders:\n self._generate_go_protos(f\"feast/{sub_folder}/*.proto\")\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"true\":\n _ensure_go_and_proto_toolchain()\n self.run_command(\"build_go_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"true\":\n _ensure_go_and_proto_toolchain()\n self.run_command(\"build_go_protos\")\n\n develop.run(self)\n\n\nclass build_ext(_build_ext):\n def finalize_options(self) -> None:\n super().finalize_options()\n if os.getenv(\"COMPILE_GO\", \"false\").lower() == \"false\":\n self.extensions = [e for e in self.extensions if not self._is_go_ext(e)]\n\n def _is_go_ext(self, ext: Extension):\n return any(\n source.endswith(\".go\") or source.startswith(\"github\")\n for source in ext.sources\n )\n\n def build_extension(self, ext: Extension):\n print(f\"Building extension {ext}\")\n if not self._is_go_ext(ext):\n # the base class may mutate `self.compiler`\n compiler = copy.deepcopy(self.compiler)\n self.compiler, compiler = compiler, self.compiler\n try:\n return _build_ext.build_extension(self, ext)\n finally:\n self.compiler, compiler = 
compiler, self.compiler\n\n bin_path = _generate_path_with_gopath()\n go_env = json.loads(\n subprocess.check_output([\"go\", \"env\", \"-json\"]).decode(\"utf-8\").strip()\n )\n\n print(f\"Go env: {go_env}\")\n print(f\"CWD: {os.getcwd()}\")\n\n destination = os.path.dirname(os.path.abspath(self.get_ext_fullpath(ext.name)))\n subprocess.check_call(\n [\"go\", \"install\", \"golang.org/x/tools/cmd/goimports\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\"go\", \"get\", \"github.com/go-python/gopy@v0.4.4\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\"go\", \"install\", \"github.com/go-python/gopy\"],\n env={\"PATH\": bin_path, **go_env},\n )\n subprocess.check_call(\n [\n \"gopy\",\n \"build\",\n \"-output\",\n destination,\n \"-vm\",\n sys.executable,\n \"--build-tags\",\n \"cgo,ccalloc\",\n \"--dynamic-link=True\",\n \"-no-make\",\n *ext.sources,\n ],\n env={\n \"PATH\": bin_path,\n \"CGO_LDFLAGS_ALLOW\": \".*\",\n **go_env,\n },\n )\n\n def copy_extensions_to_source(self):\n build_py = self.get_finalized_command(\"build_py\")\n for ext in self.extensions:\n fullname = self.get_ext_fullname(ext.name)\n modpath = fullname.split(\".\")\n package = \".\".join(modpath[:-1])\n package_dir = build_py.get_package_dir(package)\n\n src_dir = dest_dir = package_dir\n\n if src_dir.startswith(PYTHON_CODE_PREFIX):\n src_dir = package_dir[len(PYTHON_CODE_PREFIX) :]\n src_dir = src_dir.lstrip(\"/\")\n\n src_dir = os.path.join(self.build_lib, src_dir)\n\n # copy whole directory\n print(f\"Copying from {src_dir} to {dest_dir}\")\n copy_tree(src_dir, dest_dir)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"go\": GO_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.47.0\",\n \"grpcio-tools>=1.47.0\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_go_protos\": BuildGoProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n \"build_ext\": build_ext,\n },\n ext_modules=[\n Extension(\n \"feast.embedded_go.lib._embedded\",\n 
[\"github.com/feast-dev/feast/go/embedded\"],\n )\n ],\n)\n", "path": "setup.py" } ]
diff --git a/sdk/python/requirements/py3.10-ci-requirements.txt b/sdk/python/requirements/py3.10-ci-requirements.txt index 274a0dbc257..2ee998b85c7 100644 --- a/sdk/python/requirements/py3.10-ci-requirements.txt +++ b/sdk/python/requirements/py3.10-ci-requirements.txt @@ -24,15 +24,25 @@ aiosignal==1.3.1 # via aiohttp alabaster==0.7.13 # via sphinx -altair==4.2.2 +altair==4.2.0 # via great-expectations anyio==3.6.2 # via # httpcore + # jupyter-server # starlette # watchfiles appdirs==1.4.4 # via fissix +argon2-cffi==21.3.0 + # via + # jupyter-server + # nbclassic + # notebook +argon2-cffi-bindings==21.2.0 + # via argon2-cffi +arrow==1.2.3 + # via isoduration asn1crypto==1.5.1 # via # oscrypto @@ -65,16 +75,20 @@ azure-identity==1.12.0 # via # adlfs # feast (setup.py) -azure-storage-blob==12.14.1 +azure-storage-blob==12.15.0 # via # adlfs # feast (setup.py) -babel==2.11.0 +babel==2.12.1 # via sphinx backcall==0.2.0 # via ipython +beautifulsoup4==4.11.2 + # via nbconvert black==22.12.0 # via feast (setup.py) +bleach==6.0.0 + # via nbconvert boto3==1.20.23 # via # feast (setup.py) @@ -110,6 +124,7 @@ certifi==2022.12.7 # snowflake-connector-python cffi==1.15.1 # via + # argon2-cffi-bindings # azure-datalake-store # cryptography # snowflake-connector-python @@ -137,7 +152,9 @@ colorama==0.4.6 # via # feast (setup.py) # great-expectations -coverage[toml]==7.1.0 +comm==0.1.2 + # via ipykernel +coverage[toml]==7.2.1 # via pytest-cov cryptography==35.0.0 # via @@ -153,16 +170,18 @@ cryptography==35.0.0 # snowflake-connector-python # types-pyopenssl # types-redis -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) -dataclasses==0.6 - # via great-expectations db-dtypes==1.0.5 # via google-cloud-bigquery +debugpy==1.6.6 + # via ipykernel decorator==5.1.1 # via # gcsfs # ipython +defusedxml==0.7.1 + # via nbconvert deprecated==1.2.13 # via redis deprecation==2.1.0 @@ -190,11 +209,11 @@ executing==1.2.0 # via stack-data fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro -fastjsonschema==2.16.2 +fastjsonschema==2.16.3 # via nbformat filelock==3.9.0 # via @@ -206,6 +225,8 @@ fissix==21.11.13 # via bowler flake8==6.0.0 # via feast (setup.py) +fqdn==1.5.1 + # via jsonschema frozenlist==1.3.3 # via # aiohttp @@ -234,9 +255,9 @@ google-api-core[grpc]==2.11.0 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-api-python-client==2.79.0 +google-api-python-client==2.80.0 # via firebase-admin -google-auth==2.16.1 +google-auth==2.16.2 # via # gcsfs # google-api-core @@ -250,11 +271,11 @@ google-auth-httplib2==0.1.0 # via google-api-python-client google-auth-oauthlib==1.0.0 # via gcsfs -google-cloud-bigquery[pandas]==3.5.0 +google-cloud-bigquery[pandas]==3.6.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.18.1 +google-cloud-bigquery-storage==2.19.0 # via feast (setup.py) -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.17.0 # via feast (setup.py) google-cloud-core==2.3.2 # via @@ -263,7 +284,7 @@ google-cloud-core==2.3.2 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-cloud-datastore==2.13.2 +google-cloud-datastore==2.14.0 # via feast (setup.py) google-cloud-firestore==2.10.0 # via firebase-admin @@ -284,7 +305,7 @@ googleapis-common-protos[grpc]==1.58.0 # google-api-core # grpc-google-iam-v1 # grpcio-status -great-expectations==0.14.13 +great-expectations==0.15.50 # via feast (setup.py) greenlet==2.0.2 # via sqlalchemy @@ -332,6 +353,7 @@ identify==2.5.18 idna==3.4 # via # 
anyio + # jsonschema # requests # rfc3986 # snowflake-connector-python @@ -342,20 +364,42 @@ importlib-metadata==6.0.0 # via great-expectations iniconfig==2.0.0 # via pytest -ipython==8.10.0 +ipykernel==6.21.2 + # via + # ipywidgets + # nbclassic + # notebook +ipython==8.11.0 + # via + # great-expectations + # ipykernel + # ipywidgets +ipython-genutils==0.2.0 + # via + # nbclassic + # notebook +ipywidgets==8.0.4 # via great-expectations isodate==0.6.1 - # via msrest + # via + # azure-storage-blob + # msrest +isoduration==20.11.0 + # via jsonschema isort==5.12.0 # via feast (setup.py) jedi==0.18.2 # via ipython -jinja2==3.0.3 +jinja2==3.1.2 # via # altair # feast (setup.py) # great-expectations + # jupyter-server # moto + # nbclassic + # nbconvert + # notebook # sphinx jmespath==0.10.0 # via @@ -364,31 +408,70 @@ jmespath==0.10.0 jsonpatch==1.32 # via great-expectations jsonpointer==2.3 - # via jsonpatch -jsonschema==4.17.3 + # via + # jsonpatch + # jsonschema +jsonschema[format-nongpl]==4.17.3 # via # altair # feast (setup.py) # great-expectations + # jupyter-events # nbformat +jupyter-client==8.0.3 + # via + # ipykernel + # jupyter-server + # nbclassic + # nbclient + # notebook jupyter-core==5.2.0 - # via nbformat + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # nbformat + # notebook +jupyter-events==0.6.3 + # via jupyter-server +jupyter-server==2.3.0 + # via + # nbclassic + # notebook-shim +jupyter-server-terminals==0.4.4 + # via jupyter-server +jupyterlab-pygments==0.2.2 + # via nbconvert +jupyterlab-widgets==3.0.5 + # via ipywidgets kubernetes==20.13.0 # via feast (setup.py) locket==1.0.0 # via partd +makefun==1.15.1 + # via great-expectations markupsafe==2.1.2 # via # jinja2 # moto + # nbconvert +marshmallow==3.19.0 + # via great-expectations matplotlib-inline==0.1.6 - # via ipython + # via + # ipykernel + # ipython mccabe==0.7.0 # via flake8 minio==7.1.0 # via feast (setup.py) mistune==2.0.5 - # via great-expectations + # via + # great-expectations + # nbconvert mmh3==3.0.0 # via feast (setup.py) mock==2.0.0 @@ -406,9 +489,7 @@ msal-extensions==1.0.0 msgpack==1.0.4 # via cachecontrol msrest==0.7.1 - # via - # azure-storage-blob - # msrestazure + # via msrestazure msrestazure==0.6.4 # via adlfs multidict==6.0.4 @@ -429,10 +510,34 @@ mypy-protobuf==3.1 # via feast (setup.py) mysqlclient==2.1.1 # via feast (setup.py) +nbclassic==0.5.2 + # via notebook +nbclient==0.7.2 + # via nbconvert +nbconvert==7.2.9 + # via + # jupyter-server + # nbclassic + # notebook nbformat==5.7.3 - # via great-expectations + # via + # great-expectations + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # notebook +nest-asyncio==1.5.6 + # via + # ipykernel + # nbclassic + # notebook nodeenv==1.7.0 # via pre-commit +notebook==6.5.2 + # via great-expectations +notebook-shim==0.2.2 + # via nbclassic numpy==1.24.2 # via # altair @@ -456,6 +561,10 @@ packaging==23.0 # docker # google-cloud-bigquery # great-expectations + # ipykernel + # jupyter-server + # marshmallow + # nbconvert # pytest # redis # sphinx @@ -470,6 +579,8 @@ pandas==1.5.3 # snowflake-connector-python pandavro==1.5.2 # via feast (setup.py) +pandocfilters==1.5.0 + # via nbconvert parso==0.8.3 # via jedi partd==1.3.0 @@ -482,7 +593,7 @@ pexpect==4.8.0 # via ipython pickleshare==0.7.5 # via ipython -pip-tools==6.12.2 +pip-tools==6.12.3 # via feast (setup.py) platformdirs==3.0.0 # via @@ -495,9 +606,14 @@ ply==3.11 # via thriftpy2 portalocker==2.7.0 # via msal-extensions 
-pre-commit==3.0.4 +pre-commit==3.1.1 # via feast (setup.py) -prompt-toolkit==3.0.37 +prometheus-client==0.16.0 + # via + # jupyter-server + # nbclassic + # notebook +prompt-toolkit==3.0.38 # via ipython proto-plus==1.22.2 # via @@ -525,11 +641,15 @@ protobuf==4.22.0 # mypy-protobuf # proto-plus psutil==5.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # ipykernel psycopg2-binary==2.9.5 # via feast (setup.py) ptyprocess==0.7.0 - # via pexpect + # via + # pexpect + # terminado pure-eval==0.2.2 # via stack-data py==1.11.0 @@ -562,12 +682,14 @@ pydantic==1.10.5 # via # fastapi # feast (setup.py) + # great-expectations pyflakes==3.0.1 # via flake8 pygments==2.14.0 # via # feast (setup.py) # ipython + # nbconvert # sphinx pyjwt[crypto]==2.6.0 # via @@ -584,7 +706,7 @@ pyopenssl==22.0.0 # via # feast (setup.py) # snowflake-connector-python -pyparsing==2.4.7 +pyparsing==3.0.9 # via # great-expectations # httplib2 @@ -621,18 +743,21 @@ pytest-xdist==3.2.0 python-dateutil==2.8.2 # via # adal + # arrow # botocore # google-cloud-bigquery # great-expectations + # jupyter-client # kubernetes # moto # pandas # rockset -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn +python-json-logger==2.0.7 + # via jupyter-events pytz==2022.7.1 # via - # babel # great-expectations # moto # pandas @@ -644,9 +769,17 @@ pyyaml==6.0 # via # dask # feast (setup.py) + # jupyter-events # kubernetes # pre-commit # uvicorn +pyzmq==25.0.0 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook redis==4.2.2 # via feast (setup.py) regex==2022.10.31 @@ -681,8 +814,16 @@ requests-oauthlib==1.3.1 # msrest responses==0.22.0 # via moto +rfc3339-validator==0.1.4 + # via + # jsonschema + # jupyter-events rfc3986[idna2008]==1.5.0 # via httpx +rfc3986-validator==0.1.1 + # via + # jsonschema + # jupyter-events rockset==1.0.5 # via feast (setup.py) rsa==4.9 @@ -695,10 +836,17 @@ s3transfer==0.5.2 # via boto3 scipy==1.10.1 # via great-expectations +send2trash==1.8.0 + # via + # jupyter-server + # nbclassic + # notebook six==1.16.0 # via + # asttokens # azure-core # azure-identity + # bleach # cassandra-driver # geomet # google-auth @@ -710,6 +858,7 @@ six==1.16.0 # msrestazure # pandavro # python-dateutil + # rfc3339-validator # thriftpy2 sniffio==1.3.0 # via @@ -720,6 +869,8 @@ snowballstemmer==2.2.0 # via sphinx snowflake-connector-python[pandas]==2.9.0 # via feast (setup.py) +soupsieve==2.4 + # via beautifulsoup4 sphinx==6.1.3 # via feast (setup.py) sphinxcontrib-applehelp==1.0.4 @@ -744,14 +895,20 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) -termcolor==2.2.0 - # via great-expectations +terminado==0.17.1 + # via + # jupyter-server + # jupyter-server-terminals + # nbclassic + # notebook testcontainers==3.7.1 # via feast (setup.py) thriftpy2==0.4.16 # via happybase +tinycss2==1.2.1 + # via nbconvert toml==0.10.2 # via # feast (setup.py) @@ -769,22 +926,38 @@ toolz==0.12.0 # altair # dask # partd +tornado==6.2 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook + # terminado tqdm==4.64.1 # via # feast (setup.py) # great-expectations traitlets==5.9.0 # via + # comm + # ipykernel # ipython + # ipywidgets + # jupyter-client # jupyter-core + # jupyter-events + # jupyter-server # matplotlib-inline + # nbclassic + # nbclient + # nbconvert # nbformat + # notebook trino==0.321.0 # via feast (setup.py) typeguard==2.13.3 # via feast (setup.py) -types-docutils==0.19.1.6 - # via 
types-setuptools types-protobuf==3.19.22 # via # feast (setup.py) @@ -793,27 +966,28 @@ types-pymysql==1.0.19.5 # via feast (setup.py) types-pyopenssl==23.0.0.4 # via types-redis -types-python-dateutil==2.8.19.8 +types-python-dateutil==2.8.19.10 # via feast (setup.py) types-pytz==2022.7.1.2 # via feast (setup.py) types-pyyaml==6.0.12.8 # via feast (setup.py) -types-redis==4.5.1.3 +types-redis==4.5.1.4 # via feast (setup.py) -types-requests==2.28.11.14 +types-requests==2.28.11.15 # via feast (setup.py) -types-setuptools==67.4.0.1 +types-setuptools==67.4.0.3 # via feast (setup.py) types-tabulate==0.9.0.1 # via feast (setup.py) types-toml==0.10.8.5 # via responses -types-urllib3==1.26.25.7 +types-urllib3==1.26.25.8 # via types-requests typing-extensions==4.5.0 # via # azure-core + # azure-storage-blob # great-expectations # mypy # pydantic @@ -825,6 +999,8 @@ tzlocal==4.2 # via # great-expectations # trino +uri-template==1.2.0 + # via jsonschema uritemplate==4.1.1 # via google-api-python-client urllib3==1.26.14 @@ -843,7 +1019,7 @@ uvicorn[standard]==0.20.0 # via feast (setup.py) uvloop==0.17.0 # via uvicorn -virtualenv==20.19.0 +virtualenv==20.20.0 # via pre-commit volatile==2.1.0 # via bowler @@ -851,9 +1027,16 @@ watchfiles==0.18.1 # via uvicorn wcwidth==0.2.6 # via prompt-toolkit +webcolors==1.12 + # via jsonschema +webencodings==0.5.1 + # via + # bleach + # tinycss2 websocket-client==1.5.1 # via # docker + # jupyter-server # kubernetes websockets==10.4 # via uvicorn @@ -861,7 +1044,9 @@ werkzeug==2.1.2 # via moto wheel==0.38.4 # via pip-tools -wrapt==1.14.1 +widgetsnbextension==4.0.5 + # via ipywidgets +wrapt==1.15.0 # via # aiobotocore # deprecated @@ -870,7 +1055,7 @@ xmltodict==0.13.0 # via moto yarl==1.8.2 # via aiohttp -zipp==3.14.0 +zipp==3.15.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/sdk/python/requirements/py3.10-requirements.txt b/sdk/python/requirements/py3.10-requirements.txt index 451ffa32537..1efd6fc61ed 100644 --- a/sdk/python/requirements/py3.10-requirements.txt +++ b/sdk/python/requirements/py3.10-requirements.txt @@ -35,13 +35,13 @@ cloudpickle==2.2.1 # via dask colorama==0.4.6 # via feast (setup.py) -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) dill==0.3.6 # via feast (setup.py) fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro @@ -123,7 +123,7 @@ pyrsistent==0.19.3 # via jsonschema python-dateutil==2.8.2 # via pandas -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn pytz==2022.7.1 # via pandas @@ -153,7 +153,7 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) toml==0.10.2 # via feast (setup.py) diff --git a/sdk/python/requirements/py3.8-ci-requirements.txt b/sdk/python/requirements/py3.8-ci-requirements.txt index 4e94a670235..1afec16b406 100644 --- a/sdk/python/requirements/py3.8-ci-requirements.txt +++ b/sdk/python/requirements/py3.8-ci-requirements.txt @@ -24,15 +24,25 @@ aiosignal==1.3.1 # via aiohttp alabaster==0.7.13 # via sphinx -altair==4.2.2 +altair==4.2.0 # via great-expectations anyio==3.6.2 # via # httpcore + # jupyter-server # starlette # watchfiles appdirs==1.4.4 # via fissix +argon2-cffi==21.3.0 + # via + # jupyter-server + # nbclassic + # notebook +argon2-cffi-bindings==21.2.0 + # via argon2-cffi +arrow==1.2.3 + # via isoduration asn1crypto==1.5.1 # via # oscrypto @@ -65,11 +75,11 @@ azure-identity==1.12.0 # via # adlfs # 
feast (setup.py) -azure-storage-blob==12.14.1 +azure-storage-blob==12.15.0 # via # adlfs # feast (setup.py) -babel==2.11.0 +babel==2.12.1 # via sphinx backcall==0.2.0 # via ipython @@ -77,8 +87,12 @@ backports-zoneinfo==0.2.1 # via # pytz-deprecation-shim # tzlocal +beautifulsoup4==4.11.2 + # via nbconvert black==22.12.0 # via feast (setup.py) +bleach==6.0.0 + # via nbconvert boto3==1.20.23 # via # feast (setup.py) @@ -114,6 +128,7 @@ certifi==2022.12.7 # snowflake-connector-python cffi==1.15.1 # via + # argon2-cffi-bindings # azure-datalake-store # cryptography # snowflake-connector-python @@ -141,7 +156,9 @@ colorama==0.4.6 # via # feast (setup.py) # great-expectations -coverage[toml]==7.1.0 +comm==0.1.2 + # via ipykernel +coverage[toml]==7.2.1 # via pytest-cov cryptography==35.0.0 # via @@ -157,16 +174,18 @@ cryptography==35.0.0 # snowflake-connector-python # types-pyopenssl # types-redis -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) -dataclasses==0.6 - # via great-expectations db-dtypes==1.0.5 # via google-cloud-bigquery +debugpy==1.6.6 + # via ipykernel decorator==5.1.1 # via # gcsfs # ipython +defusedxml==0.7.1 + # via nbconvert deprecated==1.2.13 # via redis deprecation==2.1.0 @@ -194,11 +213,11 @@ executing==1.2.0 # via stack-data fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro -fastjsonschema==2.16.2 +fastjsonschema==2.16.3 # via nbformat filelock==3.9.0 # via @@ -210,6 +229,8 @@ fissix==21.11.13 # via bowler flake8==6.0.0 # via feast (setup.py) +fqdn==1.5.1 + # via jsonschema frozenlist==1.3.3 # via # aiohttp @@ -238,9 +259,9 @@ google-api-core[grpc]==2.11.0 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-api-python-client==2.79.0 +google-api-python-client==2.80.0 # via firebase-admin -google-auth==2.16.1 +google-auth==2.16.2 # via # gcsfs # google-api-core @@ -254,11 +275,11 @@ google-auth-httplib2==0.1.0 # via google-api-python-client google-auth-oauthlib==1.0.0 # via gcsfs -google-cloud-bigquery[pandas]==3.5.0 +google-cloud-bigquery[pandas]==3.6.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.18.1 +google-cloud-bigquery-storage==2.19.0 # via feast (setup.py) -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.17.0 # via feast (setup.py) google-cloud-core==2.3.2 # via @@ -267,7 +288,7 @@ google-cloud-core==2.3.2 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-cloud-datastore==2.13.2 +google-cloud-datastore==2.14.0 # via feast (setup.py) google-cloud-firestore==2.10.0 # via firebase-admin @@ -288,7 +309,7 @@ googleapis-common-protos[grpc]==1.58.0 # google-api-core # grpc-google-iam-v1 # grpcio-status -great-expectations==0.14.13 +great-expectations==0.15.50 # via feast (setup.py) greenlet==2.0.2 # via sqlalchemy @@ -336,6 +357,7 @@ identify==2.5.18 idna==3.4 # via # anyio + # jsonschema # requests # rfc3986 # snowflake-connector-python @@ -345,25 +367,49 @@ imagesize==1.4.1 importlib-metadata==6.0.0 # via # great-expectations + # jupyter-client + # nbconvert # sphinx importlib-resources==5.12.0 # via jsonschema iniconfig==2.0.0 # via pytest -ipython==8.10.0 +ipykernel==6.21.2 + # via + # ipywidgets + # nbclassic + # notebook +ipython==8.11.0 + # via + # great-expectations + # ipykernel + # ipywidgets +ipython-genutils==0.2.0 + # via + # nbclassic + # notebook +ipywidgets==8.0.4 # via great-expectations isodate==0.6.1 - # via msrest + # via + # azure-storage-blob + # msrest +isoduration==20.11.0 + # via jsonschema isort==5.12.0 # 
via feast (setup.py) jedi==0.18.2 # via ipython -jinja2==3.0.3 +jinja2==3.1.2 # via # altair # feast (setup.py) # great-expectations + # jupyter-server # moto + # nbclassic + # nbconvert + # notebook # sphinx jmespath==0.10.0 # via @@ -372,31 +418,70 @@ jmespath==0.10.0 jsonpatch==1.32 # via great-expectations jsonpointer==2.3 - # via jsonpatch -jsonschema==4.17.3 + # via + # jsonpatch + # jsonschema +jsonschema[format-nongpl]==4.17.3 # via # altair # feast (setup.py) # great-expectations + # jupyter-events # nbformat +jupyter-client==8.0.3 + # via + # ipykernel + # jupyter-server + # nbclassic + # nbclient + # notebook jupyter-core==5.2.0 - # via nbformat + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # nbformat + # notebook +jupyter-events==0.6.3 + # via jupyter-server +jupyter-server==2.3.0 + # via + # nbclassic + # notebook-shim +jupyter-server-terminals==0.4.4 + # via jupyter-server +jupyterlab-pygments==0.2.2 + # via nbconvert +jupyterlab-widgets==3.0.5 + # via ipywidgets kubernetes==20.13.0 # via feast (setup.py) locket==1.0.0 # via partd +makefun==1.15.1 + # via great-expectations markupsafe==2.1.2 # via # jinja2 # moto + # nbconvert +marshmallow==3.19.0 + # via great-expectations matplotlib-inline==0.1.6 - # via ipython + # via + # ipykernel + # ipython mccabe==0.7.0 # via flake8 minio==7.1.0 # via feast (setup.py) mistune==2.0.5 - # via great-expectations + # via + # great-expectations + # nbconvert mmh3==3.0.0 # via feast (setup.py) mock==2.0.0 @@ -414,9 +499,7 @@ msal-extensions==1.0.0 msgpack==1.0.4 # via cachecontrol msrest==0.7.1 - # via - # azure-storage-blob - # msrestazure + # via msrestazure msrestazure==0.6.4 # via adlfs multidict==6.0.4 @@ -437,10 +520,34 @@ mypy-protobuf==3.1 # via feast (setup.py) mysqlclient==2.1.1 # via feast (setup.py) +nbclassic==0.5.2 + # via notebook +nbclient==0.7.2 + # via nbconvert +nbconvert==7.2.9 + # via + # jupyter-server + # nbclassic + # notebook nbformat==5.7.3 - # via great-expectations + # via + # great-expectations + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # notebook +nest-asyncio==1.5.6 + # via + # ipykernel + # nbclassic + # notebook nodeenv==1.7.0 # via pre-commit +notebook==6.5.2 + # via great-expectations +notebook-shim==0.2.2 + # via nbclassic numpy==1.24.2 # via # altair @@ -464,6 +571,10 @@ packaging==23.0 # docker # google-cloud-bigquery # great-expectations + # ipykernel + # jupyter-server + # marshmallow + # nbconvert # pytest # redis # sphinx @@ -478,6 +589,8 @@ pandas==1.5.3 # snowflake-connector-python pandavro==1.5.2 # via feast (setup.py) +pandocfilters==1.5.0 + # via nbconvert parso==0.8.3 # via jedi partd==1.3.0 @@ -490,7 +603,7 @@ pexpect==4.8.0 # via ipython pickleshare==0.7.5 # via ipython -pip-tools==6.12.2 +pip-tools==6.12.3 # via feast (setup.py) pkgutil-resolve-name==1.3.10 # via jsonschema @@ -505,9 +618,14 @@ ply==3.11 # via thriftpy2 portalocker==2.7.0 # via msal-extensions -pre-commit==3.0.4 +pre-commit==3.1.1 # via feast (setup.py) -prompt-toolkit==3.0.37 +prometheus-client==0.16.0 + # via + # jupyter-server + # nbclassic + # notebook +prompt-toolkit==3.0.38 # via ipython proto-plus==1.22.2 # via @@ -535,11 +653,15 @@ protobuf==4.22.0 # mypy-protobuf # proto-plus psutil==5.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # ipykernel psycopg2-binary==2.9.5 # via feast (setup.py) ptyprocess==0.7.0 - # via pexpect + # via + # pexpect + # terminado pure-eval==0.2.2 # via stack-data py==1.11.0 @@ -572,12 +694,14 @@ 
pydantic==1.10.5 # via # fastapi # feast (setup.py) + # great-expectations pyflakes==3.0.1 # via flake8 pygments==2.14.0 # via # feast (setup.py) # ipython + # nbconvert # sphinx pyjwt[crypto]==2.6.0 # via @@ -594,7 +718,7 @@ pyopenssl==22.0.0 # via # feast (setup.py) # snowflake-connector-python -pyparsing==2.4.7 +pyparsing==3.0.9 # via # great-expectations # httplib2 @@ -631,15 +755,19 @@ pytest-xdist==3.2.0 python-dateutil==2.8.2 # via # adal + # arrow # botocore # google-cloud-bigquery # great-expectations + # jupyter-client # kubernetes # moto # pandas # rockset -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn +python-json-logger==2.0.7 + # via jupyter-events pytz==2022.7.1 # via # babel @@ -654,9 +782,17 @@ pyyaml==6.0 # via # dask # feast (setup.py) + # jupyter-events # kubernetes # pre-commit # uvicorn +pyzmq==25.0.0 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook redis==4.2.2 # via feast (setup.py) regex==2022.10.31 @@ -691,8 +827,16 @@ requests-oauthlib==1.3.1 # msrest responses==0.22.0 # via moto +rfc3339-validator==0.1.4 + # via + # jsonschema + # jupyter-events rfc3986[idna2008]==1.5.0 # via httpx +rfc3986-validator==0.1.1 + # via + # jsonschema + # jupyter-events rockset==1.0.5 # via feast (setup.py) rsa==4.9 @@ -707,10 +851,17 @@ s3transfer==0.5.2 # via boto3 scipy==1.10.1 # via great-expectations +send2trash==1.8.0 + # via + # jupyter-server + # nbclassic + # notebook six==1.16.0 # via + # asttokens # azure-core # azure-identity + # bleach # cassandra-driver # geomet # google-auth @@ -722,6 +873,7 @@ six==1.16.0 # msrestazure # pandavro # python-dateutil + # rfc3339-validator # thriftpy2 sniffio==1.3.0 # via @@ -732,6 +884,8 @@ snowballstemmer==2.2.0 # via sphinx snowflake-connector-python[pandas]==2.9.0 # via feast (setup.py) +soupsieve==2.4 + # via beautifulsoup4 sphinx==6.1.3 # via feast (setup.py) sphinxcontrib-applehelp==1.0.4 @@ -756,14 +910,20 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) -termcolor==2.2.0 - # via great-expectations +terminado==0.17.1 + # via + # jupyter-server + # jupyter-server-terminals + # nbclassic + # notebook testcontainers==3.7.1 # via feast (setup.py) thriftpy2==0.4.16 # via happybase +tinycss2==1.2.1 + # via nbconvert toml==0.10.2 # via # feast (setup.py) @@ -781,22 +941,38 @@ toolz==0.12.0 # altair # dask # partd +tornado==6.2 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook + # terminado tqdm==4.64.1 # via # feast (setup.py) # great-expectations traitlets==5.9.0 # via + # comm + # ipykernel # ipython + # ipywidgets + # jupyter-client # jupyter-core + # jupyter-events + # jupyter-server # matplotlib-inline + # nbclassic + # nbclient + # nbconvert # nbformat + # notebook trino==0.321.0 # via feast (setup.py) typeguard==2.13.3 # via feast (setup.py) -types-docutils==0.19.1.6 - # via types-setuptools types-protobuf==3.19.22 # via # feast (setup.py) @@ -805,28 +981,29 @@ types-pymysql==1.0.19.5 # via feast (setup.py) types-pyopenssl==23.0.0.4 # via types-redis -types-python-dateutil==2.8.19.8 +types-python-dateutil==2.8.19.10 # via feast (setup.py) types-pytz==2022.7.1.2 # via feast (setup.py) types-pyyaml==6.0.12.8 # via feast (setup.py) -types-redis==4.5.1.3 +types-redis==4.5.1.4 # via feast (setup.py) -types-requests==2.28.11.14 +types-requests==2.28.11.15 # via feast (setup.py) -types-setuptools==67.4.0.1 +types-setuptools==67.4.0.3 # via feast (setup.py) 
types-tabulate==0.9.0.1 # via feast (setup.py) types-toml==0.10.8.5 # via responses -types-urllib3==1.26.25.7 +types-urllib3==1.26.25.8 # via types-requests typing-extensions==4.5.0 # via # aioitertools # azure-core + # azure-storage-blob # black # great-expectations # mypy @@ -840,6 +1017,8 @@ tzlocal==4.2 # via # great-expectations # trino +uri-template==1.2.0 + # via jsonschema uritemplate==4.1.1 # via google-api-python-client urllib3==1.26.14 @@ -858,7 +1037,7 @@ uvicorn[standard]==0.20.0 # via feast (setup.py) uvloop==0.17.0 # via uvicorn -virtualenv==20.19.0 +virtualenv==20.20.0 # via pre-commit volatile==2.1.0 # via bowler @@ -866,9 +1045,16 @@ watchfiles==0.18.1 # via uvicorn wcwidth==0.2.6 # via prompt-toolkit +webcolors==1.12 + # via jsonschema +webencodings==0.5.1 + # via + # bleach + # tinycss2 websocket-client==1.5.1 # via # docker + # jupyter-server # kubernetes websockets==10.4 # via uvicorn @@ -876,7 +1062,9 @@ werkzeug==2.1.2 # via moto wheel==0.38.4 # via pip-tools -wrapt==1.14.1 +widgetsnbextension==4.0.5 + # via ipywidgets +wrapt==1.15.0 # via # aiobotocore # deprecated @@ -885,7 +1073,7 @@ xmltodict==0.13.0 # via moto yarl==1.8.2 # via aiohttp -zipp==3.14.0 +zipp==3.15.0 # via # importlib-metadata # importlib-resources diff --git a/sdk/python/requirements/py3.8-requirements.txt b/sdk/python/requirements/py3.8-requirements.txt index 0429a8313ce..c57baa0d27a 100644 --- a/sdk/python/requirements/py3.8-requirements.txt +++ b/sdk/python/requirements/py3.8-requirements.txt @@ -35,13 +35,13 @@ cloudpickle==2.2.1 # via dask colorama==0.4.6 # via feast (setup.py) -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) dill==0.3.6 # via feast (setup.py) fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro @@ -127,7 +127,7 @@ pyrsistent==0.19.3 # via jsonschema python-dateutil==2.8.2 # via pandas -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn pytz==2022.7.1 # via pandas @@ -157,7 +157,7 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) toml==0.10.2 # via feast (setup.py) @@ -189,5 +189,5 @@ watchfiles==0.18.1 # via uvicorn websockets==10.4 # via uvicorn -zipp==3.14.0 +zipp==3.15.0 # via importlib-resources diff --git a/sdk/python/requirements/py3.9-ci-requirements.txt b/sdk/python/requirements/py3.9-ci-requirements.txt index df472dd6b07..4e0ab11d366 100644 --- a/sdk/python/requirements/py3.9-ci-requirements.txt +++ b/sdk/python/requirements/py3.9-ci-requirements.txt @@ -24,15 +24,25 @@ aiosignal==1.3.1 # via aiohttp alabaster==0.7.13 # via sphinx -altair==4.2.2 +altair==4.2.0 # via great-expectations anyio==3.6.2 # via # httpcore + # jupyter-server # starlette # watchfiles appdirs==1.4.4 # via fissix +argon2-cffi==21.3.0 + # via + # jupyter-server + # nbclassic + # notebook +argon2-cffi-bindings==21.2.0 + # via argon2-cffi +arrow==1.2.3 + # via isoduration asn1crypto==1.5.1 # via # oscrypto @@ -65,16 +75,20 @@ azure-identity==1.12.0 # via # adlfs # feast (setup.py) -azure-storage-blob==12.14.1 +azure-storage-blob==12.15.0 # via # adlfs # feast (setup.py) -babel==2.11.0 +babel==2.12.1 # via sphinx backcall==0.2.0 # via ipython +beautifulsoup4==4.11.2 + # via nbconvert black==22.12.0 # via feast (setup.py) +bleach==6.0.0 + # via nbconvert boto3==1.20.23 # via # feast (setup.py) @@ -110,6 +124,7 @@ certifi==2022.12.7 # snowflake-connector-python cffi==1.15.1 # via + # argon2-cffi-bindings # azure-datalake-store # cryptography # 
snowflake-connector-python @@ -137,7 +152,9 @@ colorama==0.4.6 # via # feast (setup.py) # great-expectations -coverage[toml]==7.1.0 +comm==0.1.2 + # via ipykernel +coverage[toml]==7.2.1 # via pytest-cov cryptography==35.0.0 # via @@ -153,16 +170,18 @@ cryptography==35.0.0 # snowflake-connector-python # types-pyopenssl # types-redis -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) -dataclasses==0.6 - # via great-expectations db-dtypes==1.0.5 # via google-cloud-bigquery +debugpy==1.6.6 + # via ipykernel decorator==5.1.1 # via # gcsfs # ipython +defusedxml==0.7.1 + # via nbconvert deprecated==1.2.13 # via redis deprecation==2.1.0 @@ -190,11 +209,11 @@ executing==1.2.0 # via stack-data fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro -fastjsonschema==2.16.2 +fastjsonschema==2.16.3 # via nbformat filelock==3.9.0 # via @@ -206,6 +225,8 @@ fissix==21.11.13 # via bowler flake8==6.0.0 # via feast (setup.py) +fqdn==1.5.1 + # via jsonschema frozenlist==1.3.3 # via # aiohttp @@ -234,9 +255,9 @@ google-api-core[grpc]==2.11.0 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-api-python-client==2.79.0 +google-api-python-client==2.80.0 # via firebase-admin -google-auth==2.16.1 +google-auth==2.16.2 # via # gcsfs # google-api-core @@ -250,11 +271,11 @@ google-auth-httplib2==0.1.0 # via google-api-python-client google-auth-oauthlib==1.0.0 # via gcsfs -google-cloud-bigquery[pandas]==3.5.0 +google-cloud-bigquery[pandas]==3.6.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.18.1 +google-cloud-bigquery-storage==2.19.0 # via feast (setup.py) -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.17.0 # via feast (setup.py) google-cloud-core==2.3.2 # via @@ -263,7 +284,7 @@ google-cloud-core==2.3.2 # google-cloud-datastore # google-cloud-firestore # google-cloud-storage -google-cloud-datastore==2.13.2 +google-cloud-datastore==2.14.0 # via feast (setup.py) google-cloud-firestore==2.10.0 # via firebase-admin @@ -284,7 +305,7 @@ googleapis-common-protos[grpc]==1.58.0 # google-api-core # grpc-google-iam-v1 # grpcio-status -great-expectations==0.14.13 +great-expectations==0.15.50 # via feast (setup.py) greenlet==2.0.2 # via sqlalchemy @@ -332,6 +353,7 @@ identify==2.5.18 idna==3.4 # via # anyio + # jsonschema # requests # rfc3986 # snowflake-connector-python @@ -341,23 +363,47 @@ imagesize==1.4.1 importlib-metadata==6.0.0 # via # great-expectations + # jupyter-client + # nbconvert # sphinx iniconfig==2.0.0 # via pytest -ipython==8.10.0 +ipykernel==6.21.2 + # via + # ipywidgets + # nbclassic + # notebook +ipython==8.11.0 + # via + # great-expectations + # ipykernel + # ipywidgets +ipython-genutils==0.2.0 + # via + # nbclassic + # notebook +ipywidgets==8.0.4 # via great-expectations isodate==0.6.1 - # via msrest + # via + # azure-storage-blob + # msrest +isoduration==20.11.0 + # via jsonschema isort==5.12.0 # via feast (setup.py) jedi==0.18.2 # via ipython -jinja2==3.0.3 +jinja2==3.1.2 # via # altair # feast (setup.py) # great-expectations + # jupyter-server # moto + # nbclassic + # nbconvert + # notebook # sphinx jmespath==0.10.0 # via @@ -366,31 +412,70 @@ jmespath==0.10.0 jsonpatch==1.32 # via great-expectations jsonpointer==2.3 - # via jsonpatch -jsonschema==4.17.3 + # via + # jsonpatch + # jsonschema +jsonschema[format-nongpl]==4.17.3 # via # altair # feast (setup.py) # great-expectations + # jupyter-events # nbformat +jupyter-client==8.0.3 + # via + # ipykernel + # jupyter-server + # nbclassic + # nbclient + 
# notebook jupyter-core==5.2.0 - # via nbformat + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # nbformat + # notebook +jupyter-events==0.6.3 + # via jupyter-server +jupyter-server==2.3.0 + # via + # nbclassic + # notebook-shim +jupyter-server-terminals==0.4.4 + # via jupyter-server +jupyterlab-pygments==0.2.2 + # via nbconvert +jupyterlab-widgets==3.0.5 + # via ipywidgets kubernetes==20.13.0 # via feast (setup.py) locket==1.0.0 # via partd +makefun==1.15.1 + # via great-expectations markupsafe==2.1.2 # via # jinja2 # moto + # nbconvert +marshmallow==3.19.0 + # via great-expectations matplotlib-inline==0.1.6 - # via ipython + # via + # ipykernel + # ipython mccabe==0.7.0 # via flake8 minio==7.1.0 # via feast (setup.py) mistune==2.0.5 - # via great-expectations + # via + # great-expectations + # nbconvert mmh3==3.0.0 # via feast (setup.py) mock==2.0.0 @@ -408,9 +493,7 @@ msal-extensions==1.0.0 msgpack==1.0.4 # via cachecontrol msrest==0.7.1 - # via - # azure-storage-blob - # msrestazure + # via msrestazure msrestazure==0.6.4 # via adlfs multidict==6.0.4 @@ -431,10 +514,34 @@ mypy-protobuf==3.1 # via feast (setup.py) mysqlclient==2.1.1 # via feast (setup.py) +nbclassic==0.5.2 + # via notebook +nbclient==0.7.2 + # via nbconvert +nbconvert==7.2.9 + # via + # jupyter-server + # nbclassic + # notebook nbformat==5.7.3 - # via great-expectations + # via + # great-expectations + # jupyter-server + # nbclassic + # nbclient + # nbconvert + # notebook +nest-asyncio==1.5.6 + # via + # ipykernel + # nbclassic + # notebook nodeenv==1.7.0 # via pre-commit +notebook==6.5.2 + # via great-expectations +notebook-shim==0.2.2 + # via nbclassic numpy==1.24.2 # via # altair @@ -458,6 +565,10 @@ packaging==23.0 # docker # google-cloud-bigquery # great-expectations + # ipykernel + # jupyter-server + # marshmallow + # nbconvert # pytest # redis # sphinx @@ -472,6 +583,8 @@ pandas==1.5.3 # snowflake-connector-python pandavro==1.5.2 # via feast (setup.py) +pandocfilters==1.5.0 + # via nbconvert parso==0.8.3 # via jedi partd==1.3.0 @@ -484,7 +597,7 @@ pexpect==4.8.0 # via ipython pickleshare==0.7.5 # via ipython -pip-tools==6.12.2 +pip-tools==6.12.3 # via feast (setup.py) platformdirs==3.0.0 # via @@ -497,9 +610,14 @@ ply==3.11 # via thriftpy2 portalocker==2.7.0 # via msal-extensions -pre-commit==3.0.4 +pre-commit==3.1.1 # via feast (setup.py) -prompt-toolkit==3.0.37 +prometheus-client==0.16.0 + # via + # jupyter-server + # nbclassic + # notebook +prompt-toolkit==3.0.38 # via ipython proto-plus==1.22.2 # via @@ -527,11 +645,15 @@ protobuf==4.22.0 # mypy-protobuf # proto-plus psutil==5.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # ipykernel psycopg2-binary==2.9.5 # via feast (setup.py) ptyprocess==0.7.0 - # via pexpect + # via + # pexpect + # terminado pure-eval==0.2.2 # via stack-data py==1.11.0 @@ -564,12 +686,14 @@ pydantic==1.10.5 # via # fastapi # feast (setup.py) + # great-expectations pyflakes==3.0.1 # via flake8 pygments==2.14.0 # via # feast (setup.py) # ipython + # nbconvert # sphinx pyjwt[crypto]==2.6.0 # via @@ -586,7 +710,7 @@ pyopenssl==22.0.0 # via # feast (setup.py) # snowflake-connector-python -pyparsing==2.4.7 +pyparsing==3.0.9 # via # great-expectations # httplib2 @@ -623,18 +747,21 @@ pytest-xdist==3.2.0 python-dateutil==2.8.2 # via # adal + # arrow # botocore # google-cloud-bigquery # great-expectations + # jupyter-client # kubernetes # moto # pandas # rockset -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn 
+python-json-logger==2.0.7 + # via jupyter-events pytz==2022.7.1 # via - # babel # great-expectations # moto # pandas @@ -646,9 +773,17 @@ pyyaml==6.0 # via # dask # feast (setup.py) + # jupyter-events # kubernetes # pre-commit # uvicorn +pyzmq==25.0.0 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook redis==4.2.2 # via feast (setup.py) regex==2022.10.31 @@ -683,8 +818,16 @@ requests-oauthlib==1.3.1 # msrest responses==0.22.0 # via moto +rfc3339-validator==0.1.4 + # via + # jsonschema + # jupyter-events rfc3986[idna2008]==1.5.0 # via httpx +rfc3986-validator==0.1.1 + # via + # jsonschema + # jupyter-events rockset==1.0.5 # via feast (setup.py) rsa==4.9 @@ -699,10 +842,17 @@ s3transfer==0.5.2 # via boto3 scipy==1.10.1 # via great-expectations +send2trash==1.8.0 + # via + # jupyter-server + # nbclassic + # notebook six==1.16.0 # via + # asttokens # azure-core # azure-identity + # bleach # cassandra-driver # geomet # google-auth @@ -714,6 +864,7 @@ six==1.16.0 # msrestazure # pandavro # python-dateutil + # rfc3339-validator # thriftpy2 sniffio==1.3.0 # via @@ -724,6 +875,8 @@ snowballstemmer==2.2.0 # via sphinx snowflake-connector-python[pandas]==2.9.0 # via feast (setup.py) +soupsieve==2.4 + # via beautifulsoup4 sphinx==6.1.3 # via feast (setup.py) sphinxcontrib-applehelp==1.0.4 @@ -748,14 +901,20 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) -termcolor==2.2.0 - # via great-expectations +terminado==0.17.1 + # via + # jupyter-server + # jupyter-server-terminals + # nbclassic + # notebook testcontainers==3.7.1 # via feast (setup.py) thriftpy2==0.4.16 # via happybase +tinycss2==1.2.1 + # via nbconvert toml==0.10.2 # via # feast (setup.py) @@ -773,22 +932,38 @@ toolz==0.12.0 # altair # dask # partd +tornado==6.2 + # via + # ipykernel + # jupyter-client + # jupyter-server + # nbclassic + # notebook + # terminado tqdm==4.64.1 # via # feast (setup.py) # great-expectations traitlets==5.9.0 # via + # comm + # ipykernel # ipython + # ipywidgets + # jupyter-client # jupyter-core + # jupyter-events + # jupyter-server # matplotlib-inline + # nbclassic + # nbclient + # nbconvert # nbformat + # notebook trino==0.321.0 # via feast (setup.py) typeguard==2.13.3 # via feast (setup.py) -types-docutils==0.19.1.6 - # via types-setuptools types-protobuf==3.19.22 # via # feast (setup.py) @@ -797,28 +972,29 @@ types-pymysql==1.0.19.5 # via feast (setup.py) types-pyopenssl==23.0.0.4 # via types-redis -types-python-dateutil==2.8.19.8 +types-python-dateutil==2.8.19.10 # via feast (setup.py) types-pytz==2022.7.1.2 # via feast (setup.py) types-pyyaml==6.0.12.8 # via feast (setup.py) -types-redis==4.5.1.3 +types-redis==4.5.1.4 # via feast (setup.py) -types-requests==2.28.11.14 +types-requests==2.28.11.15 # via feast (setup.py) -types-setuptools==67.4.0.1 +types-setuptools==67.4.0.3 # via feast (setup.py) types-tabulate==0.9.0.1 # via feast (setup.py) types-toml==0.10.8.5 # via responses -types-urllib3==1.26.25.7 +types-urllib3==1.26.25.8 # via types-requests typing-extensions==4.5.0 # via # aioitertools # azure-core + # azure-storage-blob # black # great-expectations # mypy @@ -832,6 +1008,8 @@ tzlocal==4.2 # via # great-expectations # trino +uri-template==1.2.0 + # via jsonschema uritemplate==4.1.1 # via google-api-python-client urllib3==1.26.14 @@ -850,7 +1028,7 @@ uvicorn[standard]==0.20.0 # via feast (setup.py) uvloop==0.17.0 # via uvicorn -virtualenv==20.19.0 +virtualenv==20.20.0 # via pre-commit 
volatile==2.1.0 # via bowler @@ -858,9 +1036,16 @@ watchfiles==0.18.1 # via uvicorn wcwidth==0.2.6 # via prompt-toolkit +webcolors==1.12 + # via jsonschema +webencodings==0.5.1 + # via + # bleach + # tinycss2 websocket-client==1.5.1 # via # docker + # jupyter-server # kubernetes websockets==10.4 # via uvicorn @@ -868,7 +1053,9 @@ werkzeug==2.1.2 # via moto wheel==0.38.4 # via pip-tools -wrapt==1.14.1 +widgetsnbextension==4.0.5 + # via ipywidgets +wrapt==1.15.0 # via # aiobotocore # deprecated @@ -877,7 +1064,7 @@ xmltodict==0.13.0 # via moto yarl==1.8.2 # via aiohttp -zipp==3.14.0 +zipp==3.15.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/sdk/python/requirements/py3.9-requirements.txt b/sdk/python/requirements/py3.9-requirements.txt index c33622325cc..3e2c23fb164 100644 --- a/sdk/python/requirements/py3.9-requirements.txt +++ b/sdk/python/requirements/py3.9-requirements.txt @@ -35,13 +35,13 @@ cloudpickle==2.2.1 # via dask colorama==0.4.6 # via feast (setup.py) -dask==2023.2.0 +dask==2023.3.0 # via feast (setup.py) dill==0.3.6 # via feast (setup.py) fastapi==0.92.0 # via feast (setup.py) -fastavro==1.7.1 +fastavro==1.7.2 # via # feast (setup.py) # pandavro @@ -123,7 +123,7 @@ pyrsistent==0.19.3 # via jsonschema python-dateutil==2.8.2 # via pandas -python-dotenv==0.21.1 +python-dotenv==1.0.0 # via uvicorn pytz==2022.7.1 # via pandas @@ -153,7 +153,7 @@ starlette==0.25.0 # via fastapi tabulate==0.9.0 # via feast (setup.py) -tenacity==8.2.1 +tenacity==8.2.2 # via feast (setup.py) toml==0.10.2 # via feast (setup.py) diff --git a/setup.py b/setup.py index 581f04dcfbf..d9c949b30f6 100644 --- a/setup.py +++ b/setup.py @@ -127,7 +127,7 @@ "cassandra-driver>=3.24.0,<4", ] -GE_REQUIRED = ["great_expectations>=0.14.0,<0.15.0"] +GE_REQUIRED = ["great_expectations>=0.15.41,<0.16.0"] GO_REQUIRED = [ "cffi~=1.15.0",
CiviWiki__OpenCiviWiki-1042
[ { "content": "from django.contrib.auth.models import AbstractUser\nimport os\nimport io\nfrom django.core.files.storage import default_storage\nfrom django.conf import settings\nfrom django.db import models\nfrom PIL import Image, ImageOps\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom taggit.managers import TaggableManager\n\nfrom api.models.category import Category\nfrom common.utils import PathAndRename\n\n\nclass User(AbstractUser):\n \"\"\"\n A new custom User model for any functionality needed in the future. Extending AbstractUser\n allows for adding new fields to the user model as needed.\n \"\"\"\n\n class Meta:\n db_table = \"users\"\n\n\n# Image manipulation constants\nPROFILE_IMG_SIZE = (171, 171)\nPROFILE_IMG_THUMB_SIZE = (40, 40)\nWHITE_BG = (255, 255, 255)\n\n\nclass ProfileManager(models.Manager):\n def summarize(self, profile):\n from api.models.civi import Civi\n\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me,\n \"history\": [\n Civi.objects.serialize(c)\n for c in Civi.objects.filter(author_id=profile.id).order_by(\"-created\")\n ],\n \"profile_image\": profile.profile_image_url,\n \"followers\": self.followers(profile),\n \"following\": self.following(profile),\n }\n return data\n\n def chip_summarize(self, profile):\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"profile_image\": profile.profile_image_url,\n }\n return data\n\n def card_summarize(self, profile, request_profile):\n # Length at which to truncate 'about me' text\n about_me_truncate_length = 150\n\n # If 'about me' text is longer than 150 characters... 
add elipsis (truncate)\n ellipsis_if_too_long = (\n \"\" if len(profile.about_me) <= about_me_truncate_length else \"...\"\n )\n\n data = {\n \"id\": profile.user.id,\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me[:about_me_truncate_length] + ellipsis_if_too_long,\n \"profile_image\": profile.profile_image_url,\n \"follow_state\": True\n if profile in request_profile.following.all()\n else False,\n \"request_profile\": request_profile.first_name,\n }\n return data\n\n def followers(self, profile):\n return [self.chip_summarize(follower) for follower in profile.followers.all()]\n\n def following(self, profile):\n return [self.chip_summarize(following) for following in profile.following.all()]\n\n\nprofile_upload_path = PathAndRename(\"\")\n\n\nclass Profile(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=63, blank=False)\n last_name = models.CharField(max_length=63, blank=False)\n about_me = models.CharField(max_length=511, blank=True)\n\n categories = models.ManyToManyField(\n Category, related_name=\"user_categories\", symmetrical=False\n )\n tags = TaggableManager()\n\n followers = models.ManyToManyField(\n \"self\", related_name=\"follower\", symmetrical=False\n )\n following = models.ManyToManyField(\n \"self\", related_name=\"followings\", symmetrical=False\n )\n\n is_verified = models.BooleanField(default=False)\n full_profile = models.BooleanField(default=False)\n\n objects = ProfileManager()\n profile_image = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n profile_image_thumb = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n\n @property\n def full_name(self):\n \"\"\"Returns the person's full name.\"\"\"\n\n return f\"{self.first_name} {self.last_name}\"\n\n @property\n def profile_image_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image.name)\n )\n if file_exists:\n return self.profile_image.url\n\n return \"/static/img/no_image_md.png\"\n\n @property\n def profile_image_thumb_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image_thumb:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image_thumb.name)\n )\n if file_exists:\n return self.profile_image_thumb.url\n\n return \"/static/img/no_image_md.png\"\n\n def __init__(self, *args, **kwargs):\n super(Profile, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n \"\"\" Image crop/resize and thumbnail creation \"\"\"\n\n # New Profile image --\n if self.profile_image:\n self.resize_profile_image()\n\n self.full_profile = self.is_full_profile()\n\n super(Profile, self).save(*args, **kwargs)\n\n def resize_profile_image(self):\n \"\"\"\n Resizes and crops the user uploaded image and creates a thumbnail version of it\n \"\"\"\n profile_image_field = self.profile_image\n image_file = io.StringIO(profile_image_field.read())\n profile_image = Image.open(image_file)\n profile_image.load()\n\n # Resize image\n profile_image = ImageOps.fit(\n profile_image, PROFILE_IMG_SIZE, Image.ANTIALIAS, centering=(0.5, 0.5)\n )\n\n # Convert to JPG image format with white background\n if profile_image.mode not in (\"L\", \"RGB\"):\n white_bg_img = 
Image.new(\"RGB\", PROFILE_IMG_SIZE, WHITE_BG)\n white_bg_img.paste(profile_image, mask=profile_image.split()[3])\n profile_image = white_bg_img\n\n # Save new cropped image\n tmp_image_file = io.StringIO()\n profile_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n # Make a Thumbnail Image for the new resized image\n thumb_image = profile_image.copy()\n thumb_image.thumbnail(PROFILE_IMG_THUMB_SIZE, resample=Image.ANTIALIAS)\n tmp_image_file = io.StringIO()\n thumb_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image_thumb = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n\n def is_full_profile(self):\n if self.first_name and self.last_name:\n return True\n else:\n return False\n", "path": "project/accounts/models.py" } ]
[ { "content": "from django.contrib.auth.models import AbstractUser\nimport os\nimport io\nfrom django.core.files.storage import default_storage\nfrom django.conf import settings\nfrom django.db import models\nfrom PIL import Image, ImageOps\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom taggit.managers import TaggableManager\n\nfrom api.models import Category\nfrom common.utils import PathAndRename\n\n\nclass User(AbstractUser):\n \"\"\"\n A new custom User model for any functionality needed in the future. Extending AbstractUser\n allows for adding new fields to the user model as needed.\n \"\"\"\n\n class Meta:\n db_table = \"users\"\n\n\n# Image manipulation constants\nPROFILE_IMG_SIZE = (171, 171)\nPROFILE_IMG_THUMB_SIZE = (40, 40)\nWHITE_BG = (255, 255, 255)\n\n\nclass ProfileManager(models.Manager):\n def summarize(self, profile):\n from api.models.civi import Civi\n\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me,\n \"history\": [\n Civi.objects.serialize(c)\n for c in Civi.objects.filter(author_id=profile.id).order_by(\"-created\")\n ],\n \"profile_image\": profile.profile_image_url,\n \"followers\": self.followers(profile),\n \"following\": self.following(profile),\n }\n return data\n\n def chip_summarize(self, profile):\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"profile_image\": profile.profile_image_url,\n }\n return data\n\n def card_summarize(self, profile, request_profile):\n # Length at which to truncate 'about me' text\n about_me_truncate_length = 150\n\n # If 'about me' text is longer than 150 characters... add elipsis (truncate)\n ellipsis_if_too_long = (\n \"\" if len(profile.about_me) <= about_me_truncate_length else \"...\"\n )\n\n data = {\n \"id\": profile.user.id,\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me[:about_me_truncate_length] + ellipsis_if_too_long,\n \"profile_image\": profile.profile_image_url,\n \"follow_state\": True\n if profile in request_profile.following.all()\n else False,\n \"request_profile\": request_profile.first_name,\n }\n return data\n\n def followers(self, profile):\n return [self.chip_summarize(follower) for follower in profile.followers.all()]\n\n def following(self, profile):\n return [self.chip_summarize(following) for following in profile.following.all()]\n\n\nprofile_upload_path = PathAndRename(\"\")\n\n\nclass Profile(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=63, blank=False)\n last_name = models.CharField(max_length=63, blank=False)\n about_me = models.CharField(max_length=511, blank=True)\n\n categories = models.ManyToManyField(\n Category, related_name=\"user_categories\", symmetrical=False\n )\n tags = TaggableManager()\n\n followers = models.ManyToManyField(\n \"self\", related_name=\"follower\", symmetrical=False\n )\n following = models.ManyToManyField(\n \"self\", related_name=\"followings\", symmetrical=False\n )\n\n is_verified = models.BooleanField(default=False)\n full_profile = models.BooleanField(default=False)\n\n objects = ProfileManager()\n profile_image = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n profile_image_thumb = models.ImageField(\n upload_to=profile_upload_path, blank=True, 
null=True\n )\n\n @property\n def full_name(self):\n \"\"\"Returns the person's full name.\"\"\"\n\n return f\"{self.first_name} {self.last_name}\"\n\n @property\n def profile_image_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image.name)\n )\n if file_exists:\n return self.profile_image.url\n\n return \"/static/img/no_image_md.png\"\n\n @property\n def profile_image_thumb_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image_thumb:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image_thumb.name)\n )\n if file_exists:\n return self.profile_image_thumb.url\n\n return \"/static/img/no_image_md.png\"\n\n def __init__(self, *args, **kwargs):\n super(Profile, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n \"\"\" Image crop/resize and thumbnail creation \"\"\"\n\n # New Profile image --\n if self.profile_image:\n self.resize_profile_image()\n\n self.full_profile = self.is_full_profile()\n\n super(Profile, self).save(*args, **kwargs)\n\n def resize_profile_image(self):\n \"\"\"\n Resizes and crops the user uploaded image and creates a thumbnail version of it\n \"\"\"\n profile_image_field = self.profile_image\n image_file = io.StringIO(profile_image_field.read())\n profile_image = Image.open(image_file)\n profile_image.load()\n\n # Resize image\n profile_image = ImageOps.fit(\n profile_image, PROFILE_IMG_SIZE, Image.ANTIALIAS, centering=(0.5, 0.5)\n )\n\n # Convert to JPG image format with white background\n if profile_image.mode not in (\"L\", \"RGB\"):\n white_bg_img = Image.new(\"RGB\", PROFILE_IMG_SIZE, WHITE_BG)\n white_bg_img.paste(profile_image, mask=profile_image.split()[3])\n profile_image = white_bg_img\n\n # Save new cropped image\n tmp_image_file = io.StringIO()\n profile_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n # Make a Thumbnail Image for the new resized image\n thumb_image = profile_image.copy()\n thumb_image.thumbnail(PROFILE_IMG_THUMB_SIZE, resample=Image.ANTIALIAS)\n tmp_image_file = io.StringIO()\n thumb_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image_thumb = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n\n def is_full_profile(self):\n if self.first_name and self.last_name:\n return True\n else:\n return False\n", "path": "project/accounts/models.py" } ]
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..353944cbd --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,33 @@ +name: CI Workflow + +on: + pull_request: + branches : [ develop ] + + workflow_dispatch: + + push: + +jobs: + python-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/dev.txt + pip install pytest-django + - name: Run migrations + run: python project/manage.py migrate + - name: Run tests + env: + CIVIWIKI_LOCAL_NAME : True + DJANGO_SETTINGS_MODULE: core.settings + run: | + cd project + pytest -p no:warning diff --git a/project/accounts/models.py b/project/accounts/models.py index 5d48149b1..9ca081b49 100644 --- a/project/accounts/models.py +++ b/project/accounts/models.py @@ -9,7 +9,7 @@ from taggit.managers import TaggableManager -from api.models.category import Category +from api.models import Category from common.utils import PathAndRename diff --git a/project/api/tests/__init__.py b/project/api/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/api/tests/test_propublica.py b/project/api/tests/test_propublica.py deleted file mode 100644 index 49124443b..000000000 --- a/project/api/tests/test_propublica.py +++ /dev/null @@ -1,30 +0,0 @@ -from mock import patch, MagicMock - -from rest_framework.test import APITestCase, override_settings - -from ..propublica import ProPublicaAPI -from .propublica_responses import PROPUBLICA_CORRECT_RESPONSE - - -@override_settings(PROPUBLICA_API_KEY="test-token") -class ProPublicaAPITestCase(APITestCase): - def setUp(self): - self.api_instance = ProPublicaAPI() - - def test_headers_are_set_correctly(self): - self.assertDictEqual( - self.api_instance.auth_headers, {"X-API-Key": "test-token"} - ) - - @patch("api.propublica.requests.get") - def test_if_search_returns_correct_results(self, get_mock): - get_mock.return_value = MagicMock( - json=MagicMock(return_value=PROPUBLICA_CORRECT_RESPONSE) - ) - query = "query" - data = self.api_instance.search(query) - self.assertEqual(data, PROPUBLICA_CORRECT_RESPONSE) - get_mock.assert_called_once_with( - self.api_instance.URL.format(query=query), - headers=self.api_instance.auth_headers, - ) diff --git a/project/test_framework/__init__.py b/project/test_framework/__init__.py deleted file mode 100644 index 1a7e0dd46..000000000 --- a/project/test_framework/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from contextlib import contextmanager - -from aloe import around, world -from selenium import webdriver - -""" -this method sets a global variable of type WebDriver. This is the browser. -@around.all is a global hook. 
It will be ran once before all scenarios -After the last scenario is completed (pass or fail) the WebDriver will be closed -""" - - -@around.all -@contextmanager -def with_chrome(): - options = webdriver.ChromeOptions() - options.add_argument("--headless") - options.add_argument("--disable-extensions") - options.add_argument("--no-sandbox") - world.browser = webdriver.Chrome(options=options) - yield - world.browser.quit() - delattr(world, "browser") diff --git a/project/test_framework/accessability/__init__.py b/project/test_framework/accessability/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/test_framework/accessability/features/__init__.py b/project/test_framework/accessability/features/__init__.py deleted file mode 100644 index 34278a88c..000000000 --- a/project/test_framework/accessability/features/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from aloe import step - -from .accessability import login, logout, is_user_loggedin - - -@step(r"a user with \"(.*?)\" credentials tries to log in") -def user_logs_in(self, correct): - login(correct) - - -@step(r"the user is logged in") -def user_is_logged_in(self): - assert is_user_loggedin() - - -@step(r"the user can log out") -def user_logs_out(self): - logout() - - -@step(r"the user is not logged in") -def user_not_logged_in(self): - assert not is_user_loggedin() diff --git a/project/test_framework/accessability/features/accessability.feature b/project/test_framework/accessability/features/accessability.feature deleted file mode 100644 index 1dabce843..000000000 --- a/project/test_framework/accessability/features/accessability.feature +++ /dev/null @@ -1,10 +0,0 @@ -Feature: Accessibility tests - - Scenario: A user can login and logout - Given a user with "correct" credentials tries to log in - When the user is logged in - Then the user can log out - - Scenario: Users cannot login with wrong password - Given a user with "incorrect" credentials tries to log in - Then the user is not logged in \ No newline at end of file diff --git a/project/test_framework/accessability/features/accessability.py b/project/test_framework/accessability/features/accessability.py deleted file mode 100644 index 29e0faf1a..000000000 --- a/project/test_framework/accessability/features/accessability.py +++ /dev/null @@ -1,29 +0,0 @@ -from test_framework.utils import * - - -def login(credentials): - if is_user_loggedin(): - return - else: - open_home_page() - el = find_element(login_signup_button) - el.click() - wait_for_page("login") - find_element(username_field).send_keys(username) - if credentials == "correct": - find_element(password_field).send_keys(password) - find_element(login_button).click() - elif credentials == "incorrect": - find_element(password_field).send_keys(password_wrong) - - -def is_user_loggedin(): - try: - return find_element(user_label).is_displayed() - except: - return - - -def logout(): - find_element(nav_menu).click() - find_element(logout_button).click() diff --git a/project/test_framework/my_feed/__init__.py b/project/test_framework/my_feed/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/test_framework/my_feed/features/__init__.py b/project/test_framework/my_feed/features/__init__.py deleted file mode 100644 index 9558c1dc6..000000000 --- a/project/test_framework/my_feed/features/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from test_framework.accessability.features import * -from test_framework.accessability.features.accessability import * diff --git 
a/project/test_framework/my_feed/features/my_feed.feature b/project/test_framework/my_feed/features/my_feed.feature deleted file mode 100644 index 8bb776080..000000000 --- a/project/test_framework/my_feed/features/my_feed.feature +++ /dev/null @@ -1,10 +0,0 @@ -Feature: My Feed tests - - Scenario: User can view My Feed - Given a user with "correct" credentials tries to log in - When user opens the "My Feed" menu - And My Feed is displayed - And user can filter threads by category - And the user can "remove" the "Agriculture" category - And the user can "add" the "Agriculture" category - Then the trending issues are displayed \ No newline at end of file diff --git a/project/test_framework/my_profile/__init__.py b/project/test_framework/my_profile/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/test_framework/my_profile/features/__init__.py b/project/test_framework/my_profile/features/__init__.py deleted file mode 100644 index 9558c1dc6..000000000 --- a/project/test_framework/my_profile/features/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from test_framework.accessability.features import * -from test_framework.accessability.features.accessability import * diff --git a/project/test_framework/my_profile/features/my_profile.feature b/project/test_framework/my_profile/features/my_profile.feature deleted file mode 100644 index 124ae8bc7..000000000 --- a/project/test_framework/my_profile/features/my_profile.feature +++ /dev/null @@ -1,7 +0,0 @@ -Feature: My Profile tests - - Scenario: User can change My Profile settings - Given a user with "correct" credentials tries to log in - When user opens the "My Profile" menu - And the user can view it's activity - Then the user can change About Me \ No newline at end of file diff --git a/project/test_framework/selectors.py b/project/test_framework/selectors.py deleted file mode 100644 index e98403086..000000000 --- a/project/test_framework/selectors.py +++ /dev/null @@ -1,55 +0,0 @@ -civi_wiki_url = "https://www.civiwiki.org" -username = "unit1test" -password = "unit1test" -password_wrong = "unit123test" - -# Accessibility -login_signup_button = ".login-btn" -username_field = "#username" -password_field = "#password" -login_button = ".login-button" -user_label = ".label-username" -civi_wiki_logo = ".wordmark" -nav_menu = ".nav-menu" -logout_button = "a.dropdown-item:nth-child(5)" -settings_button = "a.dropdown-item:nth-child(4)" -my_profile_button = "a.dropdown-item:nth-child(2)" -my_feed_button = "a.dropdown-item:nth-child(1)" - -# Settings -email_field = "#email" -first_name_field = "#first_name" -last_name_field = "#last_name" -location_field = ".location-info" -location_set = "#autocomplete" -activity = ".profile-civi-list" -edit_profile_butoon = "a.btn" -about_me_field = "#about_me" -my_feed_list = "#feed-list" -manage_categories = "#manage-categories" -choose_button = "#choose-category" -trending_issues = ".list-title" - -# Threads -add_civi_button = ".add-civi" -add_problem_button = "button.btn-small:nth-child(1)" -add_cause_button = "button.btn-small:nth-child(2)" -add_solution_button = "button.btn:nth-child(3)" -civi_title_field = "#civi-title" -civi_description_field = "#civi-body" -civi_links_field = "#civi_links" -create_civi_button = ".create-new-civi" -first_civi = ".civi-title > span:nth-child(1)" -create_thread_button = ".new-thread" -thread_title = "#thread-title" -thread_summary = "#thread-body" -thread_type_dropdown = ( - ".new-thread-level-selection > div:nth-child(1) > div:nth-child(1)" -) 
-thread_category_dropdown = ".new-thread-category-selection > div:nth-child(1) > div:nth-child(1) > input:nth-child(2)" -thread_create_draft = ".draft-new-thread" -thread_publish = "#js-publish-btn" -add_response_button = "#add-new-response" -response_title = "#response-title" -response_body = "#response-body" -response_confirm = ".create-new-response" diff --git a/project/test_framework/settings/__init__.py b/project/test_framework/settings/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/test_framework/settings/features/__init__.py b/project/test_framework/settings/features/__init__.py deleted file mode 100644 index 988644d7f..000000000 --- a/project/test_framework/settings/features/__init__.py +++ /dev/null @@ -1,149 +0,0 @@ -import time -from selenium.webdriver.common.keys import Keys - -from test_framework.utils import * -from test_framework.accessability.features import * -from test_framework.accessability.features.accessability import * - - -def open_menu(menu): - find_element(nav_menu).click() - if menu == "Settings": - find_element(settings_button).click() - wait_for_page("/settings") - if menu == "My Profile": - find_element(my_profile_button).click() - wait_for_page(username) - if menu == "My Feed": - find_element(my_feed_button).click() - - -def get_settings(): - settings = { - "email": world.browser.find_element_by_css_selector(email_field).get_attribute( - "value" - ), - "username": world.browser.find_element_by_css_selector( - username_field - ).get_attribute("value"), - "first name": find_element(first_name_field).get_attribute("value"), - "last name": find_element(last_name_field).get_attribute("value"), - "location": find_element(location_field).text, - } - return settings - - -def set_settings(): - time.sleep(1) - first_name_1 = "unit" - first_name_2 = "unit2" - last_name_1 = "test" - last_name_2 = "test2" - location_1 = "Washington, DC, United States" - location_2 = "New York, NY, United States" - - if find_element(first_name_field).get_attribute("value") == first_name_1: - find_element(first_name_field).clear() - find_element(first_name_field).send_keys(first_name_2) - elif find_element(first_name_field).get_attribute("value") == first_name_2: - find_element(first_name_field).clear() - find_element(first_name_field).send_keys(first_name_1) - - if find_element(last_name_field).get_attribute("value") == last_name_1: - find_element(last_name_field).clear() - find_element(last_name_field).send_keys(last_name_2) - elif find_element(last_name_field).get_attribute("value") == last_name_2: - find_element(last_name_field).clear() - find_element(last_name_field).send_keys(last_name_1) - - if ( - location_1.replace(", DC, United States", "") - in find_element(location_field).text - ): - find_element(location_set).send_keys(location_2) - time.sleep(1) - find_element(location_set).send_keys(Keys.DOWN) - find_element(location_set).send_keys(Keys.RETURN) - elif ( - location_2.replace(", NY, United States", "") - in find_element(location_field).text - ): - find_element(location_set).send_keys(location_1) - time.sleep(1) - find_element(location_set).send_keys(Keys.DOWN) - find_element(location_set).send_keys(Keys.RETURN) - - find_element(first_name_field).click() - - -def is_activity_displayed(): - return find_element(activity).is_displayed() - - -def change_about_me(): - first_about = "this is the first about me string" - second_about = "this is the secod about me string" - find_element(edit_profile_butoon).click() - about_me = 
find_element(about_me_field) - world.first_about_me = about_me.get_attribute("value") - - if about_me.get_attribute("value") == first_about: - about_me.clear() - about_me.send_keys(second_about) - about_me.send_keys(Keys.TAB) - elif about_me.get_attribute("value") == second_about: - about_me.clear() - about_me.send_keys(first_about) - about_me.send_keys(Keys.TAB) - - -def get_about_me(): - find_element(edit_profile_butoon).click() - return find_element(about_me_field).get_attribute("value") - - -def is_my_feed_displayed(): - return find_element(my_feed_list).is_displayed() - - -def select_filter(filter): - categories = find_elements_by_class("category-item") - - for category in categories: - if category.text == filter: - category.click() - break - - -def get_threads_count(): - return len(find_elements_by_class("s12")) - - -def add_remove_category(mode, wanted_category): - find_element(manage_categories).click() - time.sleep(1) - categories = find_elements_by_class("category-checkbox") - for category in categories: - if wanted_category in category.text: - checkbox = category.find_element_by_class_name("filled-in") - if mode == "remove" and checkbox.get_attribute("checked") == "true": - category.find_elements_by_css_selector("*")[1].click() - break - elif mode == "add" and checkbox.get_attribute("checked") != "true": - category.find_elements_by_css_selector("*")[1].click() - break - - find_element(choose_button).click() - time.sleep(3) - - -def find_category(wanted_category): - categories = find_elements_by_class("category-item") - for category in categories: - if category.text == wanted_category: - return "true" - return - - -def is_trending_issues(): - return find_element(trending_issues).is_displayed() diff --git a/project/test_framework/settings/features/settings.feature b/project/test_framework/settings/features/settings.feature deleted file mode 100644 index e811b004a..000000000 --- a/project/test_framework/settings/features/settings.feature +++ /dev/null @@ -1,8 +0,0 @@ -Feature: Settings tests - - Scenario: User can change it's settings - Given a user with "correct" credentials tries to log in - When user opens the "Settings" menu - And the current settings are saved - And the user enters new settings - Then the settings are changed \ No newline at end of file diff --git a/project/test_framework/threads/__init__.py b/project/test_framework/threads/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/test_framework/threads/features/__init__.py b/project/test_framework/threads/features/__init__.py deleted file mode 100644 index 5951b300f..000000000 --- a/project/test_framework/threads/features/__init__.py +++ /dev/null @@ -1,132 +0,0 @@ -import time -from selenium.webdriver.common.keys import Keys - -from test_framework.utils import * -from test_framework.accessability.features import * - - -def open_thread(thread_name): - time.sleep(3) - threads = find_elements_by_class("thread-title") - for thread in threads: - if thread.text == thread_name: - thread.click() - wait_for_page("/thread/") - break - - -def add_civi(mode, title, description, links): - time.sleep(1) - find_element(add_civi_button).click() - if mode == "problem": - find_element(add_problem_button).click() - find_element(civi_title_field).send_keys(title) - find_element(civi_description_field).send_keys(description) - elif mode == "cause": - find_element(add_cause_button).click() - find_element(civi_title_field).send_keys(title) - find_element(civi_description_field).send_keys(description) - 
find_element(civi_links_field).send_keys(links) - find_element(civi_links_field).send_keys(Keys.RETURN) - time.sleep(1) - elif mode == "solution": - find_element(add_solution_button).click() - find_element(civi_title_field).send_keys(title) - find_element(civi_description_field).send_keys(description) - find_element(civi_links_field).send_keys(links) - find_element(civi_links_field).send_keys(Keys.RETURN) - time.sleep(1) - - find_element(create_civi_button).click() - return find_element(first_civi).is_displayed() - - -def create_thread(thread_type, title, summary, category): - find_element(create_thread_button).click() - find_element(thread_title).send_keys(title) - find_element(thread_summary).send_keys(summary) - find_element(thread_type_dropdown).click() - - for element in find_element(thread_type_dropdown).find_elements_by_css_selector( - "*" - ): - if element.text == thread_type: - element.click() - break - - find_element(thread_category_dropdown).click() - time.sleep(1) - find_element(thread_category_dropdown).send_keys(Keys.DOWN) - find_element(thread_category_dropdown).send_keys(Keys.RETURN) - time.sleep(1) - # for element in find_element_by_class("new-thread-category-selection").find_elements_by_css_selector("*"): - # if element.text in category: - # scroll_into_view(element) - # # js_click(element) - # element.click() - # break - find_element(thread_create_draft).click() - time.sleep(1) - open_thread(title) - find_element(thread_publish).click() - time.sleep(3) - - -def civi_vote(civi_title, vote): - for civi in find_elements_by_class("civi-card"): - if civi_title in civi.find_element_by_class_name("text-wrapper").text: - click_vote(vote, civi) - - -def click_vote(vote, element): - options = element.find_elements_by_class_name("rating-wrapper") - if vote == "DISAGREE": - options[0].find_element_by_class_name("rating-button").click() - elif vote == "NEUTRAL": - options[2].find_element_by_class_name("rating-button").click() - elif vote == "AGREE": - options[4].find_element_by_class_name("rating-button").click() - time.sleep(3) - - -def is_civi_visible(type): - if type == "cause": - number_of_elements = find_elements_by_class("cause-nav-indiv") - elif type == "solution": - number_of_elements = find_elements_by_class("solution-nav-indiv") - elif type == "problem": - number_of_elements = find_elements_by_class("problem-nav-indiv") - - if len(number_of_elements) > 0: - return "true" - else: - return - - -def add_comment(civi_title, response_title_text, response_body_text): - for civi in find_elements_by_class("civi-card"): - if civi_title in civi.find_element_by_class_name("text-wrapper").text: - # time.sleep(1) - civi.click() - # time.sleep(1) - try: - find_element(add_response_button).click() - except: - civi.click() - find_element(add_response_button).click() - find_element(response_title).send_keys(response_title_text) - find_element(response_body).send_keys(response_body_text) - find_element(response_confirm).click() - # time.sleep(3) - break - - -def remove_civi(civi_title): - time.sleep(1) - for civi in find_elements_by_class("civi-card"): - if civi_title in civi.find_element_by_class_name("text-wrapper").text: - try: - civi.find_element_by_class_name("delete").click() - except: - time.sleep(2) - civi.find_element_by_class_name("delete").click() diff --git a/project/test_framework/threads/features/threads.feature b/project/test_framework/threads/features/threads.feature deleted file mode 100644 index c4d419939..000000000 --- 
a/project/test_framework/threads/features/threads.feature +++ /dev/null @@ -1,38 +0,0 @@ -Feature: Threads tests - - Scenario: User can create threads - Given a user with "correct" credentials tries to log in - And user opens the "My Feed" menu - When user can create a new "Federal" thread - And user opens the "My Feed" menu - And user can create a new "State" thread - And user opens the "My Feed" menu -# Then the user can delete a thread - - Scenario: User can add new problems, causes and solutions - Given a user with "correct" credentials tries to log in - And user opens the "My Feed" menu - And the user opens the "automation Federal thread" thread - When the user can add a "problem" to the thread - And the user can add a "cause" to the thread - And the user can add a "solution" to the thread - - Scenario: Civis are only visible when user votes AGREE - Given a user with "correct" credentials tries to log in - When the user opens the "automation Federal thread" thread - Then the "cause" is "not visible" when user clicks "DISAGREE" on "this is a problem" - And the "cause" is "visible" when user clicks "AGREE" on "this is a problem" - And the "solution" is "not visible" when user clicks "DISAGREE" on "c1" - And the "solution" is "visible" when user clicks "AGREE" on "c1" - - Scenario: User can add response to civis - Given a user with "correct" credentials tries to log in - When the user opens the "automation Federal thread" thread - Then the user can add response to "this is a problem" - - Scenario: User can remove civis - Given a user with "correct" credentials tries to log in - When the user opens the "automation Federal thread" thread - Then the user can remove "solution" called "this is a test solution" - And the user can remove "cause" called "c1" - And the user can remove "problem" called "this is a problem" \ No newline at end of file diff --git a/project/test_framework/utils.py b/project/test_framework/utils.py deleted file mode 100644 index 3b9171337..000000000 --- a/project/test_framework/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -from aloe import world -from selenium.webdriver.common.by import By -from selenium.webdriver.support.wait import WebDriverWait -from selenium.webdriver.support import expected_conditions as EC - -from test_framework.selectors import * - -webDriverWaitInSeconds = 5 - - -def open_home_page(): - world.browser.get(civi_wiki_url) - wait_for_page(civi_wiki_url) - - -######################################################## -# Waits for page with URL containing "url" to load -def wait_for_page(url): - wait = WebDriverWait(world.browser, webDriverWaitInSeconds) - wait.until(EC.url_contains(url)) - - -# Returns a list of WebElement found by a CSS Selector -def find_elements(selector): - return world.browser.find_elements_by_css_selector(selector) - - -# Returns a list of WebElement found by Class name -def find_elements_by_class(selector): - return world.browser.find_elements_by_class_name(selector) - - -# Returns a single WebElement found by a CSS Selector -def find_element(selector): - element = WebDriverWait(world.browser, webDriverWaitInSeconds).until( - EC.element_to_be_clickable((By.CSS_SELECTOR, selector)) - ) - return element - - -# Returns a single WebElement found by Class name -def find_element_by_class(selector): - return world.browser.find_element_by_class_name(selector) - - -# Returns a single WebElement found by Name -def find_element_by_name(selector): - element = WebDriverWait(world.browser, webDriverWaitInSeconds).until( - 
EC.element_to_be_clickable((By.NAME, selector)) - ) - return element - - -# Scrolls "element" into view. Used when element is outside the field of view -def scroll_into_view(element): - world.browser.execute_script("arguments[0].scrollIntoView();", element) - return element - - -# Uses JS to click on an "element". Used when "element" is not a button or text field -# Ex. homes in the Homes page -def js_click(element): - world.browser.execute_script("arguments[0].click();", element) diff --git a/project/threads/tests.py b/project/threads/tests.py index 7ce503c2d..a39b155ac 100644 --- a/project/threads/tests.py +++ b/project/threads/tests.py @@ -1,3 +1 @@ -from django.test import TestCase - # Create your tests here.
python-discord__site-1232
[ { "content": "\"\"\"\nDjango settings for pydis_site project.\n\nGenerated by 'django-admin startproject' using Django 2.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport logging\nimport os\nimport secrets\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom socket import gethostbyname, gethostname\n\nimport environ\nimport sentry_sdk\nfrom sentry_sdk.integrations.logging import LoggingIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nenv = environ.Env(\n DEBUG=(bool, False),\n SITE_DSN=(str, \"\"),\n BUILDING_DOCKER=(bool, False),\n STATIC_BUILD=(bool, False),\n GIT_SHA=(str, 'development'),\n TIMEOUT_PERIOD=(int, 5),\n GITHUB_TOKEN=(str, None),\n GITHUB_APP_ID=(str, None),\n GITHUB_APP_KEY=(str, None),\n)\n\nGIT_SHA = env(\"GIT_SHA\")\nGITHUB_API = \"https://api.github.com\"\nGITHUB_TOKEN = env(\"GITHUB_TOKEN\")\nGITHUB_APP_ID = env(\"GITHUB_APP_ID\")\nGITHUB_APP_KEY = env(\"GITHUB_APP_KEY\")\nGITHUB_TIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\"\n\"\"\"The datetime string format GitHub uses.\"\"\"\n\nSTATIC_BUILD: bool = env(\"STATIC_BUILD\")\n\nif GITHUB_APP_KEY and (key_file := Path(GITHUB_APP_KEY)).is_file():\n # Allow the OAuth key to be loaded from a file\n GITHUB_APP_KEY = key_file.read_text(encoding=\"utf-8\")\n\nif not STATIC_BUILD:\n sentry_sdk.init(\n dsn=env('SITE_DSN'),\n integrations=[DjangoIntegration(), LoggingIntegration(level=logging.DEBUG, event_level=logging.ERROR)],\n send_default_pii=True,\n release=f\"site@{GIT_SHA}\",\n profiles_sample_rate=1.0,\n enable_tracing=True,\n enable_db_query_source=True,\n db_query_source_threshold_ms=100, # Queries slower that 100ms will include the source in the event\n )\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = env('DEBUG')\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nif DEBUG:\n ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['*'])\n SECRET_KEY = \"yellow polkadot bikini\" # noqa: S105\n\n # Prevent verbose warnings emitted when passing a non-timezone aware\n # datetime object to the database, whilst we have time zone support\n # active. See the Django documentation for more details:\n # https://docs.djangoproject.com/en/dev/topics/i18n/timezones/\n warnings.filterwarnings(\n 'error', r\"DateTimeField .* received a naive datetime\",\n RuntimeWarning, r'django\\.db\\.models\\.fields',\n )\n\nelif 'CI' in os.environ:\n ALLOWED_HOSTS = ['*']\n SECRET_KEY = secrets.token_urlsafe(32)\n\n # See above. 
We run with `CI=true`, but debug unset in GitHub Actions,\n # so we also want to filter it there.\n warnings.filterwarnings(\n 'error', r\"DateTimeField .* received a naive datetime\",\n RuntimeWarning, r'django\\.db\\.models\\.fields',\n )\n\nelse:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'www.pythondiscord.com',\n 'pythondiscord.com',\n gethostname(),\n gethostbyname(gethostname()),\n 'site.default.svc.cluster.local',\n ],\n )\n SECRET_KEY = env('SECRET_KEY')\n\n# Application definition\nNON_STATIC_APPS = [\n 'pydis_site.apps.api',\n 'pydis_site.apps.staff',\n] if not STATIC_BUILD else []\n\nINSTALLED_APPS = [\n *NON_STATIC_APPS,\n 'pydis_site.apps.home',\n 'pydis_site.apps.resources',\n 'pydis_site.apps.content',\n 'pydis_site.apps.events',\n 'pydis_site.apps.redirect',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n 'django_filters',\n 'django_simple_bulma',\n 'rest_framework',\n 'rest_framework.authtoken',\n\n 'django_distill',\n]\n\nif not env(\"BUILDING_DOCKER\"):\n INSTALLED_APPS.append(\"django_prometheus\")\n\nif STATIC_BUILD:\n # The only middleware required during static builds\n MIDDLEWARE = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n ]\nelse:\n # Ensure that Prometheus middlewares are first and last here.\n MIDDLEWARE = [\n 'django_prometheus.middleware.PrometheusBeforeMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'django_prometheus.middleware.PrometheusAfterMiddleware'\n ]\n\nROOT_URLCONF = 'pydis_site.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'pydis_site', 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n \"pydis_site.context_processors.git_sha_processor\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'pydis_site.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': env.db(),\n 'metricity': env.db('METRICITY_DB_URL'),\n} if not STATIC_BUILD else {}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_TZ = True\n\n# Static 
files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, 'pydis_site', 'static')]\nSTATIC_ROOT = env('STATIC_ROOT', default='/app/staticfiles')\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\n 'django_simple_bulma.finders.SimpleBulmaFinder',\n]\n\nif DEBUG:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.local:8000')\n\n if \":\" in PARENT_HOST:\n ALLOWED_HOSTS.append(PARENT_HOST.split(\":\", 1)[0])\n else:\n ALLOWED_HOSTS.append(PARENT_HOST)\nelse:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.com')\n\n# Django Model Configuration\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Django REST framework\n# https://www.django-rest-framework.org\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.DjangoModelPermissions',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json'\n}\n\n# Logging\n# https://docs.djangoproject.com/en/2.1/topics/logging/\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(asctime)s | %(process)d:%(thread)d | %(module)s | %(levelname)-8s | %(message)s'\n )\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': env(\n 'LOG_LEVEL',\n default=(\n # If there is no explicit `LOG_LEVEL` set,\n # use `DEBUG` if we're running in debug mode but not\n # testing. Use `ERROR` if we're running tests, else\n # default to using `WARN`.\n 'INFO'\n if DEBUG and 'test' not in sys.argv\n else (\n 'ERROR'\n if 'test' in sys.argv\n else 'WARN'\n )\n )\n )\n }\n }\n}\n\n# Custom settings for django-simple-bulma\nBULMA_SETTINGS = {\n \"variables\": {\n \"primary\": \"#7289DA\", # PyDis blurple\n \"green\": \"#32ac66\", # Colour picked after Discord discussion\n \"turquoise\": \"#7289DA\", # Blurple, because Bulma uses this regardless of `primary` above\n \"blue\": \"#2482c1\", # Colour picked after Discord discussion\n \"cyan\": \"#2482c1\", # Colour picked after Discord discussion (matches the blue)\n \"purple\": \"#aa55e4\", # Apparently unused, but changed for consistency\n \"red\": \"#d63852\", # Colour picked after Discord discussion\n\n \"link\": \"$primary\",\n\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\", # Possible image dimensions\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n \"tooltip-max-width\": \"30rem\",\n },\n \"dark_variables\": {\n \"primary\": \"#5365A4\", # A darker PyDis blurple\n \"warning\": \"#4B4636\",\n \"warning-invert\": \"#FFFFFF\",\n \"primary-dark\": \"#EFF1FB\", # Bulma's primary-light\n \"primary-light\": \"#2B3660\",\n \"success-dark\": \"#EFFAF5\", # Bulma's success-light\n \"success-light\": \"#214133\",\n \"danger-dark\": \"#FEECF0\", # Bulma's danger-light\n \"danger-light\": \"#4C1822\",\n \"info-dark\": \"#EFF5FB\", # Bulma's info-light\n \"info-light\": \"#254056\",\n\n \"body-background-color\": \"#252629\",\n\n \"white\": \"#2C2F33\",\n \"white-bis\": \"#23272A \",\n \"white-ter\": \"#36393F\",\n \"light\": \"$white\",\n\n \"black\": \"#F7F7F7\",\n \"black-bis\": \"#F2F2F2\",\n \"black-ter\": \"#E6E6E6\",\n \"dark\": \"$black\",\n\n \"grey-darker\": \"#303032\",\n\n \"text\": 
\"#F4F4F4\",\n \"text-light\": \"#F7F7F7\",\n \"text-strong\": \"#FEFEFE\",\n\n \"link\": \"#99B0FF\", # A brighter PyDis blurple\n \"link-hover\": \"#FFFFFF\",\n \"link-focus\": \"$link-hover\",\n \"link-active\": \"$link-hover\",\n\n \"code\": \"#FF7990\", # Adjusted to 4.5 contrast ratio per WCAG Level AA\n \"code-background\": \"#464951\", # A graduation lighter than the default for light theme\n\n # Same as bulma, adjusted for dark mode\n \"shadow\": \"0 0.5em 1em -0.125em rgba(0, 0, 0, 0.3), 0 0px 0 1px rgba(0, 0, 0, 0.13)\",\n \"border\": \"#4E4F51\",\n \"border-light\": \"#313233\",\n\n # Use the same sizes\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\",\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n \"tooltip-max-width\": \"30rem\",\n },\n \"extensions\": [\n \"bulma-dropdown\",\n \"bulma-navbar-burger\",\n ],\n \"fontawesome_token\": \"ff22cb6f41\",\n}\n\n# Information about site repository\nSITE_REPOSITORY_OWNER = \"python-discord\"\nSITE_REPOSITORY_NAME = \"site\"\nSITE_REPOSITORY_BRANCH = \"master\"\n\n# Path for events pages\nEVENTS_PAGES_PATH = Path(BASE_DIR, \"pydis_site\", \"templates\", \"events\", \"pages\")\n\n# Path for content pages\nCONTENT_PAGES_PATH = Path(BASE_DIR, \"pydis_site\", \"apps\", \"content\", \"resources\")\n\n# Path for redirection links\nREDIRECTIONS_PATH = Path(BASE_DIR, \"pydis_site\", \"apps\", \"redirect\", \"redirects.yaml\")\n\n# How long to wait for synchronous requests before timing out\nTIMEOUT_PERIOD = env(\"TIMEOUT_PERIOD\")\n\n# Source files url for 'Edit on GitHub' link on content articles\nCONTENT_SRC_URL = (\n \"https://github.com/python-discord/site/tree/main/pydis_site/apps/content/resources/\"\n)\n", "path": "pydis_site/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for pydis_site project.\n\nGenerated by 'django-admin startproject' using Django 2.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport logging\nimport os\nimport secrets\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom socket import gethostbyname, gethostname\n\nimport environ\nimport sentry_sdk\nfrom sentry_sdk.integrations.logging import LoggingIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nenv = environ.Env(\n DEBUG=(bool, False),\n SITE_DSN=(str, \"\"),\n BUILDING_DOCKER=(bool, False),\n STATIC_BUILD=(bool, False),\n GIT_SHA=(str, 'development'),\n TIMEOUT_PERIOD=(int, 5),\n GITHUB_TOKEN=(str, None),\n GITHUB_APP_ID=(str, None),\n GITHUB_APP_KEY=(str, None),\n)\n\nGIT_SHA = env(\"GIT_SHA\")\nGITHUB_API = \"https://api.github.com\"\nGITHUB_TOKEN = env(\"GITHUB_TOKEN\")\nGITHUB_APP_ID = env(\"GITHUB_APP_ID\")\nGITHUB_APP_KEY = env(\"GITHUB_APP_KEY\")\nGITHUB_TIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\"\n\"\"\"The datetime string format GitHub uses.\"\"\"\n\nSTATIC_BUILD: bool = env(\"STATIC_BUILD\")\n\nif GITHUB_APP_KEY and (key_file := Path(GITHUB_APP_KEY)).is_file():\n # Allow the OAuth key to be loaded from a file\n GITHUB_APP_KEY = key_file.read_text(encoding=\"utf-8\")\n\nif not STATIC_BUILD:\n sentry_sdk.init(\n dsn=env('SITE_DSN'),\n integrations=[DjangoIntegration(), LoggingIntegration(level=logging.DEBUG, event_level=logging.ERROR)],\n send_default_pii=True,\n release=f\"site@{GIT_SHA}\",\n profiles_sample_rate=1.0,\n enable_tracing=True,\n enable_db_query_source=True,\n db_query_source_threshold_ms=100, # Queries slower that 100ms will include the source in the event\n )\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = env('DEBUG')\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nif DEBUG:\n ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['*'])\n SECRET_KEY = \"yellow polkadot bikini\" # noqa: S105\n\n # Prevent verbose warnings emitted when passing a non-timezone aware\n # datetime object to the database, whilst we have time zone support\n # active. See the Django documentation for more details:\n # https://docs.djangoproject.com/en/dev/topics/i18n/timezones/\n warnings.filterwarnings(\n 'error', r\"DateTimeField .* received a naive datetime\",\n RuntimeWarning, r'django\\.db\\.models\\.fields',\n )\n\nelif 'CI' in os.environ:\n ALLOWED_HOSTS = ['*']\n SECRET_KEY = secrets.token_urlsafe(32)\n\n # See above. 
We run with `CI=true`, but debug unset in GitHub Actions,\n # so we also want to filter it there.\n warnings.filterwarnings(\n 'error', r\"DateTimeField .* received a naive datetime\",\n RuntimeWarning, r'django\\.db\\.models\\.fields',\n )\n\nelse:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'www.pythondiscord.com',\n 'pythondiscord.com',\n gethostname(),\n gethostbyname(gethostname()),\n 'site.default.svc.cluster.local',\n ],\n )\n SECRET_KEY = env('SECRET_KEY')\n\n# Application definition\nNON_STATIC_APPS = [\n 'pydis_site.apps.api',\n 'pydis_site.apps.staff',\n] if not STATIC_BUILD else []\n\nINSTALLED_APPS = [\n *NON_STATIC_APPS,\n 'pydis_site.apps.home',\n 'pydis_site.apps.resources',\n 'pydis_site.apps.content',\n 'pydis_site.apps.events',\n 'pydis_site.apps.redirect',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n 'django_filters',\n 'django_simple_bulma',\n 'rest_framework',\n 'rest_framework.authtoken',\n\n 'django_distill',\n]\n\nif not env(\"BUILDING_DOCKER\"):\n INSTALLED_APPS.append(\"django_prometheus\")\n\nif STATIC_BUILD:\n # The only middleware required during static builds\n MIDDLEWARE = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n ]\nelse:\n # Ensure that Prometheus middlewares are first and last here.\n MIDDLEWARE = [\n 'django_prometheus.middleware.PrometheusBeforeMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'django_prometheus.middleware.PrometheusAfterMiddleware'\n ]\n\nROOT_URLCONF = 'pydis_site.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'pydis_site', 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n \"pydis_site.context_processors.git_sha_processor\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'pydis_site.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': env.db(),\n 'metricity': env.db('METRICITY_DB_URL'),\n} if not STATIC_BUILD else {}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_TZ = True\n\n# Static 
files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, 'pydis_site', 'static')]\nSTATIC_ROOT = env('STATIC_ROOT', default='/app/staticfiles')\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\n 'django_simple_bulma.finders.SimpleBulmaFinder',\n]\n\nif DEBUG:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.local:8000')\n\n if \":\" in PARENT_HOST:\n ALLOWED_HOSTS.append(PARENT_HOST.split(\":\", 1)[0])\n else:\n ALLOWED_HOSTS.append(PARENT_HOST)\nelse:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.com')\n\n# Django Model Configuration\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Django REST framework\n# https://www.django-rest-framework.org\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.DjangoModelPermissions',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json'\n}\n\n# Logging\n# https://docs.djangoproject.com/en/2.1/topics/logging/\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(asctime)s | %(process)d:%(thread)d | %(module)s | %(levelname)-8s | %(message)s'\n )\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': env(\n 'LOG_LEVEL',\n default=(\n # If there is no explicit `LOG_LEVEL` set,\n # use `DEBUG` if we're running in debug mode but not\n # testing. Use `ERROR` if we're running tests, else\n # default to using `WARN`.\n 'INFO'\n if DEBUG and 'test' not in sys.argv\n else (\n 'ERROR'\n if 'test' in sys.argv\n else 'WARN'\n )\n )\n )\n }\n }\n}\n\n# Custom settings for django-simple-bulma\nBULMA_SETTINGS = {\n \"variables\": {\n \"primary\": \"#7289DA\", # PyDis blurple\n \"green\": \"#32ac66\", # Colour picked after Discord discussion\n \"turquoise\": \"#7289DA\", # Blurple, because Bulma uses this regardless of `primary` above\n \"blue\": \"#2482c1\", # Colour picked after Discord discussion\n \"cyan\": \"#2482c1\", # Colour picked after Discord discussion (matches the blue)\n \"purple\": \"#aa55e4\", # Apparently unused, but changed for consistency\n \"red\": \"#d63852\", # Colour picked after Discord discussion\n\n \"link\": \"$primary\",\n\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\", # Possible image dimensions\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n \"tooltip-max-width\": \"30rem\",\n },\n \"dark_variables\": {\n \"primary\": \"#5365A4\", # A darker PyDis blurple\n \"warning\": \"#4B4636\",\n \"warning-invert\": \"#FFFFFF\",\n \"primary-dark\": \"#EFF1FB\", # Bulma's primary-light\n \"primary-light\": \"#2B3660\",\n \"success-dark\": \"#EFFAF5\", # Bulma's success-light\n \"success-light\": \"#214133\",\n \"danger-dark\": \"#FEECF0\", # Bulma's danger-light\n \"danger-light\": \"#4C1822\",\n \"info-dark\": \"#EFF5FB\", # Bulma's info-light\n \"info-light\": \"#254056\",\n\n \"body-background-color\": \"#252629\",\n\n \"white\": \"#2C2F33\",\n \"white-bis\": \"#23272A \",\n \"white-ter\": \"#36393F\",\n \"light\": \"$white\",\n\n \"black\": \"#F7F7F7\",\n \"black-bis\": \"#F2F2F2\",\n \"black-ter\": \"#E6E6E6\",\n \"dark\": \"$black\",\n\n \"grey-darker\": \"#303032\",\n\n \"text\": 
\"#F4F4F4\",\n \"text-light\": \"#F7F7F7\",\n \"text-strong\": \"#FEFEFE\",\n\n \"link\": \"#99B0FF\", # A brighter PyDis blurple\n \"link-hover\": \"#FFFFFF\",\n \"link-focus\": \"$link-hover\",\n \"link-active\": \"$link-hover\",\n\n \"code\": \"#FF7990\", # Adjusted to 4.5 contrast ratio per WCAG Level AA\n \"code-background\": \"#464951\", # A graduation lighter than the default for light theme\n\n # Same as bulma, adjusted for dark mode\n \"shadow\": \"0 0.5em 1em -0.125em rgba(0, 0, 0, 0.3), 0 0px 0 1px rgba(0, 0, 0, 0.13)\",\n \"border\": \"#4E4F51\",\n \"border-light\": \"#313233\",\n\n # Use the same sizes\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\",\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n \"tooltip-max-width\": \"30rem\",\n },\n \"extensions\": [\n \"bulma-dropdown\",\n \"bulma-navbar-burger\",\n ],\n}\n\n# Information about site repository\nSITE_REPOSITORY_OWNER = \"python-discord\"\nSITE_REPOSITORY_NAME = \"site\"\nSITE_REPOSITORY_BRANCH = \"master\"\n\n# Path for events pages\nEVENTS_PAGES_PATH = Path(BASE_DIR, \"pydis_site\", \"templates\", \"events\", \"pages\")\n\n# Path for content pages\nCONTENT_PAGES_PATH = Path(BASE_DIR, \"pydis_site\", \"apps\", \"content\", \"resources\")\n\n# Path for redirection links\nREDIRECTIONS_PATH = Path(BASE_DIR, \"pydis_site\", \"apps\", \"redirect\", \"redirects.yaml\")\n\n# How long to wait for synchronous requests before timing out\nTIMEOUT_PERIOD = env(\"TIMEOUT_PERIOD\")\n\n# Source files url for 'Edit on GitHub' link on content articles\nCONTENT_SRC_URL = (\n \"https://github.com/python-discord/site/tree/main/pydis_site/apps/content/resources/\"\n)\n", "path": "pydis_site/settings.py" } ]
diff --git a/pydis_site/settings.py b/pydis_site/settings.py index a916ddb0d..dfde9cef2 100644 --- a/pydis_site/settings.py +++ b/pydis_site/settings.py @@ -369,7 +369,6 @@ "bulma-dropdown", "bulma-navbar-burger", ], - "fontawesome_token": "ff22cb6f41", } # Information about site repository diff --git a/pydis_site/static/fontawesome/LICENSE.txt b/pydis_site/static/fontawesome/LICENSE.txt new file mode 100644 index 000000000..39e18e3d3 --- /dev/null +++ b/pydis_site/static/fontawesome/LICENSE.txt @@ -0,0 +1,165 @@ +Fonticons, Inc. (https://fontawesome.com) + +-------------------------------------------------------------------------------- + +Font Awesome Free License + +Font Awesome Free is free, open source, and GPL friendly. You can use it for +commercial projects, open source projects, or really almost whatever you want. +Full Font Awesome Free license: https://fontawesome.com/license/free. + +-------------------------------------------------------------------------------- + +# Icons: CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/) + +The Font Awesome Free download is licensed under a Creative Commons +Attribution 4.0 International License and applies to all icons packaged +as SVG and JS file types. + +-------------------------------------------------------------------------------- + +# Fonts: SIL OFL 1.1 License + +In the Font Awesome Free download, the SIL OFL license applies to all icons +packaged as web and desktop font files. + +Copyright (c) 2023 Fonticons, Inc. (https://fontawesome.com) +with Reserved Font Name: "Font Awesome". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + +SIL OPEN FONT LICENSE +Version 1.1 - 26 February 2007 + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting β€” in part or in whole β€” any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. + +-------------------------------------------------------------------------------- + +# Code: MIT License (https://opensource.org/licenses/MIT) + +In the Font Awesome Free download, the MIT license applies to all non-font and +non-icon files. + +Copyright 2023 Fonticons, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in the +Software without restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +# Attribution + +Attribution is required by MIT, SIL OFL, and CC BY licenses. Downloaded Font +Awesome Free files already contain embedded comments with sufficient +attribution, so you shouldn't need to do anything additional when using these +files normally. + +We've kept attribution comments terse, so we ask that you do not actively work +to remove them from files, especially code. They're a great way for folks to +learn about Font Awesome. + +-------------------------------------------------------------------------------- + +# Brand Icons + +All brand icons are trademarks of their respective owners. The use of these +trademarks does not indicate endorsement of the trademark holder by Font +Awesome, nor vice versa. **Please do not use brand logos for any purpose except +to represent the company, product, or service to which they refer.** diff --git a/pydis_site/static/fontawesome/README.md b/pydis_site/static/fontawesome/README.md new file mode 100644 index 000000000..171ef5ecd --- /dev/null +++ b/pydis_site/static/fontawesome/README.md @@ -0,0 +1,7 @@ +# Font Awesome 6.5.1 + +Thanks for downloading Font Awesome! We're so excited you're here. + +Our documentation is available online. Just head here: + +https://fontawesome.com diff --git a/pydis_site/static/fontawesome/UPGRADING.md b/pydis_site/static/fontawesome/UPGRADING.md new file mode 100644 index 000000000..d1e7d3658 --- /dev/null +++ b/pydis_site/static/fontawesome/UPGRADING.md @@ -0,0 +1,10 @@ +# Upgrading Guide + +See the [changelog](https://fontawesome.com/docs/changelog/) for detailed information about what has changed between versions. + +The upgrading guide has moved for version 6 to: + +- [Web](https://fontawesome.com/docs/web/setup/upgrade/) +- [Desktop](https://fontawesome.com/docs/desktop/setup/upgrade/) + +As always, [submit issues](https://github.com/FortAwesome/Font-Awesome/issues/new) that you run into with this guide or with these upgrades to us. diff --git a/pydis_site/static/fontawesome/css/all.min.css b/pydis_site/static/fontawesome/css/all.min.css new file mode 100644 index 000000000..f59519f9c --- /dev/null +++ b/pydis_site/static/fontawesome/css/all.min.css @@ -0,0 +1,9 @@ +/*! + * Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2023 Fonticons, Inc. 
+ */ +.fa{font-family:var(--fa-style-family,"Font Awesome 6 Free");font-weight:var(--fa-style,900)}.fa,.fa-brands,.fa-classic,.fa-regular,.fa-sharp,.fa-solid,.fab,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:var(--fa-display,inline-block);font-style:normal;font-variant:normal;line-height:1;text-rendering:auto}.fa-classic,.fa-regular,.fa-solid,.far,.fas{font-family:"Font Awesome 6 Free"}.fa-brands,.fab{font-family:"Font Awesome 6 Brands"}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-2xs{font-size:.625em;line-height:.1em;vertical-align:.225em}.fa-xs{font-size:.75em;line-height:.08333em;vertical-align:.125em}.fa-sm{font-size:.875em;line-height:.07143em;vertical-align:.05357em}.fa-lg{font-size:1.25em;line-height:.05em;vertical-align:-.075em}.fa-xl{font-size:1.5em;line-height:.04167em;vertical-align:-.125em}.fa-2xl{font-size:2em;line-height:.03125em;vertical-align:-.1875em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:var(--fa-li-margin,2.5em);padding-left:0}.fa-ul>li{position:relative}.fa-li{left:calc(var(--fa-li-width, 2em)*-1);position:absolute;text-align:center;width:var(--fa-li-width,2em);line-height:inherit}.fa-border{border-radius:var(--fa-border-radius,.1em);border:var(--fa-border-width,.08em) var(--fa-border-style,solid) var(--fa-border-color,#eee);padding:var(--fa-border-padding,.2em .25em .15em)}.fa-pull-left{float:left;margin-right:var(--fa-pull-margin,.3em)}.fa-pull-right{float:right;margin-left:var(--fa-pull-margin,.3em)}.fa-beat{-webkit-animation-name:fa-beat;animation-name:fa-beat;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-bounce{-webkit-animation-name:fa-bounce;animation-name:fa-bounce;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1))}.fa-fade{-webkit-animation-name:fa-fade;animation-name:fa-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-beat-fade,.fa-fade{-webkit-animation-delay:var(--fa-animation-delay,0s);animat
ion-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s)}.fa-beat-fade{-webkit-animation-name:fa-beat-fade;animation-name:fa-beat-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-flip{-webkit-animation-name:fa-flip;animation-name:fa-flip;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-shake{-webkit-animation-name:fa-shake;animation-name:fa-shake;-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-shake,.fa-spin{-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal)}.fa-spin{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-duration:var(--fa-animation-duration,2s);animation-duration:var(--fa-animation-duration,2s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin-reverse{--fa-animation-direction:reverse}.fa-pulse,.fa-spin-pulse{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,steps(8));animation-timing-function:var(--fa-animation-timing,steps(8))}@media (prefers-reduced-motion:reduce){.fa-beat,.fa-beat-fade,.fa-bounce,.fa-fade,.fa-flip,.fa-pulse,.fa-shake,.fa-spin,.fa-spin-pulse{-webkit-animation-delay:-1ms;animation-delay:-1ms;-webkit-animation-duration:1ms;animation-duration:1ms;-webkit-animation-iteration-count:1;animation-iteration-count:1;-webkit-transition-delay:0s;transition-delay:0s;-webkit-transition-duration:0s;transition-duration:0s}}@-webkit-keyframes 
fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@-webkit-keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@-webkit-keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@-webkit-keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@-webkit-keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@keyframes 
fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@-webkit-keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}.fa-rotate-by{-webkit-transform:rotate(var(--fa-rotate-angle,none));transform:rotate(var(--fa-rotate-angle,none))}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%;z-index:var(--fa-stack-z-index,auto)}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:var(--fa-inverse,#fff)} + 
+.fa-0:before{content:"\30"}.fa-1:before{content:"\31"}.fa-2:before{content:"\32"}.fa-3:before{content:"\33"}.fa-4:before{content:"\34"}.fa-5:before{content:"\35"}.fa-6:before{content:"\36"}.fa-7:before{content:"\37"}.fa-8:before{content:"\38"}.fa-9:before{content:"\39"}.fa-fill-drip:before{content:"\f576"}.fa-arrows-to-circle:before{content:"\e4bd"}.fa-chevron-circle-right:before,.fa-circle-chevron-right:before{content:"\f138"}.fa-at:before{content:"\40"}.fa-trash-alt:before,.fa-trash-can:before{content:"\f2ed"}.fa-text-height:before{content:"\f034"}.fa-user-times:before,.fa-user-xmark:before{content:"\f235"}.fa-stethoscope:before{content:"\f0f1"}.fa-comment-alt:before,.fa-message:before{content:"\f27a"}.fa-info:before{content:"\f129"}.fa-compress-alt:before,.fa-down-left-and-up-right-to-center:before{content:"\f422"}.fa-explosion:before{content:"\e4e9"}.fa-file-alt:before,.fa-file-lines:before,.fa-file-text:before{content:"\f15c"}.fa-wave-square:before{content:"\f83e"}.fa-ring:before{content:"\f70b"}.fa-building-un:before{content:"\e4d9"}.fa-dice-three:before{content:"\f527"}.fa-calendar-alt:before,.fa-calendar-days:before{content:"\f073"}.fa-anchor-circle-check:before{content:"\e4aa"}.fa-building-circle-arrow-right:before{content:"\e4d1"}.fa-volleyball-ball:before,.fa-volleyball:before{content:"\f45f"}.fa-arrows-up-to-line:before{content:"\e4c2"}.fa-sort-desc:before,.fa-sort-down:before{content:"\f0dd"}.fa-circle-minus:before,.fa-minus-circle:before{content:"\f056"}.fa-door-open:before{content:"\f52b"}.fa-right-from-bracket:before,.fa-sign-out-alt:before{content:"\f2f5"}.fa-atom:before{content:"\f5d2"}.fa-soap:before{content:"\e06e"}.fa-heart-music-camera-bolt:before,.fa-icons:before{content:"\f86d"}.fa-microphone-alt-slash:before,.fa-microphone-lines-slash:before{content:"\f539"}.fa-bridge-circle-check:before{content:"\e4c9"}.fa-pump-medical:before{content:"\e06a"}.fa-fingerprint:before{content:"\f577"}.fa-hand-point-right:before{content:"\f0a4"}.fa-magnifying-glass-location:before,.fa-search-location:before{content:"\f689"}.fa-forward-step:before,.fa-step-forward:before{content:"\f051"}.fa-face-smile-beam:before,.fa-smile-beam:before{content:"\f5b8"}.fa-flag-checkered:before{content:"\f11e"}.fa-football-ball:before,.fa-football:before{content:"\f44e"}.fa-school-circle-exclamation:before{content:"\e56c"}.fa-crop:before{content:"\f125"}.fa-angle-double-down:before,.fa-angles-down:before{content:"\f103"}.fa-users-rectangle:before{content:"\e594"}.fa-people-roof:before{content:"\e537"}.fa-people-line:before{content:"\e534"}.fa-beer-mug-empty:before,.fa-beer:before{content:"\f0fc"}.fa-diagram-predecessor:before{content:"\e477"}.fa-arrow-up-long:before,.fa-long-arrow-up:before{content:"\f176"}.fa-burn:before,.fa-fire-flame-simple:before{content:"\f46a"}.fa-male:before,.fa-person:before{content:"\f183"}.fa-laptop:before{content:"\f109"}.fa-file-csv:before{content:"\f6dd"}.fa-menorah:before{content:"\f676"}.fa-truck-plane:before{content:"\e58f"}.fa-record-vinyl:before{content:"\f8d9"}.fa-face-grin-stars:before,.fa-grin-stars:before{content:"\f587"}.fa-bong:before{content:"\f55c"}.fa-pastafarianism:before,.fa-spaghetti-monster-flying:before{content:"\f67b"}.fa-arrow-down-up-across-line:before{content:"\e4af"}.fa-spoon:before,.fa-utensil-spoon:before{content:"\f2e5"}.fa-jar-wheat:before{content:"\e517"}.fa-envelopes-bulk:before,.fa-mail-bulk:before{content:"\f674"}.fa-file-circle-exclamation:before{content:"\e4eb"}.fa-circle-h:before,.fa-hospital-symbol:before{content:"\f47e"}.fa-pager:before{co
ntent:"\f815"}.fa-address-book:before,.fa-contact-book:before{content:"\f2b9"}.fa-strikethrough:before{content:"\f0cc"}.fa-k:before{content:"\4b"}.fa-landmark-flag:before{content:"\e51c"}.fa-pencil-alt:before,.fa-pencil:before{content:"\f303"}.fa-backward:before{content:"\f04a"}.fa-caret-right:before{content:"\f0da"}.fa-comments:before{content:"\f086"}.fa-file-clipboard:before,.fa-paste:before{content:"\f0ea"}.fa-code-pull-request:before{content:"\e13c"}.fa-clipboard-list:before{content:"\f46d"}.fa-truck-loading:before,.fa-truck-ramp-box:before{content:"\f4de"}.fa-user-check:before{content:"\f4fc"}.fa-vial-virus:before{content:"\e597"}.fa-sheet-plastic:before{content:"\e571"}.fa-blog:before{content:"\f781"}.fa-user-ninja:before{content:"\f504"}.fa-person-arrow-up-from-line:before{content:"\e539"}.fa-scroll-torah:before,.fa-torah:before{content:"\f6a0"}.fa-broom-ball:before,.fa-quidditch-broom-ball:before,.fa-quidditch:before{content:"\f458"}.fa-toggle-off:before{content:"\f204"}.fa-archive:before,.fa-box-archive:before{content:"\f187"}.fa-person-drowning:before{content:"\e545"}.fa-arrow-down-9-1:before,.fa-sort-numeric-desc:before,.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-face-grin-tongue-squint:before,.fa-grin-tongue-squint:before{content:"\f58a"}.fa-spray-can:before{content:"\f5bd"}.fa-truck-monster:before{content:"\f63b"}.fa-w:before{content:"\57"}.fa-earth-africa:before,.fa-globe-africa:before{content:"\f57c"}.fa-rainbow:before{content:"\f75b"}.fa-circle-notch:before{content:"\f1ce"}.fa-tablet-alt:before,.fa-tablet-screen-button:before{content:"\f3fa"}.fa-paw:before{content:"\f1b0"}.fa-cloud:before{content:"\f0c2"}.fa-trowel-bricks:before{content:"\e58a"}.fa-face-flushed:before,.fa-flushed:before{content:"\f579"}.fa-hospital-user:before{content:"\f80d"}.fa-tent-arrow-left-right:before{content:"\e57f"}.fa-gavel:before,.fa-legal:before{content:"\f0e3"}.fa-binoculars:before{content:"\f1e5"}.fa-microphone-slash:before{content:"\f131"}.fa-box-tissue:before{content:"\e05b"}.fa-motorcycle:before{content:"\f21c"}.fa-bell-concierge:before,.fa-concierge-bell:before{content:"\f562"}.fa-pen-ruler:before,.fa-pencil-ruler:before{content:"\f5ae"}.fa-people-arrows-left-right:before,.fa-people-arrows:before{content:"\e068"}.fa-mars-and-venus-burst:before{content:"\e523"}.fa-caret-square-right:before,.fa-square-caret-right:before{content:"\f152"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-sun-plant-wilt:before{content:"\e57a"}.fa-toilets-portable:before{content:"\e584"}.fa-hockey-puck:before{content:"\f453"}.fa-table:before{content:"\f0ce"}.fa-magnifying-glass-arrow-right:before{content:"\e521"}.fa-digital-tachograph:before,.fa-tachograph-digital:before{content:"\f566"}.fa-users-slash:before{content:"\e073"}.fa-clover:before{content:"\e139"}.fa-mail-reply:before,.fa-reply:before{content:"\f3e5"}.fa-star-and-crescent:before{content:"\f699"}.fa-house-fire:before{content:"\e50c"}.fa-minus-square:before,.fa-square-minus:before{content:"\f146"}.fa-helicopter:before{content:"\f533"}.fa-compass:before{content:"\f14e"}.fa-caret-square-down:before,.fa-square-caret-down:before{content:"\f150"}.fa-file-circle-question:before{content:"\e4ef"}.fa-laptop-code:before{content:"\f5fc"}.fa-swatchbook:before{content:"\f5c3"}.fa-prescription-bottle:before{content:"\f485"}.fa-bars:before,.fa-navicon:before{content:"\f0c9"}.fa-people-group:before{content:"\e533"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-heart-broken:before,.fa-heart-crack:before{content:"\f7a9"}.fa-external-link
-square-alt:before,.fa-square-up-right:before{content:"\f360"}.fa-face-kiss-beam:before,.fa-kiss-beam:before{content:"\f597"}.fa-film:before{content:"\f008"}.fa-ruler-horizontal:before{content:"\f547"}.fa-people-robbery:before{content:"\e536"}.fa-lightbulb:before{content:"\f0eb"}.fa-caret-left:before{content:"\f0d9"}.fa-circle-exclamation:before,.fa-exclamation-circle:before{content:"\f06a"}.fa-school-circle-xmark:before{content:"\e56d"}.fa-arrow-right-from-bracket:before,.fa-sign-out:before{content:"\f08b"}.fa-chevron-circle-down:before,.fa-circle-chevron-down:before{content:"\f13a"}.fa-unlock-alt:before,.fa-unlock-keyhole:before{content:"\f13e"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-headphones-alt:before,.fa-headphones-simple:before{content:"\f58f"}.fa-sitemap:before{content:"\f0e8"}.fa-circle-dollar-to-slot:before,.fa-donate:before{content:"\f4b9"}.fa-memory:before{content:"\f538"}.fa-road-spikes:before{content:"\e568"}.fa-fire-burner:before{content:"\e4f1"}.fa-flag:before{content:"\f024"}.fa-hanukiah:before{content:"\f6e6"}.fa-feather:before{content:"\f52d"}.fa-volume-down:before,.fa-volume-low:before{content:"\f027"}.fa-comment-slash:before{content:"\f4b3"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-compress:before{content:"\f066"}.fa-wheat-alt:before,.fa-wheat-awn:before{content:"\e2cd"}.fa-ankh:before{content:"\f644"}.fa-hands-holding-child:before{content:"\e4fa"}.fa-asterisk:before{content:"\2a"}.fa-check-square:before,.fa-square-check:before{content:"\f14a"}.fa-peseta-sign:before{content:"\e221"}.fa-header:before,.fa-heading:before{content:"\f1dc"}.fa-ghost:before{content:"\f6e2"}.fa-list-squares:before,.fa-list:before{content:"\f03a"}.fa-phone-square-alt:before,.fa-square-phone-flip:before{content:"\f87b"}.fa-cart-plus:before{content:"\f217"}.fa-gamepad:before{content:"\f11b"}.fa-circle-dot:before,.fa-dot-circle:before{content:"\f192"}.fa-dizzy:before,.fa-face-dizzy:before{content:"\f567"}.fa-egg:before{content:"\f7fb"}.fa-house-medical-circle-xmark:before{content:"\e513"}.fa-campground:before{content:"\f6bb"}.fa-folder-plus:before{content:"\f65e"}.fa-futbol-ball:before,.fa-futbol:before,.fa-soccer-ball:before{content:"\f1e3"}.fa-paint-brush:before,.fa-paintbrush:before{content:"\f1fc"}.fa-lock:before{content:"\f023"}.fa-gas-pump:before{content:"\f52f"}.fa-hot-tub-person:before,.fa-hot-tub:before{content:"\f593"}.fa-map-location:before,.fa-map-marked:before{content:"\f59f"}.fa-house-flood-water:before{content:"\e50e"}.fa-tree:before{content:"\f1bb"}.fa-bridge-lock:before{content:"\e4cc"}.fa-sack-dollar:before{content:"\f81d"}.fa-edit:before,.fa-pen-to-square:before{content:"\f044"}.fa-car-side:before{content:"\f5e4"}.fa-share-alt:before,.fa-share-nodes:before{content:"\f1e0"}.fa-heart-circle-minus:before{content:"\e4ff"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-microscope:before{content:"\f610"}.fa-sink:before{content:"\e06d"}.fa-bag-shopping:before,.fa-shopping-bag:before{content:"\f290"}.fa-arrow-down-z-a:before,.fa-sort-alpha-desc:before,.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-mitten:before{content:"\f7b5"}.fa-person-rays:before{content:"\e54d"}.fa-users:before{content:"\f0c0"}.fa-eye-slash:before{content:"\f070"}.fa-flask-vial:before{content:"\e4f3"}.fa-hand-paper:before,.fa-hand:before{content:"\f256"}.fa-om:before{content:"\f679"}.fa-worm:before{content:"\e599"}.fa-house-circle-xmark:before{content:"\e50b"}.fa-plug:before{content:"\f1e6"}.fa-chevron-up:before{content:"\f077"}.fa-hand-spock:before{content:"\f259"}.fa-stopwat
ch:before{content:"\f2f2"}.fa-face-kiss:before,.fa-kiss:before{content:"\f596"}.fa-bridge-circle-xmark:before{content:"\e4cb"}.fa-face-grin-tongue:before,.fa-grin-tongue:before{content:"\f589"}.fa-chess-bishop:before{content:"\f43a"}.fa-face-grin-wink:before,.fa-grin-wink:before{content:"\f58c"}.fa-deaf:before,.fa-deafness:before,.fa-ear-deaf:before,.fa-hard-of-hearing:before{content:"\f2a4"}.fa-road-circle-check:before{content:"\e564"}.fa-dice-five:before{content:"\f523"}.fa-rss-square:before,.fa-square-rss:before{content:"\f143"}.fa-land-mine-on:before{content:"\e51b"}.fa-i-cursor:before{content:"\f246"}.fa-stamp:before{content:"\f5bf"}.fa-stairs:before{content:"\e289"}.fa-i:before{content:"\49"}.fa-hryvnia-sign:before,.fa-hryvnia:before{content:"\f6f2"}.fa-pills:before{content:"\f484"}.fa-face-grin-wide:before,.fa-grin-alt:before{content:"\f581"}.fa-tooth:before{content:"\f5c9"}.fa-v:before{content:"\56"}.fa-bangladeshi-taka-sign:before{content:"\e2e6"}.fa-bicycle:before{content:"\f206"}.fa-rod-asclepius:before,.fa-rod-snake:before,.fa-staff-aesculapius:before,.fa-staff-snake:before{content:"\e579"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-ambulance:before,.fa-truck-medical:before{content:"\f0f9"}.fa-wheat-awn-circle-exclamation:before{content:"\e598"}.fa-snowman:before{content:"\f7d0"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-road-barrier:before{content:"\e562"}.fa-school:before{content:"\f549"}.fa-igloo:before{content:"\f7ae"}.fa-joint:before{content:"\f595"}.fa-angle-right:before{content:"\f105"}.fa-horse:before{content:"\f6f0"}.fa-q:before{content:"\51"}.fa-g:before{content:"\47"}.fa-notes-medical:before{content:"\f481"}.fa-temperature-2:before,.fa-temperature-half:before,.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-dong-sign:before{content:"\e169"}.fa-capsules:before{content:"\f46b"}.fa-poo-bolt:before,.fa-poo-storm:before{content:"\f75a"}.fa-face-frown-open:before,.fa-frown-open:before{content:"\f57a"}.fa-hand-point-up:before{content:"\f0a6"}.fa-money-bill:before{content:"\f0d6"}.fa-bookmark:before{content:"\f02e"}.fa-align-justify:before{content:"\f039"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-helmet-un:before{content:"\e503"}.fa-bullseye:before{content:"\f140"}.fa-bacon:before{content:"\f7e5"}.fa-hand-point-down:before{content:"\f0a7"}.fa-arrow-up-from-bracket:before{content:"\e09a"}.fa-folder-blank:before,.fa-folder:before{content:"\f07b"}.fa-file-medical-alt:before,.fa-file-waveform:before{content:"\f478"}.fa-radiation:before{content:"\f7b9"}.fa-chart-simple:before{content:"\e473"}.fa-mars-stroke:before{content:"\f229"}.fa-vial:before{content:"\f492"}.fa-dashboard:before,.fa-gauge-med:before,.fa-gauge:before,.fa-tachometer-alt-average:before{content:"\f624"}.fa-magic-wand-sparkles:before,.fa-wand-magic-sparkles:before{content:"\e2ca"}.fa-e:before{content:"\45"}.fa-pen-alt:before,.fa-pen-clip:before{content:"\f305"}.fa-bridge-circle-exclamation:before{content:"\e4ca"}.fa-user:before{content:"\f007"}.fa-school-circle-check:before{content:"\e56b"}.fa-dumpster:before{content:"\f793"}.fa-shuttle-van:before,.fa-van-shuttle:before{content:"\f5b6"}.fa-building-user:before{content:"\e4da"}.fa-caret-square-left:before,.fa-square-caret-left:before{content:"\f191"}.fa-highlighter:before{content:"\f591"}.fa-key:before{content:"\f084"}.fa-bullhorn:before{content:"\f0a1"}.fa-globe:before{content:"\f0ac"}.fa-synagogue:before{content:"\f69b"}.fa-person-half-dress:before{content:"\e548"}.fa-road-bridge:before{content:"\e563"}.fa-location-arrow:befor
e{content:"\f124"}.fa-c:before{content:"\43"}.fa-tablet-button:before{content:"\f10a"}.fa-building-lock:before{content:"\e4d6"}.fa-pizza-slice:before{content:"\f818"}.fa-money-bill-wave:before{content:"\f53a"}.fa-area-chart:before,.fa-chart-area:before{content:"\f1fe"}.fa-house-flag:before{content:"\e50d"}.fa-person-circle-minus:before{content:"\e540"}.fa-ban:before,.fa-cancel:before{content:"\f05e"}.fa-camera-rotate:before{content:"\e0d8"}.fa-air-freshener:before,.fa-spray-can-sparkles:before{content:"\f5d0"}.fa-star:before{content:"\f005"}.fa-repeat:before{content:"\f363"}.fa-cross:before{content:"\f654"}.fa-box:before{content:"\f466"}.fa-venus-mars:before{content:"\f228"}.fa-arrow-pointer:before,.fa-mouse-pointer:before{content:"\f245"}.fa-expand-arrows-alt:before,.fa-maximize:before{content:"\f31e"}.fa-charging-station:before{content:"\f5e7"}.fa-shapes:before,.fa-triangle-circle-square:before{content:"\f61f"}.fa-random:before,.fa-shuffle:before{content:"\f074"}.fa-person-running:before,.fa-running:before{content:"\f70c"}.fa-mobile-retro:before{content:"\e527"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-spider:before{content:"\f717"}.fa-hands-bound:before{content:"\e4f9"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-plane-circle-exclamation:before{content:"\e556"}.fa-x-ray:before{content:"\f497"}.fa-spell-check:before{content:"\f891"}.fa-slash:before{content:"\f715"}.fa-computer-mouse:before,.fa-mouse:before{content:"\f8cc"}.fa-arrow-right-to-bracket:before,.fa-sign-in:before{content:"\f090"}.fa-shop-slash:before,.fa-store-alt-slash:before{content:"\e070"}.fa-server:before{content:"\f233"}.fa-virus-covid-slash:before{content:"\e4a9"}.fa-shop-lock:before{content:"\e4a5"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-blender-phone:before{content:"\f6b6"}.fa-building-wheat:before{content:"\e4db"}.fa-person-breastfeeding:before{content:"\e53a"}.fa-right-to-bracket:before,.fa-sign-in-alt:before{content:"\f2f6"}.fa-venus:before{content:"\f221"}.fa-passport:before{content:"\f5ab"}.fa-heart-pulse:before,.fa-heartbeat:before{content:"\f21e"}.fa-people-carry-box:before,.fa-people-carry:before{content:"\f4ce"}.fa-temperature-high:before{content:"\f769"}.fa-microchip:before{content:"\f2db"}.fa-crown:before{content:"\f521"}.fa-weight-hanging:before{content:"\f5cd"}.fa-xmarks-lines:before{content:"\e59a"}.fa-file-prescription:before{content:"\f572"}.fa-weight-scale:before,.fa-weight:before{content:"\f496"}.fa-user-friends:before,.fa-user-group:before{content:"\f500"}.fa-arrow-up-a-z:before,.fa-sort-alpha-up:before{content:"\f15e"}.fa-chess-knight:before{content:"\f441"}.fa-face-laugh-squint:before,.fa-laugh-squint:before{content:"\f59b"}.fa-wheelchair:before{content:"\f193"}.fa-arrow-circle-up:before,.fa-circle-arrow-up:before{content:"\f0aa"}.fa-toggle-on:before{content:"\f205"}.fa-person-walking:before,.fa-walking:before{content:"\f554"}.fa-l:before{content:"\4c"}.fa-fire:before{content:"\f06d"}.fa-bed-pulse:before,.fa-procedures:before{content:"\f487"}.fa-shuttle-space:before,.fa-space-shuttle:before{content:"\f197"}.fa-face-laugh:before,.fa-laugh:before{content:"\f599"}.fa-folder-open:before{content:"\f07c"}.fa-heart-circle-plus:before{content:"\e500"}.fa-code-fork:before{content:"\e13b"}.fa-city:before{content:"\f64f"}.fa-microphone-alt:before,.fa-microphone-lines:before{content:"\f3c9"}.fa-pepper-hot:before{content:"\f816"}.fa-unlock:before{content:"\f09c"}.fa-colon-sign:before{content:"\e140"}.fa-headset:before{content:"\f590"}.fa-store-slash:before{content:"\
e071"}.fa-road-circle-xmark:before{content:"\e566"}.fa-user-minus:before{content:"\f503"}.fa-mars-stroke-up:before,.fa-mars-stroke-v:before{content:"\f22a"}.fa-champagne-glasses:before,.fa-glass-cheers:before{content:"\f79f"}.fa-clipboard:before{content:"\f328"}.fa-house-circle-exclamation:before{content:"\e50a"}.fa-file-arrow-up:before,.fa-file-upload:before{content:"\f574"}.fa-wifi-3:before,.fa-wifi-strong:before,.fa-wifi:before{content:"\f1eb"}.fa-bath:before,.fa-bathtub:before{content:"\f2cd"}.fa-underline:before{content:"\f0cd"}.fa-user-edit:before,.fa-user-pen:before{content:"\f4ff"}.fa-signature:before{content:"\f5b7"}.fa-stroopwafel:before{content:"\f551"}.fa-bold:before{content:"\f032"}.fa-anchor-lock:before{content:"\e4ad"}.fa-building-ngo:before{content:"\e4d7"}.fa-manat-sign:before{content:"\e1d5"}.fa-not-equal:before{content:"\f53e"}.fa-border-style:before,.fa-border-top-left:before{content:"\f853"}.fa-map-location-dot:before,.fa-map-marked-alt:before{content:"\f5a0"}.fa-jedi:before{content:"\f669"}.fa-poll:before,.fa-square-poll-vertical:before{content:"\f681"}.fa-mug-hot:before{content:"\f7b6"}.fa-battery-car:before,.fa-car-battery:before{content:"\f5df"}.fa-gift:before{content:"\f06b"}.fa-dice-two:before{content:"\f528"}.fa-chess-queen:before{content:"\f445"}.fa-glasses:before{content:"\f530"}.fa-chess-board:before{content:"\f43c"}.fa-building-circle-check:before{content:"\e4d2"}.fa-person-chalkboard:before{content:"\e53d"}.fa-mars-stroke-h:before,.fa-mars-stroke-right:before{content:"\f22b"}.fa-hand-back-fist:before,.fa-hand-rock:before{content:"\f255"}.fa-caret-square-up:before,.fa-square-caret-up:before{content:"\f151"}.fa-cloud-showers-water:before{content:"\e4e4"}.fa-bar-chart:before,.fa-chart-bar:before{content:"\f080"}.fa-hands-bubbles:before,.fa-hands-wash:before{content:"\e05e"}.fa-less-than-equal:before{content:"\f537"}.fa-train:before{content:"\f238"}.fa-eye-low-vision:before,.fa-low-vision:before{content:"\f2a8"}.fa-crow:before{content:"\f520"}.fa-sailboat:before{content:"\e445"}.fa-window-restore:before{content:"\f2d2"}.fa-plus-square:before,.fa-square-plus:before{content:"\f0fe"}.fa-torii-gate:before{content:"\f6a1"}.fa-frog:before{content:"\f52e"}.fa-bucket:before{content:"\e4cf"}.fa-image:before{content:"\f03e"}.fa-microphone:before{content:"\f130"}.fa-cow:before{content:"\f6c8"}.fa-caret-up:before{content:"\f0d8"}.fa-screwdriver:before{content:"\f54a"}.fa-folder-closed:before{content:"\e185"}.fa-house-tsunami:before{content:"\e515"}.fa-square-nfi:before{content:"\e576"}.fa-arrow-up-from-ground-water:before{content:"\e4b5"}.fa-glass-martini-alt:before,.fa-martini-glass:before{content:"\f57b"}.fa-rotate-back:before,.fa-rotate-backward:before,.fa-rotate-left:before,.fa-undo-alt:before{content:"\f2ea"}.fa-columns:before,.fa-table-columns:before{content:"\f0db"}.fa-lemon:before{content:"\f094"}.fa-head-side-mask:before{content:"\e063"}.fa-handshake:before{content:"\f2b5"}.fa-gem:before{content:"\f3a5"}.fa-dolly-box:before,.fa-dolly:before{content:"\f472"}.fa-smoking:before{content:"\f48d"}.fa-compress-arrows-alt:before,.fa-minimize:before{content:"\f78c"}.fa-monument:before{content:"\f5a6"}.fa-snowplow:before{content:"\f7d2"}.fa-angle-double-right:before,.fa-angles-right:before{content:"\f101"}.fa-cannabis:before{content:"\f55f"}.fa-circle-play:before,.fa-play-circle:before{content:"\f144"}.fa-tablets:before{content:"\f490"}.fa-ethernet:before{content:"\f796"}.fa-eur:before,.fa-euro-sign:before,.fa-euro:before{content:"\f153"}.fa-chair:before{content:"\f6c0"}.fa-
check-circle:before,.fa-circle-check:before{content:"\f058"}.fa-circle-stop:before,.fa-stop-circle:before{content:"\f28d"}.fa-compass-drafting:before,.fa-drafting-compass:before{content:"\f568"}.fa-plate-wheat:before{content:"\e55a"}.fa-icicles:before{content:"\f7ad"}.fa-person-shelter:before{content:"\e54f"}.fa-neuter:before{content:"\f22c"}.fa-id-badge:before{content:"\f2c1"}.fa-marker:before{content:"\f5a1"}.fa-face-laugh-beam:before,.fa-laugh-beam:before{content:"\f59a"}.fa-helicopter-symbol:before{content:"\e502"}.fa-universal-access:before{content:"\f29a"}.fa-chevron-circle-up:before,.fa-circle-chevron-up:before{content:"\f139"}.fa-lari-sign:before{content:"\e1c8"}.fa-volcano:before{content:"\f770"}.fa-person-walking-dashed-line-arrow-right:before{content:"\e553"}.fa-gbp:before,.fa-pound-sign:before,.fa-sterling-sign:before{content:"\f154"}.fa-viruses:before{content:"\e076"}.fa-square-person-confined:before{content:"\e577"}.fa-user-tie:before{content:"\f508"}.fa-arrow-down-long:before,.fa-long-arrow-down:before{content:"\f175"}.fa-tent-arrow-down-to-line:before{content:"\e57e"}.fa-certificate:before{content:"\f0a3"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-suitcase:before{content:"\f0f2"}.fa-person-skating:before,.fa-skating:before{content:"\f7c5"}.fa-filter-circle-dollar:before,.fa-funnel-dollar:before{content:"\f662"}.fa-camera-retro:before{content:"\f083"}.fa-arrow-circle-down:before,.fa-circle-arrow-down:before{content:"\f0ab"}.fa-arrow-right-to-file:before,.fa-file-import:before{content:"\f56f"}.fa-external-link-square:before,.fa-square-arrow-up-right:before{content:"\f14c"}.fa-box-open:before{content:"\f49e"}.fa-scroll:before{content:"\f70e"}.fa-spa:before{content:"\f5bb"}.fa-location-pin-lock:before{content:"\e51f"}.fa-pause:before{content:"\f04c"}.fa-hill-avalanche:before{content:"\e507"}.fa-temperature-0:before,.fa-temperature-empty:before,.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-bomb:before{content:"\f1e2"}.fa-registered:before{content:"\f25d"}.fa-address-card:before,.fa-contact-card:before,.fa-vcard:before{content:"\f2bb"}.fa-balance-scale-right:before,.fa-scale-unbalanced-flip:before{content:"\f516"}.fa-subscript:before{content:"\f12c"}.fa-diamond-turn-right:before,.fa-directions:before{content:"\f5eb"}.fa-burst:before{content:"\e4dc"}.fa-house-laptop:before,.fa-laptop-house:before{content:"\e066"}.fa-face-tired:before,.fa-tired:before{content:"\f5c8"}.fa-money-bills:before{content:"\e1f3"}.fa-smog:before{content:"\f75f"}.fa-crutch:before{content:"\f7f7"}.fa-cloud-arrow-up:before,.fa-cloud-upload-alt:before,.fa-cloud-upload:before{content:"\f0ee"}.fa-palette:before{content:"\f53f"}.fa-arrows-turn-right:before{content:"\e4c0"}.fa-vest:before{content:"\e085"}.fa-ferry:before{content:"\e4ea"}.fa-arrows-down-to-people:before{content:"\e4b9"}.fa-seedling:before,.fa-sprout:before{content:"\f4d8"}.fa-arrows-alt-h:before,.fa-left-right:before{content:"\f337"}.fa-boxes-packing:before{content:"\e4c7"}.fa-arrow-circle-left:before,.fa-circle-arrow-left:before{content:"\f0a8"}.fa-group-arrows-rotate:before{content:"\e4f6"}.fa-bowl-food:before{content:"\e4c6"}.fa-candy-cane:before{content:"\f786"}.fa-arrow-down-wide-short:before,.fa-sort-amount-asc:before,.fa-sort-amount-down:before{content:"\f160"}.fa-cloud-bolt:before,.fa-thunderstorm:before{content:"\f76c"}.fa-remove-format:before,.fa-text-slash:before{content:"\f87d"}.fa-face-smile-wink:before,.fa-smile-wink:before{content:"\f4da"}.fa-file-word:before{content:"\f1c2"}.fa-file
-powerpoint:before{content:"\f1c4"}.fa-arrows-h:before,.fa-arrows-left-right:before{content:"\f07e"}.fa-house-lock:before{content:"\e510"}.fa-cloud-arrow-down:before,.fa-cloud-download-alt:before,.fa-cloud-download:before{content:"\f0ed"}.fa-children:before{content:"\e4e1"}.fa-blackboard:before,.fa-chalkboard:before{content:"\f51b"}.fa-user-alt-slash:before,.fa-user-large-slash:before{content:"\f4fa"}.fa-envelope-open:before{content:"\f2b6"}.fa-handshake-alt-slash:before,.fa-handshake-simple-slash:before{content:"\e05f"}.fa-mattress-pillow:before{content:"\e525"}.fa-guarani-sign:before{content:"\e19a"}.fa-arrows-rotate:before,.fa-refresh:before,.fa-sync:before{content:"\f021"}.fa-fire-extinguisher:before{content:"\f134"}.fa-cruzeiro-sign:before{content:"\e152"}.fa-greater-than-equal:before{content:"\f532"}.fa-shield-alt:before,.fa-shield-halved:before{content:"\f3ed"}.fa-atlas:before,.fa-book-atlas:before{content:"\f558"}.fa-virus:before{content:"\e074"}.fa-envelope-circle-check:before{content:"\e4e8"}.fa-layer-group:before{content:"\f5fd"}.fa-arrows-to-dot:before{content:"\e4be"}.fa-archway:before{content:"\f557"}.fa-heart-circle-check:before{content:"\e4fd"}.fa-house-chimney-crack:before,.fa-house-damage:before{content:"\f6f1"}.fa-file-archive:before,.fa-file-zipper:before{content:"\f1c6"}.fa-square:before{content:"\f0c8"}.fa-glass-martini:before,.fa-martini-glass-empty:before{content:"\f000"}.fa-couch:before{content:"\f4b8"}.fa-cedi-sign:before{content:"\e0df"}.fa-italic:before{content:"\f033"}.fa-church:before{content:"\f51d"}.fa-comments-dollar:before{content:"\f653"}.fa-democrat:before{content:"\f747"}.fa-z:before{content:"\5a"}.fa-person-skiing:before,.fa-skiing:before{content:"\f7c9"}.fa-road-lock:before{content:"\e567"}.fa-a:before{content:"\41"}.fa-temperature-arrow-down:before,.fa-temperature-down:before{content:"\e03f"}.fa-feather-alt:before,.fa-feather-pointed:before{content:"\f56b"}.fa-p:before{content:"\50"}.fa-snowflake:before{content:"\f2dc"}.fa-newspaper:before{content:"\f1ea"}.fa-ad:before,.fa-rectangle-ad:before{content:"\f641"}.fa-arrow-circle-right:before,.fa-circle-arrow-right:before{content:"\f0a9"}.fa-filter-circle-xmark:before{content:"\e17b"}.fa-locust:before{content:"\e520"}.fa-sort:before,.fa-unsorted:before{content:"\f0dc"}.fa-list-1-2:before,.fa-list-numeric:before,.fa-list-ol:before{content:"\f0cb"}.fa-person-dress-burst:before{content:"\e544"}.fa-money-check-alt:before,.fa-money-check-dollar:before{content:"\f53d"}.fa-vector-square:before{content:"\f5cb"}.fa-bread-slice:before{content:"\f7ec"}.fa-language:before{content:"\f1ab"}.fa-face-kiss-wink-heart:before,.fa-kiss-wink-heart:before{content:"\f598"}.fa-filter:before{content:"\f0b0"}.fa-question:before{content:"\3f"}.fa-file-signature:before{content:"\f573"}.fa-arrows-alt:before,.fa-up-down-left-right:before{content:"\f0b2"}.fa-house-chimney-user:before{content:"\e065"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-puzzle-piece:before{content:"\f12e"}.fa-money-check:before{content:"\f53c"}.fa-star-half-alt:before,.fa-star-half-stroke:before{content:"\f5c0"}.fa-code:before{content:"\f121"}.fa-glass-whiskey:before,.fa-whiskey-glass:before{content:"\f7a0"}.fa-building-circle-exclamation:before{content:"\e4d3"}.fa-magnifying-glass-chart:before{content:"\e522"}.fa-arrow-up-right-from-square:before,.fa-external-link:before{content:"\f08e"}.fa-cubes-stacked:before{content:"\e4e6"}.fa-krw:before,.fa-won-sign:before,.fa-won:before{content:"\f159"}.fa-virus-covid:before{content:"\e4a8"}.fa-austral-sign:before{con
tent:"\e0a9"}.fa-f:before{content:"\46"}.fa-leaf:before{content:"\f06c"}.fa-road:before{content:"\f018"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-person-circle-plus:before{content:"\e541"}.fa-chart-pie:before,.fa-pie-chart:before{content:"\f200"}.fa-bolt-lightning:before{content:"\e0b7"}.fa-sack-xmark:before{content:"\e56a"}.fa-file-excel:before{content:"\f1c3"}.fa-file-contract:before{content:"\f56c"}.fa-fish-fins:before{content:"\e4f2"}.fa-building-flag:before{content:"\e4d5"}.fa-face-grin-beam:before,.fa-grin-beam:before{content:"\f582"}.fa-object-ungroup:before{content:"\f248"}.fa-poop:before{content:"\f619"}.fa-location-pin:before,.fa-map-marker:before{content:"\f041"}.fa-kaaba:before{content:"\f66b"}.fa-toilet-paper:before{content:"\f71e"}.fa-hard-hat:before,.fa-hat-hard:before,.fa-helmet-safety:before{content:"\f807"}.fa-eject:before{content:"\f052"}.fa-arrow-alt-circle-right:before,.fa-circle-right:before{content:"\f35a"}.fa-plane-circle-check:before{content:"\e555"}.fa-face-rolling-eyes:before,.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-object-group:before{content:"\f247"}.fa-chart-line:before,.fa-line-chart:before{content:"\f201"}.fa-mask-ventilator:before{content:"\e524"}.fa-arrow-right:before{content:"\f061"}.fa-map-signs:before,.fa-signs-post:before{content:"\f277"}.fa-cash-register:before{content:"\f788"}.fa-person-circle-question:before{content:"\e542"}.fa-h:before{content:"\48"}.fa-tarp:before{content:"\e57b"}.fa-screwdriver-wrench:before,.fa-tools:before{content:"\f7d9"}.fa-arrows-to-eye:before{content:"\e4bf"}.fa-plug-circle-bolt:before{content:"\e55b"}.fa-heart:before{content:"\f004"}.fa-mars-and-venus:before{content:"\f224"}.fa-home-user:before,.fa-house-user:before{content:"\e1b0"}.fa-dumpster-fire:before{content:"\f794"}.fa-house-crack:before{content:"\e3b1"}.fa-cocktail:before,.fa-martini-glass-citrus:before{content:"\f561"}.fa-face-surprise:before,.fa-surprise:before{content:"\f5c2"}.fa-bottle-water:before{content:"\e4c5"}.fa-circle-pause:before,.fa-pause-circle:before{content:"\f28b"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-apple-alt:before,.fa-apple-whole:before{content:"\f5d1"}.fa-kitchen-set:before{content:"\e51a"}.fa-r:before{content:"\52"}.fa-temperature-1:before,.fa-temperature-quarter:before,.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-cube:before{content:"\f1b2"}.fa-bitcoin-sign:before{content:"\e0b4"}.fa-shield-dog:before{content:"\e573"}.fa-solar-panel:before{content:"\f5ba"}.fa-lock-open:before{content:"\f3c1"}.fa-elevator:before{content:"\e16d"}.fa-money-bill-transfer:before{content:"\e528"}.fa-money-bill-trend-up:before{content:"\e529"}.fa-house-flood-water-circle-arrow-right:before{content:"\e50f"}.fa-poll-h:before,.fa-square-poll-horizontal:before{content:"\f682"}.fa-circle:before{content:"\f111"}.fa-backward-fast:before,.fa-fast-backward:before{content:"\f049"}.fa-recycle:before{content:"\f1b8"}.fa-user-astronaut:before{content:"\f4fb"}.fa-plane-slash:before{content:"\e069"}.fa-trademark:before{content:"\f25c"}.fa-basketball-ball:before,.fa-basketball:before{content:"\f434"}.fa-satellite-dish:before{content:"\f7c0"}.fa-arrow-alt-circle-up:before,.fa-circle-up:before{content:"\f35b"}.fa-mobile-alt:before,.fa-mobile-screen-button:before{content:"\f3cd"}.fa-volume-high:before,.fa-volume-up:before{content:"\f028"}.fa-users-rays:before{content:"\e593"}.fa-wallet:before{content:"\f555"}.fa-clipboard-check:before{content:"\f46c"}.fa-file-audio:before{content:"\f1c7"}.fa-burger:before,.fa-hamburger:before{
content:"\f805"}.fa-wrench:before{content:"\f0ad"}.fa-bugs:before{content:"\e4d0"}.fa-rupee-sign:before,.fa-rupee:before{content:"\f156"}.fa-file-image:before{content:"\f1c5"}.fa-circle-question:before,.fa-question-circle:before{content:"\f059"}.fa-plane-departure:before{content:"\f5b0"}.fa-handshake-slash:before{content:"\e060"}.fa-book-bookmark:before{content:"\e0bb"}.fa-code-branch:before{content:"\f126"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-bridge:before{content:"\e4c8"}.fa-phone-alt:before,.fa-phone-flip:before{content:"\f879"}.fa-truck-front:before{content:"\e2b7"}.fa-cat:before{content:"\f6be"}.fa-anchor-circle-exclamation:before{content:"\e4ab"}.fa-truck-field:before{content:"\e58d"}.fa-route:before{content:"\f4d7"}.fa-clipboard-question:before{content:"\e4e3"}.fa-panorama:before{content:"\e209"}.fa-comment-medical:before{content:"\f7f5"}.fa-teeth-open:before{content:"\f62f"}.fa-file-circle-minus:before{content:"\e4ed"}.fa-tags:before{content:"\f02c"}.fa-wine-glass:before{content:"\f4e3"}.fa-fast-forward:before,.fa-forward-fast:before{content:"\f050"}.fa-face-meh-blank:before,.fa-meh-blank:before{content:"\f5a4"}.fa-parking:before,.fa-square-parking:before{content:"\f540"}.fa-house-signal:before{content:"\e012"}.fa-bars-progress:before,.fa-tasks-alt:before{content:"\f828"}.fa-faucet-drip:before{content:"\e006"}.fa-cart-flatbed:before,.fa-dolly-flatbed:before{content:"\f474"}.fa-ban-smoking:before,.fa-smoking-ban:before{content:"\f54d"}.fa-terminal:before{content:"\f120"}.fa-mobile-button:before{content:"\f10b"}.fa-house-medical-flag:before{content:"\e514"}.fa-basket-shopping:before,.fa-shopping-basket:before{content:"\f291"}.fa-tape:before{content:"\f4db"}.fa-bus-alt:before,.fa-bus-simple:before{content:"\f55e"}.fa-eye:before{content:"\f06e"}.fa-face-sad-cry:before,.fa-sad-cry:before{content:"\f5b3"}.fa-audio-description:before{content:"\f29e"}.fa-person-military-to-person:before{content:"\e54c"}.fa-file-shield:before{content:"\e4f0"}.fa-user-slash:before{content:"\f506"}.fa-pen:before{content:"\f304"}.fa-tower-observation:before{content:"\e586"}.fa-file-code:before{content:"\f1c9"}.fa-signal-5:before,.fa-signal-perfect:before,.fa-signal:before{content:"\f012"}.fa-bus:before{content:"\f207"}.fa-heart-circle-xmark:before{content:"\e501"}.fa-home-lg:before,.fa-house-chimney:before{content:"\e3af"}.fa-window-maximize:before{content:"\f2d0"}.fa-face-frown:before,.fa-frown:before{content:"\f119"}.fa-prescription:before{content:"\f5b1"}.fa-shop:before,.fa-store-alt:before{content:"\f54f"}.fa-floppy-disk:before,.fa-save:before{content:"\f0c7"}.fa-vihara:before{content:"\f6a7"}.fa-balance-scale-left:before,.fa-scale-unbalanced:before{content:"\f515"}.fa-sort-asc:before,.fa-sort-up:before{content:"\f0de"}.fa-comment-dots:before,.fa-commenting:before{content:"\f4ad"}.fa-plant-wilt:before{content:"\e5aa"}.fa-diamond:before{content:"\f219"}.fa-face-grin-squint:before,.fa-grin-squint:before{content:"\f585"}.fa-hand-holding-dollar:before,.fa-hand-holding-usd:before{content:"\f4c0"}.fa-bacterium:before{content:"\e05a"}.fa-hand-pointer:before{content:"\f25a"}.fa-drum-steelpan:before{content:"\f56a"}.fa-hand-scissors:before{content:"\f257"}.fa-hands-praying:before,.fa-praying-hands:before{content:"\f684"}.fa-arrow-right-rotate:before,.fa-arrow-rotate-forward:before,.fa-arrow-rotate-right:before,.fa-redo:before{content:"\f01e"}.fa-biohazard:before{content:"\f780"}.fa-location-crosshairs:before,.fa-location:before{content:"\f601"}.fa-mars-double:before{content:"\f227"}.fa-child-dress:before{conten
t:"\e59c"}.fa-users-between-lines:before{content:"\e591"}.fa-lungs-virus:before{content:"\e067"}.fa-face-grin-tears:before,.fa-grin-tears:before{content:"\f588"}.fa-phone:before{content:"\f095"}.fa-calendar-times:before,.fa-calendar-xmark:before{content:"\f273"}.fa-child-reaching:before{content:"\e59d"}.fa-head-side-virus:before{content:"\e064"}.fa-user-cog:before,.fa-user-gear:before{content:"\f4fe"}.fa-arrow-up-1-9:before,.fa-sort-numeric-up:before{content:"\f163"}.fa-door-closed:before{content:"\f52a"}.fa-shield-virus:before{content:"\e06c"}.fa-dice-six:before{content:"\f526"}.fa-mosquito-net:before{content:"\e52c"}.fa-bridge-water:before{content:"\e4ce"}.fa-person-booth:before{content:"\f756"}.fa-text-width:before{content:"\f035"}.fa-hat-wizard:before{content:"\f6e8"}.fa-pen-fancy:before{content:"\f5ac"}.fa-digging:before,.fa-person-digging:before{content:"\f85e"}.fa-trash:before{content:"\f1f8"}.fa-gauge-simple-med:before,.fa-gauge-simple:before,.fa-tachometer-average:before{content:"\f629"}.fa-book-medical:before{content:"\f7e6"}.fa-poo:before{content:"\f2fe"}.fa-quote-right-alt:before,.fa-quote-right:before{content:"\f10e"}.fa-shirt:before,.fa-t-shirt:before,.fa-tshirt:before{content:"\f553"}.fa-cubes:before{content:"\f1b3"}.fa-divide:before{content:"\f529"}.fa-tenge-sign:before,.fa-tenge:before{content:"\f7d7"}.fa-headphones:before{content:"\f025"}.fa-hands-holding:before{content:"\f4c2"}.fa-hands-clapping:before{content:"\e1a8"}.fa-republican:before{content:"\f75e"}.fa-arrow-left:before{content:"\f060"}.fa-person-circle-xmark:before{content:"\e543"}.fa-ruler:before{content:"\f545"}.fa-align-left:before{content:"\f036"}.fa-dice-d6:before{content:"\f6d1"}.fa-restroom:before{content:"\f7bd"}.fa-j:before{content:"\4a"}.fa-users-viewfinder:before{content:"\e595"}.fa-file-video:before{content:"\f1c8"}.fa-external-link-alt:before,.fa-up-right-from-square:before{content:"\f35d"}.fa-table-cells:before,.fa-th:before{content:"\f00a"}.fa-file-pdf:before{content:"\f1c1"}.fa-bible:before,.fa-book-bible:before{content:"\f647"}.fa-o:before{content:"\4f"}.fa-medkit:before,.fa-suitcase-medical:before{content:"\f0fa"}.fa-user-secret:before{content:"\f21b"}.fa-otter:before{content:"\f700"}.fa-female:before,.fa-person-dress:before{content:"\f182"}.fa-comment-dollar:before{content:"\f651"}.fa-briefcase-clock:before,.fa-business-time:before{content:"\f64a"}.fa-table-cells-large:before,.fa-th-large:before{content:"\f009"}.fa-book-tanakh:before,.fa-tanakh:before{content:"\f827"}.fa-phone-volume:before,.fa-volume-control-phone:before{content:"\f2a0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-clipboard-user:before{content:"\f7f3"}.fa-child:before{content:"\f1ae"}.fa-lira-sign:before{content:"\f195"}.fa-satellite:before{content:"\f7bf"}.fa-plane-lock:before{content:"\e558"}.fa-tag:before{content:"\f02b"}.fa-comment:before{content:"\f075"}.fa-birthday-cake:before,.fa-cake-candles:before,.fa-cake:before{content:"\f1fd"}.fa-envelope:before{content:"\f0e0"}.fa-angle-double-up:before,.fa-angles-up:before{content:"\f102"}.fa-paperclip:before{content:"\f0c6"}.fa-arrow-right-to-city:before{content:"\e4b3"}.fa-ribbon:before{content:"\f4d6"}.fa-lungs:before{content:"\f604"}.fa-arrow-up-9-1:before,.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-litecoin-sign:before{content:"\e1d3"}.fa-border-none:before{content:"\f850"}.fa-circle-nodes:before{content:"\e4e2"}.fa-parachute-box:before{content:"\f4cd"}.fa-indent:before{content:"\f03c"}.fa-truck-field-un:before{content:"\e58e"}.fa-hourglass-empty:before,.fa-hourglass
:before{content:"\f254"}.fa-mountain:before{content:"\f6fc"}.fa-user-doctor:before,.fa-user-md:before{content:"\f0f0"}.fa-circle-info:before,.fa-info-circle:before{content:"\f05a"}.fa-cloud-meatball:before{content:"\f73b"}.fa-camera-alt:before,.fa-camera:before{content:"\f030"}.fa-square-virus:before{content:"\e578"}.fa-meteor:before{content:"\f753"}.fa-car-on:before{content:"\e4dd"}.fa-sleigh:before{content:"\f7cc"}.fa-arrow-down-1-9:before,.fa-sort-numeric-asc:before,.fa-sort-numeric-down:before{content:"\f162"}.fa-hand-holding-droplet:before,.fa-hand-holding-water:before{content:"\f4c1"}.fa-water:before{content:"\f773"}.fa-calendar-check:before{content:"\f274"}.fa-braille:before{content:"\f2a1"}.fa-prescription-bottle-alt:before,.fa-prescription-bottle-medical:before{content:"\f486"}.fa-landmark:before{content:"\f66f"}.fa-truck:before{content:"\f0d1"}.fa-crosshairs:before{content:"\f05b"}.fa-person-cane:before{content:"\e53c"}.fa-tent:before{content:"\e57d"}.fa-vest-patches:before{content:"\e086"}.fa-check-double:before{content:"\f560"}.fa-arrow-down-a-z:before,.fa-sort-alpha-asc:before,.fa-sort-alpha-down:before{content:"\f15d"}.fa-money-bill-wheat:before{content:"\e52a"}.fa-cookie:before{content:"\f563"}.fa-arrow-left-rotate:before,.fa-arrow-rotate-back:before,.fa-arrow-rotate-backward:before,.fa-arrow-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-hard-drive:before,.fa-hdd:before{content:"\f0a0"}.fa-face-grin-squint-tears:before,.fa-grin-squint-tears:before{content:"\f586"}.fa-dumbbell:before{content:"\f44b"}.fa-list-alt:before,.fa-rectangle-list:before{content:"\f022"}.fa-tarp-droplet:before{content:"\e57c"}.fa-house-medical-circle-check:before{content:"\e511"}.fa-person-skiing-nordic:before,.fa-skiing-nordic:before{content:"\f7ca"}.fa-calendar-plus:before{content:"\f271"}.fa-plane-arrival:before{content:"\f5af"}.fa-arrow-alt-circle-left:before,.fa-circle-left:before{content:"\f359"}.fa-subway:before,.fa-train-subway:before{content:"\f239"}.fa-chart-gantt:before{content:"\e0e4"}.fa-indian-rupee-sign:before,.fa-indian-rupee:before,.fa-inr:before{content:"\e1bc"}.fa-crop-alt:before,.fa-crop-simple:before{content:"\f565"}.fa-money-bill-1:before,.fa-money-bill-alt:before{content:"\f3d1"}.fa-left-long:before,.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-dna:before{content:"\f471"}.fa-virus-slash:before{content:"\e075"}.fa-minus:before,.fa-subtract:before{content:"\f068"}.fa-chess:before{content:"\f439"}.fa-arrow-left-long:before,.fa-long-arrow-left:before{content:"\f177"}.fa-plug-circle-check:before{content:"\e55c"}.fa-street-view:before{content:"\f21d"}.fa-franc-sign:before{content:"\e18f"}.fa-volume-off:before{content:"\f026"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before,.fa-hands-american-sign-language-interpreting:before,.fa-hands-asl-interpreting:before{content:"\f2a3"}.fa-cog:before,.fa-gear:before{content:"\f013"}.fa-droplet-slash:before,.fa-tint-slash:before{content:"\f5c7"}.fa-mosque:before{content:"\f678"}.fa-mosquito:before{content:"\e52b"}.fa-star-of-david:before{content:"\f69a"}.fa-person-military-rifle:before{content:"\e54b"}.fa-cart-shopping:before,.fa-shopping-cart:before{content:"\f07a"}.fa-vials:before{content:"\f493"}.fa-plug-circle-plus:before{content:"\e55f"}.fa-place-of-worship:before{content:"\f67f"}.fa-grip-vertical:before{content:"\f58e"}.fa-arrow-turn-up:before,.fa-level-up:before{content:"\f148"}.fa-u:before{content:"\55"}.fa-square-root-alt:before,.fa-square-root-variable:before{content:"\f698"}.fa-clock-four:before,.fa
-clock:before{content:"\f017"}.fa-backward-step:before,.fa-step-backward:before{content:"\f048"}.fa-pallet:before{content:"\f482"}.fa-faucet:before{content:"\e005"}.fa-baseball-bat-ball:before{content:"\f432"}.fa-s:before{content:"\53"}.fa-timeline:before{content:"\e29c"}.fa-keyboard:before{content:"\f11c"}.fa-caret-down:before{content:"\f0d7"}.fa-clinic-medical:before,.fa-house-chimney-medical:before{content:"\f7f2"}.fa-temperature-3:before,.fa-temperature-three-quarters:before,.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-mobile-android-alt:before,.fa-mobile-screen:before{content:"\f3cf"}.fa-plane-up:before{content:"\e22d"}.fa-piggy-bank:before{content:"\f4d3"}.fa-battery-3:before,.fa-battery-half:before{content:"\f242"}.fa-mountain-city:before{content:"\e52e"}.fa-coins:before{content:"\f51e"}.fa-khanda:before{content:"\f66d"}.fa-sliders-h:before,.fa-sliders:before{content:"\f1de"}.fa-folder-tree:before{content:"\f802"}.fa-network-wired:before{content:"\f6ff"}.fa-map-pin:before{content:"\f276"}.fa-hamsa:before{content:"\f665"}.fa-cent-sign:before{content:"\e3f5"}.fa-flask:before{content:"\f0c3"}.fa-person-pregnant:before{content:"\e31e"}.fa-wand-sparkles:before{content:"\f72b"}.fa-ellipsis-v:before,.fa-ellipsis-vertical:before{content:"\f142"}.fa-ticket:before{content:"\f145"}.fa-power-off:before{content:"\f011"}.fa-long-arrow-alt-right:before,.fa-right-long:before{content:"\f30b"}.fa-flag-usa:before{content:"\f74d"}.fa-laptop-file:before{content:"\e51d"}.fa-teletype:before,.fa-tty:before{content:"\f1e4"}.fa-diagram-next:before{content:"\e476"}.fa-person-rifle:before{content:"\e54e"}.fa-house-medical-circle-exclamation:before{content:"\e512"}.fa-closed-captioning:before{content:"\f20a"}.fa-hiking:before,.fa-person-hiking:before{content:"\f6ec"}.fa-venus-double:before{content:"\f226"}.fa-images:before{content:"\f302"}.fa-calculator:before{content:"\f1ec"}.fa-people-pulling:before{content:"\e535"}.fa-n:before{content:"\4e"}.fa-cable-car:before,.fa-tram:before{content:"\f7da"}.fa-cloud-rain:before{content:"\f73d"}.fa-building-circle-xmark:before{content:"\e4d4"}.fa-ship:before{content:"\f21a"}.fa-arrows-down-to-line:before{content:"\e4b8"}.fa-download:before{content:"\f019"}.fa-face-grin:before,.fa-grin:before{content:"\f580"}.fa-backspace:before,.fa-delete-left:before{content:"\f55a"}.fa-eye-dropper-empty:before,.fa-eye-dropper:before,.fa-eyedropper:before{content:"\f1fb"}.fa-file-circle-check:before{content:"\e5a0"}.fa-forward:before{content:"\f04e"}.fa-mobile-android:before,.fa-mobile-phone:before,.fa-mobile:before{content:"\f3ce"}.fa-face-meh:before,.fa-meh:before{content:"\f11a"}.fa-align-center:before{content:"\f037"}.fa-book-dead:before,.fa-book-skull:before{content:"\f6b7"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-heart-circle-exclamation:before{content:"\e4fe"}.fa-home-alt:before,.fa-home-lg-alt:before,.fa-home:before,.fa-house:before{content:"\f015"}.fa-calendar-week:before{content:"\f784"}.fa-laptop-medical:before{content:"\f812"}.fa-b:before{content:"\42"}.fa-file-medical:before{content:"\f477"}.fa-dice-one:before{content:"\f525"}.fa-kiwi-bird:before{content:"\f535"}.fa-arrow-right-arrow-left:before,.fa-exchange:before{content:"\f0ec"}.fa-redo-alt:before,.fa-rotate-forward:before,.fa-rotate-right:before{content:"\f2f9"}.fa-cutlery:before,.fa-utensils:before{content:"\f2e7"}.fa-arrow-up-wide-short:before,.fa-sort-amount-up:before{content:"\f161"}.fa-mill-sign:before{co
ntent:"\e1ed"}.fa-bowl-rice:before{content:"\e2eb"}.fa-skull:before{content:"\f54c"}.fa-broadcast-tower:before,.fa-tower-broadcast:before{content:"\f519"}.fa-truck-pickup:before{content:"\f63c"}.fa-long-arrow-alt-up:before,.fa-up-long:before{content:"\f30c"}.fa-stop:before{content:"\f04d"}.fa-code-merge:before{content:"\f387"}.fa-upload:before{content:"\f093"}.fa-hurricane:before{content:"\f751"}.fa-mound:before{content:"\e52d"}.fa-toilet-portable:before{content:"\e583"}.fa-compact-disc:before{content:"\f51f"}.fa-file-arrow-down:before,.fa-file-download:before{content:"\f56d"}.fa-caravan:before{content:"\f8ff"}.fa-shield-cat:before{content:"\e572"}.fa-bolt:before,.fa-zap:before{content:"\f0e7"}.fa-glass-water:before{content:"\e4f4"}.fa-oil-well:before{content:"\e532"}.fa-vault:before{content:"\e2c5"}.fa-mars:before{content:"\f222"}.fa-toilet:before{content:"\f7d8"}.fa-plane-circle-xmark:before{content:"\e557"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen-sign:before,.fa-yen:before{content:"\f157"}.fa-rouble:before,.fa-rub:before,.fa-ruble-sign:before,.fa-ruble:before{content:"\f158"}.fa-sun:before{content:"\f185"}.fa-guitar:before{content:"\f7a6"}.fa-face-laugh-wink:before,.fa-laugh-wink:before{content:"\f59c"}.fa-horse-head:before{content:"\f7ab"}.fa-bore-hole:before{content:"\e4c3"}.fa-industry:before{content:"\f275"}.fa-arrow-alt-circle-down:before,.fa-circle-down:before{content:"\f358"}.fa-arrows-turn-to-dots:before{content:"\e4c1"}.fa-florin-sign:before{content:"\e184"}.fa-arrow-down-short-wide:before,.fa-sort-amount-desc:before,.fa-sort-amount-down-alt:before{content:"\f884"}.fa-less-than:before{content:"\3c"}.fa-angle-down:before{content:"\f107"}.fa-car-tunnel:before{content:"\e4de"}.fa-head-side-cough:before{content:"\e061"}.fa-grip-lines:before{content:"\f7a4"}.fa-thumbs-down:before{content:"\f165"}.fa-user-lock:before{content:"\f502"}.fa-arrow-right-long:before,.fa-long-arrow-right:before{content:"\f178"}.fa-anchor-circle-xmark:before{content:"\e4ac"}.fa-ellipsis-h:before,.fa-ellipsis:before{content:"\f141"}.fa-chess-pawn:before{content:"\f443"}.fa-first-aid:before,.fa-kit-medical:before{content:"\f479"}.fa-person-through-window:before{content:"\e5a9"}.fa-toolbox:before{content:"\f552"}.fa-hands-holding-circle:before{content:"\e4fb"}.fa-bug:before{content:"\f188"}.fa-credit-card-alt:before,.fa-credit-card:before{content:"\f09d"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-hand-holding-hand:before{content:"\e4f7"}.fa-book-open-reader:before,.fa-book-reader:before{content:"\f5da"}.fa-mountain-sun:before{content:"\e52f"}.fa-arrows-left-right-to-line:before{content:"\e4ba"}.fa-dice-d20:before{content:"\f6cf"}.fa-truck-droplet:before{content:"\e58c"}.fa-file-circle-xmark:before{content:"\e5a1"}.fa-temperature-arrow-up:before,.fa-temperature-up:before{content:"\e040"}.fa-medal:before{content:"\f5a2"}.fa-bed:before{content:"\f236"}.fa-h-square:before,.fa-square-h:before{content:"\f0fd"}.fa-podcast:before{content:"\f2ce"}.fa-temperature-4:before,.fa-temperature-full:before,.fa-thermometer-4:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-bell:before{content:"\f0f3"}.fa-superscript:before{content:"\f12b"}.fa-plug-circle-xmark:before{content:"\e560"}.fa-star-of-life:before{content:"\f621"}.fa-phone-slash:before{content:"\f3dd"}.fa-paint-roller:before{content:"\f5aa"}.fa-hands-helping:before,.fa-handshake-angle:before{content:"\f4c4"}.fa-location-dot:before,.fa-map-marker-alt:before{content:"\f3c5"}.fa-file:before{content:"\f15b"}.fa-greater-than:before{content:"\3e
"}.fa-person-swimming:before,.fa-swimmer:before{content:"\f5c4"}.fa-arrow-down:before{content:"\f063"}.fa-droplet:before,.fa-tint:before{content:"\f043"}.fa-eraser:before{content:"\f12d"}.fa-earth-america:before,.fa-earth-americas:before,.fa-earth:before,.fa-globe-americas:before{content:"\f57d"}.fa-person-burst:before{content:"\e53b"}.fa-dove:before{content:"\f4ba"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-socks:before{content:"\f696"}.fa-inbox:before{content:"\f01c"}.fa-section:before{content:"\e447"}.fa-gauge-high:before,.fa-tachometer-alt-fast:before,.fa-tachometer-alt:before{content:"\f625"}.fa-envelope-open-text:before{content:"\f658"}.fa-hospital-alt:before,.fa-hospital-wide:before,.fa-hospital:before{content:"\f0f8"}.fa-wine-bottle:before{content:"\f72f"}.fa-chess-rook:before{content:"\f447"}.fa-bars-staggered:before,.fa-reorder:before,.fa-stream:before{content:"\f550"}.fa-dharmachakra:before{content:"\f655"}.fa-hotdog:before{content:"\f80f"}.fa-blind:before,.fa-person-walking-with-cane:before{content:"\f29d"}.fa-drum:before{content:"\f569"}.fa-ice-cream:before{content:"\f810"}.fa-heart-circle-bolt:before{content:"\e4fc"}.fa-fax:before{content:"\f1ac"}.fa-paragraph:before{content:"\f1dd"}.fa-check-to-slot:before,.fa-vote-yea:before{content:"\f772"}.fa-star-half:before{content:"\f089"}.fa-boxes-alt:before,.fa-boxes-stacked:before,.fa-boxes:before{content:"\f468"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-assistive-listening-systems:before,.fa-ear-listen:before{content:"\f2a2"}.fa-tree-city:before{content:"\e587"}.fa-play:before{content:"\f04b"}.fa-font:before{content:"\f031"}.fa-rupiah-sign:before{content:"\e23d"}.fa-magnifying-glass:before,.fa-search:before{content:"\f002"}.fa-ping-pong-paddle-ball:before,.fa-table-tennis-paddle-ball:before,.fa-table-tennis:before{content:"\f45d"}.fa-diagnoses:before,.fa-person-dots-from-line:before{content:"\f470"}.fa-trash-can-arrow-up:before,.fa-trash-restore-alt:before{content:"\f82a"}.fa-naira-sign:before{content:"\e1f6"}.fa-cart-arrow-down:before{content:"\f218"}.fa-walkie-talkie:before{content:"\f8ef"}.fa-file-edit:before,.fa-file-pen:before{content:"\f31c"}.fa-receipt:before{content:"\f543"}.fa-pen-square:before,.fa-pencil-square:before,.fa-square-pen:before{content:"\f14b"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-person-circle-exclamation:before{content:"\e53f"}.fa-chevron-down:before{content:"\f078"}.fa-battery-5:before,.fa-battery-full:before,.fa-battery:before{content:"\f240"}.fa-skull-crossbones:before{content:"\f714"}.fa-code-compare:before{content:"\e13a"}.fa-list-dots:before,.fa-list-ul:before{content:"\f0ca"}.fa-school-lock:before{content:"\e56f"}.fa-tower-cell:before{content:"\e585"}.fa-down-long:before,.fa-long-arrow-alt-down:before{content:"\f309"}.fa-ranking-star:before{content:"\e561"}.fa-chess-king:before{content:"\f43f"}.fa-person-harassing:before{content:"\e549"}.fa-brazilian-real-sign:before{content:"\e46c"}.fa-landmark-alt:before,.fa-landmark-dome:before{content:"\f752"}.fa-arrow-up:before{content:"\f062"}.fa-television:before,.fa-tv-alt:before,.fa-tv:before{content:"\f26c"}.fa-shrimp:before{content:"\e448"}.fa-list-check:before,.fa-tasks:before{content:"\f0ae"}.fa-jug-detergent:before{content:"\e519"}.fa-circle-user:before,.fa-user-circle:before{content:"\f2bd"}.fa-user-shield:before{content:"\f505"}.fa-wind:before{content:"\f72e"}.fa-car-burst:before,.fa-car-crash:before{content:"\f5e1"}.fa-y:before{content:"\59"}.fa-person-snowboarding:before,.fa-snowboarding:before{content:"\f7c
e"}.fa-shipping-fast:before,.fa-truck-fast:before{content:"\f48b"}.fa-fish:before{content:"\f578"}.fa-user-graduate:before{content:"\f501"}.fa-adjust:before,.fa-circle-half-stroke:before{content:"\f042"}.fa-clapperboard:before{content:"\e131"}.fa-circle-radiation:before,.fa-radiation-alt:before{content:"\f7ba"}.fa-baseball-ball:before,.fa-baseball:before{content:"\f433"}.fa-jet-fighter-up:before{content:"\e518"}.fa-diagram-project:before,.fa-project-diagram:before{content:"\f542"}.fa-copy:before{content:"\f0c5"}.fa-volume-mute:before,.fa-volume-times:before,.fa-volume-xmark:before{content:"\f6a9"}.fa-hand-sparkles:before{content:"\e05d"}.fa-grip-horizontal:before,.fa-grip:before{content:"\f58d"}.fa-share-from-square:before,.fa-share-square:before{content:"\f14d"}.fa-child-combatant:before,.fa-child-rifle:before{content:"\e4e0"}.fa-gun:before{content:"\e19b"}.fa-phone-square:before,.fa-square-phone:before{content:"\f098"}.fa-add:before,.fa-plus:before{content:"\2b"}.fa-expand:before{content:"\f065"}.fa-computer:before{content:"\e4e5"}.fa-close:before,.fa-multiply:before,.fa-remove:before,.fa-times:before,.fa-xmark:before{content:"\f00d"}.fa-arrows-up-down-left-right:before,.fa-arrows:before{content:"\f047"}.fa-chalkboard-teacher:before,.fa-chalkboard-user:before{content:"\f51c"}.fa-peso-sign:before{content:"\e222"}.fa-building-shield:before{content:"\e4d8"}.fa-baby:before{content:"\f77c"}.fa-users-line:before{content:"\e592"}.fa-quote-left-alt:before,.fa-quote-left:before{content:"\f10d"}.fa-tractor:before{content:"\f722"}.fa-trash-arrow-up:before,.fa-trash-restore:before{content:"\f829"}.fa-arrow-down-up-lock:before{content:"\e4b0"}.fa-lines-leaning:before{content:"\e51e"}.fa-ruler-combined:before{content:"\f546"}.fa-copyright:before{content:"\f1f9"}.fa-equals:before{content:"\3d"}.fa-blender:before{content:"\f517"}.fa-teeth:before{content:"\f62e"}.fa-ils:before,.fa-shekel-sign:before,.fa-shekel:before,.fa-sheqel-sign:before,.fa-sheqel:before{content:"\f20b"}.fa-map:before{content:"\f279"}.fa-rocket:before{content:"\f135"}.fa-photo-film:before,.fa-photo-video:before{content:"\f87c"}.fa-folder-minus:before{content:"\f65d"}.fa-store:before{content:"\f54e"}.fa-arrow-trend-up:before{content:"\e098"}.fa-plug-circle-minus:before{content:"\e55e"}.fa-sign-hanging:before,.fa-sign:before{content:"\f4d9"}.fa-bezier-curve:before{content:"\f55b"}.fa-bell-slash:before{content:"\f1f6"}.fa-tablet-android:before,.fa-tablet:before{content:"\f3fb"}.fa-school-flag:before{content:"\e56e"}.fa-fill:before{content:"\f575"}.fa-angle-up:before{content:"\f106"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-holly-berry:before{content:"\f7aa"}.fa-chevron-left:before{content:"\f053"}.fa-bacteria:before{content:"\e059"}.fa-hand-lizard:before{content:"\f258"}.fa-notdef:before{content:"\e1fe"}.fa-disease:before{content:"\f7fa"}.fa-briefcase-medical:before{content:"\f469"}.fa-genderless:before{content:"\f22d"}.fa-chevron-right:before{content:"\f054"}.fa-retweet:before{content:"\f079"}.fa-car-alt:before,.fa-car-rear:before{content:"\f5de"}.fa-pump-soap:before{content:"\e06b"}.fa-video-slash:before{content:"\f4e2"}.fa-battery-2:before,.fa-battery-quarter:before{content:"\f243"}.fa-radio:before{content:"\f8d7"}.fa-baby-carriage:before,.fa-carriage-baby:before{content:"\f77d"}.fa-traffic-light:before{content:"\f637"}.fa-thermometer:before{content:"\f491"}.fa-vr-cardboard:before{content:"\f729"}.fa-hand-middle-finger:before{content:"\f806"}.fa-percent:before,.fa-percentage:before{content:"\25"}.fa-truck-moving:before{content:"\f4
df"}.fa-glass-water-droplet:before{content:"\e4f5"}.fa-display:before{content:"\e163"}.fa-face-smile:before,.fa-smile:before{content:"\f118"}.fa-thumb-tack:before,.fa-thumbtack:before{content:"\f08d"}.fa-trophy:before{content:"\f091"}.fa-person-praying:before,.fa-pray:before{content:"\f683"}.fa-hammer:before{content:"\f6e3"}.fa-hand-peace:before{content:"\f25b"}.fa-rotate:before,.fa-sync-alt:before{content:"\f2f1"}.fa-spinner:before{content:"\f110"}.fa-robot:before{content:"\f544"}.fa-peace:before{content:"\f67c"}.fa-cogs:before,.fa-gears:before{content:"\f085"}.fa-warehouse:before{content:"\f494"}.fa-arrow-up-right-dots:before{content:"\e4b7"}.fa-splotch:before{content:"\f5bc"}.fa-face-grin-hearts:before,.fa-grin-hearts:before{content:"\f584"}.fa-dice-four:before{content:"\f524"}.fa-sim-card:before{content:"\f7c4"}.fa-transgender-alt:before,.fa-transgender:before{content:"\f225"}.fa-mercury:before{content:"\f223"}.fa-arrow-turn-down:before,.fa-level-down:before{content:"\f149"}.fa-person-falling-burst:before{content:"\e547"}.fa-award:before{content:"\f559"}.fa-ticket-alt:before,.fa-ticket-simple:before{content:"\f3ff"}.fa-building:before{content:"\f1ad"}.fa-angle-double-left:before,.fa-angles-left:before{content:"\f100"}.fa-qrcode:before{content:"\f029"}.fa-clock-rotate-left:before,.fa-history:before{content:"\f1da"}.fa-face-grin-beam-sweat:before,.fa-grin-beam-sweat:before{content:"\f583"}.fa-arrow-right-from-file:before,.fa-file-export:before{content:"\f56e"}.fa-shield-blank:before,.fa-shield:before{content:"\f132"}.fa-arrow-up-short-wide:before,.fa-sort-amount-up-alt:before{content:"\f885"}.fa-house-medical:before{content:"\e3b2"}.fa-golf-ball-tee:before,.fa-golf-ball:before{content:"\f450"}.fa-chevron-circle-left:before,.fa-circle-chevron-left:before{content:"\f137"}.fa-house-chimney-window:before{content:"\e00d"}.fa-pen-nib:before{content:"\f5ad"}.fa-tent-arrow-turn-left:before{content:"\e580"}.fa-tents:before{content:"\e582"}.fa-magic:before,.fa-wand-magic:before{content:"\f0d0"}.fa-dog:before{content:"\f6d3"}.fa-carrot:before{content:"\f787"}.fa-moon:before{content:"\f186"}.fa-wine-glass-alt:before,.fa-wine-glass-empty:before{content:"\f5ce"}.fa-cheese:before{content:"\f7ef"}.fa-yin-yang:before{content:"\f6ad"}.fa-music:before{content:"\f001"}.fa-code-commit:before{content:"\f386"}.fa-temperature-low:before{content:"\f76b"}.fa-biking:before,.fa-person-biking:before{content:"\f84a"}.fa-broom:before{content:"\f51a"}.fa-shield-heart:before{content:"\e574"}.fa-gopuram:before{content:"\f664"}.fa-earth-oceania:before,.fa-globe-oceania:before{content:"\e47b"}.fa-square-xmark:before,.fa-times-square:before,.fa-xmark-square:before{content:"\f2d3"}.fa-hashtag:before{content:"\23"}.fa-expand-alt:before,.fa-up-right-and-down-left-from-center:before{content:"\f424"}.fa-oil-can:before{content:"\f613"}.fa-t:before{content:"\54"}.fa-hippo:before{content:"\f6ed"}.fa-chart-column:before{content:"\e0e3"}.fa-infinity:before{content:"\f534"}.fa-vial-circle-check:before{content:"\e596"}.fa-person-arrow-down-to-line:before{content:"\e538"}.fa-voicemail:before{content:"\f897"}.fa-fan:before{content:"\f863"}.fa-person-walking-luggage:before{content:"\e554"}.fa-arrows-alt-v:before,.fa-up-down:before{content:"\f338"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-calendar:before{content:"\f133"}.fa-trailer:before{content:"\e041"}.fa-bahai:before,.fa-haykal:before{content:"\f666"}.fa-sd-card:before{content:"\f7c2"}.fa-dragon:before{content:"\f6d5"}.fa-shoe-prints:before{content:"\f54b"}.fa-circle-plus:before,.f
a-plus-circle:before{content:"\f055"}.fa-face-grin-tongue-wink:before,.fa-grin-tongue-wink:before{content:"\f58b"}.fa-hand-holding:before{content:"\f4bd"}.fa-plug-circle-exclamation:before{content:"\e55d"}.fa-chain-broken:before,.fa-chain-slash:before,.fa-link-slash:before,.fa-unlink:before{content:"\f127"}.fa-clone:before{content:"\f24d"}.fa-person-walking-arrow-loop-left:before{content:"\e551"}.fa-arrow-up-z-a:before,.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-fire-alt:before,.fa-fire-flame-curved:before{content:"\f7e4"}.fa-tornado:before{content:"\f76f"}.fa-file-circle-plus:before{content:"\e494"}.fa-book-quran:before,.fa-quran:before{content:"\f687"}.fa-anchor:before{content:"\f13d"}.fa-border-all:before{content:"\f84c"}.fa-angry:before,.fa-face-angry:before{content:"\f556"}.fa-cookie-bite:before{content:"\f564"}.fa-arrow-trend-down:before{content:"\e097"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-draw-polygon:before{content:"\f5ee"}.fa-balance-scale:before,.fa-scale-balanced:before{content:"\f24e"}.fa-gauge-simple-high:before,.fa-tachometer-fast:before,.fa-tachometer:before{content:"\f62a"}.fa-shower:before{content:"\f2cc"}.fa-desktop-alt:before,.fa-desktop:before{content:"\f390"}.fa-m:before{content:"\4d"}.fa-table-list:before,.fa-th-list:before{content:"\f00b"}.fa-comment-sms:before,.fa-sms:before{content:"\f7cd"}.fa-book:before{content:"\f02d"}.fa-user-plus:before{content:"\f234"}.fa-check:before{content:"\f00c"}.fa-battery-4:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-house-circle-check:before{content:"\e509"}.fa-angle-left:before{content:"\f104"}.fa-diagram-successor:before{content:"\e47a"}.fa-truck-arrow-right:before{content:"\e58b"}.fa-arrows-split-up-and-left:before{content:"\e4bc"}.fa-fist-raised:before,.fa-hand-fist:before{content:"\f6de"}.fa-cloud-moon:before{content:"\f6c3"}.fa-briefcase:before{content:"\f0b1"}.fa-person-falling:before{content:"\e546"}.fa-image-portrait:before,.fa-portrait:before{content:"\f3e0"}.fa-user-tag:before{content:"\f507"}.fa-rug:before{content:"\e569"}.fa-earth-europe:before,.fa-globe-europe:before{content:"\f7a2"}.fa-cart-flatbed-suitcase:before,.fa-luggage-cart:before{content:"\f59d"}.fa-rectangle-times:before,.fa-rectangle-xmark:before,.fa-times-rectangle:before,.fa-window-close:before{content:"\f410"}.fa-baht-sign:before{content:"\e0ac"}.fa-book-open:before{content:"\f518"}.fa-book-journal-whills:before,.fa-journal-whills:before{content:"\f66a"}.fa-handcuffs:before{content:"\e4f8"}.fa-exclamation-triangle:before,.fa-triangle-exclamation:before,.fa-warning:before{content:"\f071"}.fa-database:before{content:"\f1c0"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-bottle-droplet:before{content:"\e4c4"}.fa-mask-face:before{content:"\e1d7"}.fa-hill-rockslide:before{content:"\e508"}.fa-exchange-alt:before,.fa-right-left:before{content:"\f362"}.fa-paper-plane:before{content:"\f1d8"}.fa-road-circle-exclamation:before{content:"\e565"}.fa-dungeon:before{content:"\f6d9"}.fa-align-right:before{content:"\f038"}.fa-money-bill-1-wave:before,.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-life-ring:before{content:"\f1cd"}.fa-hands:before,.fa-sign-language:before,.fa-signing:before{content:"\f2a7"}.fa-calendar-day:before{content:"\f783"}.fa-ladder-water:before,.fa-swimming-pool:before,.fa-water-ladder:before{content:"\f5c5"}.fa-arrows-up-down:before,.fa-arrows-v:before{content:"\f07d"}.fa-face-grimace:before,.fa-grimace:before{content:"\f57f"}.fa-wheelchair-alt:before,.fa-wheelchair-move:before{content:"\e2ce"}.fa-l
evel-down-alt:before,.fa-turn-down:before{content:"\f3be"}.fa-person-walking-arrow-right:before{content:"\e552"}.fa-envelope-square:before,.fa-square-envelope:before{content:"\f199"}.fa-dice:before{content:"\f522"}.fa-bowling-ball:before{content:"\f436"}.fa-brain:before{content:"\f5dc"}.fa-band-aid:before,.fa-bandage:before{content:"\f462"}.fa-calendar-minus:before{content:"\f272"}.fa-circle-xmark:before,.fa-times-circle:before,.fa-xmark-circle:before{content:"\f057"}.fa-gifts:before{content:"\f79c"}.fa-hotel:before{content:"\f594"}.fa-earth-asia:before,.fa-globe-asia:before{content:"\f57e"}.fa-id-card-alt:before,.fa-id-card-clip:before{content:"\f47f"}.fa-magnifying-glass-plus:before,.fa-search-plus:before{content:"\f00e"}.fa-thumbs-up:before{content:"\f164"}.fa-user-clock:before{content:"\f4fd"}.fa-allergies:before,.fa-hand-dots:before{content:"\f461"}.fa-file-invoice:before{content:"\f570"}.fa-window-minimize:before{content:"\f2d1"}.fa-coffee:before,.fa-mug-saucer:before{content:"\f0f4"}.fa-brush:before{content:"\f55d"}.fa-mask:before{content:"\f6fa"}.fa-magnifying-glass-minus:before,.fa-search-minus:before{content:"\f010"}.fa-ruler-vertical:before{content:"\f548"}.fa-user-alt:before,.fa-user-large:before{content:"\f406"}.fa-train-tram:before{content:"\e5b4"}.fa-user-nurse:before{content:"\f82f"}.fa-syringe:before{content:"\f48e"}.fa-cloud-sun:before{content:"\f6c4"}.fa-stopwatch-20:before{content:"\e06f"}.fa-square-full:before{content:"\f45c"}.fa-magnet:before{content:"\f076"}.fa-jar:before{content:"\e516"}.fa-note-sticky:before,.fa-sticky-note:before{content:"\f249"}.fa-bug-slash:before{content:"\e490"}.fa-arrow-up-from-water-pump:before{content:"\e4b6"}.fa-bone:before{content:"\f5d7"}.fa-user-injured:before{content:"\f728"}.fa-face-sad-tear:before,.fa-sad-tear:before{content:"\f5b4"}.fa-plane:before{content:"\f072"}.fa-tent-arrows-down:before{content:"\e581"}.fa-exclamation:before{content:"\21"}.fa-arrows-spin:before{content:"\e4bb"}.fa-print:before{content:"\f02f"}.fa-try:before,.fa-turkish-lira-sign:before,.fa-turkish-lira:before{content:"\e2bb"}.fa-dollar-sign:before,.fa-dollar:before,.fa-usd:before{content:"\24"}.fa-x:before{content:"\58"}.fa-magnifying-glass-dollar:before,.fa-search-dollar:before{content:"\f688"}.fa-users-cog:before,.fa-users-gear:before{content:"\f509"}.fa-person-military-pointing:before{content:"\e54a"}.fa-bank:before,.fa-building-columns:before,.fa-institution:before,.fa-museum:before,.fa-university:before{content:"\f19c"}.fa-umbrella:before{content:"\f0e9"}.fa-trowel:before{content:"\e589"}.fa-d:before{content:"\44"}.fa-stapler:before{content:"\e5af"}.fa-masks-theater:before,.fa-theater-masks:before{content:"\f630"}.fa-kip-sign:before{content:"\e1c4"}.fa-hand-point-left:before{content:"\f0a5"}.fa-handshake-alt:before,.fa-handshake-simple:before{content:"\f4c6"}.fa-fighter-jet:before,.fa-jet-fighter:before{content:"\f0fb"}.fa-share-alt-square:before,.fa-square-share-nodes:before{content:"\f1e1"}.fa-barcode:before{content:"\f02a"}.fa-plus-minus:before{content:"\e43c"}.fa-video-camera:before,.fa-video:before{content:"\f03d"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\f19d"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-person-circle-check:before{content:"\e53e"}.fa-level-up-alt:before,.fa-turn-up:before{content:"\f3bf"} 
+.fa-sr-only,.fa-sr-only-focusable:not(:focus),.sr-only,.sr-only-focusable:not(:focus){position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}:host,:root{--fa-style-family-brands:"Font Awesome 6 Brands";--fa-font-brands:normal 400 1em/1 "Font Awesome 6 Brands"}@font-face{font-family:"Font Awesome 6 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}.fa-brands,.fab{font-weight:400}.fa-monero:before{content:"\f3d0"}.fa-hooli:before{content:"\f427"}.fa-yelp:before{content:"\f1e9"}.fa-cc-visa:before{content:"\f1f0"}.fa-lastfm:before{content:"\f202"}.fa-shopware:before{content:"\f5b5"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-aws:before{content:"\f375"}.fa-redhat:before{content:"\f7bc"}.fa-yoast:before{content:"\f2b1"}.fa-cloudflare:before{content:"\e07d"}.fa-ups:before{content:"\f7e0"}.fa-pixiv:before{content:"\e640"}.fa-wpexplorer:before{content:"\f2de"}.fa-dyalog:before{content:"\f399"}.fa-bity:before{content:"\f37a"}.fa-stackpath:before{content:"\f842"}.fa-buysellads:before{content:"\f20d"}.fa-first-order:before{content:"\f2b0"}.fa-modx:before{content:"\f285"}.fa-guilded:before{content:"\e07e"}.fa-vnv:before{content:"\f40b"}.fa-js-square:before,.fa-square-js:before{content:"\f3b9"}.fa-microsoft:before{content:"\f3ca"}.fa-qq:before{content:"\f1d6"}.fa-orcid:before{content:"\f8d2"}.fa-java:before{content:"\f4e4"}.fa-invision:before{content:"\f7b0"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-centercode:before{content:"\f380"}.fa-glide-g:before{content:"\f2a6"}.fa-drupal:before{content:"\f1a9"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-unity:before{content:"\e049"}.fa-whmcs:before{content:"\f40d"}.fa-rocketchat:before{content:"\f3e8"}.fa-vk:before{content:"\f189"}.fa-untappd:before{content:"\f405"}.fa-mailchimp:before{content:"\f59e"}.fa-css3-alt:before{content:"\f38b"}.fa-reddit-square:before,.fa-square-reddit:before{content:"\f1a2"}.fa-vimeo-v:before{content:"\f27d"}.fa-contao:before{content:"\f26d"}.fa-square-font-awesome:before{content:"\e5ad"}.fa-deskpro:before{content:"\f38f"}.fa-brave:before{content:"\e63c"}.fa-sistrix:before{content:"\f3ee"}.fa-instagram-square:before,.fa-square-instagram:before{content:"\e055"}.fa-battle-net:before{content:"\f835"}.fa-the-red-yeti:before{content:"\f69d"}.fa-hacker-news-square:before,.fa-square-hacker-news:before{content:"\f3af"}.fa-edge:before{content:"\f282"}.fa-threads:before{content:"\e618"}.fa-napster:before{content:"\f3d2"}.fa-snapchat-square:before,.fa-square-snapchat:before{content:"\f2ad"}.fa-google-plus-g:before{content:"\f0d5"}.fa-artstation:before{content:"\f77a"}.fa-markdown:before{content:"\f60f"}.fa-sourcetree:before{content:"\f7d3"}.fa-google-plus:before{content:"\f2b3"}.fa-diaspora:before{content:"\f791"}.fa-foursquare:before{content:"\f180"}.fa-stack-overflow:before{content:"\f16c"}.fa-github-alt:before{content:"\f113"}.fa-phoenix-squadron:before{content:"\f511"}.fa-pagelines:before{content:"\f18c"}.fa-algolia:before{content:"\f36c"}.fa-red-river:before{content:"\f3e3"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-safari:before{content:"\f267"}.fa-google:before{content:"\f1a0"}.fa-font-awesome-alt:before,.fa-square-font-awesome-stroke:before{content:"\f35c"}.fa-atlassian:before{content:"\f77b"}.fa-linkedin-in:before{content:"\f0e1"}.fa-digital-ocean:before{content:"\f391"
}.fa-nimblr:before{content:"\f5a8"}.fa-chromecast:before{content:"\f838"}.fa-evernote:before{content:"\f839"}.fa-hacker-news:before{content:"\f1d4"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-adversal:before{content:"\f36a"}.fa-creative-commons:before{content:"\f25e"}.fa-watchman-monitoring:before{content:"\e087"}.fa-fonticons:before{content:"\f280"}.fa-weixin:before{content:"\f1d7"}.fa-shirtsinbulk:before{content:"\f214"}.fa-codepen:before{content:"\f1cb"}.fa-git-alt:before{content:"\f841"}.fa-lyft:before{content:"\f3c3"}.fa-rev:before{content:"\f5b2"}.fa-windows:before{content:"\f17a"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-square-viadeo:before,.fa-viadeo-square:before{content:"\f2aa"}.fa-meetup:before{content:"\f2e0"}.fa-centos:before{content:"\f789"}.fa-adn:before{content:"\f170"}.fa-cloudsmith:before{content:"\f384"}.fa-opensuse:before{content:"\e62b"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-dribbble-square:before,.fa-square-dribbble:before{content:"\f397"}.fa-codiepie:before{content:"\f284"}.fa-node:before{content:"\f419"}.fa-mix:before{content:"\f3cb"}.fa-steam:before{content:"\f1b6"}.fa-cc-apple-pay:before{content:"\f416"}.fa-scribd:before{content:"\f28a"}.fa-debian:before{content:"\e60b"}.fa-openid:before{content:"\f19b"}.fa-instalod:before{content:"\e081"}.fa-expeditedssl:before{content:"\f23e"}.fa-sellcast:before{content:"\f2da"}.fa-square-twitter:before,.fa-twitter-square:before{content:"\f081"}.fa-r-project:before{content:"\f4f7"}.fa-delicious:before{content:"\f1a5"}.fa-freebsd:before{content:"\f3a4"}.fa-vuejs:before{content:"\f41f"}.fa-accusoft:before{content:"\f369"}.fa-ioxhost:before{content:"\f208"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-app-store:before{content:"\f36f"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-itunes-note:before{content:"\f3b5"}.fa-golang:before{content:"\e40f"}.fa-kickstarter:before{content:"\f3bb"}.fa-grav:before{content:"\f2d6"}.fa-weibo:before{content:"\f18a"}.fa-uncharted:before{content:"\e084"}.fa-firstdraft:before{content:"\f3a1"}.fa-square-youtube:before,.fa-youtube-square:before{content:"\f431"}.fa-wikipedia-w:before{content:"\f266"}.fa-rendact:before,.fa-wpressr:before{content:"\f3e4"}.fa-angellist:before{content:"\f209"}.fa-galactic-republic:before{content:"\f50c"}.fa-nfc-directional:before{content:"\e530"}.fa-skype:before{content:"\f17e"}.fa-joget:before{content:"\f3b7"}.fa-fedora:before{content:"\f798"}.fa-stripe-s:before{content:"\f42a"}.fa-meta:before{content:"\e49b"}.fa-laravel:before{content:"\f3bd"}.fa-hotjar:before{content:"\f3b1"}.fa-bluetooth-b:before{content:"\f294"}.fa-square-letterboxd:before{content:"\e62e"}.fa-sticker-mule:before{content:"\f3f7"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-hips:before{content:"\f452"}.fa-behance:before{content:"\f1b4"}.fa-reddit:before{content:"\f1a1"}.fa-discord:before{content:"\f392"}.fa-chrome:before{content:"\f268"}.fa-app-store-ios:before{content:"\f370"}.fa-cc-discover:before{content:"\f1f2"}.fa-wpbeginner:before{content:"\f297"}.fa-confluence:before{content:"\f78d"}.fa-shoelace:before{content:"\e60c"}.fa-mdb:before{content:"\f8ca"}.fa-dochub:before{content:"\f394"}.fa-accessible-icon:before{content:"\f368"}.fa-ebay:before{content:"\f4f4"}.fa-amazon:before{content:"\f270"}.fa-unsplash:before{content:"\e07c"}.fa-yarn:before{content:"\f7e3"}.fa-square-steam:before,.fa-steam-square:before{content:"\f1b7"}.fa-500px:before{content:"\f26e"}.fa-square-vimeo:before,.fa-vimeo-square:before{content:"\f194"}.fa-asymmetrik:before{content:"\f372"}.fa-font-awes
ome-flag:before,.fa-font-awesome-logo-full:before,.fa-font-awesome:before{content:"\f2b4"}.fa-gratipay:before{content:"\f184"}.fa-apple:before{content:"\f179"}.fa-hive:before{content:"\e07f"}.fa-gitkraken:before{content:"\f3a6"}.fa-keybase:before{content:"\f4f5"}.fa-apple-pay:before{content:"\f415"}.fa-padlet:before{content:"\e4a0"}.fa-amazon-pay:before{content:"\f42c"}.fa-github-square:before,.fa-square-github:before{content:"\f092"}.fa-stumbleupon:before{content:"\f1a4"}.fa-fedex:before{content:"\f797"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-shopify:before{content:"\e057"}.fa-neos:before{content:"\f612"}.fa-square-threads:before{content:"\e619"}.fa-hackerrank:before{content:"\f5f7"}.fa-researchgate:before{content:"\f4f8"}.fa-swift:before{content:"\f8e1"}.fa-angular:before{content:"\f420"}.fa-speakap:before{content:"\f3f3"}.fa-angrycreative:before{content:"\f36e"}.fa-y-combinator:before{content:"\f23b"}.fa-empire:before{content:"\f1d1"}.fa-envira:before{content:"\f299"}.fa-google-scholar:before{content:"\e63b"}.fa-gitlab-square:before,.fa-square-gitlab:before{content:"\e5ae"}.fa-studiovinari:before{content:"\f3f8"}.fa-pied-piper:before{content:"\f2ae"}.fa-wordpress:before{content:"\f19a"}.fa-product-hunt:before{content:"\f288"}.fa-firefox:before{content:"\f269"}.fa-linode:before{content:"\f2b8"}.fa-goodreads:before{content:"\f3a8"}.fa-odnoklassniki-square:before,.fa-square-odnoklassniki:before{content:"\f264"}.fa-jsfiddle:before{content:"\f1cc"}.fa-sith:before{content:"\f512"}.fa-themeisle:before{content:"\f2b2"}.fa-page4:before{content:"\f3d7"}.fa-hashnode:before{content:"\e499"}.fa-react:before{content:"\f41b"}.fa-cc-paypal:before{content:"\f1f4"}.fa-squarespace:before{content:"\f5be"}.fa-cc-stripe:before{content:"\f1f5"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-bitcoin:before{content:"\f379"}.fa-keycdn:before{content:"\f3ba"}.fa-opera:before{content:"\f26a"}.fa-itch-io:before{content:"\f83a"}.fa-umbraco:before{content:"\f8e8"}.fa-galactic-senate:before{content:"\f50d"}.fa-ubuntu:before{content:"\f7df"}.fa-draft2digital:before{content:"\f396"}.fa-stripe:before{content:"\f429"}.fa-houzz:before{content:"\f27c"}.fa-gg:before{content:"\f260"}.fa-dhl:before{content:"\f790"}.fa-pinterest-square:before,.fa-square-pinterest:before{content:"\f0d3"}.fa-xing:before{content:"\f168"}.fa-blackberry:before{content:"\f37b"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-playstation:before{content:"\f3df"}.fa-quinscape:before{content:"\f459"}.fa-less:before{content:"\f41d"}.fa-blogger-b:before{content:"\f37d"}.fa-opencart:before{content:"\f23d"}.fa-vine:before{content:"\f1ca"}.fa-signal-messenger:before{content:"\e663"}.fa-paypal:before{content:"\f1ed"}.fa-gitlab:before{content:"\f296"}.fa-typo3:before{content:"\f42b"}.fa-reddit-alien:before{content:"\f281"}.fa-yahoo:before{content:"\f19e"}.fa-dailymotion:before{content:"\e052"}.fa-affiliatetheme:before{content:"\f36b"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-bootstrap:before{content:"\f836"}.fa-odnoklassniki:before{content:"\f263"}.fa-nfc-symbol:before{content:"\e531"}.fa-mintbit:before{content:"\e62f"}.fa-ethereum:before{content:"\f42e"}.fa-speaker-deck:before{content:"\f83c"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-patreon:before{content:"\f3d9"}.fa-avianex:before{content:"\f374"}.fa-ello:before{content:"\f5f1"}.fa-gofore:before{content:"\f3a7"}.fa-bimobject:before{content:"\f378"}.fa-brave-reverse:before{content:"\e63d"}.fa-facebook-f:before{content:"\f39e"}.fa-google-plus-square:before,.fa-square-google-plus:
before{content:"\f0d4"}.fa-mandalorian:before{content:"\f50f"}.fa-first-order-alt:before{content:"\f50a"}.fa-osi:before{content:"\f41a"}.fa-google-wallet:before{content:"\f1ee"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-periscope:before{content:"\f3da"}.fa-fulcrum:before{content:"\f50b"}.fa-cloudscale:before{content:"\f383"}.fa-forumbee:before{content:"\f211"}.fa-mizuni:before{content:"\f3cc"}.fa-schlix:before{content:"\f3ea"}.fa-square-xing:before,.fa-xing-square:before{content:"\f169"}.fa-bandcamp:before{content:"\f2d5"}.fa-wpforms:before{content:"\f298"}.fa-cloudversify:before{content:"\f385"}.fa-usps:before{content:"\f7e1"}.fa-megaport:before{content:"\f5a3"}.fa-magento:before{content:"\f3c4"}.fa-spotify:before{content:"\f1bc"}.fa-optin-monster:before{content:"\f23c"}.fa-fly:before{content:"\f417"}.fa-aviato:before{content:"\f421"}.fa-itunes:before{content:"\f3b4"}.fa-cuttlefish:before{content:"\f38c"}.fa-blogger:before{content:"\f37c"}.fa-flickr:before{content:"\f16e"}.fa-viber:before{content:"\f409"}.fa-soundcloud:before{content:"\f1be"}.fa-digg:before{content:"\f1a6"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-letterboxd:before{content:"\e62d"}.fa-symfony:before{content:"\f83d"}.fa-maxcdn:before{content:"\f136"}.fa-etsy:before{content:"\f2d7"}.fa-facebook-messenger:before{content:"\f39f"}.fa-audible:before{content:"\f373"}.fa-think-peaks:before{content:"\f731"}.fa-bilibili:before{content:"\e3d9"}.fa-erlang:before{content:"\f39d"}.fa-x-twitter:before{content:"\e61b"}.fa-cotton-bureau:before{content:"\f89e"}.fa-dashcube:before{content:"\f210"}.fa-42-group:before,.fa-innosoft:before{content:"\e080"}.fa-stack-exchange:before{content:"\f18d"}.fa-elementor:before{content:"\f430"}.fa-pied-piper-square:before,.fa-square-pied-piper:before{content:"\e01e"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-palfed:before{content:"\f3d8"}.fa-superpowers:before{content:"\f2dd"}.fa-resolving:before{content:"\f3e7"}.fa-xbox:before{content:"\f412"}.fa-searchengin:before{content:"\f3eb"}.fa-tiktok:before{content:"\e07b"}.fa-facebook-square:before,.fa-square-facebook:before{content:"\f082"}.fa-renren:before{content:"\f18b"}.fa-linux:before{content:"\f17c"}.fa-glide:before{content:"\f2a5"}.fa-linkedin:before{content:"\f08c"}.fa-hubspot:before{content:"\f3b2"}.fa-deploydog:before{content:"\f38e"}.fa-twitch:before{content:"\f1e8"}.fa-ravelry:before{content:"\f2d9"}.fa-mixer:before{content:"\e056"}.fa-lastfm-square:before,.fa-square-lastfm:before{content:"\f203"}.fa-vimeo:before{content:"\f40a"}.fa-mendeley:before{content:"\f7b3"}.fa-uniregistry:before{content:"\f404"}.fa-figma:before{content:"\f799"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-dropbox:before{content:"\f16b"}.fa-instagram:before{content:"\f16d"}.fa-cmplid:before{content:"\e360"}.fa-upwork:before{content:"\e641"}.fa-facebook:before{content:"\f09a"}.fa-gripfire:before{content:"\f3ac"}.fa-jedi-order:before{content:"\f50e"}.fa-uikit:before{content:"\f403"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-phabricator:before{content:"\f3db"}.fa-ussunnah:before{content:"\f407"}.fa-earlybirds:before{content:"\f39a"}.fa-trade-federation:before{content:"\f513"}.fa-autoprefixer:before{content:"\f41c"}.fa-whatsapp:before{content:"\f232"}.fa-slideshare:before{content:"\f1e7"}.fa-google-play:before{content:"\f3ab"}.fa-viadeo:before{content:"\f2a9"}.fa-line:before{content:"\f3c0"}.fa-google-drive:before{content:"\f3aa"}.fa-servicestack:before{content:"\f3ec"}.fa-simplybuilt:before{content:"\f215"}.fa-bi
tbucket:before{content:"\f171"}.fa-imdb:before{content:"\f2d8"}.fa-deezer:before{content:"\e077"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-jira:before{content:"\f7b1"}.fa-docker:before{content:"\f395"}.fa-screenpal:before{content:"\e570"}.fa-bluetooth:before{content:"\f293"}.fa-gitter:before{content:"\f426"}.fa-d-and-d:before{content:"\f38d"}.fa-microblog:before{content:"\e01a"}.fa-cc-diners-club:before{content:"\f24c"}.fa-gg-circle:before{content:"\f261"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-yandex:before{content:"\f413"}.fa-readme:before{content:"\f4d5"}.fa-html5:before{content:"\f13b"}.fa-sellsy:before{content:"\f213"}.fa-sass:before{content:"\f41e"}.fa-wirsindhandwerk:before,.fa-wsh:before{content:"\e2d0"}.fa-buromobelexperte:before{content:"\f37f"}.fa-salesforce:before{content:"\f83b"}.fa-octopus-deploy:before{content:"\e082"}.fa-medapps:before{content:"\f3c6"}.fa-ns8:before{content:"\f3d5"}.fa-pinterest-p:before{content:"\f231"}.fa-apper:before{content:"\f371"}.fa-fort-awesome:before{content:"\f286"}.fa-waze:before{content:"\f83f"}.fa-cc-jcb:before{content:"\f24b"}.fa-snapchat-ghost:before,.fa-snapchat:before{content:"\f2ab"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-rust:before{content:"\e07a"}.fa-wix:before{content:"\f5cf"}.fa-behance-square:before,.fa-square-behance:before{content:"\f1b5"}.fa-supple:before{content:"\f3f9"}.fa-webflow:before{content:"\e65c"}.fa-rebel:before{content:"\f1d0"}.fa-css3:before{content:"\f13c"}.fa-staylinked:before{content:"\f3f5"}.fa-kaggle:before{content:"\f5fa"}.fa-space-awesome:before{content:"\e5ac"}.fa-deviantart:before{content:"\f1bd"}.fa-cpanel:before{content:"\f388"}.fa-goodreads-g:before{content:"\f3a9"}.fa-git-square:before,.fa-square-git:before{content:"\f1d2"}.fa-square-tumblr:before,.fa-tumblr-square:before{content:"\f174"}.fa-trello:before{content:"\f181"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-get-pocket:before{content:"\f265"}.fa-perbyte:before{content:"\e083"}.fa-grunt:before{content:"\f3ad"}.fa-weebly:before{content:"\f5cc"}.fa-connectdevelop:before{content:"\f20e"}.fa-leanpub:before{content:"\f212"}.fa-black-tie:before{content:"\f27e"}.fa-themeco:before{content:"\f5c6"}.fa-python:before{content:"\f3e2"}.fa-android:before{content:"\f17b"}.fa-bots:before{content:"\e340"}.fa-free-code-camp:before{content:"\f2c5"}.fa-hornbill:before{content:"\f592"}.fa-js:before{content:"\f3b8"}.fa-ideal:before{content:"\e013"}.fa-git:before{content:"\f1d3"}.fa-dev:before{content:"\f6cc"}.fa-sketch:before{content:"\f7c6"}.fa-yandex-international:before{content:"\f414"}.fa-cc-amex:before{content:"\f1f3"}.fa-uber:before{content:"\f402"}.fa-github:before{content:"\f09b"}.fa-php:before{content:"\f457"}.fa-alipay:before{content:"\f642"}.fa-youtube:before{content:"\f167"}.fa-skyatlas:before{content:"\f216"}.fa-firefox-browser:before{content:"\e007"}.fa-replyd:before{content:"\f3e6"}.fa-suse:before{content:"\f7d6"}.fa-jenkins:before{content:"\f3b6"}.fa-twitter:before{content:"\f099"}.fa-rockrms:before{content:"\f3e9"}.fa-pinterest:before{content:"\f0d2"}.fa-buffer:before{content:"\f837"}.fa-npm:before{content:"\f3d4"}.fa-yammer:before{content:"\f840"}.fa-btc:before{content:"\f15a"}.fa-dribbble:before{content:"\f17d"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-internet-explorer:before{content:"\f26b"}.fa-stubber:before{content:"\e5c7"}.fa-telegram-plane:before,.fa-telegram:before{content:"\f2c6"}.fa-old-republic:before{content:"\f510"}.fa-odysee:before{content:"\e5c6"}.fa-square-whatsa
pp:before,.fa-whatsapp-square:before{content:"\f40c"}.fa-node-js:before{content:"\f3d3"}.fa-edge-legacy:before{content:"\e078"}.fa-slack-hash:before,.fa-slack:before{content:"\f198"}.fa-medrt:before{content:"\f3c8"}.fa-usb:before{content:"\f287"}.fa-tumblr:before{content:"\f173"}.fa-vaadin:before{content:"\f408"}.fa-quora:before{content:"\f2c4"}.fa-square-x-twitter:before{content:"\e61a"}.fa-reacteurope:before{content:"\f75d"}.fa-medium-m:before,.fa-medium:before{content:"\f23a"}.fa-amilia:before{content:"\f36d"}.fa-mixcloud:before{content:"\f289"}.fa-flipboard:before{content:"\f44d"}.fa-viacoin:before{content:"\f237"}.fa-critical-role:before{content:"\f6c9"}.fa-sitrox:before{content:"\e44a"}.fa-discourse:before{content:"\f393"}.fa-joomla:before{content:"\f1aa"}.fa-mastodon:before{content:"\f4f6"}.fa-airbnb:before{content:"\f834"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-buy-n-large:before{content:"\f8a6"}.fa-gulp:before{content:"\f3ae"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-strava:before{content:"\f428"}.fa-ember:before{content:"\f423"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-teamspeak:before{content:"\f4f9"}.fa-pushed:before{content:"\f3e1"}.fa-wordpress-simple:before{content:"\f411"}.fa-nutritionix:before{content:"\f3d6"}.fa-wodu:before{content:"\e088"}.fa-google-pay:before{content:"\e079"}.fa-intercom:before{content:"\f7af"}.fa-zhihu:before{content:"\f63f"}.fa-korvue:before{content:"\f42f"}.fa-pix:before{content:"\e43a"}.fa-steam-symbol:before{content:"\f3f6"}:host,:root{--fa-font-regular:normal 400 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}.fa-regular,.far{font-weight:400}:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-weight:900}@font-face{font-family:"Font Awesome 5 Brands";font-display:block;font-weight:400;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:900;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:400;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) 
format("truetype");unicode-range:u+f003,u+f006,u+f014,u+f016-f017,u+f01a-f01b,u+f01d,u+f022,u+f03e,u+f044,u+f046,u+f05c-f05d,u+f06e,u+f070,u+f087-f088,u+f08a,u+f094,u+f096-f097,u+f09d,u+f0a0,u+f0a2,u+f0a4-f0a7,u+f0c5,u+f0c7,u+f0e5-f0e6,u+f0eb,u+f0f6-f0f8,u+f10c,u+f114-f115,u+f118-f11a,u+f11c-f11d,u+f133,u+f147,u+f14e,u+f150-f152,u+f185-f186,u+f18e,u+f190-f192,u+f196,u+f1c1-f1c9,u+f1d9,u+f1db,u+f1e3,u+f1ea,u+f1f7,u+f1f9,u+f20a,u+f247-f248,u+f24a,u+f24d,u+f255-f25b,u+f25d,u+f271-f274,u+f278,u+f27b,u+f28c,u+f28e,u+f29c,u+f2b5,u+f2b7,u+f2ba,u+f2bc,u+f2be,u+f2c0-f2c1,u+f2c3,u+f2d0,u+f2d2,u+f2d4,u+f2dc}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-v4compatibility.woff2) format("woff2"),url(../webfonts/fa-v4compatibility.ttf) format("truetype");unicode-range:u+f041,u+f047,u+f065-f066,u+f07d-f07e,u+f080,u+f08b,u+f08e,u+f090,u+f09a,u+f0ac,u+f0ae,u+f0b2,u+f0d0,u+f0d6,u+f0e4,u+f0ec,u+f10a-f10b,u+f123,u+f13e,u+f148-f149,u+f14c,u+f156,u+f15e,u+f160-f161,u+f163,u+f175-f178,u+f195,u+f1f8,u+f219,u+f27a} diff --git a/pydis_site/static/fontawesome/webfonts/fa-brands-400.ttf b/pydis_site/static/fontawesome/webfonts/fa-brands-400.ttf new file mode 100644 index 000000000..5efb1d4f9 Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-brands-400.ttf differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-brands-400.woff2 b/pydis_site/static/fontawesome/webfonts/fa-brands-400.woff2 new file mode 100644 index 000000000..36fbda7d3 Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-brands-400.woff2 differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-regular-400.ttf b/pydis_site/static/fontawesome/webfonts/fa-regular-400.ttf new file mode 100644 index 000000000..838b4e2cf Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-regular-400.ttf differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-regular-400.woff2 b/pydis_site/static/fontawesome/webfonts/fa-regular-400.woff2 new file mode 100644 index 000000000..b6cabbacb Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-regular-400.woff2 differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-solid-900.ttf b/pydis_site/static/fontawesome/webfonts/fa-solid-900.ttf new file mode 100644 index 000000000..ec24749db Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-solid-900.ttf differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-solid-900.woff2 b/pydis_site/static/fontawesome/webfonts/fa-solid-900.woff2 new file mode 100644 index 000000000..824d518eb Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-solid-900.woff2 differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.ttf b/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.ttf new file mode 100644 index 000000000..b175aa8ec Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.ttf differ diff --git a/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.woff2 b/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.woff2 new file mode 100644 index 000000000..e09b5a550 Binary files /dev/null and b/pydis_site/static/fontawesome/webfonts/fa-v4compatibility.woff2 differ diff --git a/pydis_site/templates/base/base.html b/pydis_site/templates/base/base.html index e497298ab..0eb109755 100644 --- a/pydis_site/templates/base/base.html +++ b/pydis_site/templates/base/base.html @@ -26,7 +26,8 @@ {% bulma %} {% bulma 'dark' include_js=False %} - {% font_awesome %} + <link 
href="{% static 'fontawesome/css/all.min.css' %}" rel="stylesheet" type="text/css"> + <link rel="stylesheet" href="{% static "css/base/base.css" %}"> {% block head %}{% endblock %}
cisagov__manage.get.gov-114
[ { "content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom cfenv import AppEnv\nfrom pathlib import Path\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\n\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\nBASE_DIR = path.resolve().parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # let's be sure to install our own application!\n \"registrar\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"static\"\n\n# TODO: decide on template engine and document in ADR\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"registrar@get.gov\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = \"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"root@get.gov\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: 
Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# No file logger is configured, because containerized apps\n# do not log to the file system.\n# TODO: Configure better logging options\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": env_log_level,\n },\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": \"INFO\",\n },\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": \"INFO\",\n },\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# TODO: FAC example for login.gov\n# SIMPLE_JWT = {\n# \"ALGORITHM\": \"RS256\",\n# \"AUDIENCE\": None,\n# \"ISSUER\": \"https://idp.int.identitysandbox.gov/\",\n# \"JWK_URL\": \"https://idp.int.identitysandbox.gov/api/openid_connect/certs\",\n# \"LEEWAY\": 0,\n# \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.UntypedToken\",),\n# \"USER_ID_CLAIM\": \"sub\",\n# }\n# TOKEN_AUTH = {\"TOKEN_TTL\": 3600}\n\n# endregion\n# region: Rest Framework/API------------------------------------------------###\n\n# Enable CORS if api is served at subdomain\n# https://github.com/adamchainz/django-cors-headers\n# TODO: FAC example for REST framework\n# API_VERSION = \"0\"\n# REST_FRAMEWORK = {\n# \"DEFAULT_AUTHENTICATION_CLASSES\": [\n# \"rest_framework.authentication.BasicAuthentication\",\n# \"users.auth.ExpiringTokenAuthentication\",\n# ],\n# \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n# \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n# \"PAGE_SIZE\": 10,\n# \"TEST_REQUEST_RENDERER_CLASSES\": [\n# \"rest_framework.renderers.MultiPartRenderer\",\n# \"rest_framework.renderers.JSONRenderer\",\n# \"rest_framework.renderers.TemplateHTMLRenderer\",\n# \"rest_framework.renderers.BrowsableAPIRenderer\",\n# ],\n# \"TEST_REQUEST_DEFAULT_FORMAT\": \"api\",\n# }\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python 
import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-unstable.app.cloud.gov\",\n \"get.gov\",\n]\n\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Prevents session cookie from being sent if the user\n# is coming to our site from an external page.\nSESSION_COOKIE_SAMESITE = \"Strict\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop to connect\n ALLOWED_HOSTS += (\"localhost\",)\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n NPLUSONE_RAISE = True\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n", "path": "src/registrar/config/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom cfenv import AppEnv\nfrom pathlib import Path\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\n\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\nBASE_DIR = path.resolve().parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # let's be sure to install our own application!\n \"registrar\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"static\"\n\n# TODO: decide on template engine and document in ADR\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"registrar@get.gov\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = \"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"root@get.gov\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: 
Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# No file logger is configured, because containerized apps\n# do not log to the file system.\n# TODO: Configure better logging options\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": env_log_level,\n },\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": \"INFO\",\n },\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"level\": \"INFO\",\n },\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# TODO: FAC example for login.gov\n# SIMPLE_JWT = {\n# \"ALGORITHM\": \"RS256\",\n# \"AUDIENCE\": None,\n# \"ISSUER\": \"https://idp.int.identitysandbox.gov/\",\n# \"JWK_URL\": \"https://idp.int.identitysandbox.gov/api/openid_connect/certs\",\n# \"LEEWAY\": 0,\n# \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.UntypedToken\",),\n# \"USER_ID_CLAIM\": \"sub\",\n# }\n# TOKEN_AUTH = {\"TOKEN_TTL\": 3600}\n\n# endregion\n# region: Rest Framework/API------------------------------------------------###\n\n# Enable CORS if api is served at subdomain\n# https://github.com/adamchainz/django-cors-headers\n# TODO: FAC example for REST framework\n# API_VERSION = \"0\"\n# REST_FRAMEWORK = {\n# \"DEFAULT_AUTHENTICATION_CLASSES\": [\n# \"rest_framework.authentication.BasicAuthentication\",\n# \"users.auth.ExpiringTokenAuthentication\",\n# ],\n# \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n# \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n# \"PAGE_SIZE\": 10,\n# \"TEST_REQUEST_RENDERER_CLASSES\": [\n# \"rest_framework.renderers.MultiPartRenderer\",\n# \"rest_framework.renderers.JSONRenderer\",\n# \"rest_framework.renderers.TemplateHTMLRenderer\",\n# \"rest_framework.renderers.BrowsableAPIRenderer\",\n# ],\n# \"TEST_REQUEST_DEFAULT_FORMAT\": \"api\",\n# }\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python 
import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-unstable.app.cloud.gov\",\n \"getgov-staging.app.cloud.gov\",\n \"get.gov\",\n]\n\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Prevents session cookie from being sent if the user\n# is coming to our site from an external page.\nSESSION_COOKIE_SAMESITE = \"Strict\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop to connect\n ALLOWED_HOSTS += (\"localhost\",)\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n NPLUSONE_RAISE = True\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n", "path": "src/registrar/config/settings.py" } ]
diff --git a/.github/ISSUE_TEMPLATE/developer-onboarding.md b/.github/ISSUE_TEMPLATE/developer-onboarding.md index 5dbcdb822..82c8c2c63 100644 --- a/.github/ISSUE_TEMPLATE/developer-onboarding.md +++ b/.github/ISSUE_TEMPLATE/developer-onboarding.md @@ -35,10 +35,10 @@ cf login -a api.fr.cloud.gov --sso - [ ] Setup [commit signing in Github](#setting-up-commit-signing) and with git locally. ### Steps for the onboarder -- [ ] Add the onboardee to cloud.gov org and relevant spaces as a SpaceDeveloper +- [ ] Add the onboardee to cloud.gov org (cisa-getgov-prototyping) and relevant spaces (unstable) as a SpaceDeveloper ```bash -cf set-space-role <cloud.account@email.gov> sandbox-gsa dotgov-poc SpaceDeveloper +cf set-space-role <cloud.account@email.gov> cisa-getgov-prototyping unstable SpaceDeveloper ``` - [ ] Add the onboardee to our login.gov sandbox team (`.gov registrar poc`) via the [dashboard](https://dashboard.int.identitysandbox.gov/) diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 1f5620cd3..5c1a68ac6 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -3,8 +3,7 @@ name: Build and deploy # This workflow runs on pushes to main (typically, # a merged pull request) and on pushes of tagged commits. -# Pushes to main will deploy to Unstable; tagged commits -# will deploy to Staging +# Pushes to main will deploy to Staging on: push: @@ -17,9 +16,9 @@ on: workflow_dispatch: jobs: - deploy-unstable: + deploy-staging: # if this job runs on a branch, we deduce that code - # has been pushed to main and should be deployed to unstable + # has been pushed to main and should be deployed to staging if: ${{ github.ref_type == 'branch' }} runs-on: ubuntu-latest steps: @@ -30,13 +29,8 @@ jobs: env: DEPLOY_NOW: thanks with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: sandbox-gsa - cf_space: dotgov-poc - push_arguments: "-f ops/manifests/manifest-unstable.yaml" - - # deploy-staging: - # # if this job runs on a tag, we deduce that code - # # has been tagged for release and should be deployed to staging - # if: ${{ github.ref_type == 'tag' }} + cf_username: ${{ secrets.CF_STAGING_USERNAME }} + cf_password: ${{ secrets.CF_STAGING_PASSWORD }} + cf_org: cisa-getgov-prototyping + cf_space: staging + push_arguments: "-f ops/manifests/manifest-staging.yaml" diff --git a/.github/workflows/migrate.yaml b/.github/workflows/migrate.yaml index 004352e48..a58bd30d4 100644 --- a/.github/workflows/migrate.yaml +++ b/.github/workflows/migrate.yaml @@ -3,7 +3,7 @@ name: Run Migrations # This workflow can be run from the CLI # gh workflow run migrate.yaml -f environment=sandbox # OR -# cf run-task getgov-unstable --wait \ +# cf run-task getgov-staging --wait \ # --command 'python manage.py migrate' --name migrate on: @@ -13,22 +13,19 @@ on: type: choice description: Where should we run migrations options: - - unstable - staging jobs: - migrate-unstable: - if: ${{ github.event.inputs.environment == 'unstable' }} + migrate-staging: + if: ${{ github.event.inputs.environment == 'staging' }} runs-on: ubuntu-latest steps: - - name: Run Django migrations for unstable + - name: Run Django migrations for staging uses: 18f/cg-deploy-action@main with: cf_username: ${{ secrets.CF_USERNAME }} cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: sandbox-gsa - cf_space: dotgov-poc - full_command: "cf run-task getgov-unstable --wait --command 'python manage.py migrate' --name migrate" + cf_org: cisa-getgov-prototyping + cf_space: 
staging + full_command: "cf run-task getgov-staging --wait --command 'python manage.py migrate' --name migrate" - # migrate: - # if: ${{ github.event.inputs.environment == 'staging' }} \ No newline at end of file diff --git a/docs/operations/README.md b/docs/operations/README.md index 3e3d32481..a45d4dd8a 100644 --- a/docs/operations/README.md +++ b/docs/operations/README.md @@ -28,8 +28,18 @@ cf target -o <ORG> -s <SPACE> ## Database -In sandbox, created with `cf create-service aws-rds micro-psql getgov-database`. +In sandbox, created with `cf create-service aws-rds micro-psql getgov-ENV-database`. Binding the database in `manifest-<ENVIRONMENT>.json` automatically inserts the connection string into the environment as `DATABASE_URL`. -[Cloud.gov RDS documentation](https://cloud.gov/docs/services/relational-database/). \ No newline at end of file +[Cloud.gov RDS documentation](https://cloud.gov/docs/services/relational-database/). + +# Deploy + +We have two environments: `unstable` and `staging`. Developers can deploy locally to unstable whenever they want. However, only our CD service can deploy to `staging`, and it does so on every commit to `main`. This is to ensure that we have a "golden" environment to point to, and can still test things out in an unstable space. To deploy locally to `unstable`: + +```bash +cf target -o cisa-getgov-prototyping -s unstable +cf push getgov-unstable -f ops/manifests/manifest-unstable.yaml +cf run-task getgov-unstable --command 'python manage.py migrate' --name migrate +``` diff --git a/docs/operations/runbooks/rotate_application_secrets.md b/docs/operations/runbooks/rotate_application_secrets.md index a82453d44..0c2045ebe 100644 --- a/docs/operations/runbooks/rotate_application_secrets.md +++ b/docs/operations/runbooks/rotate_application_secrets.md @@ -27,7 +27,7 @@ To rotate secrets, create a new `credentials-<ENVIRONMENT>.json` file, upload it Example: ```bash -cf uups getgov-credentials -p credentials-unstable.json +cf cups getgov-credentials -p credentials-unstable.json cf restage getgov-unstable --strategy rolling ``` diff --git a/ops/manifests/manifest-staging.yaml b/ops/manifests/manifest-staging.yaml new file mode 100644 index 000000000..e474f8ba2 --- /dev/null +++ b/ops/manifests/manifest-staging.yaml @@ -0,0 +1,23 @@ +--- +applications: +- name: getgov-staging + buildpacks: + - python_buildpack + path: ../../src + instances: 1 + memory: 512M + stack: cflinuxfs3 + timeout: 180 + command: gunicorn registrar.config.wsgi -t 60 + health-check-type: http + health-check-http-endpoint: /health + env: + # Send stdout and stderr straight to the terminal without buffering + PYTHONUNBUFFERED: yup + # Tell Django where to find its configuration + DJANGO_SETTINGS_MODULE: registrar.config.settings + routes: + - route: getgov-staging.app.cloud.gov + services: + - getgov-credentials + - getgov-staging-database \ No newline at end of file diff --git a/ops/manifests/manifest-unstable.yaml b/ops/manifests/manifest-unstable.yaml index 976bd4425..037af2288 100644 --- a/ops/manifests/manifest-unstable.yaml +++ b/ops/manifests/manifest-unstable.yaml @@ -20,4 +20,4 @@ applications: - route: getgov-unstable.app.cloud.gov services: - getgov-credentials - - getgov-database \ No newline at end of file + - getgov-unstable-database \ No newline at end of file diff --git a/ops/scripts/rotate_cloud_secrets.sh b/ops/scripts/rotate_cloud_secrets.sh index 68f5371dc..ff1d05e27 100755 --- a/ops/scripts/rotate_cloud_secrets.sh +++ b/ops/scripts/rotate_cloud_secrets.sh @@ -1,11 +1,16 
@@ # NOTE: This script does not work with cf v8. We recommend using cf v7 for all cloud.gov commands. if [ ! $(command -v gh) ] || [ ! $(command -v jq) ] || [ ! $(command -v cf) ]; then - echo "jq, cf, and gh packages must be installed. Please install via your preferred manager." - exit 1 + echo "jq, cf, and gh packages must be installed. Please install via your preferred manager." + exit 1 +fi + +if [ -z "$1" ]; then + echo 'Please specify a space to target (i.e. unstable, staging)' >&2 + exit 1 fi -cf spaces -read -p "Are you logged in to the dotgov-poc CF space above? (y/n) " -n 1 -r +cf target -o cisa-getgov-prototyping -s $1 +read -p "Are you logged in to the cisa-getgov-prototyping CF org above and targeting the correct space? (y/n) " -n 1 -r echo if [[ ! $REPLY =~ ^[Yy]$ ]] then @@ -13,7 +18,7 @@ then fi gh auth status -read -p "Are you logged into a Github account with access to cisagov/dotgov? (y/n) " -n 1 -r +read -p "Are you logged into a Github account with access to cisagov/getgov? (y/n) " -n 1 -r echo if [[ ! $REPLY =~ ^[Yy]$ ]] then @@ -21,6 +26,7 @@ then fi echo "Great, removing and replacing Github CD account..." +cf target -s $1 cf delete-service-key github-cd-account github-cd-key cf create-service-key github-cd-account github-cd-key cf service-key github-cd-account github-cd-key @@ -31,8 +37,9 @@ then exit 1 fi +upcase_space=$(printf "%s" "$1" | tr '[:lower:]' '[:upper:]') cf service-key github-cd-account github-cd-key | sed 1,2d | jq -r '[.username, .password]|@tsv' | while read -r username password; do - gh secret --repo cisagov/dotgov set CF_USERNAME --body $username - gh secret --repo cisagov/dotgov set CF_PASSWORD --body $password + gh secret --repo cisagov/getgov set CF_${upcase_space}_USERNAME --body $username + gh secret --repo cisagov/getgov set CF_${upcase_space}_PASSWORD --body $password done diff --git a/src/registrar/config/settings.py b/src/registrar/config/settings.py index b765688bb..04d4664ba 100644 --- a/src/registrar/config/settings.py +++ b/src/registrar/config/settings.py @@ -388,6 +388,7 @@ # web server configurations. ALLOWED_HOSTS = [ "getgov-unstable.app.cloud.gov", + "getgov-staging.app.cloud.gov", "get.gov", ]
NVIDIA__NVFlare-314
[ { "content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport socketserver\nimport ssl\nimport threading\n\nfrom nvflare.fuel.hci.conn import Connection, receive_til_end\nfrom nvflare.fuel.hci.proto import validate_proto\nfrom nvflare.fuel.hci.security import get_certificate_common_name\n\nfrom .reg import ServerCommandRegister\n\nMAX_ADMIN_CONNECTIONS = 128\n\n\nclass _MsgHandler(socketserver.BaseRequestHandler):\n \"\"\"Message handler.\n\n Used by the AdminServer to receive admin commands, validate, then process and do command through the\n ServerCommandRegister.\n \"\"\"\n\n connections = 0\n lock = threading.Lock()\n\n def __init__(self, request, client_address, server):\n # handle() is called in the constructor so logger must be initialized first\n self.logger = logging.getLogger(self.__class__.__name__)\n super().__init__(request, client_address, server)\n\n def handle(self):\n try:\n with _MsgHandler.lock:\n _MsgHandler.connections += 1\n\n self.logger.debug(f\"Concurrent admin connections: {_MsgHandler.connections}\")\n if _MsgHandler.connections > MAX_ADMIN_CONNECTIONS:\n raise ConnectionRefusedError(f\"Admin connection limit ({MAX_ADMIN_CONNECTIONS}) reached\")\n\n conn = Connection(self.request, self.server)\n\n if self.server.use_ssl:\n cn = get_certificate_common_name(self.request.getpeercert())\n conn.set_prop(\"_client_cn\", cn)\n valid = self.server.validate_client_cn(cn)\n else:\n valid = True\n\n if not valid:\n conn.append_error(\"authentication error\")\n else:\n req = receive_til_end(self.request).strip()\n command = None\n req_json = validate_proto(req)\n conn.request = req_json\n if req_json is not None:\n data = req_json[\"data\"]\n for item in data:\n it = item[\"type\"]\n if it == \"command\":\n command = item[\"data\"]\n break\n\n if command is None:\n conn.append_error(\"protocol violation\")\n else:\n self.server.cmd_reg.process_command(conn, command)\n else:\n # not json encoded\n conn.append_error(\"protocol violation\")\n\n if not conn.ended:\n conn.close()\n except BaseException as exc:\n self.logger.error(f\"Admin connection terminated due to exception: {str(exc)}\")\n if self.logger.getEffectiveLevel() <= logging.DEBUG:\n self.logger.exception(\"Admin connection error\")\n finally:\n with _MsgHandler.lock:\n _MsgHandler.connections -= 1\n\n\ndef initialize_hci():\n socketserver.TCPServer.allow_reuse_address = True\n\n\nclass AdminServer(socketserver.ThreadingTCPServer):\n # faster re-binding\n allow_reuse_address = True\n\n # make this bigger than five\n request_queue_size = 10\n\n # kick connections when we exit\n daemon_threads = True\n\n def __init__(\n self,\n cmd_reg: ServerCommandRegister,\n host,\n port,\n ca_cert=None,\n server_cert=None,\n server_key=None,\n accepted_client_cns=None,\n ):\n \"\"\"Base class of FedAdminServer to create a server that can receive commands.\n\n Args:\n cmd_reg: CommandRegister\n host: the IP address of the admin server\n 
port: port number of admin server\n ca_cert: the root CA's cert file name\n server_cert: server's cert, signed by the CA\n server_key: server's private key file\n accepted_client_cns: list of accepted Common Names from client, if specified\n \"\"\"\n socketserver.TCPServer.__init__(self, (host, port), _MsgHandler, False)\n\n self.use_ssl = False\n if ca_cert and server_cert:\n if accepted_client_cns:\n assert isinstance(accepted_client_cns, list), \"accepted_client_cns must be list but got {}.\".format(\n accepted_client_cns\n )\n\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.verify_mode = ssl.CERT_REQUIRED\n ctx.load_verify_locations(ca_cert)\n ctx.load_cert_chain(certfile=server_cert, keyfile=server_key)\n\n # replace the socket with an SSL version of itself\n self.socket = ctx.wrap_socket(self.socket, server_side=True)\n self.use_ssl = True\n\n # bind the socket and start the server\n self.server_bind()\n self.server_activate()\n\n self._thread = None\n self.host = host\n self.port = port\n self.accepted_client_cns = accepted_client_cns\n self.cmd_reg = cmd_reg\n cmd_reg.finalize()\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def validate_client_cn(self, cn):\n if self.accepted_client_cns:\n return cn in self.accepted_client_cns\n else:\n return True\n\n def stop(self):\n self.shutdown()\n self.cmd_reg.close()\n\n if self._thread.is_alive():\n self._thread.join()\n\n self.logger.info(f\"Admin Server {self.host} on Port {self.port} shutdown!\")\n\n def set_command_registry(self, cmd_reg: ServerCommandRegister):\n if cmd_reg:\n cmd_reg.finalize()\n\n if self.cmd_reg:\n self.cmd_reg.close()\n\n self.cmd_reg = cmd_reg\n\n def start(self):\n if self._thread is None:\n self._thread = threading.Thread(target=self._run, args=())\n\n if not self._thread.is_alive():\n self._thread.start()\n\n def _run(self):\n self.logger.info(f\"Starting Admin Server {self.host} on Port {self.port}\")\n self.serve_forever()\n", "path": "nvflare/fuel/hci/server/hci.py" } ]
[ { "content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport socketserver\nimport ssl\nimport threading\n\nfrom nvflare.fuel.hci.conn import Connection, receive_til_end\nfrom nvflare.fuel.hci.proto import validate_proto\nfrom nvflare.fuel.hci.security import get_certificate_common_name\n\nfrom .reg import ServerCommandRegister\n\nMAX_ADMIN_CONNECTIONS = 16\n\n\nclass _MsgHandler(socketserver.BaseRequestHandler):\n \"\"\"Message handler.\n\n Used by the AdminServer to receive admin commands, validate, then process and do command through the\n ServerCommandRegister.\n \"\"\"\n\n connections = 0\n lock = threading.Lock()\n\n def __init__(self, request, client_address, server):\n # handle() is called in the constructor so logger must be initialized first\n self.logger = logging.getLogger(self.__class__.__name__)\n super().__init__(request, client_address, server)\n\n def handle(self):\n try:\n with _MsgHandler.lock:\n _MsgHandler.connections += 1\n\n self.logger.debug(f\"Concurrent admin connections: {_MsgHandler.connections}\")\n if _MsgHandler.connections > MAX_ADMIN_CONNECTIONS:\n raise ConnectionRefusedError(f\"Admin connection limit ({MAX_ADMIN_CONNECTIONS}) reached\")\n\n conn = Connection(self.request, self.server)\n\n if self.server.use_ssl:\n cn = get_certificate_common_name(self.request.getpeercert())\n conn.set_prop(\"_client_cn\", cn)\n valid = self.server.validate_client_cn(cn)\n else:\n valid = True\n\n if not valid:\n conn.append_error(\"authentication error\")\n else:\n req = receive_til_end(self.request).strip()\n command = None\n req_json = validate_proto(req)\n conn.request = req_json\n if req_json is not None:\n data = req_json[\"data\"]\n for item in data:\n it = item[\"type\"]\n if it == \"command\":\n command = item[\"data\"]\n break\n\n if command is None:\n conn.append_error(\"protocol violation\")\n else:\n self.server.cmd_reg.process_command(conn, command)\n else:\n # not json encoded\n conn.append_error(\"protocol violation\")\n\n if not conn.ended:\n conn.close()\n except BaseException as exc:\n self.logger.error(f\"Admin connection terminated due to exception: {str(exc)}\")\n if self.logger.getEffectiveLevel() <= logging.DEBUG:\n self.logger.exception(\"Admin connection error\")\n finally:\n with _MsgHandler.lock:\n _MsgHandler.connections -= 1\n\n\ndef initialize_hci():\n socketserver.TCPServer.allow_reuse_address = True\n\n\nclass AdminServer(socketserver.ThreadingTCPServer):\n # faster re-binding\n allow_reuse_address = True\n\n # make this bigger than five\n request_queue_size = 10\n\n # kick connections when we exit\n daemon_threads = True\n\n def __init__(\n self,\n cmd_reg: ServerCommandRegister,\n host,\n port,\n ca_cert=None,\n server_cert=None,\n server_key=None,\n accepted_client_cns=None,\n ):\n \"\"\"Base class of FedAdminServer to create a server that can receive commands.\n\n Args:\n cmd_reg: CommandRegister\n host: the IP address of the admin server\n 
port: port number of admin server\n ca_cert: the root CA's cert file name\n server_cert: server's cert, signed by the CA\n server_key: server's private key file\n accepted_client_cns: list of accepted Common Names from client, if specified\n \"\"\"\n socketserver.TCPServer.__init__(self, (host, port), _MsgHandler, False)\n\n self.use_ssl = False\n if ca_cert and server_cert:\n if accepted_client_cns:\n assert isinstance(accepted_client_cns, list), \"accepted_client_cns must be list but got {}.\".format(\n accepted_client_cns\n )\n\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.verify_mode = ssl.CERT_REQUIRED\n ctx.load_verify_locations(ca_cert)\n ctx.load_cert_chain(certfile=server_cert, keyfile=server_key)\n\n # replace the socket with an SSL version of itself\n self.socket = ctx.wrap_socket(self.socket, server_side=True)\n self.use_ssl = True\n\n # bind the socket and start the server\n self.server_bind()\n self.server_activate()\n\n self._thread = None\n self.host = host\n self.port = port\n self.accepted_client_cns = accepted_client_cns\n self.cmd_reg = cmd_reg\n cmd_reg.finalize()\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def validate_client_cn(self, cn):\n if self.accepted_client_cns:\n return cn in self.accepted_client_cns\n else:\n return True\n\n def stop(self):\n self.shutdown()\n self.cmd_reg.close()\n\n if self._thread.is_alive():\n self._thread.join()\n\n self.logger.info(f\"Admin Server {self.host} on Port {self.port} shutdown!\")\n\n def set_command_registry(self, cmd_reg: ServerCommandRegister):\n if cmd_reg:\n cmd_reg.finalize()\n\n if self.cmd_reg:\n self.cmd_reg.close()\n\n self.cmd_reg = cmd_reg\n\n def start(self):\n if self._thread is None:\n self._thread = threading.Thread(target=self._run, args=())\n\n if not self._thread.is_alive():\n self._thread.start()\n\n def _run(self):\n self.logger.info(f\"Starting Admin Server {self.host} on Port {self.port}\")\n self.serve_forever()\n", "path": "nvflare/fuel/hci/server/hci.py" } ]
diff --git a/nvflare/fuel/hci/server/hci.py b/nvflare/fuel/hci/server/hci.py index fcc27834a4..96e0d0cc73 100644 --- a/nvflare/fuel/hci/server/hci.py +++ b/nvflare/fuel/hci/server/hci.py @@ -23,7 +23,7 @@ from .reg import ServerCommandRegister -MAX_ADMIN_CONNECTIONS = 128 +MAX_ADMIN_CONNECTIONS = 16 class _MsgHandler(socketserver.BaseRequestHandler):
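The only functional change in this entry is the lower connection cap. Below is a minimal sketch of the counting pattern `_MsgHandler.handle` uses (increment under a lock, refuse above the cap, decrement in `finally`); the function name and module-level state are illustrative, not the NVFlare API.

```python
import threading

MAX_ADMIN_CONNECTIONS = 16  # the cap this patch lowers from 128

_connections = 0
_lock = threading.Lock()


def handle_one_admin_request(work):
    """Count the connection, refuse above the cap, and always undo the count."""
    global _connections
    try:
        with _lock:
            _connections += 1
        if _connections > MAX_ADMIN_CONNECTIONS:
            raise ConnectionRefusedError(
                f"Admin connection limit ({MAX_ADMIN_CONNECTIONS}) reached"
            )
        work()  # stand-in for parsing and dispatching the admin command
    finally:
        with _lock:
            _connections -= 1
```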
Netflix__lemur-924
[ { "content": "from __future__ import unicode_literals # at top of module\n\nimport os\nimport sys\nimport base64\nimport requests\nimport json\n\nfrom gunicorn.config import make_settings\n\nfrom cryptography.fernet import Fernet\n\nfrom flask import current_app\nfrom flask_script import Manager, Command, Option, prompt_pass\nfrom flask_migrate import Migrate, MigrateCommand, stamp\nfrom flask_script.commands import ShowUrls, Clean, Server\n\nfrom lemur.sources.cli import manager as source_manager\nfrom lemur.policies.cli import manager as policy_manager\nfrom lemur.reporting.cli import manager as report_manager\nfrom lemur.endpoints.cli import manager as endpoint_manager\nfrom lemur.certificates.cli import manager as certificate_manager\nfrom lemur.notifications.cli import manager as notification_manager\n\nfrom lemur import database\nfrom lemur.users import service as user_service\nfrom lemur.roles import service as role_service\nfrom lemur.policies import service as policy_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.common.utils import validate_conf\n\nfrom lemur import create_app\n\n# Needed to be imported so that SQLAlchemy create_all can find our models\nfrom lemur.users.models import User # noqa\nfrom lemur.roles.models import Role # noqa\nfrom lemur.authorities.models import Authority # noqa\nfrom lemur.certificates.models import Certificate # noqa\nfrom lemur.destinations.models import Destination # noqa\nfrom lemur.domains.models import Domain # noqa\nfrom lemur.notifications.models import Notification # noqa\nfrom lemur.sources.models import Source # noqa\nfrom lemur.logs.models import Log # noqa\nfrom lemur.endpoints.models import Endpoint # noqa\nfrom lemur.policies.models import RotationPolicy # noqa\n\n\nmanager = Manager(create_app)\nmanager.add_option('-c', '--config', dest='config')\n\nmigrate = Migrate(create_app)\n\nREQUIRED_VARIABLES = [\n 'LEMUR_SECURITY_TEAM_EMAIL',\n 'LEMUR_DEFAULT_ORGANIZATIONAL_UNIT',\n 'LEMUR_DEFAULT_ORGANIZATION',\n 'LEMUR_DEFAULT_LOCATION',\n 'LEMUR_DEFAULT_COUNTRY',\n 'LEMUR_DEFAULT_STATE',\n 'SQLALCHEMY_DATABASE_URI'\n]\n\nKEY_LENGTH = 40\nDEFAULT_CONFIG_PATH = '~/.lemur/lemur.conf.py'\nDEFAULT_SETTINGS = 'lemur.conf.server'\nSETTINGS_ENVVAR = 'LEMUR_CONF'\n\nCONFIG_TEMPLATE = \"\"\"\n# This is just Python which means you can inherit and tweak settings\n\nimport os\n_basedir = os.path.abspath(os.path.dirname(__file__))\n\nTHREADS_PER_PAGE = 8\n\n# General\n\n# These will need to be set to `True` if you are developing locally\nCORS = False\ndebug = False\n\n# this is the secret key used by flask session management\nSECRET_KEY = '{flask_secret_key}'\n\n# You should consider storing these separately from your config\nLEMUR_TOKEN_SECRET = '{secret_token}'\nLEMUR_ENCRYPTION_KEYS = '{encryption_key}'\n\n# List of domain regular expressions that non-admin users can issue\nLEMUR_WHITELISTED_DOMAINS = []\n\n# Mail Server\n\nLEMUR_EMAIL = ''\nLEMUR_SECURITY_TEAM_EMAIL = []\n\n# Certificate Defaults\n\nLEMUR_DEFAULT_COUNTRY = ''\nLEMUR_DEFAULT_STATE = ''\nLEMUR_DEFAULT_LOCATION = ''\nLEMUR_DEFAULT_ORGANIZATION = ''\nLEMUR_DEFAULT_ORGANIZATIONAL_UNIT = ''\n\n# Authentication Providers\nACTIVE_PROVIDERS = []\n\n# Logging\n\nLOG_LEVEL = \"DEBUG\"\nLOG_FILE = \"lemur.log\"\n\n\n# Database\n\n# modify this if you are not using a local database\nSQLALCHEMY_DATABASE_URI = 'postgresql://lemur:lemur@localhost:5432/lemur'\n\n# AWS\n\n#LEMUR_INSTANCE_PROFILE = 'Lemur'\n\n# Issuers\n\n# These will be dependent on which 
3rd party that Lemur is\n# configured to use.\n\n# VERISIGN_URL = ''\n# VERISIGN_PEM_PATH = ''\n# VERISIGN_FIRST_NAME = ''\n# VERISIGN_LAST_NAME = ''\n# VERSIGN_EMAIL = ''\n\"\"\"\n\n\n@MigrateCommand.command\ndef create():\n database.db.create_all()\n stamp(revision='head')\n\n\n@MigrateCommand.command\ndef drop_all():\n database.db.drop_all()\n\n\n@manager.shell\ndef make_shell_context():\n \"\"\"\n Creates a python REPL with several default imports\n in the context of the current_app\n\n :return:\n \"\"\"\n return dict(current_app=current_app)\n\n\ndef generate_settings():\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n output = CONFIG_TEMPLATE.format(\n # we use Fernet.generate_key to make sure that the key length is\n # compatible with Fernet\n encryption_key=Fernet.generate_key().decode('utf-8'),\n secret_token=base64.b64encode(os.urandom(KEY_LENGTH)).decode('utf-8'),\n flask_secret_key=base64.b64encode(os.urandom(KEY_LENGTH)).decode('utf-8'),\n )\n\n return output\n\n\nclass InitializeApp(Command):\n \"\"\"\n This command will bootstrap our database with any destinations as\n specified by our config.\n\n Additionally a Lemur user will be created as a default user\n and be used when certificates are discovered by Lemur.\n \"\"\"\n option_list = (\n Option('-p', '--password', dest='password'),\n )\n\n def run(self, password):\n create()\n user = user_service.get_by_username(\"lemur\")\n\n admin_role = role_service.get_by_name('admin')\n\n if admin_role:\n sys.stdout.write(\"[-] Admin role already created, skipping...!\\n\")\n else:\n # we create an admin role\n admin_role = role_service.create('admin', description='This is the Lemur administrator role.')\n sys.stdout.write(\"[+] Created 'admin' role\\n\")\n\n operator_role = role_service.get_by_name('operator')\n\n if operator_role:\n sys.stdout.write(\"[-] Operator role already created, skipping...!\\n\")\n else:\n # we create an admin role\n operator_role = role_service.create('operator', description='This is the Lemur operator role.')\n sys.stdout.write(\"[+] Created 'operator' role\\n\")\n\n read_only_role = role_service.get_by_name('read-only')\n\n if read_only_role:\n sys.stdout.write(\"[-] Operator role already created, skipping...!\\n\")\n else:\n # we create an admin role\n read_only_role = role_service.create('read-only', description='This is the Lemur read only role.')\n sys.stdout.write(\"[+] Created 'read-only' role\\n\")\n\n if not user:\n if not password:\n sys.stdout.write(\"We need to set Lemur's password to continue!\\n\")\n password = prompt_pass(\"Password\")\n password1 = prompt_pass(\"Confirm Password\")\n\n if password != password1:\n sys.stderr.write(\"[!] Passwords do not match!\\n\")\n sys.exit(1)\n\n user_service.create(\"lemur\", password, 'lemur@nobody.com', True, None, [admin_role])\n sys.stdout.write(\"[+] Created the user 'lemur' and granted it the 'admin' role!\\n\")\n\n else:\n sys.stdout.write(\"[-] Default user has already been created, skipping...!\\n\")\n\n sys.stdout.write(\"[+] Creating expiration email notifications!\\n\")\n sys.stdout.write(\"[!] Using {0} as specified by LEMUR_SECURITY_TEAM_EMAIL for notifications\\n\".format(\"LEMUR_SECURITY_TEAM_EMAIL\"))\n\n intervals = current_app.config.get(\"LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\", [])\n sys.stdout.write(\n \"[!] 
Creating {num} notifications for {intervals} days as specified by LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\\n\".format(\n num=len(intervals),\n intervals=\",\".join([str(x) for x in intervals])\n )\n )\n\n recipients = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n notification_service.create_default_expiration_notifications(\"DEFAULT_SECURITY\", recipients=recipients)\n\n days = current_app.config.get(\"LEMUR_DEFAULT_ROTATION_INTERVAL\", 30)\n sys.stdout.write(\"[+] Creating default certificate rotation policy of {days} days before issuance.\\n\".format(\n days=days\n ))\n\n policy_service.create(days=days)\n sys.stdout.write(\"[/] Done!\\n\")\n\n\nclass CreateUser(Command):\n \"\"\"\n This command allows for the creation of a new user within Lemur.\n \"\"\"\n option_list = (\n Option('-u', '--username', dest='username', required=True),\n Option('-e', '--email', dest='email', required=True),\n Option('-a', '--active', dest='active', default=True),\n Option('-r', '--roles', dest='roles', action='append', default=[])\n )\n\n def run(self, username, email, active, roles):\n role_objs = []\n for r in roles:\n role_obj = role_service.get_by_name(r)\n if role_obj:\n role_objs.append(role_obj)\n else:\n sys.stderr.write(\"[!] Cannot find role {0}\\n\".format(r))\n sys.exit(1)\n\n password1 = prompt_pass(\"Password\")\n password2 = prompt_pass(\"Confirm Password\")\n\n if password1 != password2:\n sys.stderr.write(\"[!] Passwords do not match!\\n\")\n sys.exit(1)\n\n user_service.create(username, password1, email, active, None, role_objs)\n sys.stdout.write(\"[+] Created new user: {0}\\n\".format(username))\n\n\nclass ResetPassword(Command):\n \"\"\"\n This command allows you to reset a user's password.\n \"\"\"\n option_list = (\n Option('-u', '--username', dest='username', required=True),\n )\n\n def run(self, username):\n user = user_service.get_by_username(username)\n\n if not user:\n sys.stderr.write(\"[!] No user found for username: {0}\\n\".format(username))\n sys.exit(1)\n\n sys.stderr.write(\"[+] Resetting password for {0}\\n\".format(username))\n password1 = prompt_pass(\"Password\")\n password2 = prompt_pass(\"Confirm Password\")\n\n if password1 != password2:\n sys.stderr.write(\"[!] Passwords do not match\\n\")\n sys.exit(1)\n\n user.password = password1\n user.hash_password()\n database.commit()\n\n\nclass CreateRole(Command):\n \"\"\"\n This command allows for the creation of a new role within Lemur\n \"\"\"\n option_list = (\n Option('-n', '--name', dest='name', required=True),\n Option('-u', '--users', dest='users', default=[]),\n Option('-d', '--description', dest='description', required=True)\n )\n\n def run(self, name, users, description):\n user_objs = []\n for u in users:\n user_obj = user_service.get_by_username(u)\n if user_obj:\n user_objs.append(user_obj)\n else:\n sys.stderr.write(\"[!] 
Cannot find user {0}\".format(u))\n sys.exit(1)\n role_service.create(name, description=description, users=users)\n sys.stdout.write(\"[+] Created new role: {0}\".format(name))\n\n\nclass LemurServer(Command):\n \"\"\"\n This is the main Lemur server, it runs the flask app with gunicorn and\n uses any configuration options passed to it.\n\n\n You can pass all standard gunicorn flags to this command as if you were\n running gunicorn itself.\n\n For example:\n\n lemur start -w 4 -b 127.0.0.0:8002\n\n Will start gunicorn with 4 workers bound to 127.0.0.0:8002\n \"\"\"\n description = 'Run the app within Gunicorn'\n\n def get_options(self):\n settings = make_settings()\n options = []\n for setting, klass in settings.items():\n if klass.cli:\n if klass.action:\n if klass.action == 'store_const':\n options.append(Option(*klass.cli, const=klass.const, action=klass.action))\n else:\n options.append(Option(*klass.cli, action=klass.action))\n else:\n options.append(Option(*klass.cli))\n\n return options\n\n def run(self, *args, **kwargs):\n from gunicorn.app.wsgiapp import WSGIApplication\n\n app = WSGIApplication()\n\n # run startup tasks on a app like object\n validate_conf(current_app, REQUIRED_VARIABLES)\n\n app.app_uri = 'lemur:create_app(config=\"{0}\")'.format(current_app.config.get('CONFIG_PATH'))\n\n return app.run()\n\n\n@manager.command\ndef create_config(config_path=None):\n \"\"\"\n Creates a new configuration file if one does not already exist\n \"\"\"\n if not config_path:\n config_path = DEFAULT_CONFIG_PATH\n\n config_path = os.path.expanduser(config_path)\n dir = os.path.dirname(config_path)\n\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n config = generate_settings()\n with open(config_path, 'w') as f:\n f.write(config)\n\n sys.stdout.write(\"[+] Created a new configuration file {0}\\n\".format(config_path))\n\n\n@manager.command\ndef lock(path=None):\n \"\"\"\n Encrypts a given path. This directory can be used to store secrets needed for normal\n Lemur operation. This is especially useful for storing secrets needed for communication\n with third parties (e.g. external certificate authorities).\n\n Lemur does not assume anything about the contents of the directory and will attempt to\n encrypt all files contained within. Currently this has only been tested against plain\n text files.\n\n Path defaults ~/.lemur/keys\n\n :param: path\n \"\"\"\n if not path:\n path = os.path.expanduser('~/.lemur/keys')\n\n dest_dir = os.path.join(path, \"encrypted\")\n sys.stdout.write(\"[!] 
Generating a new key...\\n\")\n\n key = Fernet.generate_key()\n\n if not os.path.exists(dest_dir):\n sys.stdout.write(\"[+] Creating encryption directory: {0}\\n\".format(dest_dir))\n os.makedirs(dest_dir)\n\n for root, dirs, files in os.walk(os.path.join(path, 'decrypted')):\n for f in files:\n source = os.path.join(root, f)\n dest = os.path.join(dest_dir, f + \".enc\")\n with open(source, 'rb') as in_file, open(dest, 'wb') as out_file:\n f = Fernet(key)\n data = f.encrypt(in_file.read())\n out_file.write(data)\n sys.stdout.write(\"[+] Writing file: {0} Source: {1}\\n\".format(dest, source))\n\n sys.stdout.write(\"[+] Keys have been encrypted with key {0}\\n\".format(key))\n\n\n@manager.command\ndef unlock(path=None):\n \"\"\"\n Decrypts all of the files in a given directory with provided password.\n This is most commonly used during the startup sequence of Lemur\n allowing it to go from source code to something that can communicate\n with external services.\n\n Path defaults ~/.lemur/keys\n\n :param: path\n \"\"\"\n key = prompt_pass(\"[!] Please enter the encryption password\")\n\n if not path:\n path = os.path.expanduser('~/.lemur/keys')\n\n dest_dir = os.path.join(path, \"decrypted\")\n source_dir = os.path.join(path, \"encrypted\")\n\n if not os.path.exists(dest_dir):\n sys.stdout.write(\"[+] Creating decryption directory: {0}\\n\".format(dest_dir))\n os.makedirs(dest_dir)\n\n for root, dirs, files in os.walk(source_dir):\n for f in files:\n source = os.path.join(source_dir, f)\n dest = os.path.join(dest_dir, \".\".join(f.split(\".\")[:-1]))\n with open(source, 'rb') as in_file, open(dest, 'wb') as out_file:\n f = Fernet(key)\n data = f.decrypt(in_file.read())\n out_file.write(data)\n sys.stdout.write(\"[+] Writing file: {0} Source: {1}\\n\".format(dest, source))\n\n sys.stdout.write(\"[+] Keys have been unencrypted!\\n\")\n\n\n@manager.command\ndef publish_verisign_units():\n \"\"\"\n Simple function that queries verisign for API units and posts the mertics to\n Atlas API for other teams to consume.\n :return:\n \"\"\"\n from lemur.plugins import plugins\n v = plugins.get('verisign-issuer')\n units = v.get_available_units()\n\n metrics = {}\n for item in units:\n if item['@type'] in metrics.keys():\n metrics[item['@type']] += int(item['@remaining'])\n else:\n metrics.update({item['@type']: int(item['@remaining'])})\n\n for name, value in metrics.items():\n metric = [\n {\n \"timestamp\": 1321351651,\n \"type\": \"GAUGE\",\n \"name\": \"Symantec {0} Unit Count\".format(name),\n \"tags\": {},\n \"value\": value\n }\n ]\n\n requests.post('http://localhost:8078/metrics', data=json.dumps(metric))\n\n\n@manager.command\ndef publish_unapproved_verisign_certificates():\n \"\"\"\n Query the Verisign for any certificates that need to be approved.\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.extensions import metrics\n v = plugins.get('verisign-issuer')\n certs = v.get_pending_certificates()\n metrics.send('pending_certificates', 'gauge', certs)\n\n\ndef main():\n manager.add_command(\"start\", LemurServer())\n manager.add_command(\"runserver\", Server(host='127.0.0.1', threaded=True))\n manager.add_command(\"clean\", Clean())\n manager.add_command(\"show_urls\", ShowUrls())\n manager.add_command(\"db\", MigrateCommand)\n manager.add_command(\"init\", InitializeApp())\n manager.add_command(\"create_user\", CreateUser())\n manager.add_command(\"reset_password\", ResetPassword())\n manager.add_command(\"create_role\", CreateRole())\n manager.add_command(\"source\", 
source_manager)\n manager.add_command(\"certificate\", certificate_manager)\n manager.add_command(\"notify\", notification_manager)\n manager.add_command(\"endpoint\", endpoint_manager)\n manager.add_command(\"report\", report_manager)\n manager.add_command(\"policy\", policy_manager)\n manager.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lemur/manage.py" } ]
[ { "content": "from __future__ import unicode_literals # at top of module\n\nimport os\nimport sys\nimport base64\nimport requests\nimport json\n\nfrom gunicorn.config import make_settings\n\nfrom cryptography.fernet import Fernet\n\nfrom flask import current_app\nfrom flask_script import Manager, Command, Option, prompt_pass\nfrom flask_migrate import Migrate, MigrateCommand, stamp\nfrom flask_script.commands import ShowUrls, Clean, Server\n\nfrom lemur.sources.cli import manager as source_manager\nfrom lemur.policies.cli import manager as policy_manager\nfrom lemur.reporting.cli import manager as report_manager\nfrom lemur.endpoints.cli import manager as endpoint_manager\nfrom lemur.certificates.cli import manager as certificate_manager\nfrom lemur.notifications.cli import manager as notification_manager\n\nfrom lemur import database\nfrom lemur.users import service as user_service\nfrom lemur.roles import service as role_service\nfrom lemur.policies import service as policy_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.common.utils import validate_conf\n\nfrom lemur import create_app\n\n# Needed to be imported so that SQLAlchemy create_all can find our models\nfrom lemur.users.models import User # noqa\nfrom lemur.roles.models import Role # noqa\nfrom lemur.authorities.models import Authority # noqa\nfrom lemur.certificates.models import Certificate # noqa\nfrom lemur.destinations.models import Destination # noqa\nfrom lemur.domains.models import Domain # noqa\nfrom lemur.notifications.models import Notification # noqa\nfrom lemur.sources.models import Source # noqa\nfrom lemur.logs.models import Log # noqa\nfrom lemur.endpoints.models import Endpoint # noqa\nfrom lemur.policies.models import RotationPolicy # noqa\n\n\nmanager = Manager(create_app)\nmanager.add_option('-c', '--config', dest='config')\n\nmigrate = Migrate(create_app)\n\nREQUIRED_VARIABLES = [\n 'LEMUR_SECURITY_TEAM_EMAIL',\n 'LEMUR_DEFAULT_ORGANIZATIONAL_UNIT',\n 'LEMUR_DEFAULT_ORGANIZATION',\n 'LEMUR_DEFAULT_LOCATION',\n 'LEMUR_DEFAULT_COUNTRY',\n 'LEMUR_DEFAULT_STATE',\n 'SQLALCHEMY_DATABASE_URI'\n]\n\nKEY_LENGTH = 40\nDEFAULT_CONFIG_PATH = '~/.lemur/lemur.conf.py'\nDEFAULT_SETTINGS = 'lemur.conf.server'\nSETTINGS_ENVVAR = 'LEMUR_CONF'\n\nCONFIG_TEMPLATE = \"\"\"\n# This is just Python which means you can inherit and tweak settings\n\nimport os\n_basedir = os.path.abspath(os.path.dirname(__file__))\n\nTHREADS_PER_PAGE = 8\n\n# General\n\n# These will need to be set to `True` if you are developing locally\nCORS = False\ndebug = False\n\n# this is the secret key used by flask session management\nSECRET_KEY = '{flask_secret_key}'\n\n# You should consider storing these separately from your config\nLEMUR_TOKEN_SECRET = '{secret_token}'\nLEMUR_ENCRYPTION_KEYS = '{encryption_key}'\n\n# List of domain regular expressions that non-admin users can issue\nLEMUR_WHITELISTED_DOMAINS = []\n\n# Mail Server\n\nLEMUR_EMAIL = ''\nLEMUR_SECURITY_TEAM_EMAIL = []\n\n# Certificate Defaults\n\nLEMUR_DEFAULT_COUNTRY = ''\nLEMUR_DEFAULT_STATE = ''\nLEMUR_DEFAULT_LOCATION = ''\nLEMUR_DEFAULT_ORGANIZATION = ''\nLEMUR_DEFAULT_ORGANIZATIONAL_UNIT = ''\n\n# Authentication Providers\nACTIVE_PROVIDERS = []\n\n# Logging\n\nLOG_LEVEL = \"DEBUG\"\nLOG_FILE = \"lemur.log\"\n\n\n# Database\n\n# modify this if you are not using a local database\nSQLALCHEMY_DATABASE_URI = 'postgresql://lemur:lemur@localhost:5432/lemur'\n\n# AWS\n\n#LEMUR_INSTANCE_PROFILE = 'Lemur'\n\n# Issuers\n\n# These will be dependent on which 
3rd party that Lemur is\n# configured to use.\n\n# VERISIGN_URL = ''\n# VERISIGN_PEM_PATH = ''\n# VERISIGN_FIRST_NAME = ''\n# VERISIGN_LAST_NAME = ''\n# VERSIGN_EMAIL = ''\n\"\"\"\n\n\n@MigrateCommand.command\ndef create():\n database.db.create_all()\n stamp(revision='head')\n\n\n@MigrateCommand.command\ndef drop_all():\n database.db.drop_all()\n\n\n@manager.shell\ndef make_shell_context():\n \"\"\"\n Creates a python REPL with several default imports\n in the context of the current_app\n\n :return:\n \"\"\"\n return dict(current_app=current_app)\n\n\ndef generate_settings():\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n output = CONFIG_TEMPLATE.format(\n # we use Fernet.generate_key to make sure that the key length is\n # compatible with Fernet\n encryption_key=Fernet.generate_key().decode('utf-8'),\n secret_token=base64.b64encode(os.urandom(KEY_LENGTH)).decode('utf-8'),\n flask_secret_key=base64.b64encode(os.urandom(KEY_LENGTH)).decode('utf-8'),\n )\n\n return output\n\n\nclass InitializeApp(Command):\n \"\"\"\n This command will bootstrap our database with any destinations as\n specified by our config.\n\n Additionally a Lemur user will be created as a default user\n and be used when certificates are discovered by Lemur.\n \"\"\"\n option_list = (\n Option('-p', '--password', dest='password'),\n )\n\n def run(self, password):\n create()\n user = user_service.get_by_username(\"lemur\")\n\n admin_role = role_service.get_by_name('admin')\n\n if admin_role:\n sys.stdout.write(\"[-] Admin role already created, skipping...!\\n\")\n else:\n # we create an admin role\n admin_role = role_service.create('admin', description='This is the Lemur administrator role.')\n sys.stdout.write(\"[+] Created 'admin' role\\n\")\n\n operator_role = role_service.get_by_name('operator')\n\n if operator_role:\n sys.stdout.write(\"[-] Operator role already created, skipping...!\\n\")\n else:\n # we create an admin role\n operator_role = role_service.create('operator', description='This is the Lemur operator role.')\n sys.stdout.write(\"[+] Created 'operator' role\\n\")\n\n read_only_role = role_service.get_by_name('read-only')\n\n if read_only_role:\n sys.stdout.write(\"[-] Operator role already created, skipping...!\\n\")\n else:\n # we create an admin role\n read_only_role = role_service.create('read-only', description='This is the Lemur read only role.')\n sys.stdout.write(\"[+] Created 'read-only' role\\n\")\n\n if not user:\n if not password:\n sys.stdout.write(\"We need to set Lemur's password to continue!\\n\")\n password = prompt_pass(\"Password\")\n password1 = prompt_pass(\"Confirm Password\")\n\n if password != password1:\n sys.stderr.write(\"[!] Passwords do not match!\\n\")\n sys.exit(1)\n\n user_service.create(\"lemur\", password, 'lemur@nobody.com', True, None, [admin_role])\n sys.stdout.write(\"[+] Created the user 'lemur' and granted it the 'admin' role!\\n\")\n\n else:\n sys.stdout.write(\"[-] Default user has already been created, skipping...!\\n\")\n\n sys.stdout.write(\"[+] Creating expiration email notifications!\\n\")\n sys.stdout.write(\"[!] Using {0} as specified by LEMUR_SECURITY_TEAM_EMAIL for notifications\\n\".format(\"LEMUR_SECURITY_TEAM_EMAIL\"))\n\n intervals = current_app.config.get(\"LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\", [])\n sys.stdout.write(\n \"[!] 
Creating {num} notifications for {intervals} days as specified by LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\\n\".format(\n num=len(intervals),\n intervals=\",\".join([str(x) for x in intervals])\n )\n )\n\n recipients = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n notification_service.create_default_expiration_notifications(\"DEFAULT_SECURITY\", recipients=recipients)\n\n days = current_app.config.get(\"LEMUR_DEFAULT_ROTATION_INTERVAL\", 30)\n sys.stdout.write(\"[+] Creating default certificate rotation policy of {days} days before issuance.\\n\".format(\n days=days\n ))\n\n policy_service.create(days=days, name='default')\n sys.stdout.write(\"[/] Done!\\n\")\n\n\nclass CreateUser(Command):\n \"\"\"\n This command allows for the creation of a new user within Lemur.\n \"\"\"\n option_list = (\n Option('-u', '--username', dest='username', required=True),\n Option('-e', '--email', dest='email', required=True),\n Option('-a', '--active', dest='active', default=True),\n Option('-r', '--roles', dest='roles', action='append', default=[])\n )\n\n def run(self, username, email, active, roles):\n role_objs = []\n for r in roles:\n role_obj = role_service.get_by_name(r)\n if role_obj:\n role_objs.append(role_obj)\n else:\n sys.stderr.write(\"[!] Cannot find role {0}\\n\".format(r))\n sys.exit(1)\n\n password1 = prompt_pass(\"Password\")\n password2 = prompt_pass(\"Confirm Password\")\n\n if password1 != password2:\n sys.stderr.write(\"[!] Passwords do not match!\\n\")\n sys.exit(1)\n\n user_service.create(username, password1, email, active, None, role_objs)\n sys.stdout.write(\"[+] Created new user: {0}\\n\".format(username))\n\n\nclass ResetPassword(Command):\n \"\"\"\n This command allows you to reset a user's password.\n \"\"\"\n option_list = (\n Option('-u', '--username', dest='username', required=True),\n )\n\n def run(self, username):\n user = user_service.get_by_username(username)\n\n if not user:\n sys.stderr.write(\"[!] No user found for username: {0}\\n\".format(username))\n sys.exit(1)\n\n sys.stderr.write(\"[+] Resetting password for {0}\\n\".format(username))\n password1 = prompt_pass(\"Password\")\n password2 = prompt_pass(\"Confirm Password\")\n\n if password1 != password2:\n sys.stderr.write(\"[!] Passwords do not match\\n\")\n sys.exit(1)\n\n user.password = password1\n user.hash_password()\n database.commit()\n\n\nclass CreateRole(Command):\n \"\"\"\n This command allows for the creation of a new role within Lemur\n \"\"\"\n option_list = (\n Option('-n', '--name', dest='name', required=True),\n Option('-u', '--users', dest='users', default=[]),\n Option('-d', '--description', dest='description', required=True)\n )\n\n def run(self, name, users, description):\n user_objs = []\n for u in users:\n user_obj = user_service.get_by_username(u)\n if user_obj:\n user_objs.append(user_obj)\n else:\n sys.stderr.write(\"[!] 
Cannot find user {0}\".format(u))\n sys.exit(1)\n role_service.create(name, description=description, users=users)\n sys.stdout.write(\"[+] Created new role: {0}\".format(name))\n\n\nclass LemurServer(Command):\n \"\"\"\n This is the main Lemur server, it runs the flask app with gunicorn and\n uses any configuration options passed to it.\n\n\n You can pass all standard gunicorn flags to this command as if you were\n running gunicorn itself.\n\n For example:\n\n lemur start -w 4 -b 127.0.0.0:8002\n\n Will start gunicorn with 4 workers bound to 127.0.0.0:8002\n \"\"\"\n description = 'Run the app within Gunicorn'\n\n def get_options(self):\n settings = make_settings()\n options = []\n for setting, klass in settings.items():\n if klass.cli:\n if klass.action:\n if klass.action == 'store_const':\n options.append(Option(*klass.cli, const=klass.const, action=klass.action))\n else:\n options.append(Option(*klass.cli, action=klass.action))\n else:\n options.append(Option(*klass.cli))\n\n return options\n\n def run(self, *args, **kwargs):\n from gunicorn.app.wsgiapp import WSGIApplication\n\n app = WSGIApplication()\n\n # run startup tasks on a app like object\n validate_conf(current_app, REQUIRED_VARIABLES)\n\n app.app_uri = 'lemur:create_app(config=\"{0}\")'.format(current_app.config.get('CONFIG_PATH'))\n\n return app.run()\n\n\n@manager.command\ndef create_config(config_path=None):\n \"\"\"\n Creates a new configuration file if one does not already exist\n \"\"\"\n if not config_path:\n config_path = DEFAULT_CONFIG_PATH\n\n config_path = os.path.expanduser(config_path)\n dir = os.path.dirname(config_path)\n\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n config = generate_settings()\n with open(config_path, 'w') as f:\n f.write(config)\n\n sys.stdout.write(\"[+] Created a new configuration file {0}\\n\".format(config_path))\n\n\n@manager.command\ndef lock(path=None):\n \"\"\"\n Encrypts a given path. This directory can be used to store secrets needed for normal\n Lemur operation. This is especially useful for storing secrets needed for communication\n with third parties (e.g. external certificate authorities).\n\n Lemur does not assume anything about the contents of the directory and will attempt to\n encrypt all files contained within. Currently this has only been tested against plain\n text files.\n\n Path defaults ~/.lemur/keys\n\n :param: path\n \"\"\"\n if not path:\n path = os.path.expanduser('~/.lemur/keys')\n\n dest_dir = os.path.join(path, \"encrypted\")\n sys.stdout.write(\"[!] 
Generating a new key...\\n\")\n\n key = Fernet.generate_key()\n\n if not os.path.exists(dest_dir):\n sys.stdout.write(\"[+] Creating encryption directory: {0}\\n\".format(dest_dir))\n os.makedirs(dest_dir)\n\n for root, dirs, files in os.walk(os.path.join(path, 'decrypted')):\n for f in files:\n source = os.path.join(root, f)\n dest = os.path.join(dest_dir, f + \".enc\")\n with open(source, 'rb') as in_file, open(dest, 'wb') as out_file:\n f = Fernet(key)\n data = f.encrypt(in_file.read())\n out_file.write(data)\n sys.stdout.write(\"[+] Writing file: {0} Source: {1}\\n\".format(dest, source))\n\n sys.stdout.write(\"[+] Keys have been encrypted with key {0}\\n\".format(key))\n\n\n@manager.command\ndef unlock(path=None):\n \"\"\"\n Decrypts all of the files in a given directory with provided password.\n This is most commonly used during the startup sequence of Lemur\n allowing it to go from source code to something that can communicate\n with external services.\n\n Path defaults ~/.lemur/keys\n\n :param: path\n \"\"\"\n key = prompt_pass(\"[!] Please enter the encryption password\")\n\n if not path:\n path = os.path.expanduser('~/.lemur/keys')\n\n dest_dir = os.path.join(path, \"decrypted\")\n source_dir = os.path.join(path, \"encrypted\")\n\n if not os.path.exists(dest_dir):\n sys.stdout.write(\"[+] Creating decryption directory: {0}\\n\".format(dest_dir))\n os.makedirs(dest_dir)\n\n for root, dirs, files in os.walk(source_dir):\n for f in files:\n source = os.path.join(source_dir, f)\n dest = os.path.join(dest_dir, \".\".join(f.split(\".\")[:-1]))\n with open(source, 'rb') as in_file, open(dest, 'wb') as out_file:\n f = Fernet(key)\n data = f.decrypt(in_file.read())\n out_file.write(data)\n sys.stdout.write(\"[+] Writing file: {0} Source: {1}\\n\".format(dest, source))\n\n sys.stdout.write(\"[+] Keys have been unencrypted!\\n\")\n\n\n@manager.command\ndef publish_verisign_units():\n \"\"\"\n Simple function that queries verisign for API units and posts the mertics to\n Atlas API for other teams to consume.\n :return:\n \"\"\"\n from lemur.plugins import plugins\n v = plugins.get('verisign-issuer')\n units = v.get_available_units()\n\n metrics = {}\n for item in units:\n if item['@type'] in metrics.keys():\n metrics[item['@type']] += int(item['@remaining'])\n else:\n metrics.update({item['@type']: int(item['@remaining'])})\n\n for name, value in metrics.items():\n metric = [\n {\n \"timestamp\": 1321351651,\n \"type\": \"GAUGE\",\n \"name\": \"Symantec {0} Unit Count\".format(name),\n \"tags\": {},\n \"value\": value\n }\n ]\n\n requests.post('http://localhost:8078/metrics', data=json.dumps(metric))\n\n\n@manager.command\ndef publish_unapproved_verisign_certificates():\n \"\"\"\n Query the Verisign for any certificates that need to be approved.\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.extensions import metrics\n v = plugins.get('verisign-issuer')\n certs = v.get_pending_certificates()\n metrics.send('pending_certificates', 'gauge', certs)\n\n\ndef main():\n manager.add_command(\"start\", LemurServer())\n manager.add_command(\"runserver\", Server(host='127.0.0.1', threaded=True))\n manager.add_command(\"clean\", Clean())\n manager.add_command(\"show_urls\", ShowUrls())\n manager.add_command(\"db\", MigrateCommand)\n manager.add_command(\"init\", InitializeApp())\n manager.add_command(\"create_user\", CreateUser())\n manager.add_command(\"reset_password\", ResetPassword())\n manager.add_command(\"create_role\", CreateRole())\n manager.add_command(\"source\", 
source_manager)\n manager.add_command(\"certificate\", certificate_manager)\n manager.add_command(\"notify\", notification_manager)\n manager.add_command(\"endpoint\", endpoint_manager)\n manager.add_command(\"report\", report_manager)\n manager.add_command(\"policy\", policy_manager)\n manager.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lemur/manage.py" } ]
diff --git a/lemur/manage.py b/lemur/manage.py index b30a68dbd1..15e8610274 100755 --- a/lemur/manage.py +++ b/lemur/manage.py @@ -251,7 +251,7 @@ def run(self, password): days=days )) - policy_service.create(days=days) + policy_service.create(days=days, name='default') sys.stdout.write("[/] Done!\n")
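The one-line fix above passes an explicit `name` when the default rotation policy is created. The sketch below illustrates why a predictable name matters for later lookups; the stand-in service functions are hypothetical and only mirror the call signature shown in the diff, not Lemur's database-backed implementation.

```python
# Hypothetical in-memory stand-in; the real lemur.policies.service is database-backed.
_policies = {}


def create(days, name=None):
    """Create a rotation policy; an unnamed policy is hard to look up again later."""
    if name is None:
        name = "{0} day rotation policy".format(days)
    _policies[name] = {"name": name, "days": days}
    return _policies[name]


def get_by_name(name):
    return _policies.get(name)


create(days=30, name="default")  # mirrors the patched call in InitializeApp.run
assert get_by_name("default")["days"] == 30
```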
meltano__meltano-6488
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"Script to freeze the Meltano database - executed by the Makefile.\"\"\"\n\nfrom __future__ import annotations\n\nfrom alembic.script import ScriptDirectory\n\nfrom meltano.migrations import LOCK_PATH, MIGRATION_DIR\n\nscripts = ScriptDirectory(str(MIGRATION_DIR))\n\nwith LOCK_PATH.open(\"w\") as lock:\n HEAD = scripts.get_current_head()\n lock.write(HEAD)\n\nprint(f\"Meltano database frozen at {HEAD}.\")\n", "path": "scripts/alembic_freeze.py" } ]
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"Script to freeze the Meltano database - executed by GitHub CI.\"\"\"\n\nfrom __future__ import annotations\n\nfrom alembic.script import ScriptDirectory\n\nfrom meltano.migrations import LOCK_PATH, MIGRATION_DIR\n\nscripts = ScriptDirectory(str(MIGRATION_DIR))\n\nwith LOCK_PATH.open(\"w\") as lock:\n HEAD = scripts.get_current_head()\n lock.write(HEAD)\n\nprint(f\"Meltano database frozen at {HEAD}.\")\n", "path": "scripts/alembic_freeze.py" } ]
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 32881dac61..59ef13bd0e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -55,8 +55,8 @@ jobs: - name: Release Marker if: ${{ github.event.release }} - run: | - touch src/meltano/core/tracking/.release_marker + # The release marker differentiates installations 'in the wild' versus internal dev builds and tests + run: touch src/meltano/core/tracking/.release_marker - name: Build distribution run: | diff --git a/.gitignore b/.gitignore index 6f53cc39f4..573f862e4d 100644 --- a/.gitignore +++ b/.gitignore @@ -154,6 +154,9 @@ src/meltano/api/static/js # sqlite *.db +# Poetry build artifact +setup.py + # Integration test generated files *.jsonl **/.meltano/** diff --git a/Makefile b/Makefile deleted file mode 100644 index 0ed4af42de..0000000000 --- a/Makefile +++ /dev/null @@ -1,191 +0,0 @@ -# General -# ======= -# -# - `make` or `make build` initializes the project -# - Install node_modules needed by UI and API -# - Build static UI artifacts -# - Build all docker images needed for dev -# - `make test` runs pytest -# - `make clean` deletes all the build artifacts -# - `make docker_images` builds all the docker images including the production -# image -# -# To build and publish: -# -# > make sdist-public -# > poetry publish --build - -ifdef DOCKER_REGISTRY -base_image_tag = ${DOCKER_REGISTRY}/meltano/meltano/base -prod_image_tag = ${DOCKER_REGISTRY}/meltano/meltano -else -base_image_tag = meltano/meltano/base -prod_image_tag = meltano/meltano -endif - -DOCKER_RUN=docker run -it --rm -v $(shell pwd):/app -w /app -PYTHON_RUN=${DOCKER_RUN} --name python-$(shell uuidgen) python -DCR=docker-compose run --rm -DCRN=${DCR} --no-deps - -MELTANO_WEBAPP = src/webapp -MELTANO_API = src/meltano/api -MELTANO_RELEASE_MARKER_FILE = ./src/meltano/core/tracking/.release_marker - -.PHONY: build test clean docker_images release help -.DEFAULT_GOAL := help - -help: ## Display this help text - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-16s\033[0m %s\n", $$1, $$2}' - -build: ui api ## Build the Meltano UI and API - -test: ## Run the tests - ${DCRN} api poetry run pytest tests/ - -# pip related -TO_CLEAN = ./build ./dist -# built UI -TO_CLEAN += ./${MELTANO_API}/static/js -TO_CLEAN += ./${MELTANO_API}/static/css -TO_CLEAN += ./${MELTANO_WEBAPP}/dist -# release marker -TO_CLEAN += ${MELTANO_RELEASE_MARKER_FILE} - - -clean: ## Delete build artifacts - rm -rf ${TO_CLEAN} - -docker_images: base_image prod_image ## Build the Meltano Docker images - -docs: ## Serve docs - cd docs &&\ - bundle exec jekyll serve - -# Docker Image Related -# ==================== -# -# - `make base_image` builds meltano/base -# - `make prod_image` builds meltano/meltano which is an all-in-one production -# image that includes the static ui artifacts in the image. - -.PHONY: base_image prod_image docs - -base_image: ## Build the Meltano base image - docker build \ - --file docker/base/Dockerfile \ - -t $(base_image_tag) \ - . - -prod_image: base_image ui ## Build the Meltano prod image - docker build \ - --file docker/main/Dockerfile \ - -t $(prod_image_tag) \ - --build-arg BASE_IMAGE=$(base_image_tag) \ - . 
- -# API Related -# =========== -# -# - `make api` assembles all the necessary dependencies to run the API - -.PHONY: api - -api: prod_image ${MELTANO_API}/node_modules ## Build the Meltano API - -${MELTANO_API}/node_modules: - ${DCRN} -w /meltano/${MELTANO_API} api yarn - -# Packaging Related -# =========== -# -# - `make lock` pins dependency versions. We use Poetry to generate -# a lockfile. - -lock: ## Update Poetry lock file - poetry lock - -bundle: clean ui ## Clean build artifacts, then build & bundle the UI - mkdir -p src/meltano/api/templates && \ - cp src/webapp/dist/index.html src/meltano/api/templates/webapp.html && \ - cp src/webapp/dist/index-embed.html src/meltano/api/templates/embed.html && \ - cp -r src/webapp/dist/static/. src/meltano/api/static - -freeze_db: ## Freeze the Meltano database to support DB migration during upgrades - poetry run scripts/alembic_freeze.py - -# sdist: -# Build the source distribution -# Note: please use `sdist-public` for the actual release build. -# Note: despite being called "sdist", this builds both the sdist and wheel. -sdist: freeze_db bundle ## Build the Meltano sdist for development - poetry build --format sdist - poetry run pip wheel --no-deps . --wheel-dir dist/ - -# sdist_public: -# Same as sdist, except add release marker before poetry build -# The release marker differentiates installations 'in the wild' versus inernal dev builds and tests -sdist_public: freeze_db bundle ## Build the Meltano sdist for release - touch src/meltano/core/tracking/.release_marker - poetry build --format sdist - poetry run pip wheel --no-deps . --wheel-dir dist/ - echo "Builds complete. You can now publish to PyPI using 'poetry publish'." - -docker_sdist: base_image ## Build an image off of the base image that includes the Meltano sdist - docker run --rm -v `pwd`:/meltano ${base_image_tag} \ - bash -c "make sdist" && \ - bash -c "chmod 777 dist/*" - -# UI Related Tasks -# ================= -# -# - `make ui` assembles the necessary UI dependencies and builds the static UI -# artifacts to ui/dist - -.PHONY: ui - -ui: ## Build the Meltano UI - cd src/webapp && yarn && yarn build - -${MELTANO_WEBAPP}/node_modules: ${MELTANO_WEBAPP}/yarn.lock - cd ${MELTANO_WEBAPP} && yarn install --frozen-lockfile - - -# Lint Related Tasks -# ================== -# - -.PHONY: lint show_lint - -TOX_RUN = poetry run tox -e -ESLINT_RUN = cd ${MELTANO_WEBAPP} && yarn run lint -JSON_YML_VALIDATE = poetry run python src/meltano/core/utils/validate_json_schema.py - -args = `arg="$(filter-out $@,$(MAKECMDGOALS))" && echo $${arg:-${1}}` - -lint_python: ## Run Python linters & automatically apply fixes where possible - ${JSON_YML_VALIDATE} - ${TOX_RUN} fix -- $(call args) - -lint_eslint: ${MELTANO_WEBAPP}/node_modules ## Run eslint & automatically apply fixes where possible - ${ESLINT_RUN} --fix - -show_lint_python: ## Run Python linters & display output - ${TOX_RUN} lint -- $(call args) - -show_lint_eslint: ${MELTANO_WEBAPP}/node_modules ## Run eslint & display output - ${ESLINT_RUN} - -lint: lint_python lint_eslint ## Run linters & automatically apply fixes where possible -show_lint: show_lint_python show_lint_eslint ## Run linters & display output - -# Release -# ===================== -# Note: -# - this code is old and may be stale. 
-# - process currently runs in CI -release: ## Execute the automated steps of the deprecated release process - git diff --quiet || { echo "Working directory is dirty, please commit or stash your changes."; exit 1; } - yes | poetry run changelog release --$(type) - git add CHANGELOG.md - poetry run bumpversion --tag --allow-dirty --new-version `poetry run changelog current` $(type) diff --git a/docs/src/_contribute/style.md b/docs/src/_contribute/style.md index d7385c9592..4e9cef87a8 100644 --- a/docs/src/_contribute/style.md +++ b/docs/src/_contribute/style.md @@ -20,8 +20,9 @@ Python: - [wemake-python-styleguide](https://wemake-python-stylegui.de/en/latest/) - [MyPy](https://mypy.readthedocs.io/en/stable/) -Flake8 is a python tool that glues together `pycodestyle`, `pyflakes`, `mccabe`, and third-party plugins to check the style and quality of python code, -and `wemake-python-styleguide` is a plugin for Flake8 that offers an extensive set of opinionated rules that encourage clean and correct code. +Flake8 is a python tool that glues together `pycodestyle`, `pyflakes`, `mccabe`, and third-party plugins to check the style and quality of python code. Notable among these is `wemake-python-styleguide`, which offers an extensive set of opinionated rules that encourage clean and correct code. + +To lint your Python code, install the project using `poetry install`, then run `poetry run pre-commit --all-files flakeheaven` from the root of the project. The `pre-commit` check will be run in CI on all PRs. Javascript: @@ -29,7 +30,7 @@ Javascript: - [ESLint Vue Plugin](https://github.com/vuejs/eslint-plugin-vue) - [Prettier](https://prettier.io/) -You may use `make lint` to automatically lint all your code, or `make show_lint` if you only want to see what needs to change. +To lint your Javascript code, run `yarn lint` from the root of the project. ### Static typing diff --git a/scripts/alembic_freeze.py b/scripts/alembic_freeze.py index 1c8b18b90a..7a9bc7d558 100755 --- a/scripts/alembic_freeze.py +++ b/scripts/alembic_freeze.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""Script to freeze the Meltano database - executed by the Makefile.""" +"""Script to freeze the Meltano database - executed by GitHub CI.""" from __future__ import annotations
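With the Makefile gone, the freeze script is invoked from CI. A natural companion, sketched below under the assumption of the same `LOCK_PATH`/`MIGRATION_DIR` layout as the script above, is a check that the committed lock file still matches the newest migration head; this helper is hypothetical and not part of Meltano.

```python
#!/usr/bin/env python3
"""Hypothetical CI check: fail if the committed lock file no longer matches the head."""

from __future__ import annotations

import sys

from alembic.script import ScriptDirectory

from meltano.migrations import LOCK_PATH, MIGRATION_DIR

head = ScriptDirectory(str(MIGRATION_DIR)).get_current_head()
frozen = LOCK_PATH.read_text().strip()

if head != frozen:
    print(f"Lock file is stale: frozen at {frozen}, current head is {head}")
    sys.exit(1)

print(f"Lock file matches migration head {head}.")
```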
conda__conda-3931
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport socket\nimport sys\nfrom getpass import getpass\nfrom logging import getLogger\nfrom os.path import abspath, expanduser\n\ntry:\n # Python 3\n from urllib.parse import (quote, quote_plus, unquote, unquote_plus, # NOQA\n urlunparse as stdlib_urlparse, urljoin) # NOQA\n from urllib.request import pathname2url # NOQA\nexcept ImportError:\n # Python 2\n from urllib import quote, quote_plus, unquote, unquote_plus, pathname2url # NOQA\n from urlparse import urlunparse as stdlib_urlparse, urljoin # NOQA\n\nfrom requests.packages.urllib3.exceptions import LocationParseError\nfrom requests.packages.urllib3.util.url import Url, parse_url\n\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\non_win = bool(sys.platform == \"win32\")\n\n\n@memoize\ndef path_to_url(path):\n path = abspath(expanduser(path))\n url = urljoin('file:', pathname2url(path))\n log.debug(\"%s converted to %s\", path, url)\n return url\n\n\ndef url_to_path(url): # NOQA\n \"\"\"Convert a file:// URL to a path.\"\"\"\n assert url.startswith('file:'), \"You can only turn file: urls into filenames (not %r)\" % url\n path = url[len('file:'):].lstrip('/')\n path = unquote(path)\n if re.match('^([a-z])[:|]', path, re.I):\n path = path[0] + ':' + path[2:]\n elif not path.startswith(r'\\\\'):\n # if not a Windows UNC path\n path = '/' + path\n return path\n\n\n@memoize\ndef urlparse(url):\n if on_win and url.startswith('file:'):\n url.replace('\\\\', '/')\n return parse_url(url)\n\n\ndef url_to_s3_info(url):\n \"\"\"\n Convert a S3 url to a tuple of bucket and key\n \"\"\"\n parsed_url = parse_url(url)\n assert parsed_url.scheme == 's3', \"You can only use s3: urls (not %r)\" % url\n bucket, key = parsed_url.host, parsed_url.path\n return bucket, key\n\n\ndef is_url(url):\n try:\n p = urlparse(url)\n return p.netloc is not None or p.scheme == \"file\"\n except LocationParseError:\n log.debug(\"Could not parse url ({0}).\".format(url))\n return False\n\n\ndef is_ipv4_address(string_ip):\n \"\"\"\n Examples:\n >>> [is_ipv4_address(ip) for ip in ('8.8.8.8', '192.168.10.10', '255.255.255.255')]\n [True, True, True]\n >>> [is_ipv4_address(ip) for ip in ('8.8.8', '192.168.10.10.20', '256.255.255.255', '::1')]\n [False, False, False, False]\n \"\"\"\n try:\n socket.inet_aton(string_ip)\n except socket.error:\n return False\n return string_ip.count('.') == 3\n\n\ndef is_ipv6_address(string_ip):\n \"\"\"\n Examples:\n >>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]\n [True, True, True]\n >>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]\n [False, False]\n \"\"\"\n try:\n socket.inet_pton(socket.AF_INET6, string_ip)\n except socket.error:\n return False\n return True\n\n\ndef is_ip_address(string_ip):\n \"\"\"\n Examples:\n >>> is_ip_address('192.168.10.10')\n True\n >>> is_ip_address('::1')\n True\n >>> is_ip_address('www.google.com')\n False\n \"\"\"\n return is_ipv4_address(string_ip) or is_ipv6_address(string_ip)\n\n\ndef join(*args):\n start = '/' if not args[0] or args[0].startswith('/') else ''\n return start + '/'.join(y for y in (x.strip('/') for x in args if x) if y)\n\n\njoin_url = join\n\n\ndef has_scheme(value):\n return re.match(r'[a-z][a-z0-9]{0,11}://', value)\n\n\ndef strip_scheme(url):\n return url.split('://', 1)[-1]\n\n\ndef mask_anaconda_token(url):\n _, token = split_anaconda_token(url)\n 
return url.replace(token, \"<TOKEN>\", 1) if token else url\n\n\ndef split_anaconda_token(url):\n \"\"\"\n Examples:\n >>> split_anaconda_token(\"https://1.2.3.4/t/tk-123-456/path\")\n (u'https://1.2.3.4/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/t//path\")\n (u'https://1.2.3.4/path', u'')\n >>> split_anaconda_token(\"https://some.domain/api/t/tk-123-456/path\")\n (u'https://some.domain/api/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/conda/t/tk-123-456/path\")\n (u'https://1.2.3.4/conda/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/path\")\n (u'https://1.2.3.4/path', None)\n >>> split_anaconda_token(\"https://10.2.3.4:8080/conda/t/tk-123-45\")\n (u'https://10.2.3.4:8080/conda', u'tk-123-45')\n \"\"\"\n _token_match = re.search(r'/t/([a-zA-Z0-9-]*)', url)\n token = _token_match.groups()[0] if _token_match else None\n cleaned_url = url.replace('/t/' + token, '', 1) if token is not None else url\n return cleaned_url.rstrip('/'), token\n\n\ndef split_platform(url):\n \"\"\"\n\n Examples:\n >>> split_platform(\"https://1.2.3.4/t/tk-123/osx-64/path\")\n (u'https://1.2.3.4/t/tk-123/path', u'osx-64')\n\n \"\"\"\n from conda.base.constants import PLATFORM_DIRECTORIES\n _platform_match_regex = r'/(%s)/?' % r'|'.join(r'%s' % d for d in PLATFORM_DIRECTORIES)\n _platform_match = re.search(_platform_match_regex, url, re.IGNORECASE)\n platform = _platform_match.groups()[0] if _platform_match else None\n cleaned_url = url.replace('/' + platform, '', 1) if platform is not None else url\n return cleaned_url.rstrip('/'), platform\n\n\ndef split_package_filename(url):\n cleaned_url, package_filename = (url.rsplit('/', 1) if url.endswith(('.tar.bz2', '.json'))\n else (url, None))\n return cleaned_url, package_filename\n\n\ndef split_scheme_auth_token(url):\n if not url:\n return None, None, None, None\n cleaned_url, token = split_anaconda_token(url)\n url_parts = urlparse(cleaned_url)\n remainder_url = Url(host=url_parts.host, port=url_parts.port, path=url_parts.path,\n query=url_parts.query).url\n return remainder_url, url_parts.scheme, url_parts.auth, token\n\n\ndef split_conda_url_easy_parts(url):\n # scheme, auth, token, platform, package_filename, host, port, path, query\n cleaned_url, token = split_anaconda_token(url)\n cleaned_url, platform = split_platform(cleaned_url)\n cleaned_url, package_filename = split_package_filename(cleaned_url)\n\n # TODO: split out namespace using regex\n\n url_parts = urlparse(cleaned_url)\n\n return (url_parts.scheme, url_parts.auth, token, platform, package_filename, url_parts.host,\n url_parts.port, url_parts.path, url_parts.query)\n\n\ndef is_windows_path(value):\n return re.match(r'[a-z]:[/\\\\]', value, re.IGNORECASE)\n\n\n@memoize\ndef get_proxy_username_and_pass(scheme):\n username = input(\"\\n%s proxy username: \" % scheme)\n passwd = getpass(\"Password:\")\n return username, passwd\n\n\ndef add_username_and_password(url, username, password):\n url_parts = parse_url(url)._asdict()\n url_parts['auth'] = username + ':' + quote(password, '')\n return Url(**url_parts).url\n\n\ndef maybe_add_auth(url, auth, force=False):\n \"\"\"add auth if the url doesn't currently have it\"\"\"\n if not auth:\n return url\n url_parts = urlparse(url)._asdict()\n if url_parts['auth'] and not force:\n return url\n url_parts['auth'] = auth\n return Url(**url_parts).url\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "conda/common/url.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport socket\nimport sys\nfrom getpass import getpass\nfrom logging import getLogger\nfrom os.path import abspath, expanduser\n\ntry:\n # Python 3\n from urllib.parse import (quote, quote_plus, unquote, unquote_plus, # NOQA\n urlunparse as stdlib_urlparse, urljoin) # NOQA\n from urllib.request import pathname2url # NOQA\nexcept ImportError:\n # Python 2\n from urllib import quote, quote_plus, unquote, unquote_plus, pathname2url # NOQA\n from urlparse import urlunparse as stdlib_urlparse, urljoin # NOQA\n\nfrom requests.packages.urllib3.exceptions import LocationParseError\nfrom requests.packages.urllib3.util.url import Url, parse_url\n\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\non_win = bool(sys.platform == \"win32\")\n\n\n@memoize\ndef path_to_url(path):\n path = abspath(expanduser(path))\n url = urljoin('file:', pathname2url(path))\n log.debug(\"%s converted to %s\", path, url)\n return url\n\n\ndef url_to_path(url): # NOQA\n \"\"\"Convert a file:// URL to a path.\"\"\"\n assert url.startswith('file:'), \"You can only turn file: urls into filenames (not %r)\" % url\n path = url[len('file:'):].lstrip('/')\n path = unquote(path)\n if re.match('^([a-z])[:|]', path, re.I):\n path = path[0] + ':' + path[2:]\n elif not path.startswith(r'\\\\'):\n # if not a Windows UNC path\n path = '/' + path\n return path\n\n\n@memoize\ndef urlparse(url):\n if on_win and url.startswith('file:'):\n url.replace('\\\\', '/')\n return parse_url(url)\n\n\ndef url_to_s3_info(url):\n \"\"\"\n Convert a S3 url to a tuple of bucket and key\n \"\"\"\n parsed_url = parse_url(url)\n assert parsed_url.scheme == 's3', \"You can only use s3: urls (not %r)\" % url\n bucket, key = parsed_url.host, parsed_url.path\n return bucket, key\n\n\ndef is_url(url):\n if not url:\n return False\n try:\n p = urlparse(url)\n return p.netloc is not None or p.scheme == \"file\"\n except LocationParseError:\n log.debug(\"Could not parse url ({0}).\".format(url))\n return False\n\n\ndef is_ipv4_address(string_ip):\n \"\"\"\n Examples:\n >>> [is_ipv4_address(ip) for ip in ('8.8.8.8', '192.168.10.10', '255.255.255.255')]\n [True, True, True]\n >>> [is_ipv4_address(ip) for ip in ('8.8.8', '192.168.10.10.20', '256.255.255.255', '::1')]\n [False, False, False, False]\n \"\"\"\n try:\n socket.inet_aton(string_ip)\n except socket.error:\n return False\n return string_ip.count('.') == 3\n\n\ndef is_ipv6_address(string_ip):\n \"\"\"\n Examples:\n >>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]\n [True, True, True]\n >>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]\n [False, False]\n \"\"\"\n try:\n socket.inet_pton(socket.AF_INET6, string_ip)\n except socket.error:\n return False\n return True\n\n\ndef is_ip_address(string_ip):\n \"\"\"\n Examples:\n >>> is_ip_address('192.168.10.10')\n True\n >>> is_ip_address('::1')\n True\n >>> is_ip_address('www.google.com')\n False\n \"\"\"\n return is_ipv4_address(string_ip) or is_ipv6_address(string_ip)\n\n\ndef join(*args):\n start = '/' if not args[0] or args[0].startswith('/') else ''\n return start + '/'.join(y for y in (x.strip('/') for x in args if x) if y)\n\n\njoin_url = join\n\n\ndef has_scheme(value):\n return re.match(r'[a-z][a-z0-9]{0,11}://', value)\n\n\ndef strip_scheme(url):\n return url.split('://', 1)[-1]\n\n\ndef mask_anaconda_token(url):\n _, token = 
split_anaconda_token(url)\n return url.replace(token, \"<TOKEN>\", 1) if token else url\n\n\ndef split_anaconda_token(url):\n \"\"\"\n Examples:\n >>> split_anaconda_token(\"https://1.2.3.4/t/tk-123-456/path\")\n (u'https://1.2.3.4/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/t//path\")\n (u'https://1.2.3.4/path', u'')\n >>> split_anaconda_token(\"https://some.domain/api/t/tk-123-456/path\")\n (u'https://some.domain/api/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/conda/t/tk-123-456/path\")\n (u'https://1.2.3.4/conda/path', u'tk-123-456')\n >>> split_anaconda_token(\"https://1.2.3.4/path\")\n (u'https://1.2.3.4/path', None)\n >>> split_anaconda_token(\"https://10.2.3.4:8080/conda/t/tk-123-45\")\n (u'https://10.2.3.4:8080/conda', u'tk-123-45')\n \"\"\"\n _token_match = re.search(r'/t/([a-zA-Z0-9-]*)', url)\n token = _token_match.groups()[0] if _token_match else None\n cleaned_url = url.replace('/t/' + token, '', 1) if token is not None else url\n return cleaned_url.rstrip('/'), token\n\n\ndef split_platform(url):\n \"\"\"\n\n Examples:\n >>> split_platform(\"https://1.2.3.4/t/tk-123/osx-64/path\")\n (u'https://1.2.3.4/t/tk-123/path', u'osx-64')\n\n \"\"\"\n from conda.base.constants import PLATFORM_DIRECTORIES\n _platform_match_regex = r'/(%s)/?' % r'|'.join(r'%s' % d for d in PLATFORM_DIRECTORIES)\n _platform_match = re.search(_platform_match_regex, url, re.IGNORECASE)\n platform = _platform_match.groups()[0] if _platform_match else None\n cleaned_url = url.replace('/' + platform, '', 1) if platform is not None else url\n return cleaned_url.rstrip('/'), platform\n\n\ndef split_package_filename(url):\n cleaned_url, package_filename = (url.rsplit('/', 1) if url.endswith(('.tar.bz2', '.json'))\n else (url, None))\n return cleaned_url, package_filename\n\n\ndef split_scheme_auth_token(url):\n if not url:\n return None, None, None, None\n cleaned_url, token = split_anaconda_token(url)\n url_parts = urlparse(cleaned_url)\n remainder_url = Url(host=url_parts.host, port=url_parts.port, path=url_parts.path,\n query=url_parts.query).url\n return remainder_url, url_parts.scheme, url_parts.auth, token\n\n\ndef split_conda_url_easy_parts(url):\n # scheme, auth, token, platform, package_filename, host, port, path, query\n cleaned_url, token = split_anaconda_token(url)\n cleaned_url, platform = split_platform(cleaned_url)\n cleaned_url, package_filename = split_package_filename(cleaned_url)\n\n # TODO: split out namespace using regex\n\n url_parts = urlparse(cleaned_url)\n\n return (url_parts.scheme, url_parts.auth, token, platform, package_filename, url_parts.host,\n url_parts.port, url_parts.path, url_parts.query)\n\n\ndef is_windows_path(value):\n return re.match(r'[a-z]:[/\\\\]', value, re.IGNORECASE)\n\n\n@memoize\ndef get_proxy_username_and_pass(scheme):\n username = input(\"\\n%s proxy username: \" % scheme)\n passwd = getpass(\"Password:\")\n return username, passwd\n\n\ndef add_username_and_password(url, username, password):\n url_parts = parse_url(url)._asdict()\n url_parts['auth'] = username + ':' + quote(password, '')\n return Url(**url_parts).url\n\n\ndef maybe_add_auth(url, auth, force=False):\n \"\"\"add auth if the url doesn't currently have it\"\"\"\n if not auth:\n return url\n url_parts = urlparse(url)._asdict()\n if url_parts['auth'] and not force:\n return url\n url_parts['auth'] = auth\n return Url(**url_parts).url\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "conda/common/url.py" } ]
diff --git a/conda/common/url.py b/conda/common/url.py
index a25525815e9..4cda8593d25 100644
--- a/conda/common/url.py
+++ b/conda/common/url.py
@@ -68,6 +68,8 @@ def url_to_s3_info(url):
 
 
 def is_url(url):
+    if not url:
+        return False
     try:
         p = urlparse(url)
         return p.netloc is not None or p.scheme == "file"
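Note on conda__conda-3931: the patch adds an early guard so that is_url() returns False for None or an empty string instead of passing the value straight to the vendored urllib3 parser. The snippet below is only a standalone sketch of that guarded behaviour; it imports urllib3 directly rather than the copy vendored inside requests, which is an assumption of the illustration and not part of the patch.

# Standalone sketch of the guarded is_url() behaviour added by the patch above.
# Assumes a plain urllib3 install; conda itself uses requests' vendored copy.
from urllib3.exceptions import LocationParseError
from urllib3.util.url import parse_url


def is_url(url):
    # New guard: falsy input (None or "") is never a URL.
    if not url:
        return False
    try:
        p = parse_url(url)
        return p.netloc is not None or p.scheme == "file"
    except LocationParseError:
        return False


assert is_url("https://repo.anaconda.com/pkgs/main")
assert not is_url("")
assert not is_url(None)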
acl-org__acl-anthology-724
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nfrom .utils import (\n build_anthology_id,\n parse_element,\n infer_attachment_url,\n remove_extra_whitespace,\n is_journal,\n is_volume_id,\n)\nfrom . import data\n\n# For BibTeX export\nfrom .formatter import bibtex_encode, bibtex_make_entry\n\n\nclass Paper:\n def __init__(self, paper_id, ingest_date, volume, formatter):\n self.parent_volume = volume\n self.formatter = formatter\n self._id = paper_id\n self._ingest_date = ingest_date\n self._bibkey = False\n self.is_volume = paper_id == \"0\"\n\n # initialize metadata with keys inherited from volume\n self.attrib = {}\n for key, value in volume.attrib.items():\n # Only inherit 'editor' for frontmatter\n if (key == \"editor\" and not self.is_volume) or key in (\n \"collection_id\",\n \"booktitle\",\n \"id\",\n \"meta_data\",\n \"meta_journal_title\",\n \"meta_volume\",\n \"meta_issue\",\n \"sigs\",\n \"venues\",\n \"meta_date\",\n \"url\",\n ):\n continue\n\n self.attrib[key] = value\n\n def from_xml(xml_element, *args):\n ingest_date = xml_element.get(\"ingest-date\", data.UNKNOWN_INGEST_DATE)\n\n # Default to paper ID \"0\" (for front matter)\n paper = Paper(xml_element.get(\"id\", \"0\"), ingest_date, *args)\n\n # Set values from parsing the XML element (overwriting\n # and changing some initialized from the volume metadata)\n for key, value in parse_element(xml_element).items():\n if key == \"author\" and \"editor\" in paper.attrib:\n del paper.attrib[\"editor\"]\n paper.attrib[key] = value\n\n # Frontmatter title is the volume 'booktitle'\n if paper.is_volume:\n paper.attrib[\"xml_title\"] = paper.attrib[\"xml_booktitle\"]\n paper.attrib[\"xml_title\"].tag = \"title\"\n\n # Remove booktitle for frontmatter and journals\n if paper.is_volume or is_journal(paper.full_id):\n del paper.attrib[\"xml_booktitle\"]\n\n # Expand URLs with paper ID\n for tag in (\"revision\", \"erratum\"):\n if tag in paper.attrib:\n for item in paper.attrib[tag]:\n if not item[\"url\"].startswith(paper.full_id):\n log.error(\n \"{} must begin with paper ID '{}', but is '{}'\".format(\n tag, paper.full_id, item[\"url\"]\n )\n )\n item[\"url\"] = data.ANTHOLOGY_PDF.format(item[\"url\"])\n\n if \"attachment\" in paper.attrib:\n for item in paper.attrib[\"attachment\"]:\n item[\"url\"] = infer_attachment_url(item[\"url\"], paper.full_id)\n\n # Explicitly construct URL of original version of the paper\n # -- this is a bit hacky, but it's not given in the XML\n # explicitly\n if \"revision\" in paper.attrib:\n paper.attrib[\"revision\"].insert(\n 0,\n {\n \"value\": \"{}v1\".format(paper.full_id),\n \"id\": \"1\",\n \"url\": data.ANTHOLOGY_PDF.format(\"{}v1\".format(paper.full_id)),\n },\n )\n\n paper.attrib[\"title\"] = paper.get_title(\"plain\")\n paper.attrib[\"booktitle\"] = paper.get_booktitle(\"plain\")\n\n if \"editor\" in paper.attrib:\n if paper.is_volume:\n if \"author\" in 
paper.attrib:\n log.warn(\n \"Paper {} has both <editor> and <author>; ignoring <author>\".format(\n paper.full_id\n )\n )\n # Proceedings editors are considered authors for their front matter\n paper.attrib[\"author\"] = paper.attrib[\"editor\"]\n del paper.attrib[\"editor\"]\n else:\n log.warn(\n \"Paper {} has <editor> but is not a proceedings volume; ignoring <editor>\".format(\n paper.full_id\n )\n )\n if \"pages\" in paper.attrib:\n if paper.attrib[\"pages\"] is not None:\n paper._interpret_pages()\n else:\n del paper.attrib[\"pages\"]\n\n if \"author\" in paper.attrib:\n paper.attrib[\"author_string\"] = \", \".join(\n [x[0].full for x in paper.attrib[\"author\"]]\n )\n\n paper.attrib[\"thumbnail\"] = data.ANTHOLOGY_THUMBNAIL.format(paper.full_id)\n\n return paper\n\n def _interpret_pages(self):\n \"\"\"Splits up 'pages' field into first and last page, if possible.\n\n This is used for metadata in the generated HTML.\"\"\"\n for s in (\"--\", \"-\", \"–\"):\n if self.attrib[\"pages\"].count(s) == 1:\n self.attrib[\"page_first\"], self.attrib[\"page_last\"] = self.attrib[\n \"pages\"\n ].split(s)\n self.attrib[\"pages\"] = self.attrib[\"pages\"].replace(s, \"–\")\n return\n\n @property\n def ingest_date(self):\n \"\"\"Inherit publication date from parent, but self overrides. May be undefined.\"\"\"\n if self._ingest_date:\n return self._ingest_date\n if self.parent_volume:\n return self.parent_volume.ingest_date\n return data.UNKNOWN_INGEST_DATE\n\n @property\n def collection_id(self):\n return self.parent_volume.collection_id\n\n @property\n def volume_id(self):\n return self.parent_volume.volume_id\n\n @property\n def paper_id(self):\n return self._id\n\n @property\n def full_id(self):\n return self.anthology_id\n\n @property\n def anthology_id(self):\n return build_anthology_id(self.collection_id, self.volume_id, self.paper_id)\n\n @property\n def bibkey(self):\n if not self._bibkey:\n self._bibkey = self.full_id # fallback\n return self._bibkey\n\n @bibkey.setter\n def bibkey(self, value):\n self._bibkey = value\n\n @property\n def bibtype(self):\n if is_journal(self.full_id):\n return \"article\"\n elif self.is_volume:\n return \"proceedings\"\n else:\n return \"inproceedings\"\n\n @property\n def parent_volume_id(self):\n if self.parent_volume is not None:\n return self.parent_volume.full_id\n return None\n\n @property\n def has_abstract(self):\n return \"xml_abstract\" in self.attrib\n\n def get(self, name, default=None):\n try:\n return self.attrib[name]\n except KeyError:\n return default\n\n def get_title(self, form=\"xml\"):\n \"\"\"Returns the paper title, optionally formatting it.\n\n Accepted formats:\n - xml: Include any contained XML tags unchanged\n - plain: Strip all XML tags, returning only plain text\n - html: Convert XML tags into valid HTML tags\n - latex: Convert XML tags into LaTeX commands\n \"\"\"\n return self.formatter(self.get(\"xml_title\"), form)\n\n def get_abstract(self, form=\"xml\"):\n \"\"\"Returns the abstract, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_abstract\"), form, allow_url=True)\n\n def get_booktitle(self, form=\"xml\", default=\"\"):\n \"\"\"Returns the booktitle, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n if \"xml_booktitle\" in self.attrib:\n return self.formatter(self.get(\"xml_booktitle\"), form)\n elif self.parent_volume is not None:\n return self.parent_volume.get(\"title\")\n else:\n return default\n\n def as_bibtex(self, 
concise=False):\n \"\"\"Return the BibTeX entry for this paper.\"\"\"\n # Build BibTeX entry\n bibkey = self.bibkey\n bibtype = self.bibtype\n entries = [(\"title\", self.get_title(form=\"latex\"))]\n for people in (\"author\", \"editor\"):\n if people in self.attrib:\n entries.append(\n (people, \" and \".join(p.as_bibtex() for p, _ in self.get(people)))\n )\n if is_journal(self.full_id):\n entries.append(\n (\"journal\", bibtex_encode(self.parent_volume.get(\"meta_journal_title\")))\n )\n journal_volume = self.parent_volume.get(\n \"meta_volume\", self.parent_volume.get(\"volume\")\n )\n if journal_volume:\n entries.append((\"volume\", journal_volume))\n journal_issue = self.parent_volume.get(\n \"meta_issue\", self.parent_volume.get(\"issue\")\n )\n if journal_issue:\n entries.append((\"number\", journal_issue))\n else:\n # not is_journal(self.full_id)\n if \"xml_booktitle\" in self.attrib:\n entries.append((\"booktitle\", self.get_booktitle(form=\"latex\")))\n elif bibtype != \"proceedings\":\n entries.append((\"booktitle\", self.parent_volume.get_title(form=\"latex\")))\n for entry in (\"month\", \"year\", \"address\", \"publisher\", \"note\"):\n if self.get(entry) is not None:\n entries.append((entry, bibtex_encode(self.get(entry))))\n for entry in (\"url\", \"doi\"):\n if entry in self.attrib:\n # don't want latex escapes such as\n # doi = \"10.1162/coli{\\_}a{\\_}00008\",\n entries.append((entry, self.get(entry)))\n if \"pages\" in self.attrib:\n entries.append((\"pages\", self.get(\"pages\").replace(\"–\", \"--\")))\n if \"xml_abstract\" in self.attrib and not concise:\n entries.append((\"abstract\", self.get_abstract(form=\"latex\")))\n\n # Serialize it\n return bibtex_make_entry(bibkey, bibtype, entries)\n\n def as_dict(self):\n value = self.attrib\n value[\"paper_id\"] = self.paper_id\n value[\"parent_volume_id\"] = self.parent_volume_id\n value[\"bibkey\"] = self.bibkey\n value[\"bibtype\"] = self.bibtype\n return value\n\n def items(self):\n return self.attrib.items()\n", "path": "bin/anthology/papers.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nfrom .utils import (\n build_anthology_id,\n parse_element,\n infer_attachment_url,\n remove_extra_whitespace,\n is_journal,\n is_volume_id,\n)\nfrom . import data\n\n# For BibTeX export\nfrom .formatter import bibtex_encode, bibtex_make_entry\n\n\nclass Paper:\n def __init__(self, paper_id, ingest_date, volume, formatter):\n self.parent_volume = volume\n self.formatter = formatter\n self._id = paper_id\n self._ingest_date = ingest_date\n self._bibkey = False\n self.is_volume = paper_id == \"0\"\n\n # initialize metadata with keys inherited from volume\n self.attrib = {}\n for key, value in volume.attrib.items():\n # Only inherit 'editor' for frontmatter\n if (key == \"editor\" and not self.is_volume) or key in (\n \"collection_id\",\n \"booktitle\",\n \"id\",\n \"meta_data\",\n \"meta_journal_title\",\n \"meta_volume\",\n \"meta_issue\",\n \"sigs\",\n \"venues\",\n \"meta_date\",\n \"url\",\n \"pdf\",\n ):\n continue\n\n self.attrib[key] = value\n\n def from_xml(xml_element, *args):\n ingest_date = xml_element.get(\"ingest-date\", data.UNKNOWN_INGEST_DATE)\n\n # Default to paper ID \"0\" (for front matter)\n paper = Paper(xml_element.get(\"id\", \"0\"), ingest_date, *args)\n\n # Set values from parsing the XML element (overwriting\n # and changing some initialized from the volume metadata)\n for key, value in parse_element(xml_element).items():\n if key == \"author\" and \"editor\" in paper.attrib:\n del paper.attrib[\"editor\"]\n paper.attrib[key] = value\n\n # Frontmatter title is the volume 'booktitle'\n if paper.is_volume:\n paper.attrib[\"xml_title\"] = paper.attrib[\"xml_booktitle\"]\n paper.attrib[\"xml_title\"].tag = \"title\"\n\n # Remove booktitle for frontmatter and journals\n if paper.is_volume or is_journal(paper.full_id):\n del paper.attrib[\"xml_booktitle\"]\n\n # Expand URLs with paper ID\n for tag in (\"revision\", \"erratum\"):\n if tag in paper.attrib:\n for item in paper.attrib[tag]:\n if not item[\"url\"].startswith(paper.full_id):\n log.error(\n \"{} must begin with paper ID '{}', but is '{}'\".format(\n tag, paper.full_id, item[\"url\"]\n )\n )\n item[\"url\"] = data.ANTHOLOGY_PDF.format(item[\"url\"])\n\n if \"attachment\" in paper.attrib:\n for item in paper.attrib[\"attachment\"]:\n item[\"url\"] = infer_attachment_url(item[\"url\"], paper.full_id)\n\n # Explicitly construct URL of original version of the paper\n # -- this is a bit hacky, but it's not given in the XML\n # explicitly\n if \"revision\" in paper.attrib:\n paper.attrib[\"revision\"].insert(\n 0,\n {\n \"value\": \"{}v1\".format(paper.full_id),\n \"id\": \"1\",\n \"url\": data.ANTHOLOGY_PDF.format(\"{}v1\".format(paper.full_id)),\n },\n )\n\n paper.attrib[\"title\"] = paper.get_title(\"plain\")\n paper.attrib[\"booktitle\"] = paper.get_booktitle(\"plain\")\n\n if \"editor\" in paper.attrib:\n if paper.is_volume:\n if \"author\" in 
paper.attrib:\n log.warn(\n \"Paper {} has both <editor> and <author>; ignoring <author>\".format(\n paper.full_id\n )\n )\n # Proceedings editors are considered authors for their front matter\n paper.attrib[\"author\"] = paper.attrib[\"editor\"]\n del paper.attrib[\"editor\"]\n else:\n log.warn(\n \"Paper {} has <editor> but is not a proceedings volume; ignoring <editor>\".format(\n paper.full_id\n )\n )\n if \"pages\" in paper.attrib:\n if paper.attrib[\"pages\"] is not None:\n paper._interpret_pages()\n else:\n del paper.attrib[\"pages\"]\n\n if \"author\" in paper.attrib:\n paper.attrib[\"author_string\"] = \", \".join(\n [x[0].full for x in paper.attrib[\"author\"]]\n )\n\n paper.attrib[\"thumbnail\"] = data.ANTHOLOGY_THUMBNAIL.format(paper.full_id)\n\n return paper\n\n def _interpret_pages(self):\n \"\"\"Splits up 'pages' field into first and last page, if possible.\n\n This is used for metadata in the generated HTML.\"\"\"\n for s in (\"--\", \"-\", \"–\"):\n if self.attrib[\"pages\"].count(s) == 1:\n self.attrib[\"page_first\"], self.attrib[\"page_last\"] = self.attrib[\n \"pages\"\n ].split(s)\n self.attrib[\"pages\"] = self.attrib[\"pages\"].replace(s, \"–\")\n return\n\n @property\n def ingest_date(self):\n \"\"\"Inherit publication date from parent, but self overrides. May be undefined.\"\"\"\n if self._ingest_date:\n return self._ingest_date\n if self.parent_volume:\n return self.parent_volume.ingest_date\n return data.UNKNOWN_INGEST_DATE\n\n @property\n def collection_id(self):\n return self.parent_volume.collection_id\n\n @property\n def volume_id(self):\n return self.parent_volume.volume_id\n\n @property\n def paper_id(self):\n return self._id\n\n @property\n def full_id(self):\n return self.anthology_id\n\n @property\n def anthology_id(self):\n return build_anthology_id(self.collection_id, self.volume_id, self.paper_id)\n\n @property\n def bibkey(self):\n if not self._bibkey:\n self._bibkey = self.full_id # fallback\n return self._bibkey\n\n @bibkey.setter\n def bibkey(self, value):\n self._bibkey = value\n\n @property\n def bibtype(self):\n if is_journal(self.full_id):\n return \"article\"\n elif self.is_volume:\n return \"proceedings\"\n else:\n return \"inproceedings\"\n\n @property\n def parent_volume_id(self):\n if self.parent_volume is not None:\n return self.parent_volume.full_id\n return None\n\n @property\n def has_abstract(self):\n return \"xml_abstract\" in self.attrib\n\n def get(self, name, default=None):\n try:\n return self.attrib[name]\n except KeyError:\n return default\n\n def get_title(self, form=\"xml\"):\n \"\"\"Returns the paper title, optionally formatting it.\n\n Accepted formats:\n - xml: Include any contained XML tags unchanged\n - plain: Strip all XML tags, returning only plain text\n - html: Convert XML tags into valid HTML tags\n - latex: Convert XML tags into LaTeX commands\n \"\"\"\n return self.formatter(self.get(\"xml_title\"), form)\n\n def get_abstract(self, form=\"xml\"):\n \"\"\"Returns the abstract, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_abstract\"), form, allow_url=True)\n\n def get_booktitle(self, form=\"xml\", default=\"\"):\n \"\"\"Returns the booktitle, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n if \"xml_booktitle\" in self.attrib:\n return self.formatter(self.get(\"xml_booktitle\"), form)\n elif self.parent_volume is not None:\n return self.parent_volume.get(\"title\")\n else:\n return default\n\n def as_bibtex(self, 
concise=False):\n \"\"\"Return the BibTeX entry for this paper.\"\"\"\n # Build BibTeX entry\n bibkey = self.bibkey\n bibtype = self.bibtype\n entries = [(\"title\", self.get_title(form=\"latex\"))]\n for people in (\"author\", \"editor\"):\n if people in self.attrib:\n entries.append(\n (people, \" and \".join(p.as_bibtex() for p, _ in self.get(people)))\n )\n if is_journal(self.full_id):\n entries.append(\n (\"journal\", bibtex_encode(self.parent_volume.get(\"meta_journal_title\")))\n )\n journal_volume = self.parent_volume.get(\n \"meta_volume\", self.parent_volume.get(\"volume\")\n )\n if journal_volume:\n entries.append((\"volume\", journal_volume))\n journal_issue = self.parent_volume.get(\n \"meta_issue\", self.parent_volume.get(\"issue\")\n )\n if journal_issue:\n entries.append((\"number\", journal_issue))\n else:\n # not is_journal(self.full_id)\n if \"xml_booktitle\" in self.attrib:\n entries.append((\"booktitle\", self.get_booktitle(form=\"latex\")))\n elif bibtype != \"proceedings\":\n entries.append((\"booktitle\", self.parent_volume.get_title(form=\"latex\")))\n for entry in (\"month\", \"year\", \"address\", \"publisher\", \"note\"):\n if self.get(entry) is not None:\n entries.append((entry, bibtex_encode(self.get(entry))))\n for entry in (\"url\", \"doi\"):\n if entry in self.attrib:\n # don't want latex escapes such as\n # doi = \"10.1162/coli{\\_}a{\\_}00008\",\n entries.append((entry, self.get(entry)))\n if \"pages\" in self.attrib:\n entries.append((\"pages\", self.get(\"pages\").replace(\"–\", \"--\")))\n if \"xml_abstract\" in self.attrib and not concise:\n entries.append((\"abstract\", self.get_abstract(form=\"latex\")))\n\n # Serialize it\n return bibtex_make_entry(bibkey, bibtype, entries)\n\n def as_dict(self):\n value = self.attrib\n value[\"paper_id\"] = self.paper_id\n value[\"parent_volume_id\"] = self.parent_volume_id\n value[\"bibkey\"] = self.bibkey\n value[\"bibtype\"] = self.bibtype\n return value\n\n def items(self):\n return self.attrib.items()\n", "path": "bin/anthology/papers.py" } ]
diff --git a/bin/anthology/papers.py b/bin/anthology/papers.py
index 5067c25598..9d14242df5 100644
--- a/bin/anthology/papers.py
+++ b/bin/anthology/papers.py
@@ -54,6 +54,7 @@ def __init__(self, paper_id, ingest_date, volume, formatter):
                 "venues",
                 "meta_date",
                 "url",
+                "pdf",
             ):
                 continue
 
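Note on acl-org__acl-anthology-724: the one-line change adds "pdf" to the tuple of volume-level keys that Paper.__init__ skips when copying metadata from the parent volume, so individual papers no longer inherit the volume's PDF reference. The toy snippet below only illustrates that filtering idea; the name EXCLUDED_FROM_INHERITANCE and the sample dictionaries are invented for the example, not taken from the Anthology code.

# Toy illustration of the inheritance filter; not the Anthology code itself.
EXCLUDED_FROM_INHERITANCE = (
    "collection_id", "booktitle", "id", "meta_data", "meta_journal_title",
    "meta_volume", "meta_issue", "sigs", "venues", "meta_date", "url", "pdf",
)

volume_attrib = {"year": "2019", "publisher": "ACL", "pdf": "volume-frontmatter.pdf"}
paper_attrib = {
    k: v for k, v in volume_attrib.items() if k not in EXCLUDED_FROM_INHERITANCE
}

assert "pdf" not in paper_attrib       # papers keep their own PDF reference
assert paper_attrib["year"] == "2019"  # ordinary metadata is still inherited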
graphql-python__graphene-django-639
[ { "content": "import inspect\nimport json\nimport re\n\nimport six\nfrom django.http import HttpResponse, HttpResponseNotAllowed\nfrom django.http.response import HttpResponseBadRequest\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom graphql import get_default_backend\nfrom graphql.error import format_error as format_graphql_error\nfrom graphql.error import GraphQLError\nfrom graphql.execution import ExecutionResult\nfrom graphql.type.schema import GraphQLSchema\n\nfrom .settings import graphene_settings\n\n\nclass HttpError(Exception):\n def __init__(self, response, message=None, *args, **kwargs):\n self.response = response\n self.message = message = message or response.content.decode()\n super(HttpError, self).__init__(message, *args, **kwargs)\n\n\ndef get_accepted_content_types(request):\n def qualify(x):\n parts = x.split(\";\", 1)\n if len(parts) == 2:\n match = re.match(r\"(^|;)q=(0(\\.\\d{,3})?|1(\\.0{,3})?)(;|$)\", parts[1])\n if match:\n return parts[0].strip(), float(match.group(2))\n return parts[0].strip(), 1\n\n raw_content_types = request.META.get(\"HTTP_ACCEPT\", \"*/*\").split(\",\")\n qualified_content_types = map(qualify, raw_content_types)\n return list(\n x[0] for x in sorted(qualified_content_types, key=lambda x: x[1], reverse=True)\n )\n\n\ndef instantiate_middleware(middlewares):\n for middleware in middlewares:\n if inspect.isclass(middleware):\n yield middleware()\n continue\n yield middleware\n\n\nclass GraphQLView(View):\n graphiql_version = \"0.11.10\"\n graphiql_template = \"graphene/graphiql.html\"\n\n schema = None\n graphiql = False\n executor = None\n backend = None\n middleware = None\n root_value = None\n pretty = False\n batch = False\n\n def __init__(\n self,\n schema=None,\n executor=None,\n middleware=None,\n root_value=None,\n graphiql=False,\n pretty=False,\n batch=False,\n backend=None,\n ):\n if not schema:\n schema = graphene_settings.SCHEMA\n\n if backend is None:\n backend = get_default_backend()\n\n if middleware is None:\n middleware = graphene_settings.MIDDLEWARE\n\n self.schema = self.schema or schema\n if middleware is not None:\n self.middleware = list(instantiate_middleware(middleware))\n self.executor = executor\n self.root_value = root_value\n self.pretty = self.pretty or pretty\n self.graphiql = self.graphiql or graphiql\n self.batch = self.batch or batch\n self.backend = backend\n\n assert isinstance(\n self.schema, GraphQLSchema\n ), \"A Schema is required to be provided to GraphQLView.\"\n assert not all((graphiql, batch)), \"Use either graphiql or batch processing\"\n\n # noinspection PyUnusedLocal\n def get_root_value(self, request):\n return self.root_value\n\n def get_middleware(self, request):\n return self.middleware\n\n def get_context(self, request):\n return request\n\n def get_backend(self, request):\n return self.backend\n\n @method_decorator(ensure_csrf_cookie)\n def dispatch(self, request, *args, **kwargs):\n try:\n if request.method.lower() not in (\"get\", \"post\"):\n raise HttpError(\n HttpResponseNotAllowed(\n [\"GET\", \"POST\"], \"GraphQL only supports GET and POST requests.\"\n )\n )\n\n data = self.parse_body(request)\n show_graphiql = self.graphiql and self.can_display_graphiql(request, data)\n\n if show_graphiql:\n return self.render_graphiql(\n request, graphiql_version=self.graphiql_version\n )\n\n if self.batch:\n responses = [self.get_response(request, 
entry) for entry in data]\n result = \"[{}]\".format(\n \",\".join([response[0] for response in responses])\n )\n status_code = (\n responses\n and max(responses, key=lambda response: response[1])[1]\n or 200\n )\n else:\n result, status_code = self.get_response(request, data, show_graphiql)\n\n return HttpResponse(\n status=status_code, content=result, content_type=\"application/json\"\n )\n\n except HttpError as e:\n response = e.response\n response[\"Content-Type\"] = \"application/json\"\n response.content = self.json_encode(\n request, {\"errors\": [self.format_error(e)]}\n )\n return response\n\n def get_response(self, request, data, show_graphiql=False):\n query, variables, operation_name, id = self.get_graphql_params(request, data)\n\n execution_result = self.execute_graphql_request(\n request, data, query, variables, operation_name, show_graphiql\n )\n\n status_code = 200\n if execution_result:\n response = {}\n\n if execution_result.errors:\n response[\"errors\"] = [\n self.format_error(e) for e in execution_result.errors\n ]\n\n if execution_result.invalid:\n status_code = 400\n else:\n response[\"data\"] = execution_result.data\n\n if self.batch:\n response[\"id\"] = id\n response[\"status\"] = status_code\n\n result = self.json_encode(request, response, pretty=show_graphiql)\n else:\n result = None\n\n return result, status_code\n\n def render_graphiql(self, request, **data):\n return render(request, self.graphiql_template, data)\n\n def json_encode(self, request, d, pretty=False):\n if not (self.pretty or pretty) and not request.GET.get(\"pretty\"):\n return json.dumps(d, separators=(\",\", \":\"))\n\n return json.dumps(d, sort_keys=True, indent=2, separators=(\",\", \": \"))\n\n def parse_body(self, request):\n content_type = self.get_content_type(request)\n\n if content_type == \"application/graphql\":\n return {\"query\": request.body.decode()}\n\n elif content_type == \"application/json\":\n # noinspection PyBroadException\n try:\n body = request.body.decode(\"utf-8\")\n except Exception as e:\n raise HttpError(HttpResponseBadRequest(str(e)))\n\n try:\n request_json = json.loads(body)\n if self.batch:\n assert isinstance(request_json, list), (\n \"Batch requests should receive a list, but received {}.\"\n ).format(repr(request_json))\n assert (\n len(request_json) > 0\n ), \"Received an empty list in the batch request.\"\n else:\n assert isinstance(\n request_json, dict\n ), \"The received data is not a valid JSON query.\"\n return request_json\n except AssertionError as e:\n raise HttpError(HttpResponseBadRequest(str(e)))\n except (TypeError, ValueError):\n raise HttpError(HttpResponseBadRequest(\"POST body sent invalid JSON.\"))\n\n elif content_type in [\n \"application/x-www-form-urlencoded\",\n \"multipart/form-data\",\n ]:\n return request.POST\n\n return {}\n\n def execute_graphql_request(\n self, request, data, query, variables, operation_name, show_graphiql=False\n ):\n if not query:\n if show_graphiql:\n return None\n raise HttpError(HttpResponseBadRequest(\"Must provide query string.\"))\n\n try:\n backend = self.get_backend(request)\n document = backend.document_from_string(self.schema, query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == \"get\":\n operation_type = document.get_operation_type(operation_name)\n if operation_type and operation_type != \"query\":\n if show_graphiql:\n return None\n\n raise HttpError(\n HttpResponseNotAllowed(\n [\"POST\"],\n \"Can only perform a {} operation from a 
POST request.\".format(\n operation_type\n ),\n )\n )\n\n try:\n extra_options = {}\n if self.executor:\n # We only include it optionally since\n # executor is not a valid argument in all backends\n extra_options[\"executor\"] = self.executor\n\n return document.execute(\n root=self.get_root_value(request),\n variables=variables,\n operation_name=operation_name,\n context=self.get_context(request),\n middleware=self.get_middleware(request),\n **extra_options\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n @classmethod\n def can_display_graphiql(cls, request, data):\n raw = \"raw\" in request.GET or \"raw\" in data\n return not raw and cls.request_wants_html(request)\n\n @classmethod\n def request_wants_html(cls, request):\n accepted = get_accepted_content_types(request)\n accepted_length = len(accepted)\n # the list will be ordered in preferred first - so we have to make\n # sure the most preferred gets the highest number\n html_priority = (\n accepted_length - accepted.index(\"text/html\")\n if \"text/html\" in accepted\n else 0\n )\n json_priority = (\n accepted_length - accepted.index(\"application/json\")\n if \"application/json\" in accepted\n else 0\n )\n\n return html_priority > json_priority\n\n @staticmethod\n def get_graphql_params(request, data):\n query = request.GET.get(\"query\") or data.get(\"query\")\n variables = request.GET.get(\"variables\") or data.get(\"variables\")\n id = request.GET.get(\"id\") or data.get(\"id\")\n\n if variables and isinstance(variables, six.text_type):\n try:\n variables = json.loads(variables)\n except Exception:\n raise HttpError(HttpResponseBadRequest(\"Variables are invalid JSON.\"))\n\n operation_name = request.GET.get(\"operationName\") or data.get(\"operationName\")\n if operation_name == \"null\":\n operation_name = None\n\n return query, variables, operation_name, id\n\n @staticmethod\n def format_error(error):\n if isinstance(error, GraphQLError):\n return format_graphql_error(error)\n\n return {\"message\": six.text_type(error)}\n\n @staticmethod\n def get_content_type(request):\n meta = request.META\n content_type = meta.get(\"CONTENT_TYPE\", meta.get(\"HTTP_CONTENT_TYPE\", \"\"))\n return content_type.split(\";\", 1)[0].lower()\n", "path": "graphene_django/views.py" } ]
[ { "content": "import inspect\nimport json\nimport re\n\nimport six\nfrom django.http import HttpResponse, HttpResponseNotAllowed\nfrom django.http.response import HttpResponseBadRequest\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom graphql import get_default_backend\nfrom graphql.error import format_error as format_graphql_error\nfrom graphql.error import GraphQLError\nfrom graphql.execution import ExecutionResult\nfrom graphql.type.schema import GraphQLSchema\n\nfrom .settings import graphene_settings\n\n\nclass HttpError(Exception):\n def __init__(self, response, message=None, *args, **kwargs):\n self.response = response\n self.message = message = message or response.content.decode()\n super(HttpError, self).__init__(message, *args, **kwargs)\n\n\ndef get_accepted_content_types(request):\n def qualify(x):\n parts = x.split(\";\", 1)\n if len(parts) == 2:\n match = re.match(r\"(^|;)q=(0(\\.\\d{,3})?|1(\\.0{,3})?)(;|$)\", parts[1])\n if match:\n return parts[0].strip(), float(match.group(2))\n return parts[0].strip(), 1\n\n raw_content_types = request.META.get(\"HTTP_ACCEPT\", \"*/*\").split(\",\")\n qualified_content_types = map(qualify, raw_content_types)\n return list(\n x[0] for x in sorted(qualified_content_types, key=lambda x: x[1], reverse=True)\n )\n\n\ndef instantiate_middleware(middlewares):\n for middleware in middlewares:\n if inspect.isclass(middleware):\n yield middleware()\n continue\n yield middleware\n\n\nclass GraphQLView(View):\n graphiql_version = \"0.11.11\"\n graphiql_template = \"graphene/graphiql.html\"\n\n schema = None\n graphiql = False\n executor = None\n backend = None\n middleware = None\n root_value = None\n pretty = False\n batch = False\n\n def __init__(\n self,\n schema=None,\n executor=None,\n middleware=None,\n root_value=None,\n graphiql=False,\n pretty=False,\n batch=False,\n backend=None,\n ):\n if not schema:\n schema = graphene_settings.SCHEMA\n\n if backend is None:\n backend = get_default_backend()\n\n if middleware is None:\n middleware = graphene_settings.MIDDLEWARE\n\n self.schema = self.schema or schema\n if middleware is not None:\n self.middleware = list(instantiate_middleware(middleware))\n self.executor = executor\n self.root_value = root_value\n self.pretty = self.pretty or pretty\n self.graphiql = self.graphiql or graphiql\n self.batch = self.batch or batch\n self.backend = backend\n\n assert isinstance(\n self.schema, GraphQLSchema\n ), \"A Schema is required to be provided to GraphQLView.\"\n assert not all((graphiql, batch)), \"Use either graphiql or batch processing\"\n\n # noinspection PyUnusedLocal\n def get_root_value(self, request):\n return self.root_value\n\n def get_middleware(self, request):\n return self.middleware\n\n def get_context(self, request):\n return request\n\n def get_backend(self, request):\n return self.backend\n\n @method_decorator(ensure_csrf_cookie)\n def dispatch(self, request, *args, **kwargs):\n try:\n if request.method.lower() not in (\"get\", \"post\"):\n raise HttpError(\n HttpResponseNotAllowed(\n [\"GET\", \"POST\"], \"GraphQL only supports GET and POST requests.\"\n )\n )\n\n data = self.parse_body(request)\n show_graphiql = self.graphiql and self.can_display_graphiql(request, data)\n\n if show_graphiql:\n return self.render_graphiql(\n request, graphiql_version=self.graphiql_version\n )\n\n if self.batch:\n responses = [self.get_response(request, 
entry) for entry in data]\n result = \"[{}]\".format(\n \",\".join([response[0] for response in responses])\n )\n status_code = (\n responses\n and max(responses, key=lambda response: response[1])[1]\n or 200\n )\n else:\n result, status_code = self.get_response(request, data, show_graphiql)\n\n return HttpResponse(\n status=status_code, content=result, content_type=\"application/json\"\n )\n\n except HttpError as e:\n response = e.response\n response[\"Content-Type\"] = \"application/json\"\n response.content = self.json_encode(\n request, {\"errors\": [self.format_error(e)]}\n )\n return response\n\n def get_response(self, request, data, show_graphiql=False):\n query, variables, operation_name, id = self.get_graphql_params(request, data)\n\n execution_result = self.execute_graphql_request(\n request, data, query, variables, operation_name, show_graphiql\n )\n\n status_code = 200\n if execution_result:\n response = {}\n\n if execution_result.errors:\n response[\"errors\"] = [\n self.format_error(e) for e in execution_result.errors\n ]\n\n if execution_result.invalid:\n status_code = 400\n else:\n response[\"data\"] = execution_result.data\n\n if self.batch:\n response[\"id\"] = id\n response[\"status\"] = status_code\n\n result = self.json_encode(request, response, pretty=show_graphiql)\n else:\n result = None\n\n return result, status_code\n\n def render_graphiql(self, request, **data):\n return render(request, self.graphiql_template, data)\n\n def json_encode(self, request, d, pretty=False):\n if not (self.pretty or pretty) and not request.GET.get(\"pretty\"):\n return json.dumps(d, separators=(\",\", \":\"))\n\n return json.dumps(d, sort_keys=True, indent=2, separators=(\",\", \": \"))\n\n def parse_body(self, request):\n content_type = self.get_content_type(request)\n\n if content_type == \"application/graphql\":\n return {\"query\": request.body.decode()}\n\n elif content_type == \"application/json\":\n # noinspection PyBroadException\n try:\n body = request.body.decode(\"utf-8\")\n except Exception as e:\n raise HttpError(HttpResponseBadRequest(str(e)))\n\n try:\n request_json = json.loads(body)\n if self.batch:\n assert isinstance(request_json, list), (\n \"Batch requests should receive a list, but received {}.\"\n ).format(repr(request_json))\n assert (\n len(request_json) > 0\n ), \"Received an empty list in the batch request.\"\n else:\n assert isinstance(\n request_json, dict\n ), \"The received data is not a valid JSON query.\"\n return request_json\n except AssertionError as e:\n raise HttpError(HttpResponseBadRequest(str(e)))\n except (TypeError, ValueError):\n raise HttpError(HttpResponseBadRequest(\"POST body sent invalid JSON.\"))\n\n elif content_type in [\n \"application/x-www-form-urlencoded\",\n \"multipart/form-data\",\n ]:\n return request.POST\n\n return {}\n\n def execute_graphql_request(\n self, request, data, query, variables, operation_name, show_graphiql=False\n ):\n if not query:\n if show_graphiql:\n return None\n raise HttpError(HttpResponseBadRequest(\"Must provide query string.\"))\n\n try:\n backend = self.get_backend(request)\n document = backend.document_from_string(self.schema, query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == \"get\":\n operation_type = document.get_operation_type(operation_name)\n if operation_type and operation_type != \"query\":\n if show_graphiql:\n return None\n\n raise HttpError(\n HttpResponseNotAllowed(\n [\"POST\"],\n \"Can only perform a {} operation from a 
POST request.\".format(\n operation_type\n ),\n )\n )\n\n try:\n extra_options = {}\n if self.executor:\n # We only include it optionally since\n # executor is not a valid argument in all backends\n extra_options[\"executor\"] = self.executor\n\n return document.execute(\n root=self.get_root_value(request),\n variables=variables,\n operation_name=operation_name,\n context=self.get_context(request),\n middleware=self.get_middleware(request),\n **extra_options\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n @classmethod\n def can_display_graphiql(cls, request, data):\n raw = \"raw\" in request.GET or \"raw\" in data\n return not raw and cls.request_wants_html(request)\n\n @classmethod\n def request_wants_html(cls, request):\n accepted = get_accepted_content_types(request)\n accepted_length = len(accepted)\n # the list will be ordered in preferred first - so we have to make\n # sure the most preferred gets the highest number\n html_priority = (\n accepted_length - accepted.index(\"text/html\")\n if \"text/html\" in accepted\n else 0\n )\n json_priority = (\n accepted_length - accepted.index(\"application/json\")\n if \"application/json\" in accepted\n else 0\n )\n\n return html_priority > json_priority\n\n @staticmethod\n def get_graphql_params(request, data):\n query = request.GET.get(\"query\") or data.get(\"query\")\n variables = request.GET.get(\"variables\") or data.get(\"variables\")\n id = request.GET.get(\"id\") or data.get(\"id\")\n\n if variables and isinstance(variables, six.text_type):\n try:\n variables = json.loads(variables)\n except Exception:\n raise HttpError(HttpResponseBadRequest(\"Variables are invalid JSON.\"))\n\n operation_name = request.GET.get(\"operationName\") or data.get(\"operationName\")\n if operation_name == \"null\":\n operation_name = None\n\n return query, variables, operation_name, id\n\n @staticmethod\n def format_error(error):\n if isinstance(error, GraphQLError):\n return format_graphql_error(error)\n\n return {\"message\": six.text_type(error)}\n\n @staticmethod\n def get_content_type(request):\n meta = request.META\n content_type = meta.get(\"CONTENT_TYPE\", meta.get(\"HTTP_CONTENT_TYPE\", \"\"))\n return content_type.split(\";\", 1)[0].lower()\n", "path": "graphene_django/views.py" } ]
diff --git a/graphene_django/views.py b/graphene_django/views.py
index 0b840f97f..72cca8866 100644
--- a/graphene_django/views.py
+++ b/graphene_django/views.py
@@ -51,7 +51,7 @@ def instantiate_middleware(middlewares):
 
 
 class GraphQLView(View):
-    graphiql_version = "0.11.10"
+    graphiql_version = "0.11.11"
     graphiql_template = "graphene/graphiql.html"
 
     schema = None
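Note on graphql-python__graphene-django-639: the change only bumps the default GraphiQL asset version served by GraphQLView from 0.11.10 to 0.11.11. Because graphiql_version is a plain class attribute, a project can pin a different build by subclassing the view, as in the hedged sketch below; the URL path and the view name are made up for the example.

# Sketch: pinning the GraphiQL build by overriding the class attribute.
from django.urls import path
from graphene_django.views import GraphQLView


class PinnedGraphQLView(GraphQLView):
    # Overrides the default defined in graphene_django/views.py.
    graphiql_version = "0.11.11"


urlpatterns = [
    path("graphql", PinnedGraphQLView.as_view(graphiql=True)),
]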
localstack__localstack-1842
[ { "content": "import json\nimport time\nfrom random import randint\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws import aws_stack\nfrom localstack.constants import TEST_AWS_ACCOUNT_ID\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'es_api'\nAPI_PREFIX = '/2015-01-01'\n\nES_DOMAINS = {}\n\napp = Flask(APP_NAME)\n\n\ndef error_response(error_type, code=400, message='Unknown error.'):\n if not message:\n if error_type == 'ResourceNotFoundException':\n message = 'Resource not found.'\n elif error_type == 'ResourceAlreadyExistsException':\n message = 'Resource already exists.'\n response = make_response(jsonify({'error': message}))\n response.headers['x-amzn-errortype'] = error_type\n return response, code\n\n\ndef get_domain_config_status():\n return {\n 'CreationDate': '%.2f' % time.time(),\n 'PendingDeletion': False,\n 'State': 'Active',\n 'UpdateDate': '%.2f' % time.time(),\n 'UpdateVersion': randint(1, 100)\n }\n\n\ndef get_domain_config(domain_name):\n config_status = get_domain_config_status()\n return {\n 'DomainConfig': {\n 'AccessPolicies': {\n 'Options': '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::%s:root\"},\"Action\":\"es:*\",\"Resource\":\"arn:aws:es:%s:%s:domain/%s/*\"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501\n 'Status': config_status\n },\n 'AdvancedOptions': {\n 'Options': {\n 'indices.fielddata.cache.size': '',\n 'rest.action.multi.allow_explicit_index': 'true'\n },\n 'Status': config_status\n },\n 'EBSOptions': {\n 'Options': {\n 'EBSEnabled': True,\n 'EncryptionEnabled': False,\n 'Iops': 0,\n 'VolumeSize': 10,\n 'VolumeType': 'gp2'\n },\n 'Status': config_status\n },\n 'ElasticsearchClusterConfig': {\n 'Options': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'Status': config_status\n },\n 'ElasticsearchVersion': {\n 'Options': '5.3',\n 'Status': config_status\n },\n 'EncryptionAtRestOptions': {\n 'Options': {\n 'Enabled': False,\n 'KmsKeyId': ''\n },\n 'Status': config_status\n },\n 'LogPublishingOptions': {\n 'Options': {\n 'INDEX_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False\n },\n 'SEARCH_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False,\n }\n },\n 'Status': config_status\n },\n 'SnapshotOptions': {\n 'Options': {\n 'AutomatedSnapshotStartHour': randint(0, 23)\n },\n 'Status': config_status\n },\n 'VPCOptions': {\n 'Options': {\n 'AvailabilityZones': [\n 'us-east-1b'\n ],\n 'SecurityGroupIds': [\n 'sg-12345678'\n ],\n 'SubnetIds': [\n 'subnet-12345678'\n ],\n 'VPCId': 'vpc-12345678'\n },\n 'Status': config_status\n }\n }\n }\n\n\ndef get_domain_status(domain_name, deleted=False):\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n 'Created': True,\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n 'ElasticsearchClusterConfig': {\n 
'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': '6.7',\n 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),\n 'Processing': False,\n 'EBSOptions': {\n 'EBSEnabled': True,\n 'VolumeType': 'gp2',\n 'VolumeSize': 10,\n 'Iops': 0\n },\n }\n }\n\n\n@app.route('%s/domain' % API_PREFIX, methods=['GET'])\ndef list_domain_names():\n result = {\n 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()]\n }\n return jsonify(result)\n\n\n@app.route('%s/es/domain' % API_PREFIX, methods=['POST'])\ndef create_domain():\n data = json.loads(to_str(request.data))\n domain_name = data['DomainName']\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n result = get_domain_status(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\n@app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET'])\ndef describe_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name)\n return jsonify(result)\n\n\n@app.route('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST'])\ndef domain_config(domain_name):\n config = get_domain_config(domain_name)\n return jsonify(config)\n\n\n@app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE'])\ndef delete_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name, deleted=True)\n ES_DOMAINS.pop(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\n@app.route('%s/tags' % API_PREFIX, methods=['GET', 'POST'])\ndef add_list_tags():\n if request.method == 'GET' and request.args.get('arn'):\n response = {\n 'TagList': [\n {\n 'Key': 'Example1',\n 'Value': 'Value'\n },\n {\n 'Key': 'Example2',\n 'Value': 'Value'\n }\n ]\n }\n return jsonify(response)\n\n return jsonify({})\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/es/es_api.py" } ]
[ { "content": "import json\nimport time\nfrom random import randint\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws import aws_stack\nfrom localstack.constants import TEST_AWS_ACCOUNT_ID\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'es_api'\nAPI_PREFIX = '/2015-01-01'\n\nES_DOMAINS = {}\n\napp = Flask(APP_NAME)\napp.url_map.strict_slashes = False\n\n\ndef error_response(error_type, code=400, message='Unknown error.'):\n if not message:\n if error_type == 'ResourceNotFoundException':\n message = 'Resource not found.'\n elif error_type == 'ResourceAlreadyExistsException':\n message = 'Resource already exists.'\n response = make_response(jsonify({'error': message}))\n response.headers['x-amzn-errortype'] = error_type\n return response, code\n\n\ndef get_domain_config_status():\n return {\n 'CreationDate': '%.2f' % time.time(),\n 'PendingDeletion': False,\n 'State': 'Active',\n 'UpdateDate': '%.2f' % time.time(),\n 'UpdateVersion': randint(1, 100)\n }\n\n\ndef get_domain_config(domain_name):\n config_status = get_domain_config_status()\n return {\n 'DomainConfig': {\n 'AccessPolicies': {\n 'Options': '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::%s:root\"},\"Action\":\"es:*\",\"Resource\":\"arn:aws:es:%s:%s:domain/%s/*\"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501\n 'Status': config_status\n },\n 'AdvancedOptions': {\n 'Options': {\n 'indices.fielddata.cache.size': '',\n 'rest.action.multi.allow_explicit_index': 'true'\n },\n 'Status': config_status\n },\n 'EBSOptions': {\n 'Options': {\n 'EBSEnabled': True,\n 'EncryptionEnabled': False,\n 'Iops': 0,\n 'VolumeSize': 10,\n 'VolumeType': 'gp2'\n },\n 'Status': config_status\n },\n 'ElasticsearchClusterConfig': {\n 'Options': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'Status': config_status\n },\n 'ElasticsearchVersion': {\n 'Options': '5.3',\n 'Status': config_status\n },\n 'EncryptionAtRestOptions': {\n 'Options': {\n 'Enabled': False,\n 'KmsKeyId': ''\n },\n 'Status': config_status\n },\n 'LogPublishingOptions': {\n 'Options': {\n 'INDEX_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False\n },\n 'SEARCH_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False,\n }\n },\n 'Status': config_status\n },\n 'SnapshotOptions': {\n 'Options': {\n 'AutomatedSnapshotStartHour': randint(0, 23)\n },\n 'Status': config_status\n },\n 'VPCOptions': {\n 'Options': {\n 'AvailabilityZones': [\n 'us-east-1b'\n ],\n 'SecurityGroupIds': [\n 'sg-12345678'\n ],\n 'SubnetIds': [\n 'subnet-12345678'\n ],\n 'VPCId': 'vpc-12345678'\n },\n 'Status': config_status\n }\n }\n }\n\n\ndef get_domain_status(domain_name, deleted=False):\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n 'Created': True,\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n 
'ElasticsearchClusterConfig': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': '6.7',\n 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),\n 'Processing': False,\n 'EBSOptions': {\n 'EBSEnabled': True,\n 'VolumeType': 'gp2',\n 'VolumeSize': 10,\n 'Iops': 0\n },\n }\n }\n\n\n@app.route('%s/domain' % API_PREFIX, methods=['GET'])\ndef list_domain_names():\n result = {\n 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()]\n }\n return jsonify(result)\n\n\n@app.route('%s/es/domain' % API_PREFIX, methods=['POST'])\ndef create_domain():\n data = json.loads(to_str(request.data))\n domain_name = data['DomainName']\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n result = get_domain_status(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\n@app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET'])\ndef describe_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name)\n return jsonify(result)\n\n\n@app.route('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST'])\ndef domain_config(domain_name):\n config = get_domain_config(domain_name)\n return jsonify(config)\n\n\n@app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE'])\ndef delete_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name, deleted=True)\n ES_DOMAINS.pop(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\n@app.route('%s/tags' % API_PREFIX, methods=['GET', 'POST'])\ndef add_list_tags():\n if request.method == 'GET' and request.args.get('arn'):\n response = {\n 'TagList': [\n {\n 'Key': 'Example1',\n 'Value': 'Value'\n },\n {\n 'Key': 'Example2',\n 'Value': 'Value'\n }\n ]\n }\n return jsonify(response)\n\n return jsonify({})\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/es/es_api.py" } ]
diff --git a/Makefile b/Makefile index 30b5c73351da5..263e9237bc65d 100644 --- a/Makefile +++ b/Makefile @@ -97,7 +97,7 @@ test: ## Run automated tests ($(VENV_RUN); DEBUG=$(DEBUG) PYTHONPATH=`pwd` nosetests --with-timer --with-coverage --logging-level=WARNING --nocapture --no-skip --exe --cover-erase --cover-tests --cover-inclusive --cover-package=localstack --with-xunit --exclude='$(VENV_DIR).*' --ignore-files='lambda_python3.py' $(TEST_PATH)) test-java: ## Run tests for Java/JUnit compatibility - cd localstack/ext/java; USE_SSL=1 SERVICES=serverless,kinesis,sns,sqs mvn $(MVN_ARGS) -q test + cd localstack/ext/java; USE_SSL=1 SERVICES=serverless,kinesis,sns,sqs,cloudwatch mvn $(MVN_ARGS) -q test prepare-java-tests-if-changed: @(! (git log -n 1 --no-merges --raw | grep localstack/ext/java/)) || (\ diff --git a/localstack/ext/java/src/main/java/cloud/localstack/Localstack.java b/localstack/ext/java/src/main/java/cloud/localstack/Localstack.java index 234529ec20b2e..6a88a02532bac 100644 --- a/localstack/ext/java/src/main/java/cloud/localstack/Localstack.java +++ b/localstack/ext/java/src/main/java/cloud/localstack/Localstack.java @@ -160,6 +160,10 @@ public String getEndpointRedshift() { return endpointForService(ServiceName.REDSHIFT); } + public String getEndpointCloudWatch() { + return endpointForService(ServiceName.CLOUDWATCH); + } + public String getEndpointSES() { return endpointForService(ServiceName.SES); } @@ -172,10 +176,6 @@ public String getEndpointCloudFormation() { return endpointForService(ServiceName.CLOUDFORMATION); } - public String getEndpointCloudWatch() { - return endpointForService(ServiceName.CLOUDWATCH); - } - public String getEndpointSSM() { return endpointForService(ServiceName.SSM); } diff --git a/localstack/ext/java/src/main/java/cloud/localstack/TestUtils.java b/localstack/ext/java/src/main/java/cloud/localstack/TestUtils.java index 429fa848a8694..d822e2fc23a0e 100644 --- a/localstack/ext/java/src/main/java/cloud/localstack/TestUtils.java +++ b/localstack/ext/java/src/main/java/cloud/localstack/TestUtils.java @@ -6,6 +6,8 @@ import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.client.builder.ExecutorFactory; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBStreams; @@ -162,6 +164,12 @@ public static AmazonKinesisAsync getClientKinesisAsync(final ExecutorFactory exe withCredentials(getCredentialsProvider()).build(); } + public static AmazonCloudWatch getClientCloudWatch() { + return AmazonCloudWatchClientBuilder.standard(). + withEndpointConfiguration(getEndpointConfigurationCloudWatch()). 
+ withCredentials(getCredentialsProvider()).build(); + } + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationLambda() { return getEndpointConfiguration(Localstack.INSTANCE.getEndpointLambda()); } @@ -190,6 +198,10 @@ protected static AwsClientBuilder.EndpointConfiguration getEndpointConfiguration return getEndpointConfiguration(Localstack.INSTANCE.getEndpointSNS()); } + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationCloudWatch() { + return getEndpointConfiguration(Localstack.INSTANCE.getEndpointCloudWatch()); + } + protected static AwsClientBuilder.EndpointConfiguration getEndpointConfigurationSecretsManager() { return getEndpointConfiguration(Localstack.INSTANCE.getEndpointSecretsmanager()); } diff --git a/localstack/ext/java/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java b/localstack/ext/java/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java index 221ecb4aa1d71..9d32d809427ca 100644 --- a/localstack/ext/java/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java +++ b/localstack/ext/java/src/test/java/cloud/localstack/docker/BasicDockerFunctionalityTest.java @@ -7,6 +7,8 @@ import com.amazon.sqs.javamessaging.SQSConnection; import com.amazon.sqs.javamessaging.SQSConnectionFactory; import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.services.cloudwatch.*; +import com.amazonaws.services.cloudwatch.model.*; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; import com.amazonaws.services.dynamodbv2.model.CreateTableRequest; @@ -33,6 +35,7 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.runner.RunWith; +import org.junit.Assert; import javax.jms.MessageConsumer; import javax.jms.MessageProducer; @@ -171,6 +174,30 @@ public void testSQS() throws Exception { Assertions.assertThat(received.getText()).isEqualTo("Hello World!"); } + @org.junit.Test + @org.junit.jupiter.api.Test + public void testCloudWatch() throws Exception { + AmazonCloudWatch client = TestUtils.getClientCloudWatch(); + Dimension dimension = new Dimension() + .withName("UNIQUE_PAGES") + .withValue("URLS"); + MetricDatum datum = new MetricDatum() + .withMetricName("PAGES_VISITED") + .withUnit(StandardUnit.None) + .withDimensions(dimension); + PutMetricDataRequest request = new PutMetricDataRequest() + .withNamespace("SITE/TRAFFIC") + .withMetricData(datum); + // assert no error gets thrown for null values + datum.setValue(null); + PutMetricDataResult response = client.putMetricData(request); + Assert.assertNotNull(response); + // assert success for double values + datum.setValue(123.4); + response = client.putMetricData(request); + Assert.assertNotNull(response); + } + private SQSConnection createSQSConnection() throws Exception { SQSConnectionFactory connectionFactory = SQSConnectionFactory.builder().withEndpoint( Localstack.INSTANCE.getEndpointSQS()).withAWSCredentialsProvider( diff --git a/localstack/services/es/es_api.py b/localstack/services/es/es_api.py index f56fed16d6bd9..2dd192508dc5d 100644 --- a/localstack/services/es/es_api.py +++ b/localstack/services/es/es_api.py @@ -14,6 +14,7 @@ ES_DOMAINS = {} app = Flask(APP_NAME) +app.url_map.strict_slashes = False def error_response(error_type, code=400, message='Unknown error.'): diff --git a/tests/integration/test_cloudwatch.py b/tests/integration/test_cloudwatch.py new file mode 100644 
index 0000000000000..b81591e4f3b99 --- /dev/null +++ b/tests/integration/test_cloudwatch.py @@ -0,0 +1,35 @@ +import unittest +from localstack.utils.aws import aws_stack + + +class CloudWatchTest(unittest.TestCase): + + def test_put_metric_data(self): + client = aws_stack.connect_to_service('cloudwatch') + + data = [ + { + 'MetricName': 'm1', + 'Dimensions': [{ + 'Name': 'foo', + 'Value': 'bar' + }], + 'Value': 123.45, + 'StatisticValues': { + 'SampleCount': 123.0, + 'Sum': 123.0, + 'Minimum': 123.0, + 'Maximum': 123.0 + }, + 'Values': [ + 123.0, + ], + 'Counts': [ + 123.0, + ], + 'Unit': 'Seconds', + 'StorageResolution': 123 + }, + ] + response = client.put_metric_data(Namespace='string', MetricData=data) + self.assertEquals(response['ResponseMetadata']['HTTPStatusCode'], 200) diff --git a/tests/integration/test_logs.py b/tests/integration/test_logs.py new file mode 100644 index 0000000000000..9b04fef8ff788 --- /dev/null +++ b/tests/integration/test_logs.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- + +import unittest +from localstack.utils.aws import aws_stack +from localstack.utils.common import short_uid + + +class CloudWatchLogsTest(unittest.TestCase): + + def test_put_events_multibyte_msg(self): + client = aws_stack.connect_to_service('logs') + + group = 'g-%s' % short_uid() + stream = 's-%s' % short_uid() + response = client.create_log_group(logGroupName=group) + self.assertEquals(response['ResponseMetadata']['HTTPStatusCode'], 200) + response = client.create_log_stream(logGroupName=group, logStreamName=stream) + self.assertEquals(response['ResponseMetadata']['HTTPStatusCode'], 200) + + # send message with non-ASCII (multi-byte) chars + body_msg = 'πŸ™€ - ε‚γ‚ˆ' + events = [{ + 'timestamp': 1234567, + 'message': body_msg + }] + response = client.put_log_events(logGroupName=group, logStreamName=stream, logEvents=events) + self.assertEquals(response['ResponseMetadata']['HTTPStatusCode'], 200)
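The only functional change to es_api.py in this diff is the added line `app.url_map.strict_slashes = False`; the rest of the patch adds CloudWatch wiring and tests. A minimal standalone sketch of what that flag does, using a toy Flask app rather than the LocalStack code:

# Toy app illustrating Werkzeug's strict_slashes setting (an illustration,
# not the LocalStack implementation).
from flask import Flask, jsonify

app = Flask(__name__)
# With the default (strict) behaviour, a rule declared without a trailing
# slash answers the trailing-slash form of the URL with a 404. Relaxing it
# lets both forms reach the same view (directly or via a redirect,
# depending on the Werkzeug version).
app.url_map.strict_slashes = False

@app.route('/2015-01-01/domain', methods=['GET'])
def list_domain_names():
    return jsonify({'DomainNames': []})

if __name__ == '__main__':
    with app.test_client() as client:
        print(client.get('/2015-01-01/domain', follow_redirects=True).status_code)
        print(client.get('/2015-01-01/domain/', follow_redirects=True).status_code)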
pymedusa__Medusa-4086
[ { "content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp\n\nfrom medusa import app, common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(\n common.notifyStrings[\n (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]\n ], ep_name)\n\n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Initialize Notification\n notice = gntp.core.GNTPNotice(\n app=options['app'],\n name=options['name'],\n title=options['title'],\n password=options['password'],\n )\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n 
{'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py" } ]
[ { "content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp.core\n\nfrom medusa import app, common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(\n common.notifyStrings[\n (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]\n ], ep_name)\n\n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Initialize Notification\n notice = gntp.core.GNTPNotice(\n app=options['app'],\n name=options['name'],\n title=options['title'],\n password=options['password'],\n )\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - 
{msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py" } ]
diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py index ca7a1e0f41..a573134591 100644 --- a/medusa/notifiers/growl.py +++ b/medusa/notifiers/growl.py @@ -7,7 +7,7 @@ import socket from builtins import object -import gntp +import gntp.core from medusa import app, common from medusa.helper.exceptions import ex
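The fix replaces `import gntp` with `import gntp.core` because importing a package does not automatically import its submodules, so attribute access such as `gntp.core.GNTPNotice` can fail with AttributeError after a bare package import. A small sketch of the same effect using the standard library's `xml` package as a stand-in for gntp (which may not be installed):

# Run in a fresh interpreter: `import xml` alone does not load xml.etree.
import xml

try:
    xml.etree.ElementTree  # submodule not loaded by the bare package import
except AttributeError as error:
    print('bare package import is not enough:', error)

# Explicit submodule import, analogous to `import gntp.core` in the patch.
import xml.etree.ElementTree
print(xml.etree.ElementTree.__name__)  # 'xml.etree.ElementTree'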
encode__httpx-589
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"tom@tomchristie.com\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"tom@tomchristie.com\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index e58fd8ce68..a28e0e03f1 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ def get_packages(package): "h11==0.8.*", "h2==3.*", "idna==2.*", - "rfc3986==1.*", + "rfc3986>=1.3,<2", "sniffio==1.*", ], classifiers=[
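The setup.py change relaxes the rfc3986 pin from `==1.*` to `>=1.3,<2`, i.e. any 1.x release is no longer enough; at least 1.3 is now required. A sketch of what the two specifiers accept, using the `packaging` library purely for illustration (setup.py itself only stores the specifier strings):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_spec = SpecifierSet("==1.*")     # previous pin: any 1.x release
new_spec = SpecifierSet(">=1.3,<2")  # pin after this diff

for candidate in ["1.2.0", "1.3.2", "2.0.0"]:
    version = Version(candidate)
    print(candidate, version in old_spec, version in new_spec)
# 1.2.0: accepted by the old pin, rejected by the new one
# 1.3.2: accepted by both
# 2.0.0: rejected by both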
getredash__redash-740
[ { "content": "import datetime\nimport time\nimport logging\nimport signal\nfrom flask.ext.mail import Message\nimport redis\nimport hipchat\nimport requests\nfrom redash.utils import json_dumps\nfrom requests.auth import HTTPBasicAuth\nfrom celery import Task\nfrom celery.result import AsyncResult\nfrom celery.utils.log import get_task_logger\nfrom redash import redis_connection, models, statsd_client, settings, utils, mail\nfrom redash.utils import gen_query_hash\nfrom redash.worker import celery\nfrom redash.query_runner import get_query_runner, InterruptException\nfrom version_check import run_version_check\n\nlogger = get_task_logger(__name__)\n\n\nclass BaseTask(Task):\n abstract = True\n\n def after_return(self, *args, **kwargs):\n models.db.close_db(None)\n\n def __call__(self, *args, **kwargs):\n models.db.connect_db()\n return super(BaseTask, self).__call__(*args, **kwargs)\n\n\nclass QueryTask(object):\n MAX_RETRIES = 5\n\n # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this\n STATUSES = {\n 'PENDING': 1,\n 'STARTED': 2,\n 'SUCCESS': 3,\n 'FAILURE': 4,\n 'REVOKED': 4\n }\n\n def __init__(self, job_id=None, async_result=None):\n if async_result:\n self._async_result = async_result\n else:\n self._async_result = AsyncResult(job_id, app=celery)\n\n @property\n def id(self):\n return self._async_result.id\n\n @classmethod\n def add_task(cls, query, data_source, scheduled=False, metadata={}):\n query_hash = gen_query_hash(query)\n logging.info(\"[Manager][%s] Inserting job\", query_hash)\n logging.info(\"[Manager] Metadata: [%s]\", metadata)\n try_count = 0\n job = None\n \n while try_count < cls.MAX_RETRIES:\n try_count += 1\n\n pipe = redis_connection.pipeline()\n try:\n pipe.watch(cls._job_lock_id(query_hash, data_source.id))\n job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n\n job = cls(job_id=job_id)\n if job.ready():\n logging.info(\"[%s] job found is ready (%s), removing lock\", query_hash, job.celery_status)\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n job = None\n\n if not job:\n pipe.multi()\n\n if scheduled:\n queue_name = data_source.scheduled_queue_name\n else:\n queue_name = data_source.queue_name\n\n result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)\n job = cls(async_result=result)\n \n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)\n pipe.execute()\n break\n\n except redis.WatchError:\n continue\n\n if not job:\n logging.error(\"[Manager][%s] Failed adding job for query.\", query_hash)\n\n return job\n\n def to_dict(self):\n if self._async_result.status == 'STARTED':\n updated_at = self._async_result.result.get('start_time', 0)\n else:\n updated_at = 0\n\n if self._async_result.failed() and isinstance(self._async_result.result, Exception):\n error = self._async_result.result.message\n elif self._async_result.status == 'REVOKED':\n error = 'Query execution cancelled.'\n else:\n error = ''\n\n if self._async_result.successful():\n query_result_id = self._async_result.result\n else:\n query_result_id = None\n\n return {\n 'id': self._async_result.id,\n 'updated_at': updated_at,\n 'status': self.STATUSES[self._async_result.status],\n 'error': error,\n 'query_result_id': query_result_id,\n }\n\n @property\n def 
is_cancelled(self):\n return self._async_result.status == 'REVOKED'\n\n @property\n def celery_status(self):\n return self._async_result.status\n\n def ready(self):\n return self._async_result.ready()\n\n def cancel(self):\n return self._async_result.revoke(terminate=True, signal='SIGINT')\n\n @staticmethod\n def _job_lock_id(query_hash, data_source_id):\n return \"query_hash_job:%s:%s\" % (data_source_id, query_hash)\n\n\n@celery.task(base=BaseTask)\ndef refresh_queries():\n # self.status['last_refresh_at'] = time.time()\n # self._save_status()\n\n logger.info(\"Refreshing queries...\")\n\n outdated_queries_count = 0\n for query in models.Query.outdated_queries():\n QueryTask.add_task(query.query, query.data_source, scheduled=True,\n metadata={'Query ID': query.id, 'Username': 'Scheduled'})\n outdated_queries_count += 1\n\n statsd_client.gauge('manager.outdated_queries', outdated_queries_count)\n\n logger.info(\"Done refreshing queries. Found %d outdated queries.\" % outdated_queries_count)\n\n status = redis_connection.hgetall('redash:status')\n now = time.time()\n\n redis_connection.hmset('redash:status', {\n 'outdated_queries_count': outdated_queries_count,\n 'last_refresh_at': now\n })\n\n statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))\n\n\n@celery.task(base=BaseTask)\ndef cleanup_tasks():\n # in case of cold restart of the workers, there might be jobs that still have their \"lock\" object, but aren't really\n # going to run. this job removes them.\n lock_keys = redis_connection.keys(\"query_hash_job:*\") # TODO: use set instead of keys command\n if not lock_keys:\n return\n \n query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]\n\n logger.info(\"Found %d locks\", len(query_tasks))\n\n inspect = celery.control.inspect()\n active_tasks = inspect.active()\n if active_tasks is None:\n active_tasks = []\n else:\n active_tasks = active_tasks.values()\n\n all_tasks = set()\n for task_list in active_tasks:\n for task in task_list:\n all_tasks.add(task['id'])\n\n logger.info(\"Active jobs count: %d\", len(all_tasks))\n\n for i, t in enumerate(query_tasks):\n if t.ready():\n # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore\n logger.warning(\"%s is ready (%s), removing lock.\", lock_keys[i], t.celery_status)\n redis_connection.delete(lock_keys[i])\n\n # if t.celery_status == 'STARTED' and t.id not in all_tasks:\n # logger.warning(\"Couldn't find active job for: %s, removing lock.\", lock_keys[i])\n # redis_connection.delete(lock_keys[i])\n\n\n@celery.task(base=BaseTask)\ndef cleanup_query_results():\n \"\"\"\n Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less\n likely to be open in someone's browser and be used).\n\n Each time the job deletes only 100 query results so it won't choke the database in case of many such results.\n \"\"\"\n\n logging.info(\"Running query results clean up (removing maximum of %d unused results, that are %d days old or more)\",\n settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)\n\n unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)\n total_unused_query_results = models.QueryResult.unused().count()\n deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()\n\n logger.info(\"Deleted %d unused query results out of total of 
%d.\" % (deleted_count, total_unused_query_results))\n\n\n@celery.task(base=BaseTask)\ndef refresh_schemas():\n \"\"\"\n Refreshs the datasources schema.\n \"\"\"\n\n for ds in models.DataSource.select():\n logger.info(\"Refreshing schema for: {}\".format(ds.name))\n ds.get_schema(refresh=True)\n\n\ndef signal_handler(*args):\n raise InterruptException\n\n\n@celery.task(bind=True, base=BaseTask, track_started=True)\ndef execute_query(self, query, data_source_id, metadata):\n signal.signal(signal.SIGINT, signal_handler)\n start_time = time.time()\n\n logger.info(\"Loading data source (%d)...\", data_source_id)\n\n # TODO: we should probably cache data sources in Redis\n data_source = models.DataSource.get_by_id(data_source_id)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})\n\n logger.info(\"Executing query:\\n%s\", query)\n\n query_hash = gen_query_hash(query)\n query_runner = get_query_runner(data_source.type, data_source.options)\n\n if query_runner.annotate_query():\n metadata['Task ID'] = self.request.id\n metadata['Query Hash'] = query_hash\n metadata['Queue'] = self.request.delivery_info['routing_key']\n\n annotation = u\", \".join([u\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n\n logging.debug(u\"Annotation: %s\", annotation)\n\n annotated_query = u\"/* {} */ {}\".format(annotation, query)\n else:\n annotated_query = query\n\n with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):\n data, error = query_runner.run_query(annotated_query)\n\n run_time = time.time() - start_time\n logger.info(\"Query finished... data length=%s, error=%s\", data and len(data), error)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n\n # Delete query_hash\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n\n if not error:\n query_result, updated_query_ids = models.QueryResult.store_result(data_source.org_id, data_source.id, query_hash, query, data, run_time, utils.utcnow())\n for query_id in updated_query_ids:\n check_alerts_for_query.delay(query_id)\n else:\n raise Exception(error)\n\n return query_result.id\n\n\n@celery.task(base=BaseTask)\ndef record_event(event):\n models.Event.record(event)\n\n@celery.task(base=BaseTask)\ndef version_check():\n run_version_check()\n\n\ndef base_url(org):\n if org.domain:\n return 'https://{}'.format(org.domain)\n return settings.HOST\n\n\n@celery.task(bind=True, base=BaseTask)\ndef check_alerts_for_query(self, query_id):\n from redash.wsgi import app\n\n logger.debug(\"Checking query %d for alerts\", query_id)\n query = models.Query.get_by_id(query_id)\n for alert in query.alerts:\n alert.query = query\n new_state = alert.evaluate()\n passed_rearm_threshold = False\n if alert.rearm and alert.last_triggered_at:\n passed_rearm_threshold = alert.last_triggered_at + datetime.timedelta(seconds=alert.rearm) < utils.utcnow()\n if new_state != alert.state or (alert.state == models.Alert.TRIGGERED_STATE and passed_rearm_threshold ):\n logger.info(\"Alert %d new state: %s\", alert.id, new_state)\n old_state = alert.state\n alert.update_instance(state=new_state, last_triggered_at=utils.utcnow())\n\n if old_state == models.Alert.UNKNOWN_STATE and new_state == models.Alert.OK_STATE:\n logger.debug(\"Skipping notification (previous state was unknown and now it's ok).\")\n continue\n\n # message = Message\n html = \"\"\"\n Check <a href=\"{host}/alerts/{alert_id}\">alert</a> / check <a 
href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=base_url(alert.query.org), alert_id=alert.id, query_id=query.id)\n\n notify_mail(alert, html, new_state, app)\n\n if settings.HIPCHAT_API_TOKEN:\n notify_hipchat(alert, html, new_state)\n\n if settings.WEBHOOK_ENDPOINT:\n notify_webhook(alert, query, html, new_state)\n\n\ndef notify_hipchat(alert, html, new_state):\n try:\n hipchat_client = hipchat.HipChat(token=settings.HIPCHAT_API_TOKEN)\n message = '[' + new_state.upper() + '] ' + alert.name + '<br />' + html\n hipchat_client.message_room(settings.HIPCHAT_ROOM_ID, settings.NAME, message.encode('utf-8', 'ignore'), message_format='html')\n except Exception:\n logger.exception(\"hipchat send ERROR.\")\n\n\ndef notify_mail(alert, html, new_state, app):\n recipients = [s.email for s in alert.subscribers()]\n logger.debug(\"Notifying: %s\", recipients)\n try:\n with app.app_context():\n message = Message(recipients=recipients,\n subject=\"[{1}] {0}\".format(alert.name.encode('utf-8', 'ignore'), new_state.upper()),\n html=html)\n mail.send(message)\n except Exception:\n logger.exception(\"mail send ERROR.\")\n\n\ndef notify_webhook(alert, query, html, new_state):\n try:\n data = {\n 'event': 'alert_state_change',\n 'alert': alert.to_dict(full=False),\n 'url_base': base_url(query.org)\n }\n headers = {'Content-Type': 'application/json'}\n auth = HTTPBasicAuth(settings.WEBHOOK_USERNAME, settings.WEBHOOK_PASSWORD) if settings.WEBHOOK_USERNAME else None\n resp = requests.post(settings.WEBHOOK_ENDPOINT, data=json_dumps(data), auth=auth, headers=headers)\n if resp.status_code != 200:\n logger.error(\"webhook send ERROR. status_code => {status}\".format(status=resp.status_code))\n except Exception:\n logger.exception(\"webhook send ERROR.\")\n", "path": "redash/tasks.py" } ]
[ { "content": "import datetime\nimport time\nimport logging\nimport signal\nfrom flask.ext.mail import Message\nimport redis\nimport hipchat\nimport requests\nfrom redash.utils import json_dumps\nfrom requests.auth import HTTPBasicAuth\nfrom celery import Task\nfrom celery.result import AsyncResult\nfrom celery.utils.log import get_task_logger\nfrom redash import redis_connection, models, statsd_client, settings, utils, mail\nfrom redash.utils import gen_query_hash\nfrom redash.worker import celery\nfrom redash.query_runner import get_query_runner, InterruptException\nfrom version_check import run_version_check\n\nlogger = get_task_logger(__name__)\n\n\nclass BaseTask(Task):\n abstract = True\n\n def after_return(self, *args, **kwargs):\n models.db.close_db(None)\n\n def __call__(self, *args, **kwargs):\n models.db.connect_db()\n return super(BaseTask, self).__call__(*args, **kwargs)\n\n\nclass QueryTask(object):\n MAX_RETRIES = 5\n\n # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this\n STATUSES = {\n 'PENDING': 1,\n 'STARTED': 2,\n 'SUCCESS': 3,\n 'FAILURE': 4,\n 'REVOKED': 4\n }\n\n def __init__(self, job_id=None, async_result=None):\n if async_result:\n self._async_result = async_result\n else:\n self._async_result = AsyncResult(job_id, app=celery)\n\n @property\n def id(self):\n return self._async_result.id\n\n @classmethod\n def add_task(cls, query, data_source, scheduled=False, metadata={}):\n query_hash = gen_query_hash(query)\n logging.info(\"[Manager][%s] Inserting job\", query_hash)\n logging.info(\"[Manager] Metadata: [%s]\", metadata)\n try_count = 0\n job = None\n \n while try_count < cls.MAX_RETRIES:\n try_count += 1\n\n pipe = redis_connection.pipeline()\n try:\n pipe.watch(cls._job_lock_id(query_hash, data_source.id))\n job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n\n job = cls(job_id=job_id)\n if job.ready():\n logging.info(\"[%s] job found is ready (%s), removing lock\", query_hash, job.celery_status)\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n job = None\n\n if not job:\n pipe.multi()\n\n if scheduled:\n queue_name = data_source.scheduled_queue_name\n else:\n queue_name = data_source.queue_name\n\n result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)\n job = cls(async_result=result)\n \n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)\n pipe.execute()\n break\n\n except redis.WatchError:\n continue\n\n if not job:\n logging.error(\"[Manager][%s] Failed adding job for query.\", query_hash)\n\n return job\n\n def to_dict(self):\n if self._async_result.status == 'STARTED':\n updated_at = self._async_result.result.get('start_time', 0)\n else:\n updated_at = 0\n\n if self._async_result.failed() and isinstance(self._async_result.result, Exception):\n error = self._async_result.result.message\n elif self._async_result.status == 'REVOKED':\n error = 'Query execution cancelled.'\n else:\n error = ''\n\n if self._async_result.successful():\n query_result_id = self._async_result.result\n else:\n query_result_id = None\n\n return {\n 'id': self._async_result.id,\n 'updated_at': updated_at,\n 'status': self.STATUSES[self._async_result.status],\n 'error': error,\n 'query_result_id': query_result_id,\n }\n\n @property\n def 
is_cancelled(self):\n return self._async_result.status == 'REVOKED'\n\n @property\n def celery_status(self):\n return self._async_result.status\n\n def ready(self):\n return self._async_result.ready()\n\n def cancel(self):\n return self._async_result.revoke(terminate=True, signal='SIGINT')\n\n @staticmethod\n def _job_lock_id(query_hash, data_source_id):\n return \"query_hash_job:%s:%s\" % (data_source_id, query_hash)\n\n\n@celery.task(base=BaseTask)\ndef refresh_queries():\n # self.status['last_refresh_at'] = time.time()\n # self._save_status()\n\n logger.info(\"Refreshing queries...\")\n\n outdated_queries_count = 0\n for query in models.Query.outdated_queries():\n QueryTask.add_task(query.query, query.data_source, scheduled=True,\n metadata={'Query ID': query.id, 'Username': 'Scheduled'})\n outdated_queries_count += 1\n\n statsd_client.gauge('manager.outdated_queries', outdated_queries_count)\n\n logger.info(\"Done refreshing queries. Found %d outdated queries.\" % outdated_queries_count)\n\n status = redis_connection.hgetall('redash:status')\n now = time.time()\n\n redis_connection.hmset('redash:status', {\n 'outdated_queries_count': outdated_queries_count,\n 'last_refresh_at': now\n })\n\n statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))\n\n\n@celery.task(base=BaseTask)\ndef cleanup_tasks():\n # in case of cold restart of the workers, there might be jobs that still have their \"lock\" object, but aren't really\n # going to run. this job removes them.\n lock_keys = redis_connection.keys(\"query_hash_job:*\") # TODO: use set instead of keys command\n if not lock_keys:\n return\n \n query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]\n\n logger.info(\"Found %d locks\", len(query_tasks))\n\n inspect = celery.control.inspect()\n active_tasks = inspect.active()\n if active_tasks is None:\n active_tasks = []\n else:\n active_tasks = active_tasks.values()\n\n all_tasks = set()\n for task_list in active_tasks:\n for task in task_list:\n all_tasks.add(task['id'])\n\n logger.info(\"Active jobs count: %d\", len(all_tasks))\n\n for i, t in enumerate(query_tasks):\n if t.ready():\n # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore\n logger.warning(\"%s is ready (%s), removing lock.\", lock_keys[i], t.celery_status)\n redis_connection.delete(lock_keys[i])\n\n # if t.celery_status == 'STARTED' and t.id not in all_tasks:\n # logger.warning(\"Couldn't find active job for: %s, removing lock.\", lock_keys[i])\n # redis_connection.delete(lock_keys[i])\n\n\n@celery.task(base=BaseTask)\ndef cleanup_query_results():\n \"\"\"\n Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less\n likely to be open in someone's browser and be used).\n\n Each time the job deletes only 100 query results so it won't choke the database in case of many such results.\n \"\"\"\n\n logging.info(\"Running query results clean up (removing maximum of %d unused results, that are %d days old or more)\",\n settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)\n\n unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)\n total_unused_query_results = models.QueryResult.unused().count()\n deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()\n\n logger.info(\"Deleted %d unused query results out of total of 
%d.\" % (deleted_count, total_unused_query_results))\n\n\n@celery.task(base=BaseTask)\ndef refresh_schemas():\n \"\"\"\n Refreshs the datasources schema.\n \"\"\"\n\n for ds in models.DataSource.select():\n logger.info(\"Refreshing schema for: {}\".format(ds.name))\n ds.get_schema(refresh=True)\n\n\ndef signal_handler(*args):\n raise InterruptException\n\n\n@celery.task(bind=True, base=BaseTask, track_started=True)\ndef execute_query(self, query, data_source_id, metadata):\n signal.signal(signal.SIGINT, signal_handler)\n start_time = time.time()\n\n logger.info(\"Loading data source (%d)...\", data_source_id)\n\n # TODO: we should probably cache data sources in Redis\n data_source = models.DataSource.get_by_id(data_source_id)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})\n\n logger.info(\"Executing query:\\n%s\", query)\n\n query_hash = gen_query_hash(query)\n query_runner = get_query_runner(data_source.type, data_source.options)\n\n if query_runner.annotate_query():\n metadata['Task ID'] = self.request.id\n metadata['Query Hash'] = query_hash\n metadata['Queue'] = self.request.delivery_info['routing_key']\n\n annotation = u\", \".join([u\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n\n logging.debug(u\"Annotation: %s\", annotation)\n\n annotated_query = u\"/* {} */ {}\".format(annotation, query)\n else:\n annotated_query = query\n\n with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):\n data, error = query_runner.run_query(annotated_query)\n\n run_time = time.time() - start_time\n logger.info(\"Query finished... data length=%s, error=%s\", data and len(data), error)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n\n # Delete query_hash\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n\n if not error:\n query_result, updated_query_ids = models.QueryResult.store_result(data_source.org_id, data_source.id, query_hash, query, data, run_time, utils.utcnow())\n for query_id in updated_query_ids:\n check_alerts_for_query.delay(query_id)\n else:\n raise Exception(error)\n\n return query_result.id\n\n\n@celery.task(base=BaseTask)\ndef record_event(event):\n models.Event.record(event)\n\n@celery.task(base=BaseTask)\ndef version_check():\n run_version_check()\n\n\ndef base_url(org):\n if settings.MULTI_ORG:\n return \"https://{}/{}\".format(settings.HOST, org.slug)\n\n return settings.HOST\n\n\n@celery.task(bind=True, base=BaseTask)\ndef check_alerts_for_query(self, query_id):\n from redash.wsgi import app\n\n logger.debug(\"Checking query %d for alerts\", query_id)\n query = models.Query.get_by_id(query_id)\n for alert in query.alerts:\n alert.query = query\n new_state = alert.evaluate()\n passed_rearm_threshold = False\n if alert.rearm and alert.last_triggered_at:\n passed_rearm_threshold = alert.last_triggered_at + datetime.timedelta(seconds=alert.rearm) < utils.utcnow()\n if new_state != alert.state or (alert.state == models.Alert.TRIGGERED_STATE and passed_rearm_threshold ):\n logger.info(\"Alert %d new state: %s\", alert.id, new_state)\n old_state = alert.state\n alert.update_instance(state=new_state, last_triggered_at=utils.utcnow())\n\n if old_state == models.Alert.UNKNOWN_STATE and new_state == models.Alert.OK_STATE:\n logger.debug(\"Skipping notification (previous state was unknown and now it's ok).\")\n continue\n\n # message = Message\n html = \"\"\"\n Check <a 
href=\"{host}/alerts/{alert_id}\">alert</a> / check <a href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=base_url(alert.query.org), alert_id=alert.id, query_id=query.id)\n\n notify_mail(alert, html, new_state, app)\n\n if settings.HIPCHAT_API_TOKEN:\n notify_hipchat(alert, html, new_state)\n\n if settings.WEBHOOK_ENDPOINT:\n notify_webhook(alert, query, html, new_state)\n\n\ndef notify_hipchat(alert, html, new_state):\n try:\n hipchat_client = hipchat.HipChat(token=settings.HIPCHAT_API_TOKEN)\n message = '[' + new_state.upper() + '] ' + alert.name + '<br />' + html\n hipchat_client.message_room(settings.HIPCHAT_ROOM_ID, settings.NAME, message, message_format='html')\n except Exception:\n logger.exception(\"hipchat send ERROR.\")\n\n\ndef notify_mail(alert, html, new_state, app):\n recipients = [s.email for s in alert.subscribers()]\n logger.debug(\"Notifying: %s\", recipients)\n try:\n with app.app_context():\n message = Message(recipients=recipients,\n subject=\"[{1}] {0}\".format(alert.name, new_state.upper()),\n html=html)\n mail.send(message)\n except Exception:\n logger.exception(\"mail send ERROR.\")\n\n\ndef notify_webhook(alert, query, html, new_state):\n try:\n data = {\n 'event': 'alert_state_change',\n 'alert': alert.to_dict(full=False),\n 'url_base': base_url(query.org)\n }\n headers = {'Content-Type': 'application/json'}\n auth = HTTPBasicAuth(settings.WEBHOOK_USERNAME, settings.WEBHOOK_PASSWORD) if settings.WEBHOOK_USERNAME else None\n resp = requests.post(settings.WEBHOOK_ENDPOINT, data=json_dumps(data), auth=auth, headers=headers)\n if resp.status_code != 200:\n logger.error(\"webhook send ERROR. status_code => {status}\".format(status=resp.status_code))\n except Exception:\n logger.exception(\"webhook send ERROR.\")\n", "path": "redash/tasks.py" } ]
diff --git a/redash/tasks.py b/redash/tasks.py index 81b1d0d3f2..18540d7682 100644 --- a/redash/tasks.py +++ b/redash/tasks.py @@ -306,8 +306,9 @@ def version_check(): def base_url(org): - if org.domain: - return 'https://{}'.format(org.domain) + if settings.MULTI_ORG: + return "https://{}/{}".format(settings.HOST, org.slug) + return settings.HOST
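The behavioural change in this diff is the new `base_url()`: in multi-org deployments the alert links are now built from `settings.HOST` plus the organisation slug instead of `org.domain`. A standalone sketch of that logic with invented stand-in objects (not the real Redash settings module or org model):

class Settings(object):
    MULTI_ORG = True
    HOST = "redash.example.com"  # hypothetical host value

class Org(object):
    slug = "acme"  # hypothetical org slug

def base_url(settings, org):
    # Mirrors the patched function: multi-org mode appends the org slug,
    # single-tenant mode returns settings.HOST unchanged.
    if settings.MULTI_ORG:
        return "https://{}/{}".format(settings.HOST, org.slug)
    return settings.HOST

print(base_url(Settings(), Org()))  # https://redash.example.com/acme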
microsoft__ptvsd-1986
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\"\"\"Python 2/3 compatibility helpers.\n\"\"\"\n\nimport inspect\nimport itertools\nimport sys\n\nfrom ptvsd.common import fmt\n\n\ntry:\n import builtins\nexcept ImportError:\n import __builtin__ as builtins # noqa\n\ntry:\n unicode = builtins.unicode\n bytes = builtins.str\nexcept AttributeError:\n unicode = builtins.str\n bytes = builtins.bytes\n\ntry:\n xrange = builtins.xrange\nexcept AttributeError:\n xrange = builtins.range\n\ntry:\n izip = itertools.izip\nexcept AttributeError:\n izip = builtins.zip\n\ntry:\n reload = builtins.reload\nexcept AttributeError:\n from importlib import reload # noqa\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue # noqa\n\n\ndef force_unicode(s, encoding, errors=\"strict\"):\n \"\"\"Converts s to Unicode, using the provided encoding. If s is already Unicode,\n it is returned as is.\n \"\"\"\n return s.decode(encoding, errors) if isinstance(s, bytes) else unicode(s)\n\n\ndef force_bytes(s, encoding, errors=\"strict\"):\n \"\"\"Converts s to bytes, using the provided encoding. If s is already bytes,\n it is returned as is.\n\n If errors=\"strict\" and s is bytes, its encoding is verified by decoding it;\n UnicodeError is raised if it cannot be decoded.\n \"\"\"\n if isinstance(s, unicode):\n return s.encode(encoding, errors)\n else:\n s = bytes(s)\n if errors == \"strict\":\n # Return value ignored - invoked solely for verification.\n s.decode(encoding, errors)\n return s\n\n\ndef force_str(s, encoding=\"ascii\", errors=\"strict\"):\n \"\"\"Converts s to str (which is bytes on Python 2, and unicode on Python 3), using\n the provided encoding if necessary. 
If s is already str, it is returned as is.\n\n If errors=\"strict\", str is bytes, and s is str, its encoding is verified by decoding\n it; UnicodeError is raised if it cannot be decoded.\n \"\"\"\n return (force_bytes if str is bytes else force_unicode)(s, encoding, errors)\n\n\ndef force_ascii(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, \"ascii\", errors)\n \"\"\"\n return force_bytes(s, \"ascii\", errors)\n\n\ndef force_utf8(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, \"utf8\", errors)\n \"\"\"\n return force_bytes(s, \"utf8\", errors)\n\n\ndef filename(s, errors=\"strict\"):\n \"\"\"Same as force_unicode(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_unicode(s, sys.getfilesystemencoding(), errors)\n\n\ndef filename_bytes(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_bytes(s, sys.getfilesystemencoding(), errors)\n\n\ndef filename_str(s, errors=\"strict\"):\n \"\"\"Same as force_str(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_str(s, sys.getfilesystemencoding(), errors)\n\n\ndef nameof(obj, quote=False):\n \"\"\"Returns the most descriptive name of a Python module, class, or function,\n as a Unicode string\n\n If quote=True, name is quoted with repr().\n\n Best-effort, but guaranteed to not fail - always returns something.\n \"\"\"\n\n try:\n name = obj.__qualname__\n except Exception:\n try:\n name = obj.__name__\n except Exception:\n # Fall back to raw repr(), and skip quoting.\n try:\n name = repr(obj)\n except Exception:\n return \"<unknown>\"\n else:\n quote = False\n\n if quote:\n try:\n name = repr(name)\n except Exception:\n pass\n\n return force_unicode(name, \"utf-8\", \"replace\")\n\n\ndef unicode_repr(obj):\n \"\"\"Like repr(), but guarantees that the result is Unicode even on Python 2.\n \"\"\"\n return force_unicode(repr(obj), \"ascii\")\n\n\ndef srcnameof(obj):\n \"\"\"Returns the most descriptive name of a Python module, class, or function,\n including source information (filename and linenumber), if available.\n\n Best-effort, but guaranteed to not fail - always returns something.\n \"\"\"\n\n name = nameof(obj, quote=True)\n\n # Get the source information if possible.\n try:\n src_file = filename(inspect.getsourcefile(obj), \"replace\")\n except Exception:\n pass\n else:\n name += fmt(\" (file {0!r}\", src_file)\n try:\n _, src_lineno = inspect.getsourcelines(obj)\n except Exception:\n pass\n else:\n name += fmt(\", line {0}\", src_lineno)\n name += \")\"\n\n return name\n", "path": "src/ptvsd/common/compat.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\"\"\"Python 2/3 compatibility helpers.\n\"\"\"\n\nimport inspect\nimport itertools\nimport sys\n\nfrom ptvsd.common import fmt\n\n\ntry:\n import __builtin__ as builtins\nexcept ImportError:\n import builtins\n\ntry:\n unicode = builtins.unicode\n bytes = builtins.str\nexcept AttributeError:\n unicode = builtins.str\n bytes = builtins.bytes\n\ntry:\n xrange = builtins.xrange\nexcept AttributeError:\n xrange = builtins.range\n\ntry:\n izip = itertools.izip\nexcept AttributeError:\n izip = builtins.zip\n\ntry:\n reload = builtins.reload\nexcept AttributeError:\n from importlib import reload # noqa\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue # noqa\n\n\ndef force_unicode(s, encoding, errors=\"strict\"):\n \"\"\"Converts s to Unicode, using the provided encoding. If s is already Unicode,\n it is returned as is.\n \"\"\"\n return s.decode(encoding, errors) if isinstance(s, bytes) else unicode(s)\n\n\ndef force_bytes(s, encoding, errors=\"strict\"):\n \"\"\"Converts s to bytes, using the provided encoding. If s is already bytes,\n it is returned as is.\n\n If errors=\"strict\" and s is bytes, its encoding is verified by decoding it;\n UnicodeError is raised if it cannot be decoded.\n \"\"\"\n if isinstance(s, unicode):\n return s.encode(encoding, errors)\n else:\n s = bytes(s)\n if errors == \"strict\":\n # Return value ignored - invoked solely for verification.\n s.decode(encoding, errors)\n return s\n\n\ndef force_str(s, encoding=\"ascii\", errors=\"strict\"):\n \"\"\"Converts s to str (which is bytes on Python 2, and unicode on Python 3), using\n the provided encoding if necessary. 
If s is already str, it is returned as is.\n\n If errors=\"strict\", str is bytes, and s is str, its encoding is verified by decoding\n it; UnicodeError is raised if it cannot be decoded.\n \"\"\"\n return (force_bytes if str is bytes else force_unicode)(s, encoding, errors)\n\n\ndef force_ascii(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, \"ascii\", errors)\n \"\"\"\n return force_bytes(s, \"ascii\", errors)\n\n\ndef force_utf8(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, \"utf8\", errors)\n \"\"\"\n return force_bytes(s, \"utf8\", errors)\n\n\ndef filename(s, errors=\"strict\"):\n \"\"\"Same as force_unicode(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_unicode(s, sys.getfilesystemencoding(), errors)\n\n\ndef filename_bytes(s, errors=\"strict\"):\n \"\"\"Same as force_bytes(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_bytes(s, sys.getfilesystemencoding(), errors)\n\n\ndef filename_str(s, errors=\"strict\"):\n \"\"\"Same as force_str(s, sys.getfilesystemencoding(), errors)\n \"\"\"\n return force_str(s, sys.getfilesystemencoding(), errors)\n\n\ndef nameof(obj, quote=False):\n \"\"\"Returns the most descriptive name of a Python module, class, or function,\n as a Unicode string\n\n If quote=True, name is quoted with repr().\n\n Best-effort, but guaranteed to not fail - always returns something.\n \"\"\"\n\n try:\n name = obj.__qualname__\n except Exception:\n try:\n name = obj.__name__\n except Exception:\n # Fall back to raw repr(), and skip quoting.\n try:\n name = repr(obj)\n except Exception:\n return \"<unknown>\"\n else:\n quote = False\n\n if quote:\n try:\n name = repr(name)\n except Exception:\n pass\n\n return force_unicode(name, \"utf-8\", \"replace\")\n\n\ndef unicode_repr(obj):\n \"\"\"Like repr(), but guarantees that the result is Unicode even on Python 2.\n \"\"\"\n return force_unicode(repr(obj), \"ascii\")\n\n\ndef srcnameof(obj):\n \"\"\"Returns the most descriptive name of a Python module, class, or function,\n including source information (filename and linenumber), if available.\n\n Best-effort, but guaranteed to not fail - always returns something.\n \"\"\"\n\n name = nameof(obj, quote=True)\n\n # Get the source information if possible.\n try:\n src_file = filename(inspect.getsourcefile(obj), \"replace\")\n except Exception:\n pass\n else:\n name += fmt(\" (file {0!r}\", src_file)\n try:\n _, src_lineno = inspect.getsourcelines(obj)\n except Exception:\n pass\n else:\n name += fmt(\", line {0}\", src_lineno)\n name += \")\"\n\n return name\n", "path": "src/ptvsd/common/compat.py" } ]
diff --git a/src/ptvsd/common/compat.py b/src/ptvsd/common/compat.py index ec0b61d29..d7cb2d7f2 100644 --- a/src/ptvsd/common/compat.py +++ b/src/ptvsd/common/compat.py @@ -15,9 +15,9 @@ try: - import builtins + import __builtin__ as builtins except ImportError: - import __builtin__ as builtins # noqa + import builtins try: unicode = builtins.unicode diff --git a/tests/debug/session.py b/tests/debug/session.py index 199184a8f..5f18af833 100644 --- a/tests/debug/session.py +++ b/tests/debug/session.py @@ -512,7 +512,7 @@ def _start_channel(self, stream): telemetry = self.wait_for_next_event("output") assert telemetry == { "category": "telemetry", - "output": "ptvsd.adapter", + "output": "ptvsd", "data": {"packageVersion": some.str}, }
chainer__chainer-751
[ { "content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py" } ]
[ { "content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py" } ]
diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py index 4e1a897bcc6c..02e89d770339 100644 --- a/cupy/manipulation/split.py +++ b/cupy/manipulation/split.py @@ -23,6 +23,9 @@ def array_split(ary, indices_or_sections, axis=0): else: indices = indices_or_sections + if len(indices) == 0: + return [ary] + skip = (slice(None),) * axis ret = [] i = 0 diff --git a/tests/cupy_tests/manipulation_tests/test_split.py b/tests/cupy_tests/manipulation_tests/test_split.py index 52d31b6c5124..9b4dd941ad3e 100644 --- a/tests/cupy_tests/manipulation_tests/test_split.py +++ b/tests/cupy_tests/manipulation_tests/test_split.py @@ -20,6 +20,12 @@ def test_array_spliti2(self, xp): split = xp.array_split(a, 4, 1) return xp.concatenate(split, -1) + @testing.numpy_cupy_array_equal() + def test_array_spliti_empty(self, xp): + a = testing.shaped_arange((3, 11), xp) + split = xp.array_split(a, []) + return xp.concatenate(split, -1) + @testing.numpy_cupy_array_equal() def test_dsplit(self, xp): a = testing.shaped_arange((3, 3, 12), xp)
ivy-llc__ivy-13563
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, integer_arrays_to_float\nimport ivy.functional.frontends.torch as torch_frontend\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef add(input, other, *, alpha=1, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.add(input, other, alpha=alpha, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef tan(input, *, out=None):\n return ivy.tan(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef remainder(input, other, *, out=None):\n if ivy.is_array(input) and ivy.isscalar(other):\n other = ivy.full(input.shape, other)\n return ivy.remainder(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef atan(input, *, out=None):\n return ivy.atan(input, out=out)\n\n\narctan = atan\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef tanh(input, *, out=None):\n return ivy.tanh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef cos(input, *, out=None):\n return ivy.cos(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sin(input, *, out=None):\n return ivy.sin(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef acos(input, *, out=None):\n return ivy.acos(input, out=out)\n\n\narccos = acos\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sinh(input, *, out=None):\n return ivy.sinh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef acosh(input, *, out=None):\n return ivy.acosh(input, out=out)\n\n\narccosh = acosh\n\n\n@to_ivy_arrays_and_back\ndef abs(input, *, out=None):\n return ivy.abs(input, out=out)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef cosh(input, *, out=None):\n return ivy.cosh(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef subtract(input, other, *, alpha=1, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.subtract(input, other * alpha, out=out)\n\n\nsub = subtract\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef exp(input, *, out=None):\n return ivy.exp(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef asin(input, *, out=None):\n return ivy.asin(input, out=out)\n\n\narcsin = asin\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef asinh(input, *, out=None):\n return ivy.asinh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef atanh(input, *, out=None):\n return ivy.atanh(input, out=out)\n\n\narctanh = atanh\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log2(input, *, out=None):\n return ivy.log2(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef square(input, *, out=None):\n return ivy.square(input, 
out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef atan2(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.atan2(input, other, out=out)\n\n\narctan2 = atan2\n\n\n@to_ivy_arrays_and_back\ndef negative(input, *, out=None):\n return ivy.negative(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_and(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(input, *, out=None):\n return ivy.bitwise_invert(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_xor(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_or(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_left_shift(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_left_shift(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_right_shift(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_right_shift(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log10(input, *, out=None):\n return ivy.log10(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef trunc(input, *, out=None):\n return ivy.trunc(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sqrt(input, *, out=None):\n return ivy.sqrt(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef sign(input, *, out=None):\n return ivy.sign(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(input, *, out=None):\n return ivy.logical_not(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_and(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_or(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_xor(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef round(input, *, decimals=0, out=None):\n m = ivy.full(input.shape, 10**decimals)\n upscale = ivy.multiply(input, m, out=out)\n rounded = ivy.round(upscale, out=out)\n return ivy.divide(rounded, m, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef ceil(input, *, out=None):\n return ivy.ceil(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef clamp(input, min=None, max=None, *, out=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n 
fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n input = ivy.array(input)\n if min is None:\n return ivy.minimum(input, max, out=out)\n if max is None:\n return ivy.maximum(input, min, out=out)\n return ivy.clip(input, min, max, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef clip(input, min=None, max=None, *, out=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n input = ivy.array(input)\n if min is None:\n return ivy.minimum(input, max, out=out)\n if max is None:\n return ivy.maximum(input, min, out=out)\n return ivy.clip(input, min, max, out=out)\n\n\n@to_ivy_arrays_and_back\ndef mul(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.multiply(input, other, out=out)\n\n\nmultiply = mul\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef div(input, other, *, rounding_mode=None, out=None):\n if rounding_mode is not None:\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n promoted = input.dtype\n if rounding_mode == \"trunc\":\n return ivy.trunc_divide(input, other, out=out).astype(promoted)\n else:\n return ivy.floor_divide(input, other, out=out).astype(promoted)\n else:\n return ivy.divide(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(input, *, out=None):\n return ivy.reciprocal(input)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef floor(input, *, out=None):\n return ivy.floor(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef flipud(input):\n return ivy.flipud(input)\n\n\n@integer_arrays_to_float\n@to_ivy_arrays_and_back\ndef deg2rad(input, *, out=None):\n return ivy.array(input * 3.1416 / 180, out=out)\n\n\narcsinh = asinh\n\n\ndivide = div\n\n\n@to_ivy_arrays_and_back\ndef true_divide(input, other, *, out=None):\n return ivy.divide(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef floor_divide(input, other, *, out=None):\n return ivy.floor_divide(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log1p(input, *, out=None):\n return ivy.log1p(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef addcdiv(input, tensor1, tensor2, *, value=1, out=None):\n return ivy.add(input, ivy.multiply(value, ivy.divide(tensor1, tensor2)), out=out)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef addcmul(input, tensor1, tensor2, *, value=1, out=None):\n return ivy.add(input, ivy.multiply(value, ivy.multiply(tensor1, tensor2)), out=out)\n\n\n@to_ivy_arrays_and_back\ndef pow(input, exponent, *, out=None):\n return ivy.pow(input, exponent, out=out)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef log(input, *, out=None):\n return ivy.log(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef logaddexp(x1, x2, out=None):\n return ivy.logaddexp(x1, x2, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef exp2(input, out=None):\n return ivy.exp2(input, 
out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef rsqrt(input, *, out=None):\n return ivy.reciprocal(ivy.sqrt(input), out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef expm1(input, out=None):\n return ivy.expm1(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef logaddexp2(x1, x2, out=None):\n return ivy.logaddexp2(x1, x2, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef i0(x, out=None):\n return ivy.i0(x, out=out)\n\n\ndef rad2deg(input, *, out=None):\n return ivy.rad2deg(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef positive(input, *, out=None):\n return ivy.positive(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef frac(input, *, out=None):\n return input - ivy.sign(input) * ivy.floor(ivy.abs(input))\n\n\n@with_unsupported_dtypes({\"2.9.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef xlogy(input, other, *, out=None):\n return ivy.xlogy(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef copysign(input, other, *, out=None):\n return ivy.copysign(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sinc(input, *, out=None):\n return ivy.sinc(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef hypot(input, other, *, out=None):\n return ivy.hypot(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sigmoid(input, *, out=None):\n return ivy.sigmoid(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef lerp(input, end, weight, *, out=None):\n return ivy.add(input, ivy.multiply(weight, ivy.subtract(end, input)), out=out)\n\n\n@to_ivy_arrays_and_back\ndef signbit(input, *, out=None):\n return ivy.signbit(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef angle(input, *, out=None):\n return ivy.angle(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef arctan(input, *, out=None):\n return ivy.arctan(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef conj_physical(input, *, out=None):\n return ivy.conj_physical(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef nextafter(input, *, out=None):\n return ivy.nextafter(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef fmod(x1, x2, out=None):\n return ivy.fmod(x1, x2, out=out)\n", "path": "ivy/functional/frontends/torch/pointwise_ops.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, integer_arrays_to_float\nimport ivy.functional.frontends.torch as torch_frontend\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef add(input, other, *, alpha=1, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.add(input, other, alpha=alpha, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef tan(input, *, out=None):\n return ivy.tan(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef remainder(input, other, *, out=None):\n if ivy.is_array(input) and ivy.isscalar(other):\n other = ivy.full(input.shape, other)\n return ivy.remainder(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef atan(input, *, out=None):\n return ivy.atan(input, out=out)\n\n\narctan = atan\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef tanh(input, *, out=None):\n return ivy.tanh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef cos(input, *, out=None):\n return ivy.cos(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sin(input, *, out=None):\n return ivy.sin(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef acos(input, *, out=None):\n return ivy.acos(input, out=out)\n\n\narccos = acos\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sinh(input, *, out=None):\n return ivy.sinh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef acosh(input, *, out=None):\n return ivy.acosh(input, out=out)\n\n\narccosh = acosh\n\n\n@to_ivy_arrays_and_back\ndef abs(input, *, out=None):\n return ivy.abs(input, out=out)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef cosh(input, *, out=None):\n return ivy.cosh(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef subtract(input, other, *, alpha=1, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.subtract(input, other * alpha, out=out)\n\n\nsub = subtract\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef exp(input, *, out=None):\n return ivy.exp(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef asin(input, *, out=None):\n return ivy.asin(input, out=out)\n\n\narcsin = asin\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef asinh(input, *, out=None):\n return ivy.asinh(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef atanh(input, *, out=None):\n return ivy.atanh(input, out=out)\n\n\narctanh = atanh\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log2(input, *, out=None):\n return ivy.log2(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef square(input, *, out=None):\n return ivy.square(input, 
out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef atan2(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.atan2(input, other, out=out)\n\n\narctan2 = atan2\n\n\n@to_ivy_arrays_and_back\ndef negative(input, *, out=None):\n return ivy.negative(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_and(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(input, *, out=None):\n return ivy.bitwise_invert(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_xor(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_or(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_left_shift(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_left_shift(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_right_shift(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.bitwise_right_shift(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log10(input, *, out=None):\n return ivy.log10(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef trunc(input, *, out=None):\n return ivy.trunc(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sqrt(input, *, out=None):\n return ivy.sqrt(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef sign(input, *, out=None):\n return ivy.sign(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(input, *, out=None):\n return ivy.logical_not(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_and(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_or(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.logical_xor(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef round(input, *, decimals=0, out=None):\n m = ivy.full(input.shape, 10**decimals)\n upscale = ivy.multiply(input, m, out=out)\n rounded = ivy.round(upscale, out=out)\n return ivy.divide(rounded, m, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef ceil(input, *, out=None):\n return ivy.ceil(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef clamp(input, min=None, max=None, *, out=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n 
fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n input = ivy.array(input)\n if min is None:\n return ivy.minimum(input, max, out=out)\n if max is None:\n return ivy.maximum(input, min, out=out)\n return ivy.clip(input, min, max, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef clip(input, min=None, max=None, *, out=None):\n ivy.utils.assertions.check_all_or_any_fn(\n min,\n max,\n fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of min or max can be None\",\n )\n input = ivy.array(input)\n if min is None:\n return ivy.minimum(input, max, out=out)\n if max is None:\n return ivy.maximum(input, min, out=out)\n return ivy.clip(input, min, max, out=out)\n\n\n@to_ivy_arrays_and_back\ndef mul(input, other, *, out=None):\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n return ivy.multiply(input, other, out=out)\n\n\nmultiply = mul\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef div(input, other, *, rounding_mode=None, out=None):\n if rounding_mode is not None:\n input, other = torch_frontend.promote_types_of_torch_inputs(input, other)\n promoted = input.dtype\n if rounding_mode == \"trunc\":\n return ivy.trunc_divide(input, other, out=out).astype(promoted)\n else:\n return ivy.floor_divide(input, other, out=out).astype(promoted)\n else:\n return ivy.divide(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(input, *, out=None):\n return ivy.reciprocal(input)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef floor(input, *, out=None):\n return ivy.floor(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef flipud(input):\n return ivy.flipud(input)\n\n\n@integer_arrays_to_float\n@to_ivy_arrays_and_back\ndef deg2rad(input, *, out=None):\n return ivy.array(input * 3.1416 / 180, out=out)\n\n\narcsinh = asinh\n\n\ndivide = div\n\n\n@to_ivy_arrays_and_back\ndef true_divide(input, other, *, out=None):\n return ivy.divide(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef floor_divide(input, other, *, out=None):\n return ivy.floor_divide(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef log1p(input, *, out=None):\n return ivy.log1p(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef addcdiv(input, tensor1, tensor2, *, value=1, out=None):\n return ivy.add(input, ivy.multiply(value, ivy.divide(tensor1, tensor2)), out=out)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef addcmul(input, tensor1, tensor2, *, value=1, out=None):\n return ivy.add(input, ivy.multiply(value, ivy.multiply(tensor1, tensor2)), out=out)\n\n\n@to_ivy_arrays_and_back\ndef pow(input, exponent, *, out=None):\n return ivy.pow(input, exponent, out=out)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef log(input, *, out=None):\n return ivy.log(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef logaddexp(x1, x2, out=None):\n return ivy.logaddexp(x1, x2, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef exp2(input, out=None):\n return ivy.exp2(input, 
out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef rsqrt(input, *, out=None):\n return ivy.reciprocal(ivy.sqrt(input), out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef expm1(input, out=None):\n return ivy.expm1(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef logaddexp2(x1, x2, out=None):\n return ivy.logaddexp2(x1, x2, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\ndef i0(x, out=None):\n return ivy.i0(x, out=out)\n\n\ndef rad2deg(input, *, out=None):\n return ivy.rad2deg(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef positive(input, *, out=None):\n return ivy.positive(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef frac(input, *, out=None):\n return input - ivy.sign(input) * ivy.floor(ivy.abs(input))\n\n\n@with_unsupported_dtypes({\"2.9.0 and below\": (\"bfloat16\",)}, \"tensorflow\")\n@to_ivy_arrays_and_back\ndef xlogy(input, other, *, out=None):\n return ivy.xlogy(input, other, out=out)\n\n\n@to_ivy_arrays_and_back\ndef copysign(input, other, *, out=None):\n return ivy.copysign(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sinc(input, *, out=None):\n return ivy.sinc(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef hypot(input, other, *, out=None):\n return ivy.hypot(input, other, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef sigmoid(input, *, out=None):\n return ivy.sigmoid(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef lerp(input, end, weight, *, out=None):\n return ivy.add(input, ivy.multiply(weight, ivy.subtract(end, input)), out=out)\n\n\n@to_ivy_arrays_and_back\ndef signbit(input, *, out=None):\n return ivy.signbit(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef angle(input, *, out=None):\n return ivy.angle(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef arctan(input, *, out=None):\n return ivy.arctan(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef conj_physical(input, *, out=None):\n return ivy.conj_physical(input, out=out)\n\n\n@to_ivy_arrays_and_back\ndef nextafter(input, *, out=None):\n return ivy.nextafter(input, out=out)\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef fmod(x1, x2, out=None):\n return ivy.fmod(x1, x2, out=out)\n\n\n@to_ivy_arrays_and_back\ndef imag(input):\n return ivy.imag(input)\n", "path": "ivy/functional/frontends/torch/pointwise_ops.py" } ]
diff --git a/ivy/functional/frontends/torch/pointwise_ops.py b/ivy/functional/frontends/torch/pointwise_ops.py index 8a9bcead0d954..132818ec4eced 100644 --- a/ivy/functional/frontends/torch/pointwise_ops.py +++ b/ivy/functional/frontends/torch/pointwise_ops.py @@ -487,3 +487,8 @@ def nextafter(input, *, out=None): @to_ivy_arrays_and_back def fmod(x1, x2, out=None): return ivy.fmod(x1, x2, out=out) + + +@to_ivy_arrays_and_back +def imag(input): + return ivy.imag(input) diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py index fa1cf0215510a..a546ee1ee1cb8 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py @@ -2278,3 +2278,29 @@ def test_torch_fmod( x1=x[0], x2=x[1], ) + + +# imag +@handle_frontend_test( + fn_tree="torch.imag", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("complex"), + ), +) +def test_torch_imag( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, input = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + input=input[0], + )
larq__larq-80
[ { "content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\n@tf.custom_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\n@tf.custom_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\n@utils.register_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\n@utils.register_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for birealnet.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\n@utils.register_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py" } ]
[ { "content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\n@tf.custom_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\n@tf.custom_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\n@utils.register_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\n@utils.register_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\n@utils.register_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py" } ]
diff --git a/README.md b/README.md index fd8f466d..16450c41 100644 --- a/README.md +++ b/README.md @@ -10,17 +10,21 @@ Existing deep neural networks use 32 bits, 16 bits or 8 bits to encode each weig ## Getting Started -To build a QNN, Larq introduces the concept of _Quantized Layers_ and _Quantizers_. A _Quantizer_ defines the way of transforming a full precision input to a quantized output and the pseudo-gradient method used for the backwards pass. Each _Quantized Layer_ requires a `kernel_quantizer` and an `input_quantizer` that describe the way of quantizing the weights of the layer and the activations of the previous layer respectively. If both `input_quantizer` and `kernel_quantizer` are `None` the layer is equivalent to a full precision layer. +To build a QNN, Larq introduces the concept of [quantized layers](https://plumerai.github.io/larq/api/layers/) and [quantizers](https://plumerai.github.io/larq/api/quantizers/). A quantizer defines the way of transforming a full precision input to a quantized output and the pseudo-gradient method used for the backwards pass. Each quantized layer requires a `kernel_quantizer` and an `input_quantizer` that describe the way of quantizing the weights of the layer and the activations of the previous layer respectively. If both `input_quantizer` and `kernel_quantizer` are `None` the layer is equivalent to a full precision layer. -You can define a binarized densely-connected layer using the Straight-Through Estimator the following way: +You can define a simple binarized fully-connected Keras model using the [Straight-Through Estimator](https://plumerai.github.io/larq/api/quantizers/#ste_sign) the following way: ```python -larq.layers.QuantDense( - 512, - input_quantizer="ste_sign", - kernel_quantizer="ste_sign", - kernel_constraint="weight_clip", -) +model = tf.keras.models.Sequential([ + tf.keras.layers.Flatten(), + larq.layers.QuantDense(512, + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip"), + larq.layers.QuantDense(10, + input_quantizer="ste_sign", + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip", + activation="softmax")]) ``` This layer can be used inside a [Keras model](https://www.tensorflow.org/alpha/guide/keras/overview#sequential_model) or with a [custom training loop](https://www.tensorflow.org/alpha/guide/keras/overview#model_subclassing). diff --git a/apidocs.yml b/apidocs.yml index 95220ced..5f67c561 100644 --- a/apidocs.yml +++ b/apidocs.yml @@ -24,3 +24,4 @@ - larq.quantizers: - larq.quantizers.ste_sign - larq.quantizers.approx_sign + - larq.quantizers.magnitude_aware_sign diff --git a/docs/examples/binarynet_advanced_cifar10.ipynb b/docs/examples/binarynet_advanced_cifar10.ipynb index 2c8248e3..8599082e 100644 --- a/docs/examples/binarynet_advanced_cifar10.ipynb +++ b/docs/examples/binarynet_advanced_cifar10.ipynb @@ -96,7 +96,7 @@ { "cell_type": "markdown", "source": [ - "## Build Binarynet\n", + "## Build BinaryNet\n", "\n", "Here we build the binarynet model layer by layer using a keras sequential model:" ], diff --git a/docs/guide.md b/docs/guide.md new file mode 100644 index 00000000..294645e5 --- /dev/null +++ b/docs/guide.md @@ -0,0 +1,94 @@ +# User Guide + +To build a Quantized Neural Network (QNN), Larq introduces the concept of [quantized layers](https://plumerai.github.io/larq/api/layers/) and [quantizers](https://plumerai.github.io/larq/api/quantizers/). 
A quantizer defines the way of transforming a full precision input to a quantized output and the pseudo-gradient method used for the backwards pass. + +Each quantized layer requires a `kernel_quantizer` and an `input_quantizer` that describe the way of quantizing the weights of the layer and the activations of the previous layer respectively. If both `input_quantizer` and `kernel_quantizer` are `None` the layer is equivalent to a full precision layer. Larq layers are fully compatible with the Keras API so you can use them with Keras Layers interchangeably: + +```python tab="Larq 32-bit model" +model = tf.keras.models.Sequential([ + tf.keras.layers.Flatten(), + larq.layers.QuantDense(512, activation="relu"), + larq.layers.QuantDense(10, activation="softmax") +]) +``` + +```python tab="Keras 32-bit model" +model = tf.keras.models.Sequential([ + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(512, activation="relu"), + tf.keras.layers.Dense(10, activation="softmax") +]) +``` + +A simple fully-connected Binarized Neural Network (BNN) using the [Straight-Through Estimator](https://plumerai.github.io/larq/api/quantizers/#ste_sign) can be defined in just a few lines of code using either the Keras sequential, functional or model subclassing APIs: + +```python tab="Larq 1-bit model" +model = tf.keras.models.Sequential([ + tf.keras.layers.Flatten(), + larq.layers.QuantDense(512, + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip"), + larq.layers.QuantDense(10, + input_quantizer="ste_sign", + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip", + activation="softmax")]) +``` + +```python tab="Larq 1-bit model functional" +x = tf.keras.Input(shape=(28, 28, 1)) +y = tf.keras.layers.Flatten()(x) +y = larq.layers.QuantDense(512, + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip")(y) +y = larq.layers.QuantDense(10, + input_quantizer="ste_sign", + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip", + activation="softmax")(y) +model = tf.keras.Model(inputs=x, outputs=y) +``` + +```python tab="Larq 1-bit model subclassing" +class MyModel(tf.keras.Model): + def __init__(self): + super().__init__() + self.flatten = tf.keras.layers.Flatten() + self.dense1 = larq.layers.QuantDense(512, + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip") + self.dense2 = larq.layers.QuantDense(10, + input_quantizer="ste_sign", + kernel_quantizer="ste_sign", + kernel_constraint="weight_clip", + activation="softmax") + + def call(self, inputs): + x = self.flatten(inputs) + x = self.dense1(x) + return self.dense2(x) + +model = MyModel() +``` + +## Using Custom Quantizers + +Quantizers are functions that transform a full precision input to a quantized output. Since this transformation usually is non-differentiable it is necessary to modify the gradient in order to be able to train the resulting QNN. This can be done with the [`tf.custom_gradient`](https://www.tensorflow.org/api_docs/python/tf/custom_gradient) decorator. 
+ +In this example we will define a binarization function with an identity gradient: + +```python +@tf.custom_gradient +def identity_sign(x): + def grad(dy): + return dy + return tf.sign(x), grad +``` + +This function can now be used as an `input_quantizer` or a `kernel_quantizer`: + +```python +larq.layers.QuantDense(10, + input_quantizer=identity_sign, + kernel_quantizer=identity_sign) +``` diff --git a/larq/quantizers.py b/larq/quantizers.py index d2c984ad..bb1b62ac 100644 --- a/larq/quantizers.py +++ b/larq/quantizers.py @@ -64,7 +64,7 @@ def ste_sign(x): @utils.register_keras_custom_object def magnitude_aware_sign(x): r""" - Magnitude-aware sign for birealnet. + Magnitude-aware sign for Bi-Real Net. # Arguments diff --git a/mkdocs.yml b/mkdocs.yml index 2603af87..abf47471 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -4,14 +4,15 @@ site_description: "An Open Source Machine Learning Framework for Training Extrem nav: - Home: - Getting Started: index.md - - Library Development: + - User Guide: guide.md + - Developer Guide: - Contributing Guide: contributing.md - Code of Conduct: code_of_conduct.md - Examples: - Introduction to BNNs with Larq: examples/mnist.ipynb - - Binarynet on CIFAR10: examples/binarynet_cifar10.ipynb - - Binarynet on CIFAR10 (Advanced): examples/binarynet_advanced_cifar10.ipynb - - Documentation: + - BinaryNet on CIFAR10: examples/binarynet_cifar10.ipynb + - BinaryNet on CIFAR10 (Advanced): examples/binarynet_advanced_cifar10.ipynb + - API: - Layers: api/layers.md - Quantizers: api/quantizers.md - Activations: api/activations.md
scverse__scanpy-721
[ { "content": "from . import tl\nfrom . import pl\nfrom . import pp\n\nfrom .. import _exporting as exporting\n\nimport sys\nfrom .. import utils\nutils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nPreprocessing: PP\n------------------\n\nBatch effect correction\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.mnn_correct\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n tl.palantir\n\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py" } ]
[ { "content": "from . import tl\nfrom . import pl\nfrom . import pp\n\nfrom .. import _exporting as exporting\n\nimport sys\nfrom .. import utils\nutils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n\nPreprocessing: PP\n------------------\n\nBatch effect correction\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.mnn_correct\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n tl.palantir\n\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py" } ]
diff --git a/.gitignore b/.gitignore index ac4119e4de..4c4ad3ee7b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ # Docs /docs/_build/ /docs/api/scanpy.*.rst +/docs/external/scanpy.*.rst !/docs/api/scanpy.api.rst !/docs/api/scanpy.api.AnnData.rst !/docs/api/scanpy.plotting.rstk diff --git a/docs/api/index.rst b/docs/api/index.rst index 044864e3eb..eb0a706ca2 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -307,7 +307,6 @@ Further modules .. autosummary:: :toctree: . - external api plotting diff --git a/docs/external/index.rst b/docs/external/index.rst new file mode 100644 index 0000000000..f4eee974ef --- /dev/null +++ b/docs/external/index.rst @@ -0,0 +1 @@ +.. automodule:: scanpy.external diff --git a/docs/index.rst b/docs/index.rst index 36299ee077..47c1634791 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,4 +16,5 @@ If Scanpy is useful for your research, consider citing `Genome Biology (2018) <h basic_usage installation api/index + external/index references diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py index 10f2c40cfa..15fcbdb2c2 100644 --- a/scanpy/external/__init__.py +++ b/scanpy/external/__init__.py @@ -19,6 +19,8 @@ import scanpy.external as sce +If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_! + Preprocessing: PP ------------------
Flexget__Flexget-2778
[ { "content": "import base64\nimport os\nimport re\nfrom datetime import datetime, timedelta\nfrom fnmatch import fnmatch\nfrom netrc import NetrcParseError, netrc\nfrom time import sleep\nfrom urllib.parse import urlparse\n\nfrom loguru import logger\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.pathscrub import pathscrub\nfrom flexget.utils.template import RenderError\nfrom flexget.utils.tools import parse_timedelta\n\ntry:\n import transmissionrpc\n from transmissionrpc import HTTPHandlerError, TransmissionError\nexcept ImportError:\n # If transmissionrpc is not found, errors will be shown later\n pass\n\nlogger = logger.bind(name='transmission')\n\n\nclass TransmissionBase:\n def __init__(self):\n self.client = None\n self.opener = None\n\n def prepare_config(self, config):\n if isinstance(config, bool):\n config = {'enabled': config}\n config.setdefault('enabled', True)\n config.setdefault('host', 'localhost')\n config.setdefault('port', 9091)\n config.setdefault('main_file_ratio', 0.9)\n if 'netrc' in config:\n netrc_path = os.path.expanduser(config['netrc'])\n try:\n config['username'], _, config['password'] = netrc(netrc_path).authenticators(\n config['host']\n )\n except OSError as e:\n logger.error('netrc: unable to open: {}', e.filename)\n except NetrcParseError as e:\n logger.error('netrc: {}, file: {}, line: {}', e.msg, e.filename, e.lineno)\n return config\n\n def create_rpc_client(self, config):\n user, password = config.get('username'), config.get('password')\n\n try:\n cli = transmissionrpc.Client(config['host'], config['port'], user, password)\n except TransmissionError as e:\n if isinstance(e.original, HTTPHandlerError):\n if e.original.code == 111:\n raise plugin.PluginError(\"Cannot connect to transmission. Is it running?\")\n elif e.original.code == 401:\n raise plugin.PluginError(\n \"Username/password for transmission is incorrect. 
Cannot connect.\"\n )\n elif e.original.code == 110:\n raise plugin.PluginError(\n \"Cannot connect to transmission: Connection timed out.\"\n )\n else:\n raise plugin.PluginError(\n \"Error connecting to transmission: %s\" % e.original.message\n )\n else:\n raise plugin.PluginError(\"Error connecting to transmission: %s\" % e.message)\n return cli\n\n def torrent_info(self, torrent, config):\n done = torrent.totalSize > 0\n vloc = None\n best = None\n for t in torrent.files().items():\n tf = t[1]\n if tf['selected']:\n if tf['size'] <= 0 or tf['completed'] < tf['size']:\n done = False\n break\n if not best or tf['size'] > best[1]:\n best = (tf['name'], tf['size'])\n if (\n done\n and best\n and (100 * float(best[1]) / float(torrent.totalSize))\n >= (config['main_file_ratio'] * 100)\n ):\n vloc = ('%s/%s' % (torrent.downloadDir, best[0])).replace('/', os.sep)\n return done, vloc\n\n def check_seed_limits(self, torrent, session):\n seed_limit_ok = True # will remain if no seed ratio defined\n idle_limit_ok = True # will remain if no idle limit defined\n\n if torrent.seedRatioMode == 1: # use torrent's own seed ratio limit\n seed_limit_ok = torrent.uploadRatio >= torrent.seedRatioLimit\n elif torrent.seedRatioMode == 0: # use global rules\n if session.seedRatioLimited:\n seed_limit_ok = torrent.uploadRatio >= session.seedRatioLimit\n\n if torrent.seedIdleMode == 1: # use torrent's own idle limit\n idle_limit_ok = (\n torrent.date_active + timedelta(minutes=torrent.seedIdleLimit) < datetime.now()\n )\n elif torrent.seedIdleMode == 0: # use global rules\n if session.idle_seeding_limit_enabled:\n idle_limit_ok = (\n torrent.date_active + timedelta(minutes=session.idle_seeding_limit)\n < datetime.now()\n )\n\n return seed_limit_ok, idle_limit_ok\n\n def on_task_start(self, task, config):\n try:\n import transmissionrpc\n from transmissionrpc import HTTPHandlerError # noqa\n from transmissionrpc import TransmissionError # noqa\n except:\n raise plugin.PluginError(\n 'Transmissionrpc module version 0.11 or higher required.', logger\n )\n if [int(part) for part in transmissionrpc.__version__.split('.')] < [0, 11]:\n raise plugin.PluginError(\n 'Transmissionrpc module version 0.11 or higher required, please upgrade', logger\n )\n\n # Mark rpc client for garbage collector so every task can start\n # a fresh new according its own config - fix to bug #2804\n self.client = None\n config = self.prepare_config(config)\n if config['enabled']:\n if task.options.test:\n logger.info('Trying to connect to transmission...')\n self.client = self.create_rpc_client(config)\n if self.client:\n logger.info('Successfully connected to transmission.')\n else:\n logger.error('It looks like there was a problem connecting to transmission.')\n\n\nclass PluginTransmissionInput(TransmissionBase):\n schema = {\n 'anyOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'host': {'type': 'string'},\n 'port': {'type': 'integer'},\n 'netrc': {'type': 'string', 'format': 'file'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'enabled': {'type': 'boolean'},\n 'only_complete': {'type': 'boolean'},\n },\n 'additionalProperties': False,\n },\n ]\n }\n\n def prepare_config(self, config):\n config = TransmissionBase.prepare_config(self, config)\n config.setdefault('only_complete', False)\n return config\n\n def on_task_input(self, task, config):\n config = self.prepare_config(config)\n if not config['enabled']:\n return\n\n if not self.client:\n self.client = self.create_rpc_client(config)\n 
entries = []\n\n # Hack/Workaround for http://flexget.com/ticket/2002\n # TODO: Proper fix\n if 'username' in config and 'password' in config:\n self.client.http_handler.set_authentication(\n self.client.url, config['username'], config['password']\n )\n\n session = self.client.get_session()\n\n for torrent in self.client.get_torrents():\n seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)\n if config['only_complete'] and not (\n seed_ratio_ok and idle_limit_ok and torrent.progress == 100\n ):\n continue\n entry = Entry(\n title=torrent.name,\n url='',\n torrent_info_hash=torrent.hashString,\n content_size=torrent.totalSize / (1024 * 1024),\n )\n # Location of torrent is only valid if transmission is on same machine as flexget\n if config['host'] in ('localhost', '127.0.0.1'):\n entry['location'] = torrent.torrentFile\n entry['url'] = 'file://' + torrent.torrentFile\n for attr in [\n 'id',\n 'comment',\n 'desiredAvailable',\n 'downloadDir',\n 'isFinished',\n 'isPrivate',\n 'leftUntilDone',\n 'ratio',\n 'status',\n 'date_active',\n 'date_added',\n 'date_done',\n 'date_started',\n 'errorString',\n 'priority',\n 'progress',\n 'secondsDownloading',\n 'secondsSeeding',\n 'torrentFile',\n ]:\n try:\n entry['transmission_' + attr] = getattr(torrent, attr)\n except Exception:\n logger.opt(exception=True).debug(\n 'error when requesting transmissionrpc attribute {}', attr\n )\n # Availability in percent\n entry['transmission_availability'] = (torrent.desiredAvailable / torrent.leftUntilDone) if torrent.leftUntilDone else 0\n \n entry['transmission_trackers'] = [t['announce'] for t in torrent.trackers]\n entry['transmission_seed_ratio_ok'] = seed_ratio_ok\n entry['transmission_idle_limit_ok'] = idle_limit_ok\n st_error_to_desc = {\n 0: 'OK',\n 1: 'tracker_warning',\n 2: 'tracker_error',\n 3: 'local_error',\n }\n entry['transmission_error_state'] = st_error_to_desc[torrent.error]\n # Built in done_date doesn't work when user adds an already completed file to transmission\n if torrent.progress == 100:\n entry['transmission_date_done'] = datetime.fromtimestamp(\n max(torrent.addedDate, torrent.doneDate)\n )\n entries.append(entry)\n return entries\n\n\nclass PluginTransmission(TransmissionBase):\n \"\"\"\n Add url from entry url to transmission\n\n Example::\n\n transmission:\n host: localhost\n port: 9091\n netrc: /home/flexget/.tmnetrc\n username: myusername\n password: mypassword\n path: the download location\n\n Default values for the config elements::\n\n transmission:\n host: localhost\n port: 9091\n enabled: yes\n \"\"\"\n\n schema = {\n 'anyOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'host': {'type': 'string'},\n 'port': {'type': 'integer'},\n 'netrc': {'type': 'string'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'action': {\n 'type': 'string',\n 'enum': ['add', 'remove', 'purge', 'pause', 'resume', 'bypass_queue'],\n },\n 'path': {'type': 'string'},\n 'max_up_speed': {'type': 'number'},\n 'max_down_speed': {'type': 'number'},\n 'max_connections': {'type': 'integer'},\n 'ratio': {'type': 'number'},\n 'add_paused': {'type': 'boolean'},\n 'content_filename': {'type': 'string'},\n 'main_file_only': {'type': 'boolean'},\n 'main_file_ratio': {'type': 'number'},\n 'magnetization_timeout': {'type': 'integer'},\n 'enabled': {'type': 'boolean'},\n 'include_subs': {'type': 'boolean'},\n 'bandwidth_priority': {'type': 'number'},\n 'honor_limits': {'type': 'boolean'},\n 'include_files': one_or_more({'type': 'string'}),\n 
'skip_files': one_or_more({'type': 'string'}),\n 'rename_like_files': {'type': 'boolean'},\n 'queue_position': {'type': 'integer'},\n },\n 'additionalProperties': False,\n },\n ]\n }\n\n def prepare_config(self, config):\n config = TransmissionBase.prepare_config(self, config)\n config.setdefault('action', 'add')\n config.setdefault('path', '')\n config.setdefault('main_file_only', False)\n config.setdefault('magnetization_timeout', 0)\n config.setdefault('include_subs', False)\n config.setdefault('rename_like_files', False)\n config.setdefault('include_files', [])\n return config\n\n @plugin.priority(120)\n def on_task_download(self, task, config):\n \"\"\"\n Call download plugin to generate the temp files we will load\n into deluge then verify they are valid torrents\n \"\"\"\n config = self.prepare_config(config)\n if not config['enabled']:\n return\n # If the download plugin is not enabled, we need to call it to get our temp .torrent files\n if 'download' not in task.config:\n download = plugin.get('download', self)\n for entry in task.accepted:\n if entry.get('transmission_id'):\n # The torrent is already loaded in deluge, we don't need to get anything\n continue\n if config['action'] != 'add' and entry.get('torrent_info_hash'):\n # If we aren't adding the torrent new, all we need is info hash\n continue\n download.get_temp_file(task, entry, handle_magnets=True, fail_html=True)\n\n @plugin.priority(135)\n def on_task_output(self, task, config):\n config = self.prepare_config(config)\n # don't add when learning\n if task.options.learn:\n return\n if not config['enabled']:\n return\n # Do not run if there is nothing to do\n if not task.accepted:\n return\n if self.client is None:\n self.client = self.create_rpc_client(config)\n if self.client:\n logger.debug('Successfully connected to transmission.')\n else:\n raise plugin.PluginError(\"Couldn't connect to transmission.\")\n session_torrents = self.client.get_torrents()\n for entry in task.accepted:\n if task.options.test:\n logger.info('Would {} {} in transmission.', config['action'], entry['title'])\n continue\n # Compile user options into appropriate dict\n options = self._make_torrent_options_dict(config, entry)\n torrent_info = None\n for t in session_torrents:\n if t.hashString.lower() == entry.get(\n 'torrent_info_hash', ''\n ).lower() or t.id == entry.get('transmission_id'):\n torrent_info = t\n logger.debug(\n 'Found {} already loaded in transmission as {}',\n entry['title'],\n torrent_info.name,\n )\n break\n\n if not torrent_info:\n if config['action'] != 'add':\n logger.warning(\n 'Cannot {} {} because it is not loaded in transmission.',\n config['action'],\n entry['title'],\n )\n continue\n downloaded = not entry['url'].startswith('magnet:')\n\n # Check that file is downloaded\n if downloaded and 'file' not in entry:\n entry.fail('`file` field missing?')\n continue\n\n # Verify the temp file exists\n if downloaded and not os.path.exists(entry['file']):\n tmp_path = os.path.join(task.manager.config_base, 'temp')\n logger.debug('entry: {}', entry)\n logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))\n entry.fail(\"Downloaded temp file '%s' doesn't exist!?\" % entry['file'])\n continue\n\n try:\n if downloaded:\n with open(entry['file'], 'rb') as f:\n filedump = base64.b64encode(f.read()).decode('utf-8')\n torrent_info = self.client.add_torrent(filedump, 30, **options['add'])\n else:\n if options['post'].get('magnetization_timeout', 0) > 0:\n options['add']['paused'] = False\n torrent_info = self.client.add_torrent(\n 
entry['url'], timeout=30, **options['add']\n )\n except TransmissionError as e:\n logger.opt(exception=True).debug('TransmissionError')\n logger.debug('Failed options dict: {}', options['add'])\n msg = 'Error adding {} to transmission. TransmissionError: {}'.format(\n entry['title'], e.message or 'N/A'\n )\n logger.error(msg)\n entry.fail(msg)\n continue\n logger.info('\"{}\" torrent added to transmission', entry['title'])\n # The info returned by the add call is incomplete, refresh it\n torrent_info = self.client.get_torrent(torrent_info.id)\n else:\n # Torrent already loaded in transmission\n if options['add'].get('download_dir'):\n logger.verbose(\n 'Moving {} to \"{}\"', torrent_info.name, options['add']['download_dir']\n )\n # Move data even if current reported torrent location matches new location\n # as transmission may fail to automatically move completed file to final\n # location but continue reporting final location instead of real location.\n # In such case this will kick transmission to really move data.\n # If data is already located at new location then transmission just ignore\n # this command.\n self.client.move_torrent_data(\n torrent_info.id, options['add']['download_dir'], 120\n )\n\n try:\n total_size = torrent_info.totalSize\n main_id = None\n find_main_file = (\n options['post'].get('main_file_only') or 'content_filename' in options['post']\n )\n skip_files = options['post'].get('skip_files')\n # We need to index the files if any of the following are defined\n if find_main_file or skip_files:\n file_list = self.client.get_files(torrent_info.id)[torrent_info.id]\n\n if options['post'].get('magnetization_timeout', 0) > 0 and not file_list:\n logger.debug(\n 'Waiting {} seconds for \"{}\" to magnetize',\n options['post']['magnetization_timeout'],\n entry['title'],\n )\n for _ in range(options['post']['magnetization_timeout']):\n sleep(1)\n file_list = self.client.get_files(torrent_info.id)[torrent_info.id]\n if file_list:\n total_size = self.client.get_torrent(\n torrent_info.id, ['id', 'totalSize']\n ).totalSize\n break\n else:\n logger.warning(\n '\"{}\" did not magnetize before the timeout elapsed, file list unavailable for processing.',\n entry['title'],\n )\n\n # Find files based on config\n dl_list = []\n skip_list = []\n main_list = []\n ext_list = ['*.srt', '*.sub', '*.idx', '*.ssa', '*.ass']\n\n main_ratio = config['main_file_ratio']\n if 'main_file_ratio' in options['post']:\n main_ratio = options['post']['main_file_ratio']\n\n for f in file_list:\n # No need to set main_id if we're not going to need it\n if find_main_file and file_list[f]['size'] > total_size * main_ratio:\n main_id = f\n\n if 'include_files' in options['post']:\n if any(\n fnmatch(file_list[f]['name'], mask)\n for mask in options['post']['include_files']\n ):\n dl_list.append(f)\n elif options['post'].get('include_subs') and any(\n fnmatch(file_list[f]['name'], mask) for mask in ext_list\n ):\n dl_list.append(f)\n\n if skip_files:\n if any(fnmatch(file_list[f]['name'], mask) for mask in skip_files):\n skip_list.append(f)\n\n if main_id is not None:\n # Look for files matching main ID title but with a different extension\n if options['post'].get('rename_like_files'):\n for f in file_list:\n # if this filename matches main filename we want to rename it as well\n fs = os.path.splitext(file_list[f]['name'])\n if fs[0] == os.path.splitext(file_list[main_id]['name'])[0]:\n main_list.append(f)\n else:\n main_list = [main_id]\n\n if main_id not in dl_list:\n dl_list.append(main_id)\n elif 
find_main_file:\n logger.warning(\n 'No files in \"{}\" are > {:.0f}% of content size, no files renamed.',\n entry['title'],\n main_ratio * 100,\n )\n\n # If we have a main file and want to rename it and associated files\n if 'content_filename' in options['post'] and main_id is not None:\n if 'download_dir' not in options['add']:\n download_dir = self.client.get_session().download_dir\n else:\n download_dir = options['add']['download_dir']\n\n # Get new filename without ext\n file_ext = os.path.splitext(file_list[main_id]['name'])[1]\n file_path = os.path.dirname(\n os.path.join(download_dir, file_list[main_id]['name'])\n )\n filename = options['post']['content_filename']\n if config['host'] == 'localhost' or config['host'] == '127.0.0.1':\n counter = 1\n while os.path.exists(os.path.join(file_path, filename + file_ext)):\n # Try appending a (#) suffix till a unique filename is found\n filename = '%s(%s)' % (\n options['post']['content_filename'],\n counter,\n )\n counter += 1\n else:\n logger.debug(\n 'Cannot ensure content_filename is unique '\n 'when adding to a remote transmission daemon.'\n )\n\n for index in main_list:\n file_ext = os.path.splitext(file_list[index]['name'])[1]\n logger.debug(\n 'File {} renamed to {}',\n file_list[index]['name'],\n filename + file_ext,\n )\n # change to below when set_files will allow setting name, more efficient to have one call\n # fl[index]['name'] = os.path.basename(pathscrub(filename + file_ext).encode('utf-8'))\n try:\n self.client.rename_torrent_path(\n torrent_info.id,\n file_list[index]['name'],\n os.path.basename(str(pathscrub(filename + file_ext))),\n )\n except TransmissionError:\n logger.error(\n 'content_filename only supported with transmission 2.8+'\n )\n\n if options['post'].get('main_file_only') and main_id is not None:\n # Set Unwanted Files\n options['change']['files_unwanted'] = [\n x for x in file_list if x not in dl_list\n ]\n options['change']['files_wanted'] = dl_list\n logger.debug(\n 'Downloading {} of {} files in torrent.',\n len(options['change']['files_wanted']),\n len(file_list),\n )\n elif (\n not options['post'].get('main_file_only') or main_id is None\n ) and skip_files:\n # If no main file and we want to skip files\n\n if len(skip_list) >= len(file_list):\n logger.debug(\n 'skip_files filter would cause no files to be downloaded; '\n 'including all files in torrent.'\n )\n else:\n options['change']['files_unwanted'] = skip_list\n options['change']['files_wanted'] = [\n x for x in file_list if x not in skip_list\n ]\n logger.debug(\n 'Downloading {} of {} files in torrent.',\n len(options['change']['files_wanted']),\n len(file_list),\n )\n\n # Set any changed file properties\n if list(options['change'].keys()):\n self.client.change_torrent(torrent_info.id, 30, **options['change'])\n\n start_torrent = partial(self.client.start_torrent, [torrent_info.id])\n\n if config['action'] == 'add':\n # if add_paused was defined and set to False start the torrent;\n # prevents downloading data before we set what files we want\n start_paused = (\n options['post']['paused']\n if 'paused' in options['post']\n else not self.client.get_session().start_added_torrents\n )\n if start_paused:\n self.client.stop_torrent(torrent_info.id)\n else:\n self.client.start_torrent(torrent_info.id)\n elif config['action'] in ('remove', 'purge'):\n self.client.remove_torrent(\n [torrent_info.id], delete_data=config['action'] == 'purge'\n )\n logger.info('{}d {} from transmission', config['action'], torrent_info.name)\n elif config['action'] == 
'pause':\n self.client.stop_torrent([torrent_info.id])\n logger.info('paused {} in transmission', torrent_info.name)\n elif config['action'] == 'resume':\n start_torrent()\n logger.info('resumed {} in transmission', torrent_info.name)\n elif config['action'] == 'bypass_queue':\n start_torrent(bypass_queue=True)\n logger.info('resumed (bypass queue) {} in transmission', torrent_info.name)\n \n except TransmissionError as e:\n logger.opt(exception=True).debug('TransmissionError')\n logger.debug('Failed options dict: {}', options)\n msg = 'Error trying to {} {}, TransmissionError: {}'.format(\n config['action'], entry['title'], e.message or 'N/A'\n )\n logger.error(msg)\n continue\n\n def _make_torrent_options_dict(self, config, entry):\n\n opt_dic = {}\n\n for opt_key in (\n 'path',\n 'add_paused',\n 'honor_limits',\n 'bandwidth_priority',\n 'max_connections',\n 'max_up_speed',\n 'max_down_speed',\n 'ratio',\n 'main_file_only',\n 'main_file_ratio',\n 'magnetization_timeout',\n 'include_subs',\n 'content_filename',\n 'include_files',\n 'skip_files',\n 'rename_like_files',\n 'queue_position',\n ):\n # Values do not merge config with task\n # Task takes priority then config is used\n if opt_key in entry:\n opt_dic[opt_key] = entry[opt_key]\n elif opt_key in config:\n opt_dic[opt_key] = config[opt_key]\n\n options = {'add': {}, 'change': {}, 'post': {}}\n\n add = options['add']\n if opt_dic.get('path'):\n try:\n path = os.path.expanduser(entry.render(opt_dic['path']))\n except RenderError as e:\n logger.error('Error setting path for {}: {}', entry['title'], e)\n else:\n # Transmission doesn't like it when paths end in a separator\n path = path.rstrip('\\\\/')\n add['download_dir'] = pathscrub(path)\n # make sure we add it paused, will modify status after adding\n add['paused'] = True\n\n change = options['change']\n if 'bandwidth_priority' in opt_dic:\n change['bandwidthPriority'] = opt_dic['bandwidth_priority']\n if 'honor_limits' in opt_dic and not opt_dic['honor_limits']:\n change['honorsSessionLimits'] = False\n if 'max_up_speed' in opt_dic:\n change['uploadLimit'] = opt_dic['max_up_speed']\n change['uploadLimited'] = True\n if 'max_down_speed' in opt_dic:\n change['downloadLimit'] = opt_dic['max_down_speed']\n change['downloadLimited'] = True\n if 'max_connections' in opt_dic:\n change['peer_limit'] = opt_dic['max_connections']\n\n if 'ratio' in opt_dic:\n change['seedRatioLimit'] = opt_dic['ratio']\n if opt_dic['ratio'] == -1:\n # seedRatioMode:\n # 0 follow the global settings\n # 1 override the global settings, seeding until a certain ratio\n # 2 override the global settings, seeding regardless of ratio\n change['seedRatioMode'] = 2\n else:\n change['seedRatioMode'] = 1\n\n if 'queue_position' in opt_dic:\n change['queuePosition'] = opt_dic['queue_position']\n\n post = options['post']\n # set to modify paused status after\n if 'add_paused' in opt_dic:\n post['paused'] = opt_dic['add_paused']\n if 'main_file_only' in opt_dic:\n post['main_file_only'] = opt_dic['main_file_only']\n if 'main_file_ratio' in opt_dic:\n post['main_file_ratio'] = opt_dic['main_file_ratio']\n if 'magnetization_timeout' in opt_dic:\n post['magnetization_timeout'] = opt_dic['magnetization_timeout']\n if 'include_subs' in opt_dic:\n post['include_subs'] = opt_dic['include_subs']\n if 'content_filename' in opt_dic:\n try:\n post['content_filename'] = entry.render(opt_dic['content_filename'])\n except RenderError as e:\n logger.error('Unable to render content_filename {}: {}', entry['title'], e)\n if 'skip_files' in 
opt_dic:\n post['skip_files'] = opt_dic['skip_files']\n if not isinstance(post['skip_files'], list):\n post['skip_files'] = [post['skip_files']]\n if 'include_files' in opt_dic:\n post['include_files'] = opt_dic['include_files']\n if not isinstance(post['include_files'], list):\n post['include_files'] = [post['include_files']]\n if 'rename_like_files' in opt_dic:\n post['rename_like_files'] = opt_dic['rename_like_files']\n return options\n\n def on_task_learn(self, task, config):\n \"\"\" Make sure all temp files are cleaned up when entries are learned \"\"\"\n # If download plugin is enabled, it will handle cleanup.\n if 'download' not in task.config:\n download = plugin.get('download', self)\n download.cleanup_temp_files(task)\n\n on_task_abort = on_task_learn\n\n\nclass PluginTransmissionClean(TransmissionBase):\n \"\"\"\n DEPRECATED: A separate task using from_transmission and transmission with remove action should be used instead.\n\n Remove completed torrents from Transmission.\n\n Examples::\n\n clean_transmission: yes # ignore both time and ratio\n\n clean_transmission: # uses transmission's internal limits for idle time and seed ratio ( if defined )\n transmission_seed_limits: yes\n\n clean_transmission: # matches time only\n finished_for: 2 hours\n\n clean_transmission: # matches ratio only\n min_ratio: 0.5\n\n clean_transmission: # matches time OR ratio\n finished_for: 2 hours\n min_ratio: 0.5\n\n Default values for the config elements::\n\n clean_transmission:\n host: localhost\n port: 9091\n enabled: yes\n \"\"\"\n\n schema = {\n \"deprecated\": \"The clean_transmission plugin is deprecated. Configure a new task using the from_transmission \"\n \"plugin as well as the transmission plugin using the remove or purge action.\",\n \"anyOf\": [\n {\"type\": \"boolean\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"integer\"},\n \"netrc\": {\"type\": \"string\", \"format\": \"file\"},\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"enabled\": {\"type\": \"boolean\"},\n \"min_ratio\": {\"type\": \"number\"},\n \"finished_for\": {\"type\": \"string\", \"format\": \"interval\"},\n \"transmission_seed_limits\": {\"type\": \"boolean\"},\n \"delete_files\": {\"type\": \"boolean\"},\n \"tracker\": {\"type\": \"string\", \"format\": \"regex\"},\n \"preserve_tracker\": {\"type\": \"string\", \"format\": \"regex\"},\n \"directories\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\", \"format\": \"regex\"},\n },\n },\n \"additionalProperties\": False,\n },\n ],\n }\n\n def on_task_exit(self, task, config):\n config = self.prepare_config(config)\n if not config['enabled'] or task.options.learn:\n return\n if not self.client:\n self.client = self.create_rpc_client(config)\n tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None\n preserve_tracker_re = (\n re.compile(config['preserve_tracker'], re.IGNORECASE)\n if 'preserve_tracker' in config\n else None\n )\n\n session = self.client.get_session()\n\n remove_ids = []\n for torrent in self.client.get_torrents():\n logger.verbose(\n 'Torrent \"{}\": status: \"{}\" - ratio: {} - date added: {}',\n torrent.name,\n torrent.status,\n torrent.ratio,\n torrent.date_added,\n )\n downloaded, dummy = self.torrent_info(torrent, config)\n if not downloaded:\n continue\n if config.get('transmission_seed_limits'):\n seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)\n if not seed_ratio_ok or not 
idle_limit_ok:\n continue\n if 'min_ratio' in config:\n if torrent.ratio < config['min_ratio']:\n continue\n if 'finished_for' in config:\n # done date might be invalid if this torrent was added to transmission when already completed\n started_seeding = datetime.fromtimestamp(max(torrent.addedDate, torrent.doneDate))\n if started_seeding + parse_timedelta(config['finished_for']) > datetime.now():\n continue\n tracker_hosts = (\n urlparse(tracker['announce']).hostname for tracker in torrent.trackers\n )\n if 'tracker' in config:\n if not any(tracker_re.search(tracker) for tracker in tracker_hosts):\n continue\n if 'preserve_tracker' in config:\n if any(preserve_tracker_re.search(tracker) for tracker in tracker_hosts):\n continue\n if config.get('directories'):\n if not any(\n re.search(d, torrent.downloadDir, re.IGNORECASE) for d in config['directories']\n ):\n continue\n if task.options.test:\n logger.info('Would remove finished torrent `{}` from transmission', torrent.name)\n continue\n logger.info('Removing finished torrent `{}` from transmission', torrent.name)\n remove_ids.append(torrent.id)\n if remove_ids:\n self.client.remove_torrent(remove_ids, config.get('delete_files'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginTransmission, 'transmission', api_ver=2)\n plugin.register(PluginTransmissionInput, 'from_transmission', api_ver=2)\n plugin.register(PluginTransmissionClean, 'clean_transmission', api_ver=2)\n", "path": "flexget/plugins/clients/transmission.py" } ]
[ { "content": "import base64\nimport os\nimport re\nfrom datetime import datetime, timedelta\nfrom fnmatch import fnmatch\nfrom netrc import NetrcParseError, netrc\nfrom time import sleep\nfrom urllib.parse import urlparse\n\nfrom functools import partial\nfrom loguru import logger\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.pathscrub import pathscrub\nfrom flexget.utils.template import RenderError\nfrom flexget.utils.tools import parse_timedelta\n\ntry:\n import transmissionrpc\n from transmissionrpc import HTTPHandlerError, TransmissionError\nexcept ImportError:\n # If transmissionrpc is not found, errors will be shown later\n pass\n\nlogger = logger.bind(name='transmission')\n\n\nclass TransmissionBase:\n def __init__(self):\n self.client = None\n self.opener = None\n\n def prepare_config(self, config):\n if isinstance(config, bool):\n config = {'enabled': config}\n config.setdefault('enabled', True)\n config.setdefault('host', 'localhost')\n config.setdefault('port', 9091)\n config.setdefault('main_file_ratio', 0.9)\n if 'netrc' in config:\n netrc_path = os.path.expanduser(config['netrc'])\n try:\n config['username'], _, config['password'] = netrc(netrc_path).authenticators(\n config['host']\n )\n except OSError as e:\n logger.error('netrc: unable to open: {}', e.filename)\n except NetrcParseError as e:\n logger.error('netrc: {}, file: {}, line: {}', e.msg, e.filename, e.lineno)\n return config\n\n def create_rpc_client(self, config):\n user, password = config.get('username'), config.get('password')\n\n try:\n cli = transmissionrpc.Client(config['host'], config['port'], user, password)\n except TransmissionError as e:\n if isinstance(e.original, HTTPHandlerError):\n if e.original.code == 111:\n raise plugin.PluginError(\"Cannot connect to transmission. Is it running?\")\n elif e.original.code == 401:\n raise plugin.PluginError(\n \"Username/password for transmission is incorrect. 
Cannot connect.\"\n )\n elif e.original.code == 110:\n raise plugin.PluginError(\n \"Cannot connect to transmission: Connection timed out.\"\n )\n else:\n raise plugin.PluginError(\n \"Error connecting to transmission: %s\" % e.original.message\n )\n else:\n raise plugin.PluginError(\"Error connecting to transmission: %s\" % e.message)\n return cli\n\n def torrent_info(self, torrent, config):\n done = torrent.totalSize > 0\n vloc = None\n best = None\n for t in torrent.files().items():\n tf = t[1]\n if tf['selected']:\n if tf['size'] <= 0 or tf['completed'] < tf['size']:\n done = False\n break\n if not best or tf['size'] > best[1]:\n best = (tf['name'], tf['size'])\n if (\n done\n and best\n and (100 * float(best[1]) / float(torrent.totalSize))\n >= (config['main_file_ratio'] * 100)\n ):\n vloc = ('%s/%s' % (torrent.downloadDir, best[0])).replace('/', os.sep)\n return done, vloc\n\n def check_seed_limits(self, torrent, session):\n seed_limit_ok = True # will remain if no seed ratio defined\n idle_limit_ok = True # will remain if no idle limit defined\n\n if torrent.seedRatioMode == 1: # use torrent's own seed ratio limit\n seed_limit_ok = torrent.uploadRatio >= torrent.seedRatioLimit\n elif torrent.seedRatioMode == 0: # use global rules\n if session.seedRatioLimited:\n seed_limit_ok = torrent.uploadRatio >= session.seedRatioLimit\n\n if torrent.seedIdleMode == 1: # use torrent's own idle limit\n idle_limit_ok = (\n torrent.date_active + timedelta(minutes=torrent.seedIdleLimit) < datetime.now()\n )\n elif torrent.seedIdleMode == 0: # use global rules\n if session.idle_seeding_limit_enabled:\n idle_limit_ok = (\n torrent.date_active + timedelta(minutes=session.idle_seeding_limit)\n < datetime.now()\n )\n\n return seed_limit_ok, idle_limit_ok\n\n def on_task_start(self, task, config):\n try:\n import transmissionrpc\n from transmissionrpc import HTTPHandlerError # noqa\n from transmissionrpc import TransmissionError # noqa\n except:\n raise plugin.PluginError(\n 'Transmissionrpc module version 0.11 or higher required.', logger\n )\n if [int(part) for part in transmissionrpc.__version__.split('.')] < [0, 11]:\n raise plugin.PluginError(\n 'Transmissionrpc module version 0.11 or higher required, please upgrade', logger\n )\n\n # Mark rpc client for garbage collector so every task can start\n # a fresh new according its own config - fix to bug #2804\n self.client = None\n config = self.prepare_config(config)\n if config['enabled']:\n if task.options.test:\n logger.info('Trying to connect to transmission...')\n self.client = self.create_rpc_client(config)\n if self.client:\n logger.info('Successfully connected to transmission.')\n else:\n logger.error('It looks like there was a problem connecting to transmission.')\n\n\nclass PluginTransmissionInput(TransmissionBase):\n schema = {\n 'anyOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'host': {'type': 'string'},\n 'port': {'type': 'integer'},\n 'netrc': {'type': 'string', 'format': 'file'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'enabled': {'type': 'boolean'},\n 'only_complete': {'type': 'boolean'},\n },\n 'additionalProperties': False,\n },\n ]\n }\n\n def prepare_config(self, config):\n config = TransmissionBase.prepare_config(self, config)\n config.setdefault('only_complete', False)\n return config\n\n def on_task_input(self, task, config):\n config = self.prepare_config(config)\n if not config['enabled']:\n return\n\n if not self.client:\n self.client = self.create_rpc_client(config)\n 
entries = []\n\n # Hack/Workaround for http://flexget.com/ticket/2002\n # TODO: Proper fix\n if 'username' in config and 'password' in config:\n self.client.http_handler.set_authentication(\n self.client.url, config['username'], config['password']\n )\n\n session = self.client.get_session()\n\n for torrent in self.client.get_torrents():\n seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)\n if config['only_complete'] and not (\n seed_ratio_ok and idle_limit_ok and torrent.progress == 100\n ):\n continue\n entry = Entry(\n title=torrent.name,\n url='',\n torrent_info_hash=torrent.hashString,\n content_size=torrent.totalSize / (1024 * 1024),\n )\n # Location of torrent is only valid if transmission is on same machine as flexget\n if config['host'] in ('localhost', '127.0.0.1'):\n entry['location'] = torrent.torrentFile\n entry['url'] = 'file://' + torrent.torrentFile\n for attr in [\n 'id',\n 'comment',\n 'desiredAvailable',\n 'downloadDir',\n 'isFinished',\n 'isPrivate',\n 'leftUntilDone',\n 'ratio',\n 'status',\n 'date_active',\n 'date_added',\n 'date_done',\n 'date_started',\n 'errorString',\n 'priority',\n 'progress',\n 'secondsDownloading',\n 'secondsSeeding',\n 'torrentFile',\n ]:\n try:\n entry['transmission_' + attr] = getattr(torrent, attr)\n except Exception:\n logger.opt(exception=True).debug(\n 'error when requesting transmissionrpc attribute {}', attr\n )\n # Availability in percent\n entry['transmission_availability'] = (torrent.desiredAvailable / torrent.leftUntilDone) if torrent.leftUntilDone else 0\n \n entry['transmission_trackers'] = [t['announce'] for t in torrent.trackers]\n entry['transmission_seed_ratio_ok'] = seed_ratio_ok\n entry['transmission_idle_limit_ok'] = idle_limit_ok\n st_error_to_desc = {\n 0: 'OK',\n 1: 'tracker_warning',\n 2: 'tracker_error',\n 3: 'local_error',\n }\n entry['transmission_error_state'] = st_error_to_desc[torrent.error]\n # Built in done_date doesn't work when user adds an already completed file to transmission\n if torrent.progress == 100:\n entry['transmission_date_done'] = datetime.fromtimestamp(\n max(torrent.addedDate, torrent.doneDate)\n )\n entries.append(entry)\n return entries\n\n\nclass PluginTransmission(TransmissionBase):\n \"\"\"\n Add url from entry url to transmission\n\n Example::\n\n transmission:\n host: localhost\n port: 9091\n netrc: /home/flexget/.tmnetrc\n username: myusername\n password: mypassword\n path: the download location\n\n Default values for the config elements::\n\n transmission:\n host: localhost\n port: 9091\n enabled: yes\n \"\"\"\n\n schema = {\n 'anyOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'host': {'type': 'string'},\n 'port': {'type': 'integer'},\n 'netrc': {'type': 'string'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'action': {\n 'type': 'string',\n 'enum': ['add', 'remove', 'purge', 'pause', 'resume', 'bypass_queue'],\n },\n 'path': {'type': 'string'},\n 'max_up_speed': {'type': 'number'},\n 'max_down_speed': {'type': 'number'},\n 'max_connections': {'type': 'integer'},\n 'ratio': {'type': 'number'},\n 'add_paused': {'type': 'boolean'},\n 'content_filename': {'type': 'string'},\n 'main_file_only': {'type': 'boolean'},\n 'main_file_ratio': {'type': 'number'},\n 'magnetization_timeout': {'type': 'integer'},\n 'enabled': {'type': 'boolean'},\n 'include_subs': {'type': 'boolean'},\n 'bandwidth_priority': {'type': 'number'},\n 'honor_limits': {'type': 'boolean'},\n 'include_files': one_or_more({'type': 'string'}),\n 
'skip_files': one_or_more({'type': 'string'}),\n 'rename_like_files': {'type': 'boolean'},\n 'queue_position': {'type': 'integer'},\n },\n 'additionalProperties': False,\n },\n ]\n }\n\n def prepare_config(self, config):\n config = TransmissionBase.prepare_config(self, config)\n config.setdefault('action', 'add')\n config.setdefault('path', '')\n config.setdefault('main_file_only', False)\n config.setdefault('magnetization_timeout', 0)\n config.setdefault('include_subs', False)\n config.setdefault('rename_like_files', False)\n config.setdefault('include_files', [])\n return config\n\n @plugin.priority(120)\n def on_task_download(self, task, config):\n \"\"\"\n Call download plugin to generate the temp files we will load\n into deluge then verify they are valid torrents\n \"\"\"\n config = self.prepare_config(config)\n if not config['enabled']:\n return\n # If the download plugin is not enabled, we need to call it to get our temp .torrent files\n if 'download' not in task.config:\n download = plugin.get('download', self)\n for entry in task.accepted:\n if entry.get('transmission_id'):\n # The torrent is already loaded in deluge, we don't need to get anything\n continue\n if config['action'] != 'add' and entry.get('torrent_info_hash'):\n # If we aren't adding the torrent new, all we need is info hash\n continue\n download.get_temp_file(task, entry, handle_magnets=True, fail_html=True)\n\n @plugin.priority(135)\n def on_task_output(self, task, config):\n config = self.prepare_config(config)\n # don't add when learning\n if task.options.learn:\n return\n if not config['enabled']:\n return\n # Do not run if there is nothing to do\n if not task.accepted:\n return\n if self.client is None:\n self.client = self.create_rpc_client(config)\n if self.client:\n logger.debug('Successfully connected to transmission.')\n else:\n raise plugin.PluginError(\"Couldn't connect to transmission.\")\n session_torrents = self.client.get_torrents()\n for entry in task.accepted:\n if task.options.test:\n logger.info('Would {} {} in transmission.', config['action'], entry['title'])\n continue\n # Compile user options into appropriate dict\n options = self._make_torrent_options_dict(config, entry)\n torrent_info = None\n for t in session_torrents:\n if t.hashString.lower() == entry.get(\n 'torrent_info_hash', ''\n ).lower() or t.id == entry.get('transmission_id'):\n torrent_info = t\n logger.debug(\n 'Found {} already loaded in transmission as {}',\n entry['title'],\n torrent_info.name,\n )\n break\n\n if not torrent_info:\n if config['action'] != 'add':\n logger.warning(\n 'Cannot {} {} because it is not loaded in transmission.',\n config['action'],\n entry['title'],\n )\n continue\n downloaded = not entry['url'].startswith('magnet:')\n\n # Check that file is downloaded\n if downloaded and 'file' not in entry:\n entry.fail('`file` field missing?')\n continue\n\n # Verify the temp file exists\n if downloaded and not os.path.exists(entry['file']):\n tmp_path = os.path.join(task.manager.config_base, 'temp')\n logger.debug('entry: {}', entry)\n logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))\n entry.fail(\"Downloaded temp file '%s' doesn't exist!?\" % entry['file'])\n continue\n\n try:\n if downloaded:\n with open(entry['file'], 'rb') as f:\n filedump = base64.b64encode(f.read()).decode('utf-8')\n torrent_info = self.client.add_torrent(filedump, 30, **options['add'])\n else:\n if options['post'].get('magnetization_timeout', 0) > 0:\n options['add']['paused'] = False\n torrent_info = self.client.add_torrent(\n 
entry['url'], timeout=30, **options['add']\n )\n except TransmissionError as e:\n logger.opt(exception=True).debug('TransmissionError')\n logger.debug('Failed options dict: {}', options['add'])\n msg = 'Error adding {} to transmission. TransmissionError: {}'.format(\n entry['title'], e.message or 'N/A'\n )\n logger.error(msg)\n entry.fail(msg)\n continue\n logger.info('\"{}\" torrent added to transmission', entry['title'])\n # The info returned by the add call is incomplete, refresh it\n torrent_info = self.client.get_torrent(torrent_info.id)\n else:\n # Torrent already loaded in transmission\n if options['add'].get('download_dir'):\n logger.verbose(\n 'Moving {} to \"{}\"', torrent_info.name, options['add']['download_dir']\n )\n # Move data even if current reported torrent location matches new location\n # as transmission may fail to automatically move completed file to final\n # location but continue reporting final location instead of real location.\n # In such case this will kick transmission to really move data.\n # If data is already located at new location then transmission just ignore\n # this command.\n self.client.move_torrent_data(\n torrent_info.id, options['add']['download_dir'], 120\n )\n\n try:\n total_size = torrent_info.totalSize\n main_id = None\n find_main_file = (\n options['post'].get('main_file_only') or 'content_filename' in options['post']\n )\n skip_files = options['post'].get('skip_files')\n # We need to index the files if any of the following are defined\n if find_main_file or skip_files:\n file_list = self.client.get_files(torrent_info.id)[torrent_info.id]\n\n if options['post'].get('magnetization_timeout', 0) > 0 and not file_list:\n logger.debug(\n 'Waiting {} seconds for \"{}\" to magnetize',\n options['post']['magnetization_timeout'],\n entry['title'],\n )\n for _ in range(options['post']['magnetization_timeout']):\n sleep(1)\n file_list = self.client.get_files(torrent_info.id)[torrent_info.id]\n if file_list:\n total_size = self.client.get_torrent(\n torrent_info.id, ['id', 'totalSize']\n ).totalSize\n break\n else:\n logger.warning(\n '\"{}\" did not magnetize before the timeout elapsed, file list unavailable for processing.',\n entry['title'],\n )\n\n # Find files based on config\n dl_list = []\n skip_list = []\n main_list = []\n ext_list = ['*.srt', '*.sub', '*.idx', '*.ssa', '*.ass']\n\n main_ratio = config['main_file_ratio']\n if 'main_file_ratio' in options['post']:\n main_ratio = options['post']['main_file_ratio']\n\n for f in file_list:\n # No need to set main_id if we're not going to need it\n if find_main_file and file_list[f]['size'] > total_size * main_ratio:\n main_id = f\n\n if 'include_files' in options['post']:\n if any(\n fnmatch(file_list[f]['name'], mask)\n for mask in options['post']['include_files']\n ):\n dl_list.append(f)\n elif options['post'].get('include_subs') and any(\n fnmatch(file_list[f]['name'], mask) for mask in ext_list\n ):\n dl_list.append(f)\n\n if skip_files:\n if any(fnmatch(file_list[f]['name'], mask) for mask in skip_files):\n skip_list.append(f)\n\n if main_id is not None:\n # Look for files matching main ID title but with a different extension\n if options['post'].get('rename_like_files'):\n for f in file_list:\n # if this filename matches main filename we want to rename it as well\n fs = os.path.splitext(file_list[f]['name'])\n if fs[0] == os.path.splitext(file_list[main_id]['name'])[0]:\n main_list.append(f)\n else:\n main_list = [main_id]\n\n if main_id not in dl_list:\n dl_list.append(main_id)\n elif 
find_main_file:\n logger.warning(\n 'No files in \"{}\" are > {:.0f}% of content size, no files renamed.',\n entry['title'],\n main_ratio * 100,\n )\n\n # If we have a main file and want to rename it and associated files\n if 'content_filename' in options['post'] and main_id is not None:\n if 'download_dir' not in options['add']:\n download_dir = self.client.get_session().download_dir\n else:\n download_dir = options['add']['download_dir']\n\n # Get new filename without ext\n file_ext = os.path.splitext(file_list[main_id]['name'])[1]\n file_path = os.path.dirname(\n os.path.join(download_dir, file_list[main_id]['name'])\n )\n filename = options['post']['content_filename']\n if config['host'] == 'localhost' or config['host'] == '127.0.0.1':\n counter = 1\n while os.path.exists(os.path.join(file_path, filename + file_ext)):\n # Try appending a (#) suffix till a unique filename is found\n filename = '%s(%s)' % (\n options['post']['content_filename'],\n counter,\n )\n counter += 1\n else:\n logger.debug(\n 'Cannot ensure content_filename is unique '\n 'when adding to a remote transmission daemon.'\n )\n\n for index in main_list:\n file_ext = os.path.splitext(file_list[index]['name'])[1]\n logger.debug(\n 'File {} renamed to {}',\n file_list[index]['name'],\n filename + file_ext,\n )\n # change to below when set_files will allow setting name, more efficient to have one call\n # fl[index]['name'] = os.path.basename(pathscrub(filename + file_ext).encode('utf-8'))\n try:\n self.client.rename_torrent_path(\n torrent_info.id,\n file_list[index]['name'],\n os.path.basename(str(pathscrub(filename + file_ext))),\n )\n except TransmissionError:\n logger.error(\n 'content_filename only supported with transmission 2.8+'\n )\n\n if options['post'].get('main_file_only') and main_id is not None:\n # Set Unwanted Files\n options['change']['files_unwanted'] = [\n x for x in file_list if x not in dl_list\n ]\n options['change']['files_wanted'] = dl_list\n logger.debug(\n 'Downloading {} of {} files in torrent.',\n len(options['change']['files_wanted']),\n len(file_list),\n )\n elif (\n not options['post'].get('main_file_only') or main_id is None\n ) and skip_files:\n # If no main file and we want to skip files\n\n if len(skip_list) >= len(file_list):\n logger.debug(\n 'skip_files filter would cause no files to be downloaded; '\n 'including all files in torrent.'\n )\n else:\n options['change']['files_unwanted'] = skip_list\n options['change']['files_wanted'] = [\n x for x in file_list if x not in skip_list\n ]\n logger.debug(\n 'Downloading {} of {} files in torrent.',\n len(options['change']['files_wanted']),\n len(file_list),\n )\n\n # Set any changed file properties\n if list(options['change'].keys()):\n self.client.change_torrent(torrent_info.id, 30, **options['change'])\n\n start_torrent = partial(self.client.start_torrent, [torrent_info.id])\n\n if config['action'] == 'add':\n # if add_paused was defined and set to False start the torrent;\n # prevents downloading data before we set what files we want\n start_paused = (\n options['post']['paused']\n if 'paused' in options['post']\n else not self.client.get_session().start_added_torrents\n )\n if start_paused:\n self.client.stop_torrent(torrent_info.id)\n else:\n self.client.start_torrent(torrent_info.id)\n elif config['action'] in ('remove', 'purge'):\n self.client.remove_torrent(\n [torrent_info.id], delete_data=config['action'] == 'purge'\n )\n logger.info('{}d {} from transmission', config['action'], torrent_info.name)\n elif config['action'] == 
'pause':\n self.client.stop_torrent([torrent_info.id])\n logger.info('paused {} in transmission', torrent_info.name)\n elif config['action'] == 'resume':\n start_torrent()\n logger.info('resumed {} in transmission', torrent_info.name)\n elif config['action'] == 'bypass_queue':\n start_torrent(bypass_queue=True)\n logger.info('resumed (bypass queue) {} in transmission', torrent_info.name)\n \n except TransmissionError as e:\n logger.opt(exception=True).debug('TransmissionError')\n logger.debug('Failed options dict: {}', options)\n msg = 'Error trying to {} {}, TransmissionError: {}'.format(\n config['action'], entry['title'], e.message or 'N/A'\n )\n logger.error(msg)\n continue\n\n def _make_torrent_options_dict(self, config, entry):\n\n opt_dic = {}\n\n for opt_key in (\n 'path',\n 'add_paused',\n 'honor_limits',\n 'bandwidth_priority',\n 'max_connections',\n 'max_up_speed',\n 'max_down_speed',\n 'ratio',\n 'main_file_only',\n 'main_file_ratio',\n 'magnetization_timeout',\n 'include_subs',\n 'content_filename',\n 'include_files',\n 'skip_files',\n 'rename_like_files',\n 'queue_position',\n ):\n # Values do not merge config with task\n # Task takes priority then config is used\n if opt_key in entry:\n opt_dic[opt_key] = entry[opt_key]\n elif opt_key in config:\n opt_dic[opt_key] = config[opt_key]\n\n options = {'add': {}, 'change': {}, 'post': {}}\n\n add = options['add']\n if opt_dic.get('path'):\n try:\n path = os.path.expanduser(entry.render(opt_dic['path']))\n except RenderError as e:\n logger.error('Error setting path for {}: {}', entry['title'], e)\n else:\n # Transmission doesn't like it when paths end in a separator\n path = path.rstrip('\\\\/')\n add['download_dir'] = pathscrub(path)\n # make sure we add it paused, will modify status after adding\n add['paused'] = True\n\n change = options['change']\n if 'bandwidth_priority' in opt_dic:\n change['bandwidthPriority'] = opt_dic['bandwidth_priority']\n if 'honor_limits' in opt_dic and not opt_dic['honor_limits']:\n change['honorsSessionLimits'] = False\n if 'max_up_speed' in opt_dic:\n change['uploadLimit'] = opt_dic['max_up_speed']\n change['uploadLimited'] = True\n if 'max_down_speed' in opt_dic:\n change['downloadLimit'] = opt_dic['max_down_speed']\n change['downloadLimited'] = True\n if 'max_connections' in opt_dic:\n change['peer_limit'] = opt_dic['max_connections']\n\n if 'ratio' in opt_dic:\n change['seedRatioLimit'] = opt_dic['ratio']\n if opt_dic['ratio'] == -1:\n # seedRatioMode:\n # 0 follow the global settings\n # 1 override the global settings, seeding until a certain ratio\n # 2 override the global settings, seeding regardless of ratio\n change['seedRatioMode'] = 2\n else:\n change['seedRatioMode'] = 1\n\n if 'queue_position' in opt_dic:\n change['queuePosition'] = opt_dic['queue_position']\n\n post = options['post']\n # set to modify paused status after\n if 'add_paused' in opt_dic:\n post['paused'] = opt_dic['add_paused']\n if 'main_file_only' in opt_dic:\n post['main_file_only'] = opt_dic['main_file_only']\n if 'main_file_ratio' in opt_dic:\n post['main_file_ratio'] = opt_dic['main_file_ratio']\n if 'magnetization_timeout' in opt_dic:\n post['magnetization_timeout'] = opt_dic['magnetization_timeout']\n if 'include_subs' in opt_dic:\n post['include_subs'] = opt_dic['include_subs']\n if 'content_filename' in opt_dic:\n try:\n post['content_filename'] = entry.render(opt_dic['content_filename'])\n except RenderError as e:\n logger.error('Unable to render content_filename {}: {}', entry['title'], e)\n if 'skip_files' in 
opt_dic:\n post['skip_files'] = opt_dic['skip_files']\n if not isinstance(post['skip_files'], list):\n post['skip_files'] = [post['skip_files']]\n if 'include_files' in opt_dic:\n post['include_files'] = opt_dic['include_files']\n if not isinstance(post['include_files'], list):\n post['include_files'] = [post['include_files']]\n if 'rename_like_files' in opt_dic:\n post['rename_like_files'] = opt_dic['rename_like_files']\n return options\n\n def on_task_learn(self, task, config):\n \"\"\" Make sure all temp files are cleaned up when entries are learned \"\"\"\n # If download plugin is enabled, it will handle cleanup.\n if 'download' not in task.config:\n download = plugin.get('download', self)\n download.cleanup_temp_files(task)\n\n on_task_abort = on_task_learn\n\n\nclass PluginTransmissionClean(TransmissionBase):\n \"\"\"\n DEPRECATED: A separate task using from_transmission and transmission with remove action should be used instead.\n\n Remove completed torrents from Transmission.\n\n Examples::\n\n clean_transmission: yes # ignore both time and ratio\n\n clean_transmission: # uses transmission's internal limits for idle time and seed ratio ( if defined )\n transmission_seed_limits: yes\n\n clean_transmission: # matches time only\n finished_for: 2 hours\n\n clean_transmission: # matches ratio only\n min_ratio: 0.5\n\n clean_transmission: # matches time OR ratio\n finished_for: 2 hours\n min_ratio: 0.5\n\n Default values for the config elements::\n\n clean_transmission:\n host: localhost\n port: 9091\n enabled: yes\n \"\"\"\n\n schema = {\n \"deprecated\": \"The clean_transmission plugin is deprecated. Configure a new task using the from_transmission \"\n \"plugin as well as the transmission plugin using the remove or purge action.\",\n \"anyOf\": [\n {\"type\": \"boolean\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"integer\"},\n \"netrc\": {\"type\": \"string\", \"format\": \"file\"},\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"enabled\": {\"type\": \"boolean\"},\n \"min_ratio\": {\"type\": \"number\"},\n \"finished_for\": {\"type\": \"string\", \"format\": \"interval\"},\n \"transmission_seed_limits\": {\"type\": \"boolean\"},\n \"delete_files\": {\"type\": \"boolean\"},\n \"tracker\": {\"type\": \"string\", \"format\": \"regex\"},\n \"preserve_tracker\": {\"type\": \"string\", \"format\": \"regex\"},\n \"directories\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\", \"format\": \"regex\"},\n },\n },\n \"additionalProperties\": False,\n },\n ],\n }\n\n def on_task_exit(self, task, config):\n config = self.prepare_config(config)\n if not config['enabled'] or task.options.learn:\n return\n if not self.client:\n self.client = self.create_rpc_client(config)\n tracker_re = re.compile(config['tracker'], re.IGNORECASE) if 'tracker' in config else None\n preserve_tracker_re = (\n re.compile(config['preserve_tracker'], re.IGNORECASE)\n if 'preserve_tracker' in config\n else None\n )\n\n session = self.client.get_session()\n\n remove_ids = []\n for torrent in self.client.get_torrents():\n logger.verbose(\n 'Torrent \"{}\": status: \"{}\" - ratio: {} - date added: {}',\n torrent.name,\n torrent.status,\n torrent.ratio,\n torrent.date_added,\n )\n downloaded, dummy = self.torrent_info(torrent, config)\n if not downloaded:\n continue\n if config.get('transmission_seed_limits'):\n seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)\n if not seed_ratio_ok or not 
idle_limit_ok:\n continue\n if 'min_ratio' in config:\n if torrent.ratio < config['min_ratio']:\n continue\n if 'finished_for' in config:\n # done date might be invalid if this torrent was added to transmission when already completed\n started_seeding = datetime.fromtimestamp(max(torrent.addedDate, torrent.doneDate))\n if started_seeding + parse_timedelta(config['finished_for']) > datetime.now():\n continue\n tracker_hosts = (\n urlparse(tracker['announce']).hostname for tracker in torrent.trackers\n )\n if 'tracker' in config:\n if not any(tracker_re.search(tracker) for tracker in tracker_hosts):\n continue\n if 'preserve_tracker' in config:\n if any(preserve_tracker_re.search(tracker) for tracker in tracker_hosts):\n continue\n if config.get('directories'):\n if not any(\n re.search(d, torrent.downloadDir, re.IGNORECASE) for d in config['directories']\n ):\n continue\n if task.options.test:\n logger.info('Would remove finished torrent `{}` from transmission', torrent.name)\n continue\n logger.info('Removing finished torrent `{}` from transmission', torrent.name)\n remove_ids.append(torrent.id)\n if remove_ids:\n self.client.remove_torrent(remove_ids, config.get('delete_files'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginTransmission, 'transmission', api_ver=2)\n plugin.register(PluginTransmissionInput, 'from_transmission', api_ver=2)\n plugin.register(PluginTransmissionClean, 'clean_transmission', api_ver=2)\n", "path": "flexget/plugins/clients/transmission.py" } ]
diff --git a/flexget/plugins/clients/transmission.py b/flexget/plugins/clients/transmission.py
index c200265e69..ca86b84564 100644
--- a/flexget/plugins/clients/transmission.py
+++ b/flexget/plugins/clients/transmission.py
@@ -7,6 +7,7 @@
 from time import sleep
 from urllib.parse import urlparse
 
+from functools import partial
 from loguru import logger
 
 from flexget import plugin
CTPUG__wafer-111
[ { "content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n fields = ['name', 'content']\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py" } ]
[ { "content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py" } ]
diff --git a/wafer/pages/views.py b/wafer/pages/views.py
index 70dd7fb1..fee10798 100644
--- a/wafer/pages/views.py
+++ b/wafer/pages/views.py
@@ -15,7 +15,6 @@ class EditPage(UpdateView):
     template_name = 'wafer.pages/page_form.html'
     model = Page
     form_class = PageForm
-    fields = ['name', 'content']
 
 
 def slug(request, url):
dbt-labs__dbt-core-1148
[ { "content": "import codecs\nimport linecache\nimport os\n\nimport jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n 
dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None, exc=None):\n super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'), path))\n\n # match jinja's message\n dbt.exceptions.raise_compiler_error(\n \"{!r} is undefined\".format(self.name),\n node=self.node\n )\n\n def __getitem__(self, name):\n # Propagate the undefined value if a caller accesses this as if it\n # were a dictionary\n return self\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': []\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = 
get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py" } ]
[ { "content": "import codecs\nimport linecache\nimport os\n\nimport jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n 
dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None, exc=None):\n super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'), path))\n\n # match jinja's message\n dbt.exceptions.raise_compiler_error(\n \"{!r} is undefined\".format(self.name),\n node=self.node\n )\n\n def __getitem__(self, name):\n # Propagate the undefined value if a caller accesses this as if it\n # were a dictionary\n return self\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': ['jinja2.ext.do']\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = 
get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py" } ]
diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py
index 9f85c553116..40cf1ae5950 100644
--- a/dbt/clients/jinja.py
+++ b/dbt/clients/jinja.py
@@ -246,7 +246,7 @@ def __call__(self, *args, **kwargs):
 
 def get_environment(node=None, capture_macros=False):
     args = {
-        'extensions': []
+        'extensions': ['jinja2.ext.do']
     }
 
     if capture_macros:
diff --git a/test/unit/test_jinja.py b/test/unit/test_jinja.py
new file mode 100644
index 00000000000..21abd573b5b
--- /dev/null
+++ b/test/unit/test_jinja.py
@@ -0,0 +1,11 @@
+import unittest
+
+from dbt.clients.jinja import get_template
+
+class TestJinja(unittest.TestCase):
+    def test_do(self):
+        s = '{% set my_dict = {} %}\n{% do my_dict.update(a=1) %}'
+
+        template = get_template(s, {})
+        mod = template.make_module()
+        self.assertEqual(mod.my_dict, {'a': 1})
ranaroussi__yfinance-32
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance Fix for Pandas Datareader\n# https://github.com/ranaroussi/fix-yahoo-finance\n#\n# Copyright 2017 Ran Aroussi\n#\n# Licensed under the GNU Lesser General Public License, v3.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.gnu.org/licenses/lgpl-3.0.en.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\n__version__ = \"0.0.18\"\n__author__ = \"Ran Aroussi\"\n__all__ = ['download', 'get_yahoo_crumb', 'parse_ticker_csv']\n\n\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport time\nimport io\nimport requests\nimport re\nimport warnings\nimport sys\nimport multitasking\n\nwarnings.simplefilter(\"once\")\nwarnings.warn(\"\"\"\n Auto-overriding of pandas_datareader's get_data_yahoo() is deprecated and will be removed in future versions.\n Use pdr_override() to explicitly override it.\"\"\",\n DeprecationWarning)\n\n_YAHOO_COOKIE_ = ''\n_YAHOO_CRUMB_ = ''\n_YAHOO_CHECKED_ = None\n_YAHOO_TTL_ = 180\n\n\ndef get_yahoo_crumb(force=False):\n global _YAHOO_COOKIE_, _YAHOO_CRUMB_, _YAHOO_CHECKED_, _YAHOO_TTL_\n\n # use same cookie for 5 min\n if _YAHOO_CHECKED_ and not force:\n now = datetime.datetime.now()\n delta = (now - _YAHOO_CHECKED_).total_seconds()\n if delta < _YAHOO_TTL_:\n return (_YAHOO_CRUMB_, _YAHOO_COOKIE_)\n\n res = requests.get('https://finance.yahoo.com/quote/SPY/history')\n _YAHOO_COOKIE_ = res.cookies['B']\n\n pattern = re.compile('.*\"CrumbStore\":\\{\"crumb\":\"(?P<crumb>[^\"]+)\"\\}')\n for line in res.text.splitlines():\n m = pattern.match(line)\n if m is not None:\n _YAHOO_CRUMB_ = m.groupdict()['crumb']\n\n # set global params\n _YAHOO_CHECKED_ = datetime.datetime.now()\n\n return (_YAHOO_CRUMB_, _YAHOO_COOKIE_)\n\n\ndef parse_ticker_csv(csv_str, auto_adjust):\n df = pd.read_csv(csv_str, index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n df.index = pd.to_datetime(df.index)\n df = df.apply(pd.to_numeric)\n df['Volume'] = df['Volume'].fillna(0).astype(int)\n\n if auto_adjust:\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[['Open', 'High', 'Low', 'Close', 'Volume']]\n\n return df.groupby(df.index).first()\n\n\n_DFS_ = {}\n_COMPLETED_ = 0\n_PROGRESS_BAR_ = False\n_FAILED_ = []\n\n\ndef make_chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef download(tickers, start=None, end=None, as_panel=True,\n group_by='column', auto_adjust=False, progress=True,\n actions=None, threads=1, *args, **kwargs):\n \"\"\"Download yahoo tickers\n :Parameters:\n\n tickers : str, list\n List of tickers to download\n start: str\n Download start date string (YYYY-MM-DD) or datetime. 
Default is 1950-01-01\n end: str\n Download end date string (YYYY-MM-DD) or datetime. Default is today\n as_panel : bool\n Return a multi-index DataFrame or Panel. Default is True (Panel), which is deprecated\n group_by : str\n Group by ticker or 'column' (default)\n auto_adjust: bool\n Adjust all OHLC automatically? Default is False\n actions: str\n Download dividend + stock splits data. Default is None (no actions)\n Options are 'inline' (returns history + actions) and 'only' (actions only)\n threads: int\n How may threads to use? Default is 1 thread\n \"\"\"\n\n global _DFS_, _COMPLETED_, _PROGRESS_BAR_, _FAILED_\n _COMPLETED_ = 0\n _FAILED_ = []\n\n # format start\n if start is None:\n start = int(time.mktime(time.strptime('1950-01-01', '%Y-%m-%d')))\n elif isinstance(start, datetime.datetime):\n start = int(time.mktime(start.timetuple()))\n else:\n start = int(time.mktime(time.strptime(str(start), '%Y-%m-%d')))\n\n # format end\n if end is None:\n end = int(time.mktime(datetime.datetime.now().timetuple()))\n elif isinstance(end, datetime.datetime):\n end = int(time.mktime(end.timetuple()))\n else:\n end = int(time.mktime(time.strptime(str(end), '%Y-%m-%d')))\n\n # create ticker list\n tickers = tickers if isinstance(tickers, list) else [tickers]\n tickers = [x.upper() for x in tickers]\n\n # initiate progress bar\n if progress:\n _PROGRESS_BAR_ = ProgressBar(len(tickers), 'downloaded')\n\n # download using single thread\n if threads is None or threads < 2:\n download_chunk(tickers, start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n # threaded download\n else:\n threads = min([threads, len(tickers)])\n\n # download in chunks\n chunks = 0\n for chunk in make_chunks(tickers, max([1, len(tickers) // threads])):\n chunks += len(chunk)\n download_thread(chunk, start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n if len(tickers[-chunks:]) > 0:\n download_thread(tickers[-chunks:], start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n\n # wait for completion\n while _COMPLETED_ < len(tickers):\n time.sleep(0.1)\n\n _PROGRESS_BAR_.completed()\n\n # create panel (derecated)\n if as_panel:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n data = pd.Panel(_DFS_)\n if group_by == 'column':\n data = data.swapaxes(0, 2)\n\n # create multiIndex df\n else:\n data = pd.concat(_DFS_.values(), axis=1, keys=_DFS_.keys())\n if group_by == 'column':\n data.columns = data.columns.swaplevel(0, 1)\n data.sort_index(level=0, axis=1, inplace=True)\n if auto_adjust:\n data = data[['Open', 'High', 'Low', 'Close', 'Volume']]\n else:\n data = data[['Open', 'High', 'Low',\n 'Close', 'Adj Close', 'Volume']]\n\n # return single df if only one ticker\n if len(tickers) == 1:\n data = _DFS_[tickers[0]]\n\n if len(_FAILED_) > 0:\n print(\"\\nThe following tickers failed to download:\\n\",\n ', '.join(_FAILED_))\n\n return data\n\n\ndef download_one(ticker, start, end, interval, auto_adjust=None, actions=None):\n\n tried_once = False\n crumb, cookie = get_yahoo_crumb()\n\n url_str = \"https://query1.finance.yahoo.com/v7/finance/download/%s\"\n url_str += \"?period1=%s&period2=%s&interval=%s&events=%s&crumb=%s\"\n\n actions = None if '^' in ticker else actions\n\n if actions:\n url = url_str % (ticker, start, end, interval, 'div', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n # print(res)\n div = 
pd.DataFrame(columns=['action', 'value'])\n if \"error\" not in res:\n div = pd.read_csv(io.StringIO(res),\n index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n if isinstance(div, pd.DataFrame):\n div.index = pd.to_datetime(div.index)\n div[\"action\"] = \"DIVIDEND\"\n div = div.rename(columns={'Dividends': 'value'})\n div['value'] = div['value'].astype(float)\n\n # download Stock Splits data\n url = url_str % (ticker, start, end, interval, 'split', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n split = pd.DataFrame(columns=['action', 'value'])\n if \"error\" not in res:\n split = pd.read_csv(io.StringIO(res),\n index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n if isinstance(split, pd.DataFrame):\n split.index = pd.to_datetime(split.index)\n split[\"action\"] = \"SPLIT\"\n split = split.rename(columns={'Stock Splits': 'value'})\n if len(split.index) > 0:\n split['value'] = split.apply(\n lambda x: 1 / eval(x['value']), axis=1).astype(float)\n\n if actions == 'only':\n return pd.concat([div, split]).sort_index()\n\n # download history\n url = url_str % (ticker, start, end, interval, 'history', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n hist = pd.DataFrame(\n columns=['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume'])\n\n if \"error\" in res:\n return pd.DataFrame()\n\n hist = parse_ticker_csv(io.StringIO(res), auto_adjust)\n\n if len(hist.index) > 0:\n if actions is None:\n return hist\n\n hist['Dividends'] = div['value'] if len(div.index) > 0 else np.nan\n hist['Dividends'].fillna(0, inplace=True)\n hist['Stock Splits'] = split['value'] if len(\n split.index) > 0 else np.nan\n hist['Stock Splits'].fillna(1, inplace=True)\n\n return hist\n\n # empty len(hist.index) == 0\n if not tried_once:\n tried_once = True\n get_yahoo_crumb(force=True)\n return download_one(ticker, start, end, interval, auto_adjust, actions)\n\n\n@multitasking.task\ndef download_thread(tickers, start=None, end=None,\n auto_adjust=False, progress=True,\n actions=False, *args, **kwargs):\n download_chunk(tickers, start=None, end=None,\n auto_adjust=False, progress=progress,\n actions=False, *args, **kwargs)\n\n\ndef download_chunk(tickers, start=None, end=None,\n auto_adjust=False, progress=True,\n actions=False, *args, **kwargs):\n\n global _DFS_, _COMPLETED_, _PROGRESS_BAR_, _FAILED_\n\n interval = kwargs[\"interval\"] if \"interval\" in kwargs else \"1d\"\n\n # url template\n url_str = \"https://query1.finance.yahoo.com/v7/finance/download/%s\"\n url_str += \"?period1=%s&period2=%s&interval=%s&events=%s&crumb=%s\"\n\n # failed tickers collectors\n round1_failed_tickers = []\n\n # start downloading\n for ticker in tickers:\n\n # yahoo crumb/cookie\n crumb, cookie = get_yahoo_crumb()\n\n tried_once = False\n try:\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n round1_failed_tickers.append(ticker)\n except:\n # something went wrong...\n # try one more time using a new cookie/crumb\n if not tried_once:\n tried_once = True\n try:\n get_yahoo_crumb(force=True)\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n round1_failed_tickers.append(ticker)\n except:\n round1_failed_tickers.append(ticker)\n time.sleep(0.001)\n\n # try failed items again before giving up\n 
_COMPLETED_ += len(tickers) - len(round1_failed_tickers)\n\n if len(round1_failed_tickers) > 0:\n get_yahoo_crumb(force=True)\n for ticker in round1_failed_tickers:\n try:\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n _FAILED_.append(ticker)\n except:\n _FAILED_.append(ticker)\n pass\n time.sleep(0.000001)\n _COMPLETED_ += 1\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n sys.stdout.flush()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# make pandas datareader optional\n# otherwise can be called via fix_yahoo_finance.download(...)\ndef pdr_override():\n try:\n import pandas_datareader\n pandas_datareader.data.get_data_yahoo = download\n except:\n pass\n\npdr_override()\n", "path": "fix_yahoo_finance/__init__.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance Fix for Pandas Datareader\n# https://github.com/ranaroussi/fix-yahoo-finance\n#\n# Copyright 2017 Ran Aroussi\n#\n# Licensed under the GNU Lesser General Public License, v3.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.gnu.org/licenses/lgpl-3.0.en.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\n__version__ = \"0.0.18\"\n__author__ = \"Ran Aroussi\"\n__all__ = ['download', 'get_yahoo_crumb', 'parse_ticker_csv']\n\n\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport time\nimport io\nimport requests\nimport re\nimport warnings\nimport sys\nimport multitasking\n\nwarnings.simplefilter(\"once\")\nwarnings.warn(\"\"\"\n Auto-overriding of pandas_datareader's get_data_yahoo() is deprecated and will be removed in future versions.\n Use pdr_override() to explicitly override it.\"\"\",\n DeprecationWarning)\n\n_YAHOO_COOKIE_ = ''\n_YAHOO_CRUMB_ = ''\n_YAHOO_CHECKED_ = None\n_YAHOO_TTL_ = 180\n\n\ndef get_yahoo_crumb(force=False):\n global _YAHOO_COOKIE_, _YAHOO_CRUMB_, _YAHOO_CHECKED_, _YAHOO_TTL_\n\n # use same cookie for 5 min\n if _YAHOO_CHECKED_ and not force:\n now = datetime.datetime.now()\n delta = (now - _YAHOO_CHECKED_).total_seconds()\n if delta < _YAHOO_TTL_:\n return (_YAHOO_CRUMB_, _YAHOO_COOKIE_)\n\n res = requests.get('https://finance.yahoo.com/quote/SPY/history')\n _YAHOO_COOKIE_ = res.cookies['B']\n\n pattern = re.compile('.*\"CrumbStore\":\\{\"crumb\":\"(?P<crumb>[^\"]+)\"\\}')\n for line in res.text.splitlines():\n m = pattern.match(line)\n if m is not None:\n _YAHOO_CRUMB_ = m.groupdict()['crumb']\n\n # set global params\n _YAHOO_CHECKED_ = datetime.datetime.now()\n\n return (_YAHOO_CRUMB_, _YAHOO_COOKIE_)\n\n\ndef parse_ticker_csv(csv_str, auto_adjust):\n df = pd.read_csv(csv_str, index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n df.index = pd.to_datetime(df.index)\n df = df.apply(pd.to_numeric)\n df['Volume'] = df['Volume'].fillna(0).astype(int)\n\n if auto_adjust:\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[['Open', 'High', 'Low', 'Close', 'Volume']]\n\n return df.groupby(df.index).first()\n\n\n_DFS_ = {}\n_COMPLETED_ = 0\n_PROGRESS_BAR_ = False\n_FAILED_ = []\n\n\ndef make_chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef download(tickers, start=None, end=None, as_panel=True,\n group_by='column', auto_adjust=False, progress=True,\n actions=None, threads=1, *args, **kwargs):\n \"\"\"Download yahoo tickers\n :Parameters:\n\n tickers : str, list\n List of tickers to download\n start: str\n Download start date string (YYYY-MM-DD) or datetime. 
Default is 1950-01-01\n end: str\n Download end date string (YYYY-MM-DD) or datetime. Default is today\n as_panel : bool\n Return a multi-index DataFrame or Panel. Default is True (Panel), which is deprecated\n group_by : str\n Group by ticker or 'column' (default)\n auto_adjust: bool\n Adjust all OHLC automatically? Default is False\n actions: str\n Download dividend + stock splits data. Default is None (no actions)\n Options are 'inline' (returns history + actions) and 'only' (actions only)\n threads: int\n How may threads to use? Default is 1 thread\n \"\"\"\n\n global _DFS_, _COMPLETED_, _PROGRESS_BAR_, _FAILED_\n _COMPLETED_ = 0\n _FAILED_ = []\n\n # format start\n if start is None:\n start = int(time.mktime(time.strptime('1950-01-01', '%Y-%m-%d')))\n elif isinstance(start, datetime.datetime):\n start = int(time.mktime(start.timetuple()))\n else:\n start = int(time.mktime(time.strptime(str(start), '%Y-%m-%d')))\n\n # format end\n if end is None:\n end = int(time.mktime(datetime.datetime.now().timetuple()))\n elif isinstance(end, datetime.datetime):\n end = int(time.mktime(end.timetuple()))\n else:\n end = int(time.mktime(time.strptime(str(end), '%Y-%m-%d')))\n\n # create ticker list\n tickers = tickers if isinstance(tickers, list) else [tickers]\n tickers = [x.upper() for x in tickers]\n\n # initiate progress bar\n if progress:\n _PROGRESS_BAR_ = ProgressBar(len(tickers), 'downloaded')\n\n # download using single thread\n if threads is None or threads < 2:\n download_chunk(tickers, start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n # threaded download\n else:\n threads = min([threads, len(tickers)])\n\n # download in chunks\n chunks = 0\n for chunk in make_chunks(tickers, max([1, len(tickers) // threads])):\n chunks += len(chunk)\n download_thread(chunk, start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n if len(tickers[-chunks:]) > 0:\n download_thread(tickers[-chunks:], start=start, end=end,\n auto_adjust=auto_adjust, progress=progress,\n actions=actions, *args, **kwargs)\n\n # wait for completion\n while _COMPLETED_ < len(tickers):\n time.sleep(0.1)\n\n _PROGRESS_BAR_.completed()\n\n # create panel (derecated)\n if as_panel:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n data = pd.Panel(_DFS_)\n if group_by == 'column':\n data = data.swapaxes(0, 2)\n\n # create multiIndex df\n else:\n data = pd.concat(_DFS_.values(), axis=1, keys=_DFS_.keys())\n if group_by == 'column':\n data.columns = data.columns.swaplevel(0, 1)\n data.sort_index(level=0, axis=1, inplace=True)\n if auto_adjust:\n data = data[['Open', 'High', 'Low', 'Close', 'Volume']]\n else:\n data = data[['Open', 'High', 'Low',\n 'Close', 'Adj Close', 'Volume']]\n\n # return single df if only one ticker\n if len(tickers) == 1:\n data = _DFS_[tickers[0]]\n\n if len(_FAILED_) > 0:\n print(\"\\nThe following tickers failed to download:\\n\",\n ', '.join(_FAILED_))\n\n _DFS_ = {}\n return data\n\n\ndef download_one(ticker, start, end, interval, auto_adjust=None, actions=None):\n\n tried_once = False\n crumb, cookie = get_yahoo_crumb()\n\n url_str = \"https://query1.finance.yahoo.com/v7/finance/download/%s\"\n url_str += \"?period1=%s&period2=%s&interval=%s&events=%s&crumb=%s\"\n\n actions = None if '^' in ticker else actions\n\n if actions:\n url = url_str % (ticker, start, end, interval, 'div', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n # 
print(res)\n div = pd.DataFrame(columns=['action', 'value'])\n if \"error\" not in res:\n div = pd.read_csv(io.StringIO(res),\n index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n if isinstance(div, pd.DataFrame):\n div.index = pd.to_datetime(div.index)\n div[\"action\"] = \"DIVIDEND\"\n div = div.rename(columns={'Dividends': 'value'})\n div['value'] = div['value'].astype(float)\n\n # download Stock Splits data\n url = url_str % (ticker, start, end, interval, 'split', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n split = pd.DataFrame(columns=['action', 'value'])\n if \"error\" not in res:\n split = pd.read_csv(io.StringIO(res),\n index_col=0, error_bad_lines=False\n ).replace('null', np.nan).dropna()\n\n if isinstance(split, pd.DataFrame):\n split.index = pd.to_datetime(split.index)\n split[\"action\"] = \"SPLIT\"\n split = split.rename(columns={'Stock Splits': 'value'})\n if len(split.index) > 0:\n split['value'] = split.apply(\n lambda x: 1 / eval(x['value']), axis=1).astype(float)\n\n if actions == 'only':\n return pd.concat([div, split]).sort_index()\n\n # download history\n url = url_str % (ticker, start, end, interval, 'history', crumb)\n res = requests.get(url, cookies={'B': cookie}).text\n hist = pd.DataFrame(\n columns=['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume'])\n\n if \"error\" in res:\n return pd.DataFrame()\n\n hist = parse_ticker_csv(io.StringIO(res), auto_adjust)\n\n if len(hist.index) > 0:\n if actions is None:\n return hist\n\n hist['Dividends'] = div['value'] if len(div.index) > 0 else np.nan\n hist['Dividends'].fillna(0, inplace=True)\n hist['Stock Splits'] = split['value'] if len(\n split.index) > 0 else np.nan\n hist['Stock Splits'].fillna(1, inplace=True)\n\n return hist\n\n # empty len(hist.index) == 0\n if not tried_once:\n tried_once = True\n get_yahoo_crumb(force=True)\n return download_one(ticker, start, end, interval, auto_adjust, actions)\n\n\n@multitasking.task\ndef download_thread(tickers, start=None, end=None,\n auto_adjust=False, progress=True,\n actions=False, *args, **kwargs):\n download_chunk(tickers, start=None, end=None,\n auto_adjust=False, progress=progress,\n actions=False, *args, **kwargs)\n\n\ndef download_chunk(tickers, start=None, end=None,\n auto_adjust=False, progress=True,\n actions=False, *args, **kwargs):\n\n global _DFS_, _COMPLETED_, _PROGRESS_BAR_, _FAILED_\n\n interval = kwargs[\"interval\"] if \"interval\" in kwargs else \"1d\"\n\n # url template\n url_str = \"https://query1.finance.yahoo.com/v7/finance/download/%s\"\n url_str += \"?period1=%s&period2=%s&interval=%s&events=%s&crumb=%s\"\n\n # failed tickers collectors\n round1_failed_tickers = []\n\n # start downloading\n for ticker in tickers:\n\n # yahoo crumb/cookie\n crumb, cookie = get_yahoo_crumb()\n\n tried_once = False\n try:\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n round1_failed_tickers.append(ticker)\n except:\n # something went wrong...\n # try one more time using a new cookie/crumb\n if not tried_once:\n tried_once = True\n try:\n get_yahoo_crumb(force=True)\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n round1_failed_tickers.append(ticker)\n except:\n round1_failed_tickers.append(ticker)\n time.sleep(0.001)\n\n # try failed items again 
before giving up\n _COMPLETED_ += len(tickers) - len(round1_failed_tickers)\n\n if len(round1_failed_tickers) > 0:\n get_yahoo_crumb(force=True)\n for ticker in round1_failed_tickers:\n try:\n hist = download_one(ticker, start, end,\n interval, auto_adjust, actions)\n if isinstance(hist, pd.DataFrame):\n _DFS_[ticker] = hist\n if progress:\n _PROGRESS_BAR_.animate()\n else:\n _FAILED_.append(ticker)\n except:\n _FAILED_.append(ticker)\n pass\n time.sleep(0.000001)\n _COMPLETED_ += 1\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n sys.stdout.flush()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# make pandas datareader optional\n# otherwise can be called via fix_yahoo_finance.download(...)\ndef pdr_override():\n try:\n import pandas_datareader\n pandas_datareader.data.get_data_yahoo = download\n except:\n pass\n\npdr_override()\n", "path": "fix_yahoo_finance/__init__.py" } ]
diff --git a/fix_yahoo_finance/__init__.py b/fix_yahoo_finance/__init__.py
index b5a71a269..0511a8ae3 100644
--- a/fix_yahoo_finance/__init__.py
+++ b/fix_yahoo_finance/__init__.py
@@ -221,6 +221,7 @@ def download(tickers, start=None, end=None, as_panel=True,
         print("\nThe following tickers failed to download:\n",
               ', '.join(_FAILED_))
 
+    _DFS_ = {}
     return data
 
 
ibis-project__ibis-1760
[ { "content": "import warnings\nfrom copy import copy\nfrom datetime import date, datetime\nfrom io import StringIO\n\nimport ibis\nimport ibis.common as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.rules as rlz\nimport ibis.expr.types as ir\nimport ibis.util as util\nfrom ibis.impala import compiler as impala_compiler\nfrom ibis.mapd.identifiers import quote_identifier\n\n_sql_type_names = {\n 'boolean': 'boolean',\n 'date': 'date',\n 'decimal': 'decimal',\n 'double': 'double',\n 'float32': 'float',\n 'float64': 'double',\n 'int8': 'smallint',\n 'int16': 'smallint',\n 'int32': 'int',\n 'int64': 'bigint',\n 'linestring': 'linestring',\n 'multipolygon': 'multipolygon',\n 'point': 'point',\n 'polygon': 'polygon',\n 'string': 'text',\n 'time': 'time',\n 'timestamp': 'timestamp',\n}\n\n\ndef _is_floating(*args):\n for arg in args:\n if isinstance(arg, ir.FloatingColumn):\n return True\n return False\n\n\ndef _type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return 'decimal({}, {})'.format(tval.precision, tval.scale)\n else:\n return _sql_type_names[tval.name.lower()]\n\n\ndef _cast(translator, expr):\n from ibis.mapd.client import MapDDataType\n\n op = expr.op()\n arg, target = op.args\n arg_ = translator.translate(arg)\n type_ = str(MapDDataType.from_ibis(target, nullable=False))\n\n return 'CAST({0!s} AS {1!s})'.format(arg_, type_)\n\n\ndef _all(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return (1 - arg).sum() == 0\n\n\ndef _any(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return arg.sum() >= 0\n\n\ndef _not_any(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return arg.sum() == 0\n\n\ndef _not_all(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return (1 - arg).sum() != 0\n\n\ndef _parenthesize(translator, expr):\n op = expr.op()\n op_klass = type(op)\n\n # function calls don't need parens\n what_ = translator.translate(expr)\n if (op_klass in _binary_infix_ops) or (op_klass in _unary_ops):\n return '({0!s})'.format(what_)\n else:\n return what_\n\n\ndef fixed_arity(func_name, arity):\n def formatter(translator, expr):\n op = expr.op()\n arg_count = len(op.args)\n if arity != arg_count:\n msg = 'Incorrect number of args {0} instead of {1}'\n raise com.UnsupportedOperationError(msg.format(arg_count, arity))\n return _call(translator, func_name, *op.args)\n\n formatter.__name__ = func_name\n return formatter\n\n\ndef unary(func_name):\n return fixed_arity(func_name, 1)\n\n\ndef _reduction_format(\n translator,\n func_name,\n sql_func_name=None,\n sql_signature='{}({})',\n arg=None,\n args=None,\n where=None,\n):\n if not sql_func_name:\n sql_func_name = func_name\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return sql_signature.format(\n sql_func_name, ', '.join(map(translator.translate, [arg] + list(args)))\n )\n\n\ndef _reduction(func_name, sql_func_name=None, sql_signature='{}({})'):\n def formatter(translator, expr):\n op = expr.op()\n\n # HACK: support trailing arguments\n where = op.where\n args = [arg for arg in op.args if arg is not where]\n\n return _reduction_format(\n translator,\n func_name,\n sql_func_name,\n sql_signature,\n args[0],\n args[1:],\n where,\n )\n\n formatter.__name__ = func_name\n return formatter\n\n\ndef 
_variance_like(func):\n variants = {'sample': '{}_SAMP'.format(func), 'pop': '{}_POP'.format(func)}\n\n def formatter(translator, expr):\n arg, how, where = expr.op().args\n\n return _reduction_format(\n translator, variants[how].upper(), None, '{}({})', arg, [], where\n )\n\n formatter.__name__ = func\n return formatter\n\n\ndef unary_prefix_op(prefix_op):\n def formatter(translator, expr):\n op = expr.op()\n arg = _parenthesize(translator, op.args[0])\n\n return '{0!s} {1!s}'.format(prefix_op.upper(), arg)\n\n formatter.__name__ = prefix_op\n return formatter\n\n\ndef binary_infix_op(infix_sym):\n def formatter(translator, expr):\n op = expr.op()\n\n left, right = op.args[0], op.args[1]\n left_ = _parenthesize(translator, left)\n right_ = _parenthesize(translator, right)\n\n return '{0!s} {1!s} {2!s}'.format(left_, infix_sym, right_)\n\n return formatter\n\n\ndef _call(translator, func, *args):\n args_ = ', '.join(map(translator.translate, args))\n return '{0!s}({1!s})'.format(func, args_)\n\n\ndef _extract_field(sql_attr):\n def extract_field_formatter(translator, expr):\n op = expr.op()\n arg = translator.translate(op.args[0])\n return 'EXTRACT({} FROM {})'.format(sql_attr, arg)\n\n return extract_field_formatter\n\n\n# STATS\n\n\ndef _corr(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n\n x, y, how, where = args\n\n # compile the argument\n compiled_x = translator.translate(x)\n compiled_y = translator.translate(y)\n\n return 'CORR({}, {})'.format(compiled_x, compiled_y)\n\n\ndef _cov(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n\n x, y, how, where = args\n\n # compile the argument\n compiled_x = translator.translate(x)\n compiled_y = translator.translate(y)\n\n return 'COVAR_{}({}, {})'.format(how[:4].upper(), compiled_x, compiled_y)\n\n\n# STRING\n\n\ndef _length(func_name='length', sql_func_name='CHAR_LENGTH'):\n def __lenght(translator, expr):\n # pull out the arguments to the expression\n arg = expr.op().args[0]\n # compile the argument\n compiled_arg = translator.translate(arg)\n return '{}({})'.format(sql_func_name, compiled_arg)\n\n __lenght.__name__ = func_name\n return __lenght\n\n\ndef _contains(translator, expr):\n arg, pattern = expr.op().args[:2]\n\n pattern_ = '%{}%'.format(translator.translate(pattern)[1:-1])\n\n return _parenthesize(translator, arg.like(pattern_).ifelse(1, -1))\n\n\n# GENERIC\n\n\ndef _value_list(translator, expr):\n op = expr.op()\n values_ = map(translator.translate, op.values)\n return '({0})'.format(', '.join(values_))\n\n\ndef _interval_format(translator, expr):\n dtype = expr.type()\n if dtype.unit in {'ms', 'us', 'ns'}:\n raise com.UnsupportedOperationError(\n \"MapD doesn't support subsecond interval resolutions\"\n )\n\n return '{1}, (sign){0}'.format(expr.op().value, dtype.resolution.upper())\n\n\ndef _interval_from_integer(translator, expr):\n op = expr.op()\n arg, unit = op.args\n\n dtype = expr.type()\n if dtype.unit in {'ms', 'us', 'ns'}:\n raise com.UnsupportedOperationError(\n \"MapD doesn't support subsecond interval resolutions\"\n )\n\n arg_ = translator.translate(arg)\n return '{}, (sign){}'.format(dtype.resolution.upper(), arg_)\n\n\ndef _timestamp_op(func, op_sign='+'):\n def _formatter(translator, expr):\n op = expr.op()\n left, right = op.args\n\n formatted_left = translator.translate(left)\n formatted_right = translator.translate(right)\n\n if isinstance(left, ir.DateValue):\n formatted_left = 'CAST({} as 
timestamp)'.format(formatted_left)\n\n return '{}({}, {})'.format(\n func, formatted_right.replace('(sign)', op_sign), formatted_left\n )\n\n return _formatter\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type))\n for x in expr.op().value\n ]\n\n return '({})'.format(', '.join(formatted))\n\n\ndef _cross_join(translator, expr):\n args = expr.op().args\n left, right = args[:2]\n return translator.translate(left.join(right, ibis.literal(True)))\n\n\ndef _format_point_value(value):\n return ' '.join(str(v) for v in value)\n\n\ndef _format_linestring_value(value):\n return ', '.join(\n '{}'.format(_format_point_value(point)) for point in value\n )\n\n\ndef _format_polygon_value(value):\n return ', '.join(\n '({})'.format(_format_linestring_value(line)) for line in value\n )\n\n\ndef _format_multipolygon_value(value):\n return ', '.join(\n '({})'.format(_format_polygon_value(polygon)) for polygon in value\n )\n\n\ndef _format_geo_metadata(op, value):\n value = copy(value)\n srid = op.args[1].srid\n geotype = op.args[1].geotype\n\n if geotype is None or geotype not in ('geometry', 'geography'):\n return \"'{}'\".format(value)\n\n if geotype == 'geography':\n geofunc = 'ST_GeogFromText'\n else:\n geofunc = 'ST_GeomFromText'\n\n return \"{}('{}'{})\".format(\n geofunc, value, ', {}'.format(srid) if srid else ''\n )\n\n\ndef literal(translator, expr):\n op = expr.op()\n value = op.value\n\n # geo spatial data type\n if isinstance(expr, ir.PointScalar):\n result = \"POINT({0})\".format(_format_point_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.LineStringScalar):\n result = \"LINESTRING({0})\".format(_format_linestring_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.PolygonScalar):\n result = \"POLYGON({0!s})\".format(_format_polygon_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.MultiPolygonScalar):\n result = \"MULTIPOLYGON({0})\".format(_format_multipolygon_value(value))\n return _format_geo_metadata(op, result)\n # primitive data type\n elif isinstance(expr, ir.BooleanValue):\n return '1' if value else '0'\n elif isinstance(expr, ir.StringValue):\n return \"'{0!s}'\".format(value.replace(\"'\", \"\\\\'\"))\n elif isinstance(expr, ir.NumericValue):\n return repr(value)\n elif isinstance(expr, ir.SetScalar):\n return _set_literal_format(translator, expr)\n elif isinstance(expr, ir.IntervalValue):\n return _interval_format(translator, expr)\n elif isinstance(expr, ir.TimestampValue):\n if isinstance(value, datetime):\n if value.microsecond != 0:\n msg = 'Unsupported subsecond accuracy {}'\n warnings.warn(msg.format(value))\n value = value.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(value, str):\n # check if the datetime format is a valid format (\n # '%Y-%m-%d %H:%M:%S' or '%Y-%m-%d'). if format is '%Y-%m-%d' it\n # is converted to '%Y-%m-%d 00:00:00'\n msg = (\n \"Literal datetime string should use '%Y-%m-%d %H:%M:%S' \"\n \"format. 
When '%Y-%m-%d' format is used, datetime will be \"\n \"converted automatically to '%Y-%m-%d 00:00:00'\"\n )\n\n try:\n dt_value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n try:\n dt_value = datetime.strptime(value, '%Y-%m-%d')\n warnings.warn(msg)\n except ValueError:\n raise Exception(msg)\n\n value = dt_value.strftime('%Y-%m-%d %H:%M:%S')\n\n return \"'{0!s}'\".format(value)\n elif isinstance(expr, ir.DateValue):\n if isinstance(value, date):\n value = value.strftime('%Y-%m-%d')\n return \"toDate('{0!s}')\".format(value)\n # array data type\n elif isinstance(expr, ir.ArrayValue):\n return str(list(value))\n else:\n raise NotImplementedError(type(expr))\n\n\ndef _where(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n condition, expr1, expr2 = args\n expr = condition.ifelse(expr1, expr2)\n return translator.translate(expr)\n\n\ndef raise_unsupported_expr_error(expr):\n msg = \"MapD backend doesn't support {} operation!\"\n op = expr.op()\n raise com.UnsupportedOperationError(msg.format(type(op)))\n\n\ndef raise_unsupported_op_error(translator, expr, *args):\n msg = \"MapD backend doesn't support {} operation!\"\n op = expr.op()\n raise com.UnsupportedOperationError(msg.format(type(op)))\n\n\n# translator\ndef _name_expr(formatted_expr, quoted_name):\n return '{} AS {}'.format(formatted_expr, quote_identifier(quoted_name))\n\n\nclass CaseFormatter:\n def __init__(self, translator, base, cases, results, default):\n self.translator = translator\n self.base = base\n self.cases = cases\n self.results = results\n self.default = default\n\n # HACK\n self.indent = 2\n self.multiline = len(cases) > 1\n self.buf = StringIO()\n\n def _trans(self, expr):\n return self.translator.translate(expr)\n\n def get_result(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.buf.seek(0)\n\n self.buf.write('CASE')\n if self.base is not None:\n base_str = self._trans(self.base)\n self.buf.write(' {0}'.format(base_str))\n\n for case, result in zip(self.cases, self.results):\n self._next_case()\n case_str = self._trans(case)\n result_str = self._trans(result)\n self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))\n\n if self.default is not None:\n self._next_case()\n default_str = self._trans(self.default)\n self.buf.write('ELSE {0}'.format(default_str))\n\n if self.multiline:\n self.buf.write('\\nEND')\n else:\n self.buf.write(' END')\n\n return self.buf.getvalue()\n\n def _next_case(self):\n if self.multiline:\n self.buf.write('\\n{0}'.format(' ' * self.indent))\n else:\n self.buf.write(' ')\n\n\ndef _table_array_view(translator, expr):\n ctx = translator.context\n table = expr.op().table\n query = ctx.get_compiled_expr(table)\n return '(\\n{0}\\n)'.format(util.indent(query, ctx.indent))\n\n\ndef _timestamp_truncate(translator, expr):\n op = expr.op()\n arg, unit = op.args\n\n unit_ = dt.Interval(unit=unit).resolution.upper()\n\n # return _call_date_trunc(translator, converter, arg)\n arg_ = translator.translate(arg)\n return 'DATE_TRUNC({0!s}, {1!s})'.format(unit_, arg_)\n\n\ndef _table_column(translator, expr):\n op = expr.op()\n field_name = op.name\n\n quoted_name = quote_identifier(field_name, force=True)\n\n table = op.table\n ctx = translator.context\n\n # If the column does not originate from the table set in the current SELECT\n # context, we should format as a subquery\n if translator.permit_subquery and ctx.is_foreign_expr(table):\n proj_expr = table.projection([field_name]).to_array()\n return _table_array_view(translator, 
proj_expr)\n\n if ctx.need_aliases():\n alias = ctx.get_ref(table)\n if alias is not None:\n quoted_name = '{}.{}'.format(alias, quoted_name)\n\n return quoted_name\n\n\n# AGGREGATION\n\napprox_count_distinct = _reduction(\n 'approx_nunique',\n sql_func_name='approx_count_distinct',\n sql_signature='{}({}, 100)',\n)\n\ncount_distinct = _reduction('count')\ncount = _reduction('count')\n\n\ndef _arbitrary(translator, expr):\n arg, how, where = expr.op().args\n\n if how not in (None, 'last'):\n raise com.UnsupportedOperationError(\n '{!r} value not supported for arbitrary in MapD'.format(how)\n )\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return 'SAMPLE({})'.format(translator.translate(arg))\n\n\n# MATH\n\n\nclass NumericTruncate(ops.NumericBinaryOp):\n \"\"\"Truncates x to y decimal places\"\"\"\n\n output_type = rlz.shape_like('left', ops.dt.float)\n\n\n# GEOMETRIC\n\n\nclass Conv_4326_900913_X(ops.UnaryOp):\n \"\"\"\n Converts WGS-84 latitude to WGS-84 Web Mercator x coordinate.\n \"\"\"\n\n output_type = rlz.shape_like('arg', ops.dt.float)\n\n\nclass Conv_4326_900913_Y(ops.UnaryOp):\n \"\"\"\n Converts WGS-84 longitude to WGS-84 Web Mercator y coordinate.\n\n \"\"\"\n\n output_type = rlz.shape_like('arg', ops.dt.float)\n\n\n# String\n\n\nclass ByteLength(ops.StringLength):\n \"\"\"Returns the length of a string in bytes length\"\"\"\n\n\n# https://www.mapd.com/docs/latest/mapd-core-guide/dml/\n_binary_infix_ops = {\n # math\n ops.Power: fixed_arity('power', 2),\n ops.NotEquals: impala_compiler._binary_infix_op('<>'),\n}\n\n_unary_ops = {}\n\n# COMPARISON\n_comparison_ops = {}\n\n\n# MATH\n_math_ops = {\n ops.Degrees: unary('degrees'), # MapD function\n ops.Modulus: fixed_arity('mod', 2),\n ops.Pi: fixed_arity('pi', 0),\n ops.Radians: unary('radians'),\n NumericTruncate: fixed_arity('truncate', 2),\n}\n\n# STATS\n_stats_ops = {\n ops.Correlation: _corr,\n ops.StandardDev: _variance_like('stddev'),\n ops.Variance: _variance_like('var'),\n ops.Covariance: _cov,\n}\n\n# TRIGONOMETRIC\n_trigonometric_ops = {\n ops.Acos: unary('acos'),\n ops.Asin: unary('asin'),\n ops.Atan: unary('atan'),\n ops.Atan2: fixed_arity('atan2', 2),\n ops.Cos: unary('cos'),\n ops.Cot: unary('cot'),\n ops.Sin: unary('sin'),\n ops.Tan: unary('tan'),\n}\n\n# GEOMETRIC\n_geometric_ops = {\n Conv_4326_900913_X: unary('conv_4326_900913_x'),\n Conv_4326_900913_Y: unary('conv_4326_900913_y'),\n}\n\n# GEO SPATIAL\n_geospatial_ops = {\n ops.GeoArea: unary('ST_AREA'),\n ops.GeoContains: fixed_arity('ST_CONTAINS', 2),\n ops.GeoDistance: fixed_arity('ST_DISTANCE', 2),\n ops.GeoLength: unary('ST_LENGTH'),\n ops.GeoPerimeter: unary('ST_PERIMETER'),\n ops.GeoMaxDistance: fixed_arity('ST_MAXDISTANCE', 2),\n ops.GeoX: unary('ST_X'),\n ops.GeoY: unary('ST_Y'),\n ops.GeoXMin: unary('ST_XMIN'),\n ops.GeoXMax: unary('ST_XMAX'),\n ops.GeoYMin: unary('ST_YMIN'),\n ops.GeoYMax: unary('ST_YMAX'),\n ops.GeoStartPoint: unary('ST_STARTPOINT'),\n ops.GeoEndPoint: unary('ST_ENDPOINT'),\n ops.GeoPointN: fixed_arity('ST_POINTN', 2),\n ops.GeoNPoints: unary('ST_NPOINTS'),\n ops.GeoNRings: unary('ST_NRINGS'),\n ops.GeoSRID: unary('ST_SRID'),\n}\n\n# STRING\n_string_ops = {\n ops.StringLength: _length(),\n ByteLength: _length('byte_length', 'LENGTH'),\n ops.StringSQLILike: binary_infix_op('ilike'),\n ops.StringFind: _contains,\n}\n\n# DATE\n_date_ops = {\n ops.DateTruncate: _timestamp_truncate,\n ops.TimestampTruncate: _timestamp_truncate,\n # DIRECT EXTRACT OPERATIONS\n ops.ExtractYear: _extract_field('YEAR'),\n ops.ExtractMonth: 
_extract_field('MONTH'),\n ops.ExtractDay: _extract_field('DAY'),\n ops.ExtractHour: _extract_field('HOUR'),\n ops.ExtractMinute: _extract_field('MINUTE'),\n ops.ExtractSecond: _extract_field('SECOND'),\n ops.IntervalAdd: _interval_from_integer,\n ops.IntervalFromInteger: _interval_from_integer,\n ops.DateAdd: _timestamp_op('TIMESTAMPADD'),\n ops.DateSub: _timestamp_op('TIMESTAMPADD', '-'),\n ops.TimestampAdd: _timestamp_op('TIMESTAMPADD'),\n ops.TimestampSub: _timestamp_op('TIMESTAMPADD', '-'),\n}\n\n# AGGREGATION/REDUCTION\n_agg_ops = {\n ops.HLLCardinality: approx_count_distinct,\n ops.DistinctColumn: unary_prefix_op('distinct'),\n ops.Arbitrary: _arbitrary,\n}\n\n# GENERAL\n_general_ops = {\n ops.Literal: literal,\n ops.ValueList: _value_list,\n ops.Cast: _cast,\n ops.Where: _where,\n ops.TableColumn: _table_column,\n ops.CrossJoin: _cross_join,\n}\n\n# UNSUPPORTED OPERATIONS\n_unsupported_ops = [\n # generic/aggregation\n ops.CMSMedian,\n ops.WindowOp,\n ops.DecimalPrecision,\n ops.DecimalScale,\n ops.BaseConvert,\n ops.CumulativeSum,\n ops.CumulativeMin,\n ops.CumulativeMax,\n ops.CumulativeMean,\n ops.CumulativeAny,\n ops.CumulativeAll,\n ops.IdenticalTo,\n ops.RowNumber,\n ops.DenseRank,\n ops.MinRank,\n ops.PercentRank,\n ops.FirstValue,\n ops.LastValue,\n ops.NthValue,\n ops.Lag,\n ops.Lead,\n ops.NTile,\n ops.GroupConcat,\n ops.NullIf,\n ops.NullIfZero,\n ops.NullLiteral,\n ops.IsInf,\n ops.IsNan,\n ops.IfNull,\n # string\n ops.Lowercase,\n ops.Uppercase,\n ops.FindInSet,\n ops.StringReplace,\n ops.StringJoin,\n ops.StringSplit,\n ops.Translate,\n ops.StringAscii,\n ops.LPad,\n ops.RPad,\n ops.Strip,\n ops.RStrip,\n ops.LStrip,\n ops.Capitalize,\n ops.Substring,\n ops.StrRight,\n ops.Repeat,\n ops.Reverse,\n ops.RegexExtract,\n ops.RegexReplace,\n ops.ParseURL,\n # Numeric\n ops.Least,\n ops.Greatest,\n ops.Log2,\n ops.Log,\n ops.Round,\n # date/time/timestamp\n ops.TimestampFromUNIX,\n ops.Date,\n ops.TimeTruncate,\n ops.TimestampDiff,\n ops.DayOfWeekIndex,\n ops.DayOfWeekName,\n # table\n ops.Union,\n]\n\n_unsupported_ops = {k: raise_unsupported_op_error for k in _unsupported_ops}\n\n# registry\n_operation_registry = impala_compiler._operation_registry.copy()\n\n_operation_registry.update(_general_ops)\n_operation_registry.update(_binary_infix_ops)\n_operation_registry.update(_unary_ops)\n_operation_registry.update(_comparison_ops)\n_operation_registry.update(_math_ops)\n_operation_registry.update(_stats_ops)\n_operation_registry.update(_trigonometric_ops)\n_operation_registry.update(_geometric_ops)\n_operation_registry.update(_string_ops)\n_operation_registry.update(_date_ops)\n_operation_registry.update(_agg_ops)\n_operation_registry.update(_geospatial_ops)\n# the last update should be with unsupported ops\n_operation_registry.update(_unsupported_ops)\n", "path": "ibis/mapd/operations.py" } ]
[ { "content": "import warnings\nfrom copy import copy\nfrom datetime import date, datetime\nfrom io import StringIO\n\nimport ibis\nimport ibis.common as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.rules as rlz\nimport ibis.expr.types as ir\nimport ibis.util as util\nfrom ibis.impala import compiler as impala_compiler\nfrom ibis.mapd.identifiers import quote_identifier\n\n_sql_type_names = {\n 'boolean': 'boolean',\n 'date': 'date',\n 'decimal': 'decimal',\n 'double': 'double',\n 'float32': 'float',\n 'float64': 'double',\n 'int8': 'smallint',\n 'int16': 'smallint',\n 'int32': 'int',\n 'int64': 'bigint',\n 'linestring': 'linestring',\n 'multipolygon': 'multipolygon',\n 'point': 'point',\n 'polygon': 'polygon',\n 'string': 'text',\n 'time': 'time',\n 'timestamp': 'timestamp',\n}\n\n\ndef _is_floating(*args):\n for arg in args:\n if isinstance(arg, ir.FloatingColumn):\n return True\n return False\n\n\ndef _type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return 'decimal({}, {})'.format(tval.precision, tval.scale)\n else:\n return _sql_type_names[tval.name.lower()]\n\n\ndef _cast(translator, expr):\n from ibis.mapd.client import MapDDataType\n\n op = expr.op()\n arg, target = op.args\n arg_ = translator.translate(arg)\n type_ = str(MapDDataType.from_ibis(target, nullable=False))\n\n return 'CAST({0!s} AS {1!s})'.format(arg_, type_)\n\n\ndef _all(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return (1 - arg).sum() == 0\n\n\ndef _any(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return arg.sum() >= 0\n\n\ndef _not_any(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return arg.sum() == 0\n\n\ndef _not_all(expr):\n op = expr.op()\n arg = op.args[0]\n\n if isinstance(arg, ir.BooleanValue):\n arg = arg.ifelse(1, 0)\n\n return (1 - arg).sum() != 0\n\n\ndef _parenthesize(translator, expr):\n op = expr.op()\n op_klass = type(op)\n\n # function calls don't need parens\n what_ = translator.translate(expr)\n if (op_klass in _binary_infix_ops) or (op_klass in _unary_ops):\n return '({0!s})'.format(what_)\n else:\n return what_\n\n\ndef fixed_arity(func_name, arity):\n def formatter(translator, expr):\n op = expr.op()\n arg_count = len(op.args)\n if arity != arg_count:\n msg = 'Incorrect number of args {0} instead of {1}'\n raise com.UnsupportedOperationError(msg.format(arg_count, arity))\n return _call(translator, func_name, *op.args)\n\n formatter.__name__ = func_name\n return formatter\n\n\ndef unary(func_name):\n return fixed_arity(func_name, 1)\n\n\ndef _reduction_format(\n translator,\n func_name,\n sql_func_name=None,\n sql_signature='{}({})',\n arg=None,\n args=None,\n where=None,\n):\n if not sql_func_name:\n sql_func_name = func_name\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return sql_signature.format(\n sql_func_name, ', '.join(map(translator.translate, [arg] + list(args)))\n )\n\n\ndef _reduction(func_name, sql_func_name=None, sql_signature='{}({})'):\n def formatter(translator, expr):\n op = expr.op()\n\n # HACK: support trailing arguments\n where = op.where\n args = [arg for arg in op.args if arg is not where]\n\n return _reduction_format(\n translator,\n func_name,\n sql_func_name,\n sql_signature,\n args[0],\n args[1:],\n where,\n )\n\n formatter.__name__ = func_name\n return formatter\n\n\ndef 
_variance_like(func):\n variants = {'sample': '{}_SAMP'.format(func), 'pop': '{}_POP'.format(func)}\n\n def formatter(translator, expr):\n arg, how, where = expr.op().args\n\n return _reduction_format(\n translator, variants[how].upper(), None, '{}({})', arg, [], where\n )\n\n formatter.__name__ = func\n return formatter\n\n\ndef unary_prefix_op(prefix_op):\n def formatter(translator, expr):\n op = expr.op()\n arg = _parenthesize(translator, op.args[0])\n\n return '{0!s} {1!s}'.format(prefix_op.upper(), arg)\n\n formatter.__name__ = prefix_op\n return formatter\n\n\ndef binary_infix_op(infix_sym):\n def formatter(translator, expr):\n op = expr.op()\n\n left, right = op.args[0], op.args[1]\n left_ = _parenthesize(translator, left)\n right_ = _parenthesize(translator, right)\n\n return '{0!s} {1!s} {2!s}'.format(left_, infix_sym, right_)\n\n return formatter\n\n\ndef _call(translator, func, *args):\n args_ = ', '.join(map(translator.translate, args))\n return '{0!s}({1!s})'.format(func, args_)\n\n\ndef _extract_field(sql_attr):\n def extract_field_formatter(translator, expr):\n op = expr.op()\n arg = translator.translate(op.args[0])\n return 'EXTRACT({} FROM {})'.format(sql_attr, arg)\n\n return extract_field_formatter\n\n\n# STATS\n\n\ndef _corr(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n\n x, y, how, where = args\n\n # compile the argument\n compiled_x = translator.translate(x)\n compiled_y = translator.translate(y)\n\n return 'CORR({}, {})'.format(compiled_x, compiled_y)\n\n\ndef _cov(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n\n x, y, how, where = args\n\n # compile the argument\n compiled_x = translator.translate(x)\n compiled_y = translator.translate(y)\n\n return 'COVAR_{}({}, {})'.format(how[:4].upper(), compiled_x, compiled_y)\n\n\n# STRING\n\n\ndef _length(func_name='length', sql_func_name='CHAR_LENGTH'):\n def __lenght(translator, expr):\n # pull out the arguments to the expression\n arg = expr.op().args[0]\n # compile the argument\n compiled_arg = translator.translate(arg)\n return '{}({})'.format(sql_func_name, compiled_arg)\n\n __lenght.__name__ = func_name\n return __lenght\n\n\ndef _contains(translator, expr):\n arg, pattern = expr.op().args[:2]\n\n pattern_ = '%{}%'.format(translator.translate(pattern)[1:-1])\n\n return _parenthesize(translator, arg.like(pattern_).ifelse(1, -1))\n\n\n# GENERIC\n\n\ndef _value_list(translator, expr):\n op = expr.op()\n values_ = map(translator.translate, op.values)\n return '({0})'.format(', '.join(values_))\n\n\ndef _interval_format(translator, expr):\n dtype = expr.type()\n if dtype.unit in {'ms', 'us', 'ns'}:\n raise com.UnsupportedOperationError(\n \"MapD doesn't support subsecond interval resolutions\"\n )\n\n return '{1}, (sign){0}'.format(expr.op().value, dtype.resolution.upper())\n\n\ndef _interval_from_integer(translator, expr):\n op = expr.op()\n arg, unit = op.args\n\n dtype = expr.type()\n if dtype.unit in {'ms', 'us', 'ns'}:\n raise com.UnsupportedOperationError(\n \"MapD doesn't support subsecond interval resolutions\"\n )\n\n arg_ = translator.translate(arg)\n return '{}, (sign){}'.format(dtype.resolution.upper(), arg_)\n\n\ndef _timestamp_op(func, op_sign='+'):\n def _formatter(translator, expr):\n op = expr.op()\n left, right = op.args\n\n formatted_left = translator.translate(left)\n formatted_right = translator.translate(right)\n\n if isinstance(left, ir.DateValue):\n formatted_left = 'CAST({} as 
timestamp)'.format(formatted_left)\n\n return '{}({}, {})'.format(\n func, formatted_right.replace('(sign)', op_sign), formatted_left\n )\n\n return _formatter\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type))\n for x in expr.op().value\n ]\n\n return '({})'.format(', '.join(formatted))\n\n\ndef _cross_join(translator, expr):\n args = expr.op().args\n left, right = args[:2]\n return translator.translate(left.join(right, ibis.literal(True)))\n\n\ndef _format_point_value(value):\n return ' '.join(str(v) for v in value)\n\n\ndef _format_linestring_value(value):\n return ', '.join(\n '{}'.format(_format_point_value(point)) for point in value\n )\n\n\ndef _format_polygon_value(value):\n return ', '.join(\n '({})'.format(_format_linestring_value(line)) for line in value\n )\n\n\ndef _format_multipolygon_value(value):\n return ', '.join(\n '({})'.format(_format_polygon_value(polygon)) for polygon in value\n )\n\n\ndef _format_geo_metadata(op, value):\n value = copy(value)\n srid = op.args[1].srid\n geotype = op.args[1].geotype\n\n if geotype is None or geotype not in ('geometry', 'geography'):\n return \"'{}'\".format(value)\n\n if geotype == 'geography':\n geofunc = 'ST_GeogFromText'\n else:\n geofunc = 'ST_GeomFromText'\n\n return \"{}('{}'{})\".format(\n geofunc, value, ', {}'.format(srid) if srid else ''\n )\n\n\ndef literal(translator, expr):\n op = expr.op()\n value = op.value\n\n # geo spatial data type\n if isinstance(expr, ir.PointScalar):\n result = \"POINT({0})\".format(_format_point_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.LineStringScalar):\n result = \"LINESTRING({0})\".format(_format_linestring_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.PolygonScalar):\n result = \"POLYGON({0!s})\".format(_format_polygon_value(value))\n return _format_geo_metadata(op, result)\n elif isinstance(expr, ir.MultiPolygonScalar):\n result = \"MULTIPOLYGON({0})\".format(_format_multipolygon_value(value))\n return _format_geo_metadata(op, result)\n # primitive data type\n elif isinstance(expr, ir.BooleanValue):\n return '1' if value else '0'\n elif isinstance(expr, ir.StringValue):\n return \"'{0!s}'\".format(value.replace(\"'\", \"\\\\'\"))\n elif isinstance(expr, ir.NumericValue):\n return repr(value)\n elif isinstance(expr, ir.SetScalar):\n return _set_literal_format(translator, expr)\n elif isinstance(expr, ir.IntervalValue):\n return _interval_format(translator, expr)\n elif isinstance(expr, ir.TimestampValue):\n if isinstance(value, datetime):\n if value.microsecond != 0:\n msg = 'Unsupported subsecond accuracy {}'\n warnings.warn(msg.format(value))\n value = value.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(value, str):\n # check if the datetime format is a valid format (\n # '%Y-%m-%d %H:%M:%S' or '%Y-%m-%d'). if format is '%Y-%m-%d' it\n # is converted to '%Y-%m-%d 00:00:00'\n msg = (\n \"Literal datetime string should use '%Y-%m-%d %H:%M:%S' \"\n \"format. 
When '%Y-%m-%d' format is used, datetime will be \"\n \"converted automatically to '%Y-%m-%d 00:00:00'\"\n )\n\n try:\n dt_value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n try:\n dt_value = datetime.strptime(value, '%Y-%m-%d')\n warnings.warn(msg)\n except ValueError:\n raise Exception(msg)\n\n value = dt_value.strftime('%Y-%m-%d %H:%M:%S')\n\n return \"'{0!s}'\".format(value)\n elif isinstance(expr, ir.DateValue):\n if isinstance(value, date):\n value = value.strftime('%Y-%m-%d')\n return \"toDate('{0!s}')\".format(value)\n # array data type\n elif isinstance(expr, ir.ArrayValue):\n return str(list(value))\n else:\n raise NotImplementedError(type(expr))\n\n\ndef _where(translator, expr):\n # pull out the arguments to the expression\n args = expr.op().args\n condition, expr1, expr2 = args\n expr = condition.ifelse(expr1, expr2)\n return translator.translate(expr)\n\n\ndef raise_unsupported_expr_error(expr):\n msg = \"MapD backend doesn't support {} operation!\"\n op = expr.op()\n raise com.UnsupportedOperationError(msg.format(type(op)))\n\n\ndef raise_unsupported_op_error(translator, expr, *args):\n msg = \"MapD backend doesn't support {} operation!\"\n op = expr.op()\n raise com.UnsupportedOperationError(msg.format(type(op)))\n\n\n# translator\ndef _name_expr(formatted_expr, quoted_name):\n return '{} AS {}'.format(formatted_expr, quote_identifier(quoted_name))\n\n\nclass CaseFormatter:\n def __init__(self, translator, base, cases, results, default):\n self.translator = translator\n self.base = base\n self.cases = cases\n self.results = results\n self.default = default\n\n # HACK\n self.indent = 2\n self.multiline = len(cases) > 1\n self.buf = StringIO()\n\n def _trans(self, expr):\n return self.translator.translate(expr)\n\n def get_result(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.buf.seek(0)\n\n self.buf.write('CASE')\n if self.base is not None:\n base_str = self._trans(self.base)\n self.buf.write(' {0}'.format(base_str))\n\n for case, result in zip(self.cases, self.results):\n self._next_case()\n case_str = self._trans(case)\n result_str = self._trans(result)\n self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))\n\n if self.default is not None:\n self._next_case()\n default_str = self._trans(self.default)\n self.buf.write('ELSE {0}'.format(default_str))\n\n if self.multiline:\n self.buf.write('\\nEND')\n else:\n self.buf.write(' END')\n\n return self.buf.getvalue()\n\n def _next_case(self):\n if self.multiline:\n self.buf.write('\\n{0}'.format(' ' * self.indent))\n else:\n self.buf.write(' ')\n\n\ndef _table_array_view(translator, expr):\n ctx = translator.context\n table = expr.op().table\n query = ctx.get_compiled_expr(table)\n return '(\\n{0}\\n)'.format(util.indent(query, ctx.indent))\n\n\ndef _timestamp_truncate(translator, expr):\n op = expr.op()\n arg, unit = op.args\n\n unit_ = dt.Interval(unit=unit).resolution.upper()\n\n # return _call_date_trunc(translator, converter, arg)\n arg_ = translator.translate(arg)\n return 'DATE_TRUNC({0!s}, {1!s})'.format(unit_, arg_)\n\n\ndef _table_column(translator, expr):\n op = expr.op()\n field_name = op.name\n\n quoted_name = quote_identifier(field_name, force=True)\n\n table = op.table\n ctx = translator.context\n\n # If the column does not originate from the table set in the current SELECT\n # context, we should format as a subquery\n if translator.permit_subquery and ctx.is_foreign_expr(table):\n proj_expr = table.projection([field_name]).to_array()\n return _table_array_view(translator, 
proj_expr)\n\n if ctx.need_aliases():\n alias = ctx.get_ref(table)\n if alias is not None:\n quoted_name = '{}.{}'.format(alias, quoted_name)\n\n return quoted_name\n\n\n# AGGREGATION\n\napprox_count_distinct = _reduction(\n 'approx_nunique',\n sql_func_name='approx_count_distinct',\n sql_signature='{}({}, 100)',\n)\n\ncount_distinct = _reduction('count')\ncount = _reduction('count')\n\n\ndef _arbitrary(translator, expr):\n arg, how, where = expr.op().args\n\n if how not in (None, 'last'):\n raise com.UnsupportedOperationError(\n '{!r} value not supported for arbitrary in MapD'.format(how)\n )\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return 'SAMPLE({})'.format(translator.translate(arg))\n\n\n# MATH\n\n\nclass NumericTruncate(ops.NumericBinaryOp):\n \"\"\"Truncates x to y decimal places\"\"\"\n\n output_type = rlz.shape_like('left', ops.dt.float)\n\n\n# GEOMETRIC\n\n\nclass Conv_4326_900913_X(ops.UnaryOp):\n \"\"\"\n Converts WGS-84 latitude to WGS-84 Web Mercator x coordinate.\n \"\"\"\n\n output_type = rlz.shape_like('arg', ops.dt.float)\n\n\nclass Conv_4326_900913_Y(ops.UnaryOp):\n \"\"\"\n Converts WGS-84 longitude to WGS-84 Web Mercator y coordinate.\n\n \"\"\"\n\n output_type = rlz.shape_like('arg', ops.dt.float)\n\n\n# String\n\n\nclass ByteLength(ops.StringLength):\n \"\"\"Returns the length of a string in bytes length\"\"\"\n\n\n# https://www.mapd.com/docs/latest/mapd-core-guide/dml/\n_binary_infix_ops = {\n # math\n ops.Power: fixed_arity('power', 2),\n ops.NotEquals: impala_compiler._binary_infix_op('<>'),\n}\n\n_unary_ops = {}\n\n# COMPARISON\n_comparison_ops = {}\n\n\n# MATH\n_math_ops = {\n ops.Degrees: unary('degrees'), # MapD function\n ops.Modulus: fixed_arity('mod', 2),\n ops.Pi: fixed_arity('pi', 0),\n ops.Radians: unary('radians'),\n NumericTruncate: fixed_arity('truncate', 2),\n}\n\n# STATS\n_stats_ops = {\n ops.Correlation: _corr,\n ops.StandardDev: _variance_like('stddev'),\n ops.Variance: _variance_like('var'),\n ops.Covariance: _cov,\n}\n\n# TRIGONOMETRIC\n_trigonometric_ops = {\n ops.Acos: unary('acos'),\n ops.Asin: unary('asin'),\n ops.Atan: unary('atan'),\n ops.Atan2: fixed_arity('atan2', 2),\n ops.Cos: unary('cos'),\n ops.Cot: unary('cot'),\n ops.Sin: unary('sin'),\n ops.Tan: unary('tan'),\n}\n\n# GEOMETRIC\n_geometric_ops = {\n Conv_4326_900913_X: unary('conv_4326_900913_x'),\n Conv_4326_900913_Y: unary('conv_4326_900913_y'),\n}\n\n# GEO SPATIAL\n_geospatial_ops = {\n ops.GeoArea: unary('ST_AREA'),\n ops.GeoContains: fixed_arity('ST_CONTAINS', 2),\n ops.GeoDistance: fixed_arity('ST_DISTANCE', 2),\n ops.GeoLength: unary('ST_LENGTH'),\n ops.GeoPerimeter: unary('ST_PERIMETER'),\n ops.GeoMaxDistance: fixed_arity('ST_MAXDISTANCE', 2),\n ops.GeoX: unary('ST_X'),\n ops.GeoY: unary('ST_Y'),\n ops.GeoXMin: unary('ST_XMIN'),\n ops.GeoXMax: unary('ST_XMAX'),\n ops.GeoYMin: unary('ST_YMIN'),\n ops.GeoYMax: unary('ST_YMAX'),\n ops.GeoStartPoint: unary('ST_STARTPOINT'),\n ops.GeoEndPoint: unary('ST_ENDPOINT'),\n ops.GeoPointN: fixed_arity('ST_POINTN', 2),\n ops.GeoNPoints: unary('ST_NPOINTS'),\n ops.GeoNRings: unary('ST_NRINGS'),\n ops.GeoSRID: unary('ST_SRID'),\n}\n\n# STRING\n_string_ops = {\n ops.StringLength: _length(),\n ByteLength: _length('byte_length', 'LENGTH'),\n ops.StringSQLILike: binary_infix_op('ilike'),\n ops.StringFind: _contains,\n}\n\n# DATE\n_date_ops = {\n ops.DateTruncate: _timestamp_truncate,\n ops.TimestampTruncate: _timestamp_truncate,\n # DIRECT EXTRACT OPERATIONS\n ops.ExtractYear: _extract_field('YEAR'),\n ops.ExtractMonth: 
_extract_field('MONTH'),\n ops.ExtractDay: _extract_field('DAY'),\n ops.ExtractHour: _extract_field('HOUR'),\n ops.ExtractMinute: _extract_field('MINUTE'),\n ops.ExtractSecond: _extract_field('SECOND'),\n ops.IntervalAdd: _interval_from_integer,\n ops.IntervalFromInteger: _interval_from_integer,\n ops.DateAdd: _timestamp_op('TIMESTAMPADD'),\n ops.DateSub: _timestamp_op('TIMESTAMPADD', '-'),\n ops.TimestampAdd: _timestamp_op('TIMESTAMPADD'),\n ops.TimestampSub: _timestamp_op('TIMESTAMPADD', '-'),\n}\n\n# AGGREGATION/REDUCTION\n_agg_ops = {\n ops.HLLCardinality: approx_count_distinct,\n ops.DistinctColumn: unary_prefix_op('distinct'),\n ops.Arbitrary: _arbitrary,\n}\n\n# GENERAL\n_general_ops = {\n ops.Literal: literal,\n ops.ValueList: _value_list,\n ops.Cast: _cast,\n ops.Where: _where,\n ops.TableColumn: _table_column,\n ops.CrossJoin: _cross_join,\n}\n\n# UNSUPPORTED OPERATIONS\n_unsupported_ops = [\n # generic/aggregation\n ops.CMSMedian,\n ops.WindowOp,\n ops.DecimalPrecision,\n ops.DecimalScale,\n ops.BaseConvert,\n ops.CumulativeSum,\n ops.CumulativeMin,\n ops.CumulativeMax,\n ops.CumulativeMean,\n ops.CumulativeAny,\n ops.CumulativeAll,\n ops.IdenticalTo,\n ops.RowNumber,\n ops.DenseRank,\n ops.MinRank,\n ops.PercentRank,\n ops.FirstValue,\n ops.LastValue,\n ops.NthValue,\n ops.Lag,\n ops.Lead,\n ops.NTile,\n ops.GroupConcat,\n ops.NullIf,\n ops.NullIfZero,\n ops.NullLiteral,\n ops.IsInf,\n ops.IsNan,\n ops.IfNull,\n # string\n ops.Lowercase,\n ops.Uppercase,\n ops.FindInSet,\n ops.StringReplace,\n ops.StringJoin,\n ops.StringSplit,\n ops.StringToTimestamp,\n ops.Translate,\n ops.StringAscii,\n ops.LPad,\n ops.RPad,\n ops.Strip,\n ops.RStrip,\n ops.LStrip,\n ops.Capitalize,\n ops.Substring,\n ops.StrRight,\n ops.Repeat,\n ops.Reverse,\n ops.RegexExtract,\n ops.RegexReplace,\n ops.ParseURL,\n # Numeric\n ops.Least,\n ops.Greatest,\n ops.Log2,\n ops.Log,\n ops.Round,\n # date/time/timestamp\n ops.TimestampFromUNIX,\n ops.Date,\n ops.TimeTruncate,\n ops.TimestampDiff,\n ops.DayOfWeekIndex,\n ops.DayOfWeekName,\n # table\n ops.Union,\n]\n\n_unsupported_ops = {k: raise_unsupported_op_error for k in _unsupported_ops}\n\n# registry\n_operation_registry = impala_compiler._operation_registry.copy()\n\n_operation_registry.update(_general_ops)\n_operation_registry.update(_binary_infix_ops)\n_operation_registry.update(_unary_ops)\n_operation_registry.update(_comparison_ops)\n_operation_registry.update(_math_ops)\n_operation_registry.update(_stats_ops)\n_operation_registry.update(_trigonometric_ops)\n_operation_registry.update(_geometric_ops)\n_operation_registry.update(_string_ops)\n_operation_registry.update(_date_ops)\n_operation_registry.update(_agg_ops)\n_operation_registry.update(_geospatial_ops)\n# the last update should be with unsupported ops\n_operation_registry.update(_unsupported_ops)\n", "path": "ibis/mapd/operations.py" } ]
diff --git a/ibis/mapd/operations.py b/ibis/mapd/operations.py index 5a14cde7cf74..1403ea9e6c1d 100644 --- a/ibis/mapd/operations.py +++ b/ibis/mapd/operations.py @@ -783,6 +783,7 @@ class ByteLength(ops.StringLength): ops.StringReplace, ops.StringJoin, ops.StringSplit, + ops.StringToTimestamp, ops.Translate, ops.StringAscii, ops.LPad,
bookwyrm-social__bookwyrm-550
[ { "content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not 
shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py" } ]
[ { "content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': 
shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py" } ]
diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py index 8305e0063d..ec6cb3a9bb 100644 --- a/bookwyrm/views/landing.py +++ b/bookwyrm/views/landing.py @@ -13,7 +13,6 @@ # pylint: disable= no-self-use -@method_decorator(login_required, name='dispatch') class About(View): ''' create invites ''' def get(self, request):
TheAlgorithms__Python-10664
[ { "content": "\"\"\"\n== Raise base to the power of exponent using recursion ==\n Input -->\n Enter the base: 3\n Enter the exponent: 4\n Output -->\n 3 to the power of 4 is 81\n Input -->\n Enter the base: 2\n Enter the exponent: 0\n Output -->\n 2 to the power of 0 is 1\n\"\"\"\n\n\ndef power(base: int, exponent: int) -> float:\n \"\"\"\n >>> power(3, 4)\n 81\n >>> power(2, 0)\n 1\n >>> all(power(base, exponent) == pow(base, exponent)\n ... for base in range(-10, 10) for exponent in range(10))\n True\n >>> power('a', 1)\n 'a'\n >>> power('a', 2)\n Traceback (most recent call last):\n ...\n TypeError: can't multiply sequence by non-int of type 'str'\n >>> power('a', 'b')\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'str' and 'int'\n >>> power(2, -1)\n Traceback (most recent call last):\n ...\n RecursionError: maximum recursion depth exceeded\n \"\"\"\n return base * power(base, (exponent - 1)) if exponent else 1\n\n\nif __name__ == \"__main__\":\n from doctests import testmod\n\n testmod()\n print(\"Raise base to the power of exponent using recursion...\")\n base = int(input(\"Enter the base: \").strip())\n exponent = int(input(\"Enter the exponent: \").strip())\n result = power(base, abs(exponent))\n if exponent < 0: # power() does not properly deal w/ negative exponents\n result = 1 / result\n print(f\"{base} to the power of {exponent} is {result}\")\n", "path": "maths/power_using_recursion.py" } ]
[ { "content": "\"\"\"\n== Raise base to the power of exponent using recursion ==\n Input -->\n Enter the base: 3\n Enter the exponent: 4\n Output -->\n 3 to the power of 4 is 81\n Input -->\n Enter the base: 2\n Enter the exponent: 0\n Output -->\n 2 to the power of 0 is 1\n\"\"\"\n\n\ndef power(base: int, exponent: int) -> float:\n \"\"\"\n Calculate the power of a base raised to an exponent.\n\n >>> power(3, 4)\n 81\n >>> power(2, 0)\n 1\n >>> all(power(base, exponent) == pow(base, exponent)\n ... for base in range(-10, 10) for exponent in range(10))\n True\n >>> power('a', 1)\n 'a'\n >>> power('a', 2)\n Traceback (most recent call last):\n ...\n TypeError: can't multiply sequence by non-int of type 'str'\n >>> power('a', 'b')\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'str' and 'int'\n >>> power(2, -1)\n Traceback (most recent call last):\n ...\n RecursionError: maximum recursion depth exceeded\n \"\"\"\n return base * power(base, (exponent - 1)) if exponent else 1\n\n\nif __name__ == \"__main__\":\n from doctests import testmod\n\n testmod()\n print(\"Raise base to the power of exponent using recursion...\")\n base = int(input(\"Enter the base: \").strip())\n exponent = int(input(\"Enter the exponent: \").strip())\n result = power(base, abs(exponent))\n if exponent < 0: # power() does not properly deal w/ negative exponents\n result = 1 / result\n print(f\"{base} to the power of {exponent} is {result}\")\n", "path": "maths/power_using_recursion.py" } ]
diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index e82635ba0005..462fc45bff64 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -15,6 +15,8 @@ def power(base: int, exponent: int) -> float: """ + Calculate the power of a base raised to an exponent. + >>> power(3, 4) 81 >>> power(2, 0)
scikit-hep__pyhf-2135
[ { "content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py" } ]
[ { "content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py" } ]
diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py index 7770c1ee5d..fc0517ef61 100644 --- a/docs/generate_jupyterlite_iframe.py +++ b/docs/generate_jupyterlite_iframe.py @@ -4,7 +4,7 @@ def main(): code = """\ import piplite -await piplite.install(["pyhf==0.7.0"]) +await piplite.install(["pyhf==0.7.0", "matplotlib>=3.0.0"]) %matplotlib inline import pyhf\ """ diff --git a/docs/jupyterlite.rst b/docs/jupyterlite.rst index 57b5fc11e3..12b227bbfa 100644 --- a/docs/jupyterlite.rst +++ b/docs/jupyterlite.rst @@ -21,7 +21,7 @@ Try out now with JupyterLite_ .. raw:: html <iframe - src="https://jupyterlite.github.io/demo/repl/index.html?kernel=python&toolbar=1&code=import%20piplite%0Aawait%20piplite.install%28%5B%22pyhf%3D%3D0.7.0%22%5D%29%0A%25matplotlib%20inline%0Aimport%20pyhf" + src="https://jupyterlite.github.io/demo/repl/index.html?kernel=python&toolbar=1&code=import%20piplite%0Aawait%20piplite.install%28%5B%22pyhf%3D%3D0.7.0%22%2C%20%22matplotlib%3E%3D3.0.0%22%5D%29%0A%25matplotlib%20inline%0Aimport%20pyhf" width="100%" height="500px" ></iframe>
beetbox__beets-3159
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, François-Xavier Thomas.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Use command-line tools to check for audio file corruption.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom subprocess import check_output, CalledProcessError, list2cmdline, STDOUT\n\nimport shlex\nimport os\nimport errno\nimport sys\nimport six\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand\nfrom beets.util import displayable_path, confit, par_map\nfrom beets import ui\n\n\nclass CheckerCommandException(Exception):\n \"\"\"Raised when running a checker failed.\n\n Attributes:\n checker: Checker command name.\n path: Path to the file being validated.\n errno: Error number from the checker execution error.\n msg: Message from the checker execution error.\n \"\"\"\n\n def __init__(self, cmd, oserror):\n self.checker = cmd[0]\n self.path = cmd[-1]\n self.errno = oserror.errno\n self.msg = str(oserror)\n\n\nclass BadFiles(BeetsPlugin):\n def __init__(self):\n self.verbose = False\n\n def run_command(self, cmd):\n self._log.debug(u\"running command: {}\",\n displayable_path(list2cmdline(cmd)))\n try:\n output = check_output(cmd, stderr=STDOUT)\n errors = 0\n status = 0\n except CalledProcessError as e:\n output = e.output\n errors = 1\n status = e.returncode\n except OSError as e:\n raise CheckerCommandException(cmd, e)\n output = output.decode(sys.getfilesystemencoding())\n return status, errors, [line for line in output.split(\"\\n\") if line]\n\n def check_mp3val(self, path):\n status, errors, output = self.run_command([\"mp3val\", path])\n if status == 0:\n output = [line for line in output if line.startswith(\"WARNING:\")]\n errors = len(output)\n return status, errors, output\n\n def check_flac(self, path):\n return self.run_command([\"flac\", \"-wst\", path])\n\n def check_custom(self, command):\n def checker(path):\n cmd = shlex.split(command)\n cmd.append(path)\n return self.run_command(cmd)\n return checker\n\n def get_checker(self, ext):\n ext = ext.lower()\n try:\n command = self.config['commands'].get(dict).get(ext)\n except confit.NotFoundError:\n command = None\n if command:\n return self.check_custom(command)\n if ext == \"mp3\":\n return self.check_mp3val\n if ext == \"flac\":\n return self.check_flac\n\n def check_item(self, item):\n # First, check whether the path exists. 
If not, the user\n # should probably run `beet update` to cleanup your library.\n dpath = displayable_path(item.path)\n self._log.debug(u\"checking path: {}\", dpath)\n if not os.path.exists(item.path):\n ui.print_(u\"{}: file does not exist\".format(\n ui.colorize('text_error', dpath)))\n\n # Run the checker against the file if one is found\n ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')\n checker = self.get_checker(ext)\n if not checker:\n self._log.error(u\"no checker specified in the config for {}\",\n ext)\n return\n path = item.path\n if not isinstance(path, six.text_type):\n path = item.path.decode(sys.getfilesystemencoding())\n try:\n status, errors, output = checker(path)\n except CheckerCommandException as e:\n if e.errno == errno.ENOENT:\n self._log.error(\n u\"command not found: {} when validating file: {}\",\n e.checker,\n e.path\n )\n else:\n self._log.error(u\"error invoking {}: {}\", e.checker, e.msg)\n return\n if status > 0:\n ui.print_(u\"{}: checker exited with status {}\"\n .format(ui.colorize('text_error', dpath), status))\n for line in output:\n ui.print_(u\" {}\".format(displayable_path(line)))\n elif errors > 0:\n ui.print_(u\"{}: checker found {} errors or warnings\"\n .format(ui.colorize('text_warning', dpath), errors))\n for line in output:\n ui.print_(u\" {}\".format(displayable_path(line)))\n elif self.verbose:\n ui.print_(u\"{}: ok\".format(ui.colorize('text_success', dpath)))\n\n def command(self, lib, opts, args):\n # Get items from arguments\n items = lib.items(ui.decargs(args))\n self.verbose = opts.verbose\n par_map(self.check_item, items)\n\n def commands(self):\n bad_command = Subcommand('bad',\n help=u'check for corrupt or missing files')\n bad_command.parser.add_option(\n u'-v', u'--verbose',\n action='store_true', default=False, dest='verbose',\n help=u'view results for both the bad and uncorrupted files'\n )\n bad_command.func = self.command\n return [bad_command]\n", "path": "beetsplug/badfiles.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, François-Xavier Thomas.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Use command-line tools to check for audio file corruption.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom subprocess import check_output, CalledProcessError, list2cmdline, STDOUT\n\nimport shlex\nimport os\nimport errno\nimport sys\nimport six\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand\nfrom beets.util import displayable_path, confit, par_map\nfrom beets import ui\n\n\nclass CheckerCommandException(Exception):\n \"\"\"Raised when running a checker failed.\n\n Attributes:\n checker: Checker command name.\n path: Path to the file being validated.\n errno: Error number from the checker execution error.\n msg: Message from the checker execution error.\n \"\"\"\n\n def __init__(self, cmd, oserror):\n self.checker = cmd[0]\n self.path = cmd[-1]\n self.errno = oserror.errno\n self.msg = str(oserror)\n\n\nclass BadFiles(BeetsPlugin):\n def __init__(self):\n super(BadFiles, self).__init__()\n self.verbose = False\n\n def run_command(self, cmd):\n self._log.debug(u\"running command: {}\",\n displayable_path(list2cmdline(cmd)))\n try:\n output = check_output(cmd, stderr=STDOUT)\n errors = 0\n status = 0\n except CalledProcessError as e:\n output = e.output\n errors = 1\n status = e.returncode\n except OSError as e:\n raise CheckerCommandException(cmd, e)\n output = output.decode(sys.getfilesystemencoding())\n return status, errors, [line for line in output.split(\"\\n\") if line]\n\n def check_mp3val(self, path):\n status, errors, output = self.run_command([\"mp3val\", path])\n if status == 0:\n output = [line for line in output if line.startswith(\"WARNING:\")]\n errors = len(output)\n return status, errors, output\n\n def check_flac(self, path):\n return self.run_command([\"flac\", \"-wst\", path])\n\n def check_custom(self, command):\n def checker(path):\n cmd = shlex.split(command)\n cmd.append(path)\n return self.run_command(cmd)\n return checker\n\n def get_checker(self, ext):\n ext = ext.lower()\n try:\n command = self.config['commands'].get(dict).get(ext)\n except confit.NotFoundError:\n command = None\n if command:\n return self.check_custom(command)\n if ext == \"mp3\":\n return self.check_mp3val\n if ext == \"flac\":\n return self.check_flac\n\n def check_item(self, item):\n # First, check whether the path exists. 
If not, the user\n # should probably run `beet update` to cleanup your library.\n dpath = displayable_path(item.path)\n self._log.debug(u\"checking path: {}\", dpath)\n if not os.path.exists(item.path):\n ui.print_(u\"{}: file does not exist\".format(\n ui.colorize('text_error', dpath)))\n\n # Run the checker against the file if one is found\n ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')\n checker = self.get_checker(ext)\n if not checker:\n self._log.error(u\"no checker specified in the config for {}\",\n ext)\n return\n path = item.path\n if not isinstance(path, six.text_type):\n path = item.path.decode(sys.getfilesystemencoding())\n try:\n status, errors, output = checker(path)\n except CheckerCommandException as e:\n if e.errno == errno.ENOENT:\n self._log.error(\n u\"command not found: {} when validating file: {}\",\n e.checker,\n e.path\n )\n else:\n self._log.error(u\"error invoking {}: {}\", e.checker, e.msg)\n return\n if status > 0:\n ui.print_(u\"{}: checker exited with status {}\"\n .format(ui.colorize('text_error', dpath), status))\n for line in output:\n ui.print_(u\" {}\".format(displayable_path(line)))\n elif errors > 0:\n ui.print_(u\"{}: checker found {} errors or warnings\"\n .format(ui.colorize('text_warning', dpath), errors))\n for line in output:\n ui.print_(u\" {}\".format(displayable_path(line)))\n elif self.verbose:\n ui.print_(u\"{}: ok\".format(ui.colorize('text_success', dpath)))\n\n def command(self, lib, opts, args):\n # Get items from arguments\n items = lib.items(ui.decargs(args))\n self.verbose = opts.verbose\n par_map(self.check_item, items)\n\n def commands(self):\n bad_command = Subcommand('bad',\n help=u'check for corrupt or missing files')\n bad_command.parser.add_option(\n u'-v', u'--verbose',\n action='store_true', default=False, dest='verbose',\n help=u'view results for both the bad and uncorrupted files'\n )\n bad_command.func = self.command\n return [bad_command]\n", "path": "beetsplug/badfiles.py" } ]
diff --git a/beetsplug/badfiles.py b/beetsplug/badfiles.py index 52caa99943..0be08bae5f 100644 --- a/beetsplug/badfiles.py +++ b/beetsplug/badfiles.py @@ -50,6 +50,7 @@ def __init__(self, cmd, oserror): class BadFiles(BeetsPlugin): def __init__(self): + super(BadFiles, self).__init__() self.verbose = False def run_command(self, cmd):
napari__napari-3501
[ { "content": "import configparser\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport time\nfrom contextlib import contextmanager\n\nimport tomlkit\n\nAPP = 'napari'\n\n# EXTRA_REQS will be added to the bundle, in addition to those specified in\n# setup.cfg. To add additional packages to the bundle, or to override any of\n# the packages listed here or in `setup.cfg, use the `--add` command line\n# argument with a series of \"pip install\" style strings when running this file.\n# For example, the following will ADD ome-zarr, and CHANGE the version of\n# PySide2:\n# python bundle.py --add 'PySide2==5.15.0' 'ome-zarr'\n\n# This is now defined in setup.cfg \"options.extras_require.bundle_run\"\n# EXTRA_REQS = []\n\nWINDOWS = os.name == 'nt'\nMACOS = sys.platform == 'darwin'\nLINUX = sys.platform.startswith(\"linux\")\nHERE = os.path.abspath(os.path.dirname(__file__))\nPYPROJECT_TOML = os.path.join(HERE, 'pyproject.toml')\nSETUP_CFG = os.path.join(HERE, 'setup.cfg')\n\n\nif WINDOWS:\n BUILD_DIR = os.path.join(HERE, 'windows')\nelif LINUX:\n BUILD_DIR = os.path.join(HERE, 'linux')\nelif MACOS:\n BUILD_DIR = os.path.join(HERE, 'macOS')\n APP_DIR = os.path.join(BUILD_DIR, APP, f'{APP}.app')\n\n\nwith open(os.path.join(HERE, \"napari\", \"_version.py\")) as f:\n match = re.search(r'version\\s?=\\s?\\'([^\\']+)', f.read())\n if match:\n VERSION = match.groups()[0].split('+')[0]\n\n\n@contextmanager\ndef patched_toml():\n parser = configparser.ConfigParser()\n parser.read(SETUP_CFG)\n requirements = parser.get(\"options\", \"install_requires\").splitlines()\n requirements = [r.split('#')[0].strip() for r in requirements if r]\n\n with open(PYPROJECT_TOML) as f:\n original_toml = f.read()\n\n toml = tomlkit.parse(original_toml)\n\n # Initialize EXTRA_REQS from setup.cfg 'options.extras_require.bundle_run'\n bundle_run = parser.get(\"options.extras_require\", \"bundle_run\")\n EXTRA_REQS = [\n requirement.split('#')[0].strip()\n for requirement in bundle_run.splitlines()\n if requirement\n ]\n\n # parse command line arguments\n if '--add' in sys.argv:\n for item in sys.argv[sys.argv.index('--add') + 1 :]:\n if item.startswith('-'):\n break\n EXTRA_REQS.append(item)\n\n for item in EXTRA_REQS:\n _base = re.split('<|>|=', item, maxsplit=1)[0]\n for r in requirements:\n if r.startswith(_base):\n requirements.remove(r)\n break\n if _base.lower().startswith('pyqt5'):\n try:\n i = next(x for x in requirements if x.startswith('PySide'))\n requirements.remove(i)\n except StopIteration:\n pass\n\n requirements += EXTRA_REQS\n\n toml['tool']['briefcase']['app'][APP]['requires'] = requirements\n toml['tool']['briefcase']['version'] = VERSION\n\n print(\"patching pyproject.toml to version: \", VERSION)\n print(\n \"patching pyproject.toml requirements to:\",\n *toml['tool']['briefcase']['app'][APP]['requires'],\n sep=\"\\n \",\n )\n\n if MACOS:\n # Workaround https://github.com/napari/napari/issues/2965\n # Pin revisions to releases _before_ they switched to static libs\n revision = {\n (3, 6): 'b11',\n (3, 7): 'b5',\n (3, 8): 'b4',\n (3, 9): 'b1',\n }[sys.version_info[:2]]\n app_table = toml['tool']['briefcase']['app'][APP]\n app_table.add('macOS', tomlkit.table())\n app_table['macOS']['support_revision'] = revision\n print(\n \"patching pyproject.toml to pin support package to revision:\",\n revision,\n )\n\n with open(PYPROJECT_TOML, 'w') as f:\n f.write(tomlkit.dumps(toml))\n\n try:\n yield\n finally:\n with open(PYPROJECT_TOML, 'w') as f:\n 
f.write(original_toml)\n\n\ndef patch_dmgbuild():\n if not MACOS:\n return\n from dmgbuild import core\n\n with open(core.__file__) as f:\n src = f.read()\n with open(core.__file__, 'w') as f:\n f.write(\n src.replace(\n \"shutil.rmtree(os.path.join(mount_point, '.Trashes'), True)\",\n \"shutil.rmtree(os.path.join(mount_point, '.Trashes'), True)\"\n \";time.sleep(30)\",\n )\n )\n print(\"patched dmgbuild.core\")\n\n\ndef add_site_packages_to_path():\n # on mac, make sure the site-packages folder exists even before the user\n # has pip installed, so it is in sys.path on the first run\n # (otherwise, newly installed plugins will not be detected until restart)\n if MACOS:\n pkgs_dir = os.path.join(\n APP_DIR,\n 'Contents',\n 'Resources',\n 'Support',\n 'lib',\n f'python{sys.version_info.major}.{sys.version_info.minor}',\n 'site-packages',\n )\n os.makedirs(pkgs_dir)\n print(\"created site-packages at\", pkgs_dir)\n\n # on windows, briefcase uses a _pth file to determine the sys.path at\n # runtime. https://docs.python.org/3/using/windows.html#finding-modules\n # We update that file with the eventual location of pip site-packages\n elif WINDOWS:\n py = \"\".join(map(str, sys.version_info[:2]))\n python_dir = os.path.join(BUILD_DIR, APP, 'src', 'python')\n pth = os.path.join(python_dir, f'python{py}._pth')\n with open(pth, \"a\") as f:\n # Append 'hello' at the end of file\n f.write(\".\\\\\\\\Lib\\\\\\\\site-packages\\n\")\n print(\"added bundled site-packages to\", pth)\n\n pkgs_dir = os.path.join(python_dir, 'Lib', 'site-packages')\n os.makedirs(pkgs_dir)\n print(\"created site-packages at\", pkgs_dir)\n with open(os.path.join(pkgs_dir, 'readme.txt'), 'w') as f:\n f.write(\"this is where plugin packages will go\")\n\n\ndef patch_wxs():\n # must run after briefcase create\n fname = os.path.join(BUILD_DIR, APP, f'{APP}.wxs')\n\n if os.path.exists(fname):\n with open(fname) as f:\n source = f.read()\n with open(fname, 'w') as f:\n f.write(source.replace('pythonw.exe', 'python.exe'))\n print(\"patched pythonw.exe -> python.exe\")\n\n\ndef patch_python_lib_location():\n # must run after briefcase create\n support = os.path.join(\n BUILD_DIR, APP, APP + \".app\", \"Contents\", \"Resources\", \"Support\"\n )\n python_resources = os.path.join(support, \"Python\", \"Resources\")\n os.makedirs(python_resources, exist_ok=True)\n for subdir in (\"bin\", \"lib\"):\n orig = os.path.join(support, subdir)\n dest = os.path.join(python_resources, subdir)\n os.symlink(\"../../\" + subdir, dest)\n print(\"symlinking\", orig, \"to\", dest)\n\n\ndef patch_environment_variables():\n os.environ[\"ARCH\"] = architecture()\n\n\ndef architecture():\n arch = platform.machine() or \"generic\"\n # Try to canonicalize across OS\n replacements = {\n \"amd64\": \"x86_64\",\n }\n return replacements.get(arch.lower(), arch)\n\n\ndef make_zip():\n import glob\n import zipfile\n\n if WINDOWS:\n ext, OS = '*.msi', 'Windows'\n elif LINUX:\n ext, OS = '*.AppImage', 'Linux'\n elif MACOS:\n ext, OS = '*.dmg', 'macOS'\n artifact = glob.glob(os.path.join(BUILD_DIR, ext))[0]\n dest = f'napari-{VERSION}-{OS}-{architecture()}.zip'\n\n with zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) as zf:\n zf.write(artifact, arcname=os.path.basename(artifact))\n print(\"created zipfile: \", dest)\n return dest\n\n\ndef clean():\n shutil.rmtree(BUILD_DIR, ignore_errors=True)\n\n\ndef bundle():\n clean()\n\n if MACOS:\n patch_dmgbuild()\n\n if LINUX:\n patch_environment_variables()\n\n # smoke test, and build resources\n 
subprocess.check_call([sys.executable, '-m', APP, '--info'])\n\n # the briefcase calls need to happen while the pyproject toml is patched\n with patched_toml():\n # create\n cmd = ['briefcase', 'create'] + (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n time.sleep(0.5)\n\n add_site_packages_to_path()\n\n if MACOS:\n patch_python_lib_location()\n\n # build\n cmd = ['briefcase', 'build'] + (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n # package\n cmd = ['briefcase', 'package']\n cmd += ['--no-sign'] if MACOS else (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n # compress\n dest = make_zip()\n clean()\n\n return dest\n\n\nif __name__ == \"__main__\":\n if '--clean' in sys.argv:\n clean()\n sys.exit()\n if '--version' in sys.argv:\n print(VERSION)\n sys.exit()\n if '--arch' in sys.argv:\n print(architecture())\n sys.exit()\n print('created', bundle())\n", "path": "bundle.py" } ]
[ { "content": "import configparser\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport time\nfrom contextlib import contextmanager\n\nimport tomlkit\n\nAPP = 'napari'\n\n# EXTRA_REQS will be added to the bundle, in addition to those specified in\n# setup.cfg. To add additional packages to the bundle, or to override any of\n# the packages listed here or in `setup.cfg, use the `--add` command line\n# argument with a series of \"pip install\" style strings when running this file.\n# For example, the following will ADD ome-zarr, and CHANGE the version of\n# PySide2:\n# python bundle.py --add 'PySide2==5.15.0' 'ome-zarr'\n\n# This is now defined in setup.cfg \"options.extras_require.bundle_run\"\n# EXTRA_REQS = []\n\nWINDOWS = os.name == 'nt'\nMACOS = sys.platform == 'darwin'\nLINUX = sys.platform.startswith(\"linux\")\nHERE = os.path.abspath(os.path.dirname(__file__))\nPYPROJECT_TOML = os.path.join(HERE, 'pyproject.toml')\nSETUP_CFG = os.path.join(HERE, 'setup.cfg')\n\n\nif WINDOWS:\n BUILD_DIR = os.path.join(HERE, 'windows')\nelif LINUX:\n BUILD_DIR = os.path.join(HERE, 'linux')\nelif MACOS:\n BUILD_DIR = os.path.join(HERE, 'macOS')\n APP_DIR = os.path.join(BUILD_DIR, APP, f'{APP}.app')\n\n\nwith open(os.path.join(HERE, \"napari\", \"_version.py\")) as f:\n match = re.search(r'version\\s?=\\s?\\'([^\\']+)', f.read())\n if match:\n VERSION = match.groups()[0].split('+')[0]\n\n\n@contextmanager\ndef patched_toml():\n parser = configparser.ConfigParser()\n parser.read(SETUP_CFG)\n requirements = parser.get(\"options\", \"install_requires\").splitlines()\n requirements = [r.split('#')[0].strip() for r in requirements if r]\n\n with open(PYPROJECT_TOML) as f:\n original_toml = f.read()\n\n toml = tomlkit.parse(original_toml)\n\n # Initialize EXTRA_REQS from setup.cfg 'options.extras_require.bundle_run'\n bundle_run = parser.get(\"options.extras_require\", \"bundle_run\")\n EXTRA_REQS = [\n requirement.split('#')[0].strip()\n for requirement in bundle_run.splitlines()\n if requirement\n ]\n\n # parse command line arguments\n if '--add' in sys.argv:\n for item in sys.argv[sys.argv.index('--add') + 1 :]:\n if item.startswith('-'):\n break\n EXTRA_REQS.append(item)\n\n for item in EXTRA_REQS:\n _base = re.split('<|>|=', item, maxsplit=1)[0]\n for r in requirements:\n if r.startswith(_base):\n requirements.remove(r)\n break\n if _base.lower().startswith('pyqt5'):\n try:\n i = next(x for x in requirements if x.startswith('PySide'))\n requirements.remove(i)\n except StopIteration:\n pass\n\n requirements += EXTRA_REQS\n\n toml['tool']['briefcase']['app'][APP]['requires'] = requirements\n toml['tool']['briefcase']['version'] = VERSION\n\n print(\"patching pyproject.toml to version: \", VERSION)\n print(\n \"patching pyproject.toml requirements to:\",\n *toml['tool']['briefcase']['app'][APP]['requires'],\n sep=\"\\n \",\n )\n\n if MACOS:\n # Workaround https://github.com/napari/napari/issues/2965\n # Pin revisions to releases _before_ they switched to static libs\n revision = {\n (3, 6): 'b11',\n (3, 7): 'b5',\n (3, 8): 'b4',\n (3, 9): 'b1',\n }[sys.version_info[:2]]\n app_table = toml['tool']['briefcase']['app'][APP]\n app_table.add('macOS', tomlkit.table())\n app_table['macOS']['support_revision'] = revision\n print(\n \"patching pyproject.toml to pin support package to revision:\",\n revision,\n )\n\n with open(PYPROJECT_TOML, 'w') as f:\n f.write(tomlkit.dumps(toml))\n\n try:\n yield\n finally:\n with open(PYPROJECT_TOML, 'w') as f:\n 
f.write(original_toml)\n\n\ndef patch_dmgbuild():\n if not MACOS:\n return\n from dmgbuild import core\n\n with open(core.__file__) as f:\n src = f.read()\n with open(core.__file__, 'w') as f:\n f.write(\n src.replace(\n \"shutil.rmtree(os.path.join(mount_point, '.Trashes'), True)\",\n \"shutil.rmtree(os.path.join(mount_point, '.Trashes'), True)\"\n \";time.sleep(30)\",\n )\n )\n print(\"patched dmgbuild.core\")\n\n\ndef add_site_packages_to_path():\n # on mac, make sure the site-packages folder exists even before the user\n # has pip installed, so it is in sys.path on the first run\n # (otherwise, newly installed plugins will not be detected until restart)\n if MACOS:\n pkgs_dir = os.path.join(\n APP_DIR,\n 'Contents',\n 'Resources',\n 'Support',\n 'lib',\n f'python{sys.version_info.major}.{sys.version_info.minor}',\n 'site-packages',\n )\n os.makedirs(pkgs_dir)\n print(\"created site-packages at\", pkgs_dir)\n\n # on windows, briefcase uses a _pth file to determine the sys.path at\n # runtime. https://docs.python.org/3/using/windows.html#finding-modules\n # We update that file with the eventual location of pip site-packages\n elif WINDOWS:\n py = \"\".join(map(str, sys.version_info[:2]))\n python_dir = os.path.join(BUILD_DIR, APP, 'src', 'python')\n pth = os.path.join(python_dir, f'python{py}._pth')\n with open(pth, \"a\") as f:\n # Append 'hello' at the end of file\n f.write(\".\\\\\\\\Lib\\\\\\\\site-packages\\n\")\n print(\"added bundled site-packages to\", pth)\n\n pkgs_dir = os.path.join(python_dir, 'Lib', 'site-packages')\n os.makedirs(pkgs_dir)\n print(\"created site-packages at\", pkgs_dir)\n with open(os.path.join(pkgs_dir, 'readme.txt'), 'w') as f:\n f.write(\"this is where plugin packages will go\")\n\n\ndef patch_wxs():\n # must run after briefcase create\n fname = os.path.join(BUILD_DIR, APP, f'{APP}.wxs')\n\n if os.path.exists(fname):\n with open(fname) as f:\n source = f.read()\n with open(fname, 'w') as f:\n f.write(source.replace('pythonw.exe', 'python.exe'))\n print(\"patched pythonw.exe -> python.exe\")\n\n\ndef patch_python_lib_location():\n # must run after briefcase create\n support = os.path.join(\n BUILD_DIR, APP, APP + \".app\", \"Contents\", \"Resources\", \"Support\"\n )\n python_resources = os.path.join(support, \"Python\", \"Resources\")\n os.makedirs(python_resources, exist_ok=True)\n for subdir in (\"bin\", \"lib\"):\n orig = os.path.join(support, subdir)\n dest = os.path.join(python_resources, subdir)\n os.symlink(\"../../\" + subdir, dest)\n print(\"symlinking\", orig, \"to\", dest)\n\n\ndef patch_environment_variables():\n os.environ[\"ARCH\"] = architecture()\n\n\ndef architecture():\n arch = platform.machine() or \"generic\"\n # Try to canonicalize across OS\n replacements = {\n \"amd64\": \"x86_64\",\n }\n return replacements.get(arch.lower(), arch)\n\n\ndef make_zip():\n import glob\n import zipfile\n\n if WINDOWS:\n ext, OS = '*.msi', 'Windows'\n elif LINUX:\n ext, OS = '*.AppImage', 'Linux'\n elif MACOS:\n ext, OS = '*.dmg', 'macOS'\n artifact = glob.glob(os.path.join(BUILD_DIR, ext))[0]\n dest = f'napari-{VERSION}-{OS}-{architecture()}.zip'\n\n with zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) as zf:\n zf.write(artifact, arcname=os.path.basename(artifact))\n print(\"created zipfile: \", dest)\n return dest\n\n\ndef clean():\n shutil.rmtree(BUILD_DIR, ignore_errors=True)\n\n\ndef bundle():\n clean()\n\n if MACOS:\n patch_dmgbuild()\n\n if LINUX:\n patch_environment_variables()\n\n # smoke test, and build resources\n 
subprocess.check_call([sys.executable, '-m', APP, '--info'])\n\n # the briefcase calls need to happen while the pyproject toml is patched\n with patched_toml():\n # create\n cmd = ['briefcase', 'create'] + (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n time.sleep(0.5)\n\n add_site_packages_to_path()\n\n if WINDOWS:\n patch_wxs()\n elif MACOS:\n patch_python_lib_location()\n\n # build\n cmd = ['briefcase', 'build'] + (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n # package\n cmd = ['briefcase', 'package']\n cmd += ['--no-sign'] if MACOS else (['--no-docker'] if LINUX else [])\n subprocess.check_call(cmd)\n\n # compress\n dest = make_zip()\n clean()\n\n return dest\n\n\nif __name__ == \"__main__\":\n if '--clean' in sys.argv:\n clean()\n sys.exit()\n if '--version' in sys.argv:\n print(VERSION)\n sys.exit()\n if '--arch' in sys.argv:\n print(architecture())\n sys.exit()\n print('created', bundle())\n", "path": "bundle.py" } ]
diff --git a/bundle.py b/bundle.py index b744e714688..24916fda93c 100644 --- a/bundle.py +++ b/bundle.py @@ -263,7 +263,9 @@ def bundle(): add_site_packages_to_path() - if MACOS: + if WINDOWS: + patch_wxs() + elif MACOS: patch_python_lib_location() # build diff --git a/docs/release/release_0_4_12.md b/docs/release/release_0_4_12.md index d582ef28c18..22428f124fd 100644 --- a/docs/release/release_0_4_12.md +++ b/docs/release/release_0_4_12.md @@ -16,8 +16,8 @@ This is a bug fix release with many minor improvements and bug fixes. The user experience for users of dask arrays might be significantly improved by a new approach to setting the contrast limits based on the current slice (#3425). -A progress bar will now display when opening multiple files (#3355). -Thanks to first-time contributor @tibuch the data type of labels layers can now +A progress bar will now display when opening multiple files (#3355). +Thanks to first-time contributor @tibuch the data type of labels layers can now be converted from a context menu on the layer list (#3402). See the full list of merged pull requests below for further delails! @@ -57,6 +57,7 @@ See the full list of merged pull requests below for further delails! - Finds layer_controls based on layer's MRO (#3471) - Use `ensure_main_thread` instead of custom thread propagation mechanism in NapariQtNotification (#3473) - Drop pythonw patch in windows bundle (#3479) +- Revert "drop pythonw patch (#3479)" (#3501) ## Bug Fixes
jupyterhub__jupyterhub-263
[ { "content": "#!/usr/bin/env python3\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\ntry:\n from urllib.parse import quote\nexcept ImportError:\n # PY2 Compat\n from urllib import quote\n\nimport requests\nfrom jinja2 import ChoiceLoader, FunctionLoader\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom traitlets import (\n Integer,\n Unicode,\n CUnicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp, aliases as notebook_aliases\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['name']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = 
dict(notebook_aliases)\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\npage_template = \"\"\"\n{% extends \"templates/page.html\" %}\n\n{% block header_buttons %}\n{{super()}}\n\n<a href='{{hub_control_panel_url}}'\n class='btn btn-default btn-sm navbar-btn pull-right'\n style='margin-right: 4px; margin-left: 2px;'\n>\nControl Panel</a>\n{% endblock %}\n\"\"\"\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n trust_xheaders = True\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.debug(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n \n def start(self):\n # Start a PeriodicCallback to clear cached cookies. This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n super(SingleUserNotebookApp, self).start()\n \n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = self.hub_prefix\n s['hub_api_url'] = self.hub_api_url\n \n super(SingleUserNotebookApp, self).init_webapp()\n self.patch_templates()\n \n def patch_templates(self):\n \"\"\"Patch page templates to add Hub-related buttons\"\"\"\n env = self.web_app.settings['jinja2_env']\n \n env.globals['hub_control_panel_url'] = \\\n url_path_join(self.hub_prefix, 'home')\n \n # patch jinja env loading to modify page template\n def get_page(name):\n if name == 'page.html':\n return page_template\n \n orig_loader = env.loader\n env.loader = ChoiceLoader([\n FunctionLoader(get_page),\n orig_loader,\n ])\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py" } ]
[ { "content": "#!/usr/bin/env python3\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\ntry:\n from urllib.parse import quote\nexcept ImportError:\n # PY2 Compat\n from urllib import quote\n\nimport requests\nfrom jinja2 import ChoiceLoader, FunctionLoader\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import (\n Integer,\n Unicode,\n CUnicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp, aliases as notebook_aliases\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['name']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line 
aliases\naliases = dict(notebook_aliases)\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\npage_template = \"\"\"\n{% extends \"templates/page.html\" %}\n\n{% block header_buttons %}\n{{super()}}\n\n<a href='{{hub_control_panel_url}}'\n class='btn btn-default btn-sm navbar-btn pull-right'\n style='margin-right: 4px; margin-left: 2px;'\n>\nControl Panel</a>\n{% endblock %}\n\"\"\"\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n trust_xheaders = True\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.debug(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n \n def start(self):\n # Start a PeriodicCallback to clear cached cookies. This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n super(SingleUserNotebookApp, self).start()\n \n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = self.hub_prefix\n s['hub_api_url'] = self.hub_api_url\n \n super(SingleUserNotebookApp, self).init_webapp()\n self.patch_templates()\n \n def patch_templates(self):\n \"\"\"Patch page templates to add Hub-related buttons\"\"\"\n env = self.web_app.settings['jinja2_env']\n \n env.globals['hub_control_panel_url'] = \\\n url_path_join(self.hub_prefix, 'home')\n \n # patch jinja env loading to modify page template\n def get_page(name):\n if name == 'page.html':\n return page_template\n \n orig_loader = env.loader\n env.loader = ChoiceLoader([\n FunctionLoader(get_page),\n orig_loader,\n ])\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py" } ]
diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py index b42594bc58..51db52eccc 100644 --- a/jupyterhub/singleuser.py +++ b/jupyterhub/singleuser.py @@ -17,7 +17,7 @@ from tornado import ioloop from tornado.web import HTTPError -from traitlets import ( +from IPython.utils.traitlets import ( Integer, Unicode, CUnicode, diff --git a/jupyterhub/tests/test_spawner.py b/jupyterhub/tests/test_spawner.py index 8c8a9b2436..1d02f46587 100644 --- a/jupyterhub/tests/test_spawner.py +++ b/jupyterhub/tests/test_spawner.py @@ -56,6 +56,30 @@ def test_spawner(db, io_loop): status = io_loop.run_sync(spawner.poll) assert status == 1 +def test_single_user_spawner(db, io_loop): + spawner = new_spawner(db, cmd=[sys.executable, '-m', 'jupyterhub.singleuser']) + io_loop.run_sync(spawner.start) + assert spawner.user.server.ip == 'localhost' + # wait for http server to come up, + # checking for early termination every 1s + def wait(): + return spawner.user.server.wait_up(timeout=1, http=True) + for i in range(30): + status = io_loop.run_sync(spawner.poll) + assert status is None + try: + io_loop.run_sync(wait) + except TimeoutError: + continue + else: + break + io_loop.run_sync(wait) + status = io_loop.run_sync(spawner.poll) + assert status == None + io_loop.run_sync(spawner.stop) + status = io_loop.run_sync(spawner.poll) + assert status == 0 + def test_stop_spawner_sigint_fails(db, io_loop): spawner = new_spawner(db, cmd=[sys.executable, '-c', _uninterruptible])
d2l-ai__d2l-en-2279
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='d2l.devs@gmail.com',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'matplotlib-inline',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='d2l.devs@gmail.com',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 6f14d25677..66d84387cb 100644 --- a/setup.py +++ b/setup.py @@ -2,10 +2,10 @@ import d2l requirements = [ - 'ipython>=7.23', 'jupyter', 'numpy', 'matplotlib', + 'matplotlib-inline', 'requests', 'pandas', 'gym'
getpelican__pelican-2630
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\n\nimport copy\nimport inspect\nimport locale\nimport logging\nimport os\nimport re\nfrom os.path import isabs\nfrom posixpath import join as posix_join\n\nimport six\n\nfrom pelican.log import LimitFilter\n\n\ntry:\n # spec_from_file_location is the recommended way in Python 3.5+\n import importlib.util\n\n def load_source(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\nexcept ImportError:\n # but it does not exist in Python 2.7, so fall back to imp\n import imp\n load_source = imp.load_source\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'themes', 'notmyidea')\nDEFAULT_CONFIG = {\n 'PATH': os.curdir,\n 'ARTICLE_PATHS': [''],\n 'ARTICLE_EXCLUDES': [],\n 'PAGE_PATHS': ['pages'],\n 'PAGE_EXCLUDES': [],\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output',\n 'READERS': {},\n 'STATIC_PATHS': ['images'],\n 'STATIC_EXCLUDES': [],\n 'STATIC_EXCLUDE_SOURCES': True,\n 'THEME_STATIC_DIR': 'theme',\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED_ALL_ATOM': posix_join('feeds', 'all.atom.xml'),\n 'CATEGORY_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'),\n 'AUTHOR_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'),\n 'AUTHOR_FEED_RSS': posix_join('feeds', '{slug}.rss.xml'),\n 'TRANSLATION_FEED_ATOM': posix_join('feeds', 'all-{lang}.atom.xml'),\n 'FEED_MAX_ITEMS': '',\n 'RSS_FEED_SUMMARY_ONLY': True,\n 'SITEURL': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'DISPLAY_CATEGORIES_ON_MENU': True,\n 'DOCUTILS_SETTINGS': {},\n 'OUTPUT_SOURCES': False,\n 'OUTPUT_SOURCES_EXTENSION': '.text',\n 'USE_FOLDER_AS_CATEGORY': True,\n 'DEFAULT_CATEGORY': 'misc',\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'NEWEST_FIRST_ARCHIVES': True,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'OUTPUT_RETENTION': [],\n 'INDEX_SAVE_AS': 'index.html',\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_ORDER_BY': 'reversed-date',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'DRAFT_URL': 'drafts/{slug}.html',\n 'DRAFT_SAVE_AS': posix_join('drafts', '{slug}.html'),\n 'DRAFT_LANG_URL': 'drafts/{slug}-{lang}.html',\n 'DRAFT_LANG_SAVE_AS': posix_join('drafts', '{slug}-{lang}.html'),\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': posix_join('pages', '{slug}.html'),\n 'PAGE_ORDER_BY': 'basename',\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': posix_join('pages', '{slug}-{lang}.html'),\n 'DRAFT_PAGE_URL': 'drafts/pages/{slug}.html',\n 'DRAFT_PAGE_SAVE_AS': posix_join('drafts', 'pages', '{slug}.html'),\n 'DRAFT_PAGE_LANG_URL': 'drafts/pages/{slug}-{lang}.html',\n 'DRAFT_PAGE_LANG_SAVE_AS': posix_join('drafts', 'pages',\n '{slug}-{lang}.html'),\n 'STATIC_URL': '{path}',\n 'STATIC_SAVE_AS': '{path}',\n 'STATIC_CREATE_LINKS': False,\n 'STATIC_CHECK_IF_MODIFIED': False,\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': posix_join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': posix_join('tag', '{slug}.html'),\n 'AUTHOR_URL': 'author/{slug}.html',\n 'AUTHOR_SAVE_AS': posix_join('author', '{slug}.html'),\n 'PAGINATION_PATTERNS': [\n (1, '{name}{extension}', '{name}{extension}'),\n (2, '{name}{number}{extension}', '{name}{number}{extension}'),\n ],\n 
'YEAR_ARCHIVE_URL': '',\n 'YEAR_ARCHIVE_SAVE_AS': '',\n 'MONTH_ARCHIVE_URL': '',\n 'MONTH_ARCHIVE_SAVE_AS': '',\n 'DAY_ARCHIVE_URL': '',\n 'DAY_ARCHIVE_SAVE_AS': '',\n 'RELATIVE_URLS': False,\n 'DEFAULT_LANG': 'en',\n 'ARTICLE_TRANSLATION_ID': 'slug',\n 'PAGE_TRANSLATION_ID': 'slug',\n 'DIRECT_TEMPLATES': ['index', 'tags', 'categories', 'authors', 'archives'],\n 'THEME_TEMPLATES_OVERRIDES': [],\n 'PAGINATED_TEMPLATES': {'index': None, 'tag': None, 'category': None,\n 'author': None},\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'MARKDOWN': {\n 'extension_configs': {\n 'markdown.extensions.codehilite': {'css_class': 'highlight'},\n 'markdown.extensions.extra': {},\n 'markdown.extensions.meta': {},\n },\n 'output_format': 'html5',\n },\n 'JINJA_FILTERS': {},\n 'JINJA_ENVIRONMENT': {\n 'trim_blocks': True,\n 'lstrip_blocks': True,\n 'extensions': [],\n },\n 'LOG_FILTER': [],\n 'LOCALE': [''], # defaults to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': {},\n 'FILENAME_METADATA': r'(?P<date>\\d{4}-\\d{2}-\\d{2}).*',\n 'PATH_METADATA': '',\n 'EXTRA_PATH_METADATA': {},\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n 'TYPOGRIFY_IGNORE_TAGS': [],\n 'SUMMARY_MAX_LENGTH': 50,\n 'PLUGIN_PATHS': [],\n 'PLUGINS': [],\n 'PYGMENTS_RST_OPTIONS': {},\n 'TEMPLATE_PAGES': {},\n 'TEMPLATE_EXTENSIONS': ['.html'],\n 'IGNORE_FILES': ['.#*'],\n 'SLUG_REGEX_SUBSTITUTIONS': [\n (r'[^\\w\\s-]', ''), # remove non-alphabetical/whitespace/'-' chars\n (r'(?u)\\A\\s*', ''), # strip leading whitespace\n (r'(?u)\\s*\\Z', ''), # strip trailing whitespace\n (r'[-\\s]+', '-'), # reduce multiple whitespace or '-' to single '-'\n ],\n 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',\n 'SLUGIFY_SOURCE': 'title',\n 'CACHE_CONTENT': False,\n 'CONTENT_CACHING_LAYER': 'reader',\n 'CACHE_PATH': 'cache',\n 'GZIP_CACHE': True,\n 'CHECK_MODIFIED_METHOD': 'mtime',\n 'LOAD_CONTENT_CACHE': False,\n 'WRITE_SELECTED': [],\n 'FORMATTED_FIELDS': ['summary'],\n 'PORT': 8000,\n 'BIND': '',\n}\n\nPYGMENTS_RST_OPTIONS = None\n\n\ndef read_settings(path=None, override=None):\n settings = override or {}\n\n if path:\n settings = dict(get_settings_from_file(path), **settings)\n\n if settings:\n settings = handle_deprecated_settings(settings)\n\n if path:\n # Make relative paths absolute\n def getabs(maybe_relative, base_path=path):\n if isabs(maybe_relative):\n return maybe_relative\n return os.path.abspath(os.path.normpath(os.path.join(\n os.path.dirname(base_path), maybe_relative)))\n\n for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'CACHE_PATH']:\n if settings.get(p) is not None:\n absp = getabs(settings[p])\n # THEME may be a name rather than a path\n if p != 'THEME' or os.path.exists(absp):\n settings[p] = absp\n\n if settings.get('PLUGIN_PATHS') is not None:\n settings['PLUGIN_PATHS'] = [getabs(pluginpath)\n for pluginpath\n in settings['PLUGIN_PATHS']]\n\n settings = dict(copy.deepcopy(DEFAULT_CONFIG), **settings)\n settings = configure_settings(settings)\n\n # This is because there doesn't seem to be a way to pass extra\n # parameters to docutils directive handlers, so we have to have a\n # variable here that we'll import from within Pygments.run (see\n # rstdirectives.py) to see what the user defaults were.\n global PYGMENTS_RST_OPTIONS\n PYGMENTS_RST_OPTIONS = settings.get('PYGMENTS_RST_OPTIONS', None)\n return settings\n\n\ndef get_settings_from_module(module=None):\n \"\"\"Loads settings from a module, returns a dictionary.\"\"\"\n\n 
context = {}\n if module is not None:\n context.update(\n (k, v) for k, v in inspect.getmembers(module) if k.isupper())\n return context\n\n\ndef get_settings_from_file(path):\n \"\"\"Loads settings from a file path, returning a dict.\"\"\"\n\n name, ext = os.path.splitext(os.path.basename(path))\n module = load_source(name, path)\n return get_settings_from_module(module)\n\n\ndef get_jinja_environment(settings):\n \"\"\"Sets the environment for Jinja\"\"\"\n\n jinja_env = settings.setdefault('JINJA_ENVIRONMENT',\n DEFAULT_CONFIG['JINJA_ENVIRONMENT'])\n\n # Make sure we include the defaults if the user has set env variables\n for key, value in DEFAULT_CONFIG['JINJA_ENVIRONMENT'].items():\n if key not in jinja_env:\n jinja_env[key] = value\n\n return settings\n\n\ndef _printf_s_to_format_field(printf_string, format_field):\n \"\"\"Tries to replace %s with {format_field} in the provided printf_string.\n Raises ValueError in case of failure.\n \"\"\"\n TEST_STRING = 'PELICAN_PRINTF_S_DEPRECATION'\n expected = printf_string % TEST_STRING\n\n result = printf_string.replace('{', '{{').replace('}', '}}') \\\n % '{{{}}}'.format(format_field)\n if result.format(**{format_field: TEST_STRING}) != expected:\n raise ValueError('Failed to safely replace %s with {{{}}}'.format(\n format_field))\n\n return result\n\n\ndef handle_deprecated_settings(settings):\n \"\"\"Converts deprecated settings and issues warnings. Issues an exception\n if both old and new setting is specified.\n \"\"\"\n\n # PLUGIN_PATH -> PLUGIN_PATHS\n if 'PLUGIN_PATH' in settings:\n logger.warning('PLUGIN_PATH setting has been replaced by '\n 'PLUGIN_PATHS, moving it to the new setting name.')\n settings['PLUGIN_PATHS'] = settings['PLUGIN_PATH']\n del settings['PLUGIN_PATH']\n\n # PLUGIN_PATHS: str -> [str]\n if isinstance(settings.get('PLUGIN_PATHS'), six.string_types):\n logger.warning(\"Defining PLUGIN_PATHS setting as string \"\n \"has been deprecated (should be a list)\")\n settings['PLUGIN_PATHS'] = [settings['PLUGIN_PATHS']]\n\n # JINJA_EXTENSIONS -> JINJA_ENVIRONMENT > extensions\n if 'JINJA_EXTENSIONS' in settings:\n logger.warning('JINJA_EXTENSIONS setting has been deprecated, '\n 'moving it to JINJA_ENVIRONMENT setting.')\n settings['JINJA_ENVIRONMENT']['extensions'] = \\\n settings['JINJA_EXTENSIONS']\n del settings['JINJA_EXTENSIONS']\n\n # {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS\n for key in ['ARTICLE', 'PAGE']:\n old_key = key + '_DIR'\n new_key = key + '_PATHS'\n if old_key in settings:\n logger.warning(\n 'Deprecated setting %s, moving it to %s list',\n old_key, new_key)\n settings[new_key] = [settings[old_key]] # also make a list\n del settings[old_key]\n\n # EXTRA_TEMPLATES_PATHS -> THEME_TEMPLATES_OVERRIDES\n if 'EXTRA_TEMPLATES_PATHS' in settings:\n logger.warning('EXTRA_TEMPLATES_PATHS is deprecated use '\n 'THEME_TEMPLATES_OVERRIDES instead.')\n if ('THEME_TEMPLATES_OVERRIDES' in settings and\n settings['THEME_TEMPLATES_OVERRIDES']):\n raise Exception(\n 'Setting both EXTRA_TEMPLATES_PATHS and '\n 'THEME_TEMPLATES_OVERRIDES is not permitted. Please move to '\n 'only setting THEME_TEMPLATES_OVERRIDES.')\n settings['THEME_TEMPLATES_OVERRIDES'] = \\\n settings['EXTRA_TEMPLATES_PATHS']\n del settings['EXTRA_TEMPLATES_PATHS']\n\n # MD_EXTENSIONS -> MARKDOWN\n if 'MD_EXTENSIONS' in settings:\n logger.warning('MD_EXTENSIONS is deprecated use MARKDOWN '\n 'instead. 
Falling back to the default.')\n settings['MARKDOWN'] = DEFAULT_CONFIG['MARKDOWN']\n\n # LESS_GENERATOR -> Webassets plugin\n # FILES_TO_COPY -> STATIC_PATHS, EXTRA_PATH_METADATA\n for old, new, doc in [\n ('LESS_GENERATOR', 'the Webassets plugin', None),\n ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',\n 'https://github.com/getpelican/pelican/'\n 'blob/master/docs/settings.rst#path-metadata'),\n ]:\n if old in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n old, new)\n if doc:\n message += ', see {} for details'.format(doc)\n logger.warning(message)\n\n # PAGINATED_DIRECT_TEMPLATES -> PAGINATED_TEMPLATES\n if 'PAGINATED_DIRECT_TEMPLATES' in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n 'PAGINATED_DIRECT_TEMPLATES', 'PAGINATED_TEMPLATES')\n logger.warning(message)\n\n # set PAGINATED_TEMPLATES\n if 'PAGINATED_TEMPLATES' not in settings:\n settings['PAGINATED_TEMPLATES'] = {\n 'tag': None, 'category': None, 'author': None}\n\n for t in settings['PAGINATED_DIRECT_TEMPLATES']:\n if t not in settings['PAGINATED_TEMPLATES']:\n settings['PAGINATED_TEMPLATES'][t] = None\n del settings['PAGINATED_DIRECT_TEMPLATES']\n\n # {SLUG,CATEGORY,TAG,AUTHOR}_SUBSTITUTIONS ->\n # {SLUG,CATEGORY,TAG,AUTHOR}_REGEX_SUBSTITUTIONS\n url_settings_url = \\\n 'http://docs.getpelican.com/en/latest/settings.html#url-settings'\n flavours = {'SLUG', 'CATEGORY', 'TAG', 'AUTHOR'}\n old_values = {f: settings[f + '_SUBSTITUTIONS']\n for f in flavours if f + '_SUBSTITUTIONS' in settings}\n new_values = {f: settings[f + '_REGEX_SUBSTITUTIONS']\n for f in flavours if f + '_REGEX_SUBSTITUTIONS' in settings}\n if old_values and new_values:\n raise Exception(\n 'Setting both {new_key} and {old_key} (or variants thereof) is '\n 'not permitted. Please move to only setting {new_key}.'\n .format(old_key='SLUG_SUBSTITUTIONS',\n new_key='SLUG_REGEX_SUBSTITUTIONS'))\n if old_values:\n message = ('{} and variants thereof are deprecated and will be '\n 'removed in the future. Please use {} and variants thereof '\n 'instead. Check {}.'\n .format('SLUG_SUBSTITUTIONS', 'SLUG_REGEX_SUBSTITUTIONS',\n url_settings_url))\n logger.warning(message)\n if old_values.get('SLUG'):\n for f in {'CATEGORY', 'TAG'}:\n if old_values.get(f):\n old_values[f] = old_values['SLUG'] + old_values[f]\n old_values['AUTHOR'] = old_values.get('AUTHOR', [])\n for f in flavours:\n if old_values.get(f) is not None:\n regex_subs = []\n # by default will replace non-alphanum characters\n replace = True\n for tpl in old_values[f]:\n try:\n src, dst, skip = tpl\n if skip:\n replace = False\n except ValueError:\n src, dst = tpl\n regex_subs.append(\n (re.escape(src), dst.replace('\\\\', r'\\\\')))\n\n if replace:\n regex_subs += [\n (r'[^\\w\\s-]', ''),\n (r'(?u)\\A\\s*', ''),\n (r'(?u)\\s*\\Z', ''),\n (r'[-\\s]+', '-'),\n ]\n else:\n regex_subs += [\n (r'(?u)\\A\\s*', ''),\n (r'(?u)\\s*\\Z', ''),\n ]\n settings[f + '_REGEX_SUBSTITUTIONS'] = regex_subs\n settings.pop(f + '_SUBSTITUTIONS', None)\n\n # `%s` -> '{slug}` or `{lang}` in FEED settings\n for key in ['TRANSLATION_FEED_ATOM',\n 'TRANSLATION_FEED_RSS'\n ]:\n if settings.get(key) and '%s' in settings[key]:\n logger.warning('%%s usage in %s is deprecated, use {lang} '\n 'instead.', key)\n try:\n settings[key] = _printf_s_to_format_field(\n settings[key], 'lang')\n except ValueError:\n logger.warning('Failed to convert %%s to {lang} for %s. 
'\n 'Falling back to default.', key)\n settings[key] = DEFAULT_CONFIG[key]\n for key in ['AUTHOR_FEED_ATOM',\n 'AUTHOR_FEED_RSS',\n 'CATEGORY_FEED_ATOM',\n 'CATEGORY_FEED_RSS',\n 'TAG_FEED_ATOM',\n 'TAG_FEED_RSS',\n ]:\n if settings.get(key) and '%s' in settings[key]:\n logger.warning('%%s usage in %s is deprecated, use {slug} '\n 'instead.', key)\n try:\n settings[key] = _printf_s_to_format_field(\n settings[key], 'slug')\n except ValueError:\n logger.warning('Failed to convert %%s to {slug} for %s. '\n 'Falling back to default.', key)\n settings[key] = DEFAULT_CONFIG[key]\n\n return settings\n\n\ndef configure_settings(settings):\n \"\"\"Provide optimizations, error checking, and warnings for the given\n settings.\n Also, specify the log messages to be ignored.\n \"\"\"\n if 'PATH' not in settings or not os.path.isdir(settings['PATH']):\n raise Exception('You need to specify a path containing the content'\n ' (see pelican --help for more information)')\n\n # specify the log messages to be ignored\n log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER'])\n LimitFilter._ignore.update(set(log_filter))\n\n # lookup the theme in \"pelican/themes\" if the given one doesn't exist\n if not os.path.isdir(settings['THEME']):\n theme_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'themes',\n settings['THEME'])\n if os.path.exists(theme_path):\n settings['THEME'] = theme_path\n else:\n raise Exception(\"Could not find the theme %s\"\n % settings['THEME'])\n\n # make paths selected for writing absolute if necessary\n settings['WRITE_SELECTED'] = [\n os.path.abspath(path) for path in\n settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])\n ]\n\n # standardize strings to lowercase strings\n for key in ['DEFAULT_LANG']:\n if key in settings:\n settings[key] = settings[key].lower()\n\n # set defaults for Jinja environment\n settings = get_jinja_environment(settings)\n\n # standardize strings to lists\n for key in ['LOCALE']:\n if key in settings and isinstance(settings[key], six.string_types):\n settings[key] = [settings[key]]\n\n # check settings that must be a particular type\n for key, types in [\n ('OUTPUT_SOURCES_EXTENSION', six.string_types),\n ('FILENAME_METADATA', six.string_types),\n ]:\n if key in settings and not isinstance(settings[key], types):\n value = settings.pop(key)\n logger.warn(\n 'Detected misconfigured %s (%s), '\n 'falling back to the default (%s)',\n key, value, DEFAULT_CONFIG[key])\n\n # try to set the different locales, fallback on the default.\n locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, str(locale_))\n break # break if it is successful\n except locale.Error:\n pass\n else:\n logger.warning(\n \"Locale could not be set. 
Check the LOCALE setting, ensuring it \"\n \"is valid and available on your system.\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl.endswith('/')):\n settings['SITEURL'] = siteurl[:-1]\n logger.warning(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't,\n # set FEED_DOMAIN to SITEURL\n if 'FEED_DOMAIN' not in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # check content caching layer and warn of incompatibilities\n if settings.get('CACHE_CONTENT', False) and \\\n settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \\\n settings.get('WITH_FUTURE_DATES', False):\n logger.warning(\n \"WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER \"\n \"set to 'generator', use 'reader' layer instead\")\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n feed_keys = [\n 'FEED_ATOM', 'FEED_RSS',\n 'FEED_ALL_ATOM', 'FEED_ALL_RSS',\n 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',\n 'AUTHOR_FEED_ATOM', 'AUTHOR_FEED_RSS',\n 'TAG_FEED_ATOM', 'TAG_FEED_RSS',\n 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',\n ]\n\n if any(settings.get(k) for k in feed_keys):\n if not settings.get('SITEURL'):\n logger.warning('Feeds generated without SITEURL set properly may'\n ' not be valid')\n\n if 'TIMEZONE' not in settings:\n logger.warning(\n 'No timezone information specified in the settings. Assuming'\n ' your timezone is UTC for feed generation. Check '\n 'http://docs.getpelican.com/en/latest/settings.html#timezone '\n 'for more information')\n\n # fix up pagination rules\n from pelican.paginator import PaginationRule\n pagination_rules = [\n PaginationRule(*r) for r in settings.get(\n 'PAGINATION_PATTERNS',\n DEFAULT_CONFIG['PAGINATION_PATTERNS'],\n )\n ]\n settings['PAGINATION_PATTERNS'] = sorted(\n pagination_rules,\n key=lambda r: r[0],\n )\n\n # Save people from accidentally setting a string rather than a list\n path_keys = (\n 'ARTICLE_EXCLUDES',\n 'DEFAULT_METADATA',\n 'DIRECT_TEMPLATES',\n 'THEME_TEMPLATES_OVERRIDES',\n 'FILES_TO_COPY',\n 'IGNORE_FILES',\n 'PAGINATED_DIRECT_TEMPLATES',\n 'PLUGINS',\n 'STATIC_EXCLUDES',\n 'STATIC_PATHS',\n 'THEME_STATIC_PATHS',\n 'ARTICLE_PATHS',\n 'PAGE_PATHS',\n )\n for PATH_KEY in filter(lambda k: k in settings, path_keys):\n if isinstance(settings[PATH_KEY], six.string_types):\n logger.warning(\"Detected misconfiguration with %s setting \"\n \"(must be a list), falling back to the default\",\n PATH_KEY)\n settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]\n\n # Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES\n mutually_exclusive = ('ARTICLE', 'PAGE')\n for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]:\n try:\n includes = settings[type_1 + '_PATHS']\n excludes = settings[type_2 + '_EXCLUDES']\n for path in includes:\n if path not in excludes:\n excludes.append(path)\n except KeyError:\n continue # setting not specified, nothing to do\n\n return settings\n", "path": "pelican/settings.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\n\nimport copy\nimport inspect\nimport locale\nimport logging\nimport os\nimport re\nfrom os.path import isabs\nfrom posixpath import join as posix_join\n\nimport six\n\nfrom pelican.log import LimitFilter\n\n\ntry:\n # spec_from_file_location is the recommended way in Python 3.5+\n import importlib.util\n\n def load_source(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\nexcept ImportError:\n # but it does not exist in Python 2.7, so fall back to imp\n import imp\n load_source = imp.load_source\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'themes', 'notmyidea')\nDEFAULT_CONFIG = {\n 'PATH': os.curdir,\n 'ARTICLE_PATHS': [''],\n 'ARTICLE_EXCLUDES': [],\n 'PAGE_PATHS': ['pages'],\n 'PAGE_EXCLUDES': [],\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output',\n 'READERS': {},\n 'STATIC_PATHS': ['images'],\n 'STATIC_EXCLUDES': [],\n 'STATIC_EXCLUDE_SOURCES': True,\n 'THEME_STATIC_DIR': 'theme',\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED_ALL_ATOM': posix_join('feeds', 'all.atom.xml'),\n 'CATEGORY_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'),\n 'AUTHOR_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'),\n 'AUTHOR_FEED_RSS': posix_join('feeds', '{slug}.rss.xml'),\n 'TRANSLATION_FEED_ATOM': posix_join('feeds', 'all-{lang}.atom.xml'),\n 'FEED_MAX_ITEMS': '',\n 'RSS_FEED_SUMMARY_ONLY': True,\n 'SITEURL': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'DISPLAY_CATEGORIES_ON_MENU': True,\n 'DOCUTILS_SETTINGS': {},\n 'OUTPUT_SOURCES': False,\n 'OUTPUT_SOURCES_EXTENSION': '.text',\n 'USE_FOLDER_AS_CATEGORY': True,\n 'DEFAULT_CATEGORY': 'misc',\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'NEWEST_FIRST_ARCHIVES': True,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'OUTPUT_RETENTION': [],\n 'INDEX_SAVE_AS': 'index.html',\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_ORDER_BY': 'reversed-date',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'DRAFT_URL': 'drafts/{slug}.html',\n 'DRAFT_SAVE_AS': posix_join('drafts', '{slug}.html'),\n 'DRAFT_LANG_URL': 'drafts/{slug}-{lang}.html',\n 'DRAFT_LANG_SAVE_AS': posix_join('drafts', '{slug}-{lang}.html'),\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': posix_join('pages', '{slug}.html'),\n 'PAGE_ORDER_BY': 'basename',\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': posix_join('pages', '{slug}-{lang}.html'),\n 'DRAFT_PAGE_URL': 'drafts/pages/{slug}.html',\n 'DRAFT_PAGE_SAVE_AS': posix_join('drafts', 'pages', '{slug}.html'),\n 'DRAFT_PAGE_LANG_URL': 'drafts/pages/{slug}-{lang}.html',\n 'DRAFT_PAGE_LANG_SAVE_AS': posix_join('drafts', 'pages',\n '{slug}-{lang}.html'),\n 'STATIC_URL': '{path}',\n 'STATIC_SAVE_AS': '{path}',\n 'STATIC_CREATE_LINKS': False,\n 'STATIC_CHECK_IF_MODIFIED': False,\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': posix_join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': posix_join('tag', '{slug}.html'),\n 'AUTHOR_URL': 'author/{slug}.html',\n 'AUTHOR_SAVE_AS': posix_join('author', '{slug}.html'),\n 'PAGINATION_PATTERNS': [\n (1, '{name}{extension}', '{name}{extension}'),\n (2, '{name}{number}{extension}', '{name}{number}{extension}'),\n ],\n 
'YEAR_ARCHIVE_URL': '',\n 'YEAR_ARCHIVE_SAVE_AS': '',\n 'MONTH_ARCHIVE_URL': '',\n 'MONTH_ARCHIVE_SAVE_AS': '',\n 'DAY_ARCHIVE_URL': '',\n 'DAY_ARCHIVE_SAVE_AS': '',\n 'RELATIVE_URLS': False,\n 'DEFAULT_LANG': 'en',\n 'ARTICLE_TRANSLATION_ID': 'slug',\n 'PAGE_TRANSLATION_ID': 'slug',\n 'DIRECT_TEMPLATES': ['index', 'tags', 'categories', 'authors', 'archives'],\n 'THEME_TEMPLATES_OVERRIDES': [],\n 'PAGINATED_TEMPLATES': {'index': None, 'tag': None, 'category': None,\n 'author': None},\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'MARKDOWN': {\n 'extension_configs': {\n 'markdown.extensions.codehilite': {'css_class': 'highlight'},\n 'markdown.extensions.extra': {},\n 'markdown.extensions.meta': {},\n },\n 'output_format': 'html5',\n },\n 'JINJA_FILTERS': {},\n 'JINJA_ENVIRONMENT': {\n 'trim_blocks': True,\n 'lstrip_blocks': True,\n 'extensions': [],\n },\n 'LOG_FILTER': [],\n 'LOCALE': [''], # defaults to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': {},\n 'FILENAME_METADATA': r'(?P<date>\\d{4}-\\d{2}-\\d{2}).*',\n 'PATH_METADATA': '',\n 'EXTRA_PATH_METADATA': {},\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n 'TYPOGRIFY_IGNORE_TAGS': [],\n 'SUMMARY_MAX_LENGTH': 50,\n 'PLUGIN_PATHS': [],\n 'PLUGINS': [],\n 'PYGMENTS_RST_OPTIONS': {},\n 'TEMPLATE_PAGES': {},\n 'TEMPLATE_EXTENSIONS': ['.html'],\n 'IGNORE_FILES': ['.#*'],\n 'SLUG_REGEX_SUBSTITUTIONS': [\n (r'[^\\w\\s-]', ''), # remove non-alphabetical/whitespace/'-' chars\n (r'(?u)\\A\\s*', ''), # strip leading whitespace\n (r'(?u)\\s*\\Z', ''), # strip trailing whitespace\n (r'[-\\s]+', '-'), # reduce multiple whitespace or '-' to single '-'\n ],\n 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',\n 'SLUGIFY_SOURCE': 'title',\n 'CACHE_CONTENT': False,\n 'CONTENT_CACHING_LAYER': 'reader',\n 'CACHE_PATH': 'cache',\n 'GZIP_CACHE': True,\n 'CHECK_MODIFIED_METHOD': 'mtime',\n 'LOAD_CONTENT_CACHE': False,\n 'WRITE_SELECTED': [],\n 'FORMATTED_FIELDS': ['summary'],\n 'PORT': 8000,\n 'BIND': '127.0.0.1',\n}\n\nPYGMENTS_RST_OPTIONS = None\n\n\ndef read_settings(path=None, override=None):\n settings = override or {}\n\n if path:\n settings = dict(get_settings_from_file(path), **settings)\n\n if settings:\n settings = handle_deprecated_settings(settings)\n\n if path:\n # Make relative paths absolute\n def getabs(maybe_relative, base_path=path):\n if isabs(maybe_relative):\n return maybe_relative\n return os.path.abspath(os.path.normpath(os.path.join(\n os.path.dirname(base_path), maybe_relative)))\n\n for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'CACHE_PATH']:\n if settings.get(p) is not None:\n absp = getabs(settings[p])\n # THEME may be a name rather than a path\n if p != 'THEME' or os.path.exists(absp):\n settings[p] = absp\n\n if settings.get('PLUGIN_PATHS') is not None:\n settings['PLUGIN_PATHS'] = [getabs(pluginpath)\n for pluginpath\n in settings['PLUGIN_PATHS']]\n\n settings = dict(copy.deepcopy(DEFAULT_CONFIG), **settings)\n settings = configure_settings(settings)\n\n # This is because there doesn't seem to be a way to pass extra\n # parameters to docutils directive handlers, so we have to have a\n # variable here that we'll import from within Pygments.run (see\n # rstdirectives.py) to see what the user defaults were.\n global PYGMENTS_RST_OPTIONS\n PYGMENTS_RST_OPTIONS = settings.get('PYGMENTS_RST_OPTIONS', None)\n return settings\n\n\ndef get_settings_from_module(module=None):\n \"\"\"Loads settings from a module, returns a 
dictionary.\"\"\"\n\n context = {}\n if module is not None:\n context.update(\n (k, v) for k, v in inspect.getmembers(module) if k.isupper())\n return context\n\n\ndef get_settings_from_file(path):\n \"\"\"Loads settings from a file path, returning a dict.\"\"\"\n\n name, ext = os.path.splitext(os.path.basename(path))\n module = load_source(name, path)\n return get_settings_from_module(module)\n\n\ndef get_jinja_environment(settings):\n \"\"\"Sets the environment for Jinja\"\"\"\n\n jinja_env = settings.setdefault('JINJA_ENVIRONMENT',\n DEFAULT_CONFIG['JINJA_ENVIRONMENT'])\n\n # Make sure we include the defaults if the user has set env variables\n for key, value in DEFAULT_CONFIG['JINJA_ENVIRONMENT'].items():\n if key not in jinja_env:\n jinja_env[key] = value\n\n return settings\n\n\ndef _printf_s_to_format_field(printf_string, format_field):\n \"\"\"Tries to replace %s with {format_field} in the provided printf_string.\n Raises ValueError in case of failure.\n \"\"\"\n TEST_STRING = 'PELICAN_PRINTF_S_DEPRECATION'\n expected = printf_string % TEST_STRING\n\n result = printf_string.replace('{', '{{').replace('}', '}}') \\\n % '{{{}}}'.format(format_field)\n if result.format(**{format_field: TEST_STRING}) != expected:\n raise ValueError('Failed to safely replace %s with {{{}}}'.format(\n format_field))\n\n return result\n\n\ndef handle_deprecated_settings(settings):\n \"\"\"Converts deprecated settings and issues warnings. Issues an exception\n if both old and new setting is specified.\n \"\"\"\n\n # PLUGIN_PATH -> PLUGIN_PATHS\n if 'PLUGIN_PATH' in settings:\n logger.warning('PLUGIN_PATH setting has been replaced by '\n 'PLUGIN_PATHS, moving it to the new setting name.')\n settings['PLUGIN_PATHS'] = settings['PLUGIN_PATH']\n del settings['PLUGIN_PATH']\n\n # PLUGIN_PATHS: str -> [str]\n if isinstance(settings.get('PLUGIN_PATHS'), six.string_types):\n logger.warning(\"Defining PLUGIN_PATHS setting as string \"\n \"has been deprecated (should be a list)\")\n settings['PLUGIN_PATHS'] = [settings['PLUGIN_PATHS']]\n\n # JINJA_EXTENSIONS -> JINJA_ENVIRONMENT > extensions\n if 'JINJA_EXTENSIONS' in settings:\n logger.warning('JINJA_EXTENSIONS setting has been deprecated, '\n 'moving it to JINJA_ENVIRONMENT setting.')\n settings['JINJA_ENVIRONMENT']['extensions'] = \\\n settings['JINJA_EXTENSIONS']\n del settings['JINJA_EXTENSIONS']\n\n # {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS\n for key in ['ARTICLE', 'PAGE']:\n old_key = key + '_DIR'\n new_key = key + '_PATHS'\n if old_key in settings:\n logger.warning(\n 'Deprecated setting %s, moving it to %s list',\n old_key, new_key)\n settings[new_key] = [settings[old_key]] # also make a list\n del settings[old_key]\n\n # EXTRA_TEMPLATES_PATHS -> THEME_TEMPLATES_OVERRIDES\n if 'EXTRA_TEMPLATES_PATHS' in settings:\n logger.warning('EXTRA_TEMPLATES_PATHS is deprecated use '\n 'THEME_TEMPLATES_OVERRIDES instead.')\n if ('THEME_TEMPLATES_OVERRIDES' in settings and\n settings['THEME_TEMPLATES_OVERRIDES']):\n raise Exception(\n 'Setting both EXTRA_TEMPLATES_PATHS and '\n 'THEME_TEMPLATES_OVERRIDES is not permitted. Please move to '\n 'only setting THEME_TEMPLATES_OVERRIDES.')\n settings['THEME_TEMPLATES_OVERRIDES'] = \\\n settings['EXTRA_TEMPLATES_PATHS']\n del settings['EXTRA_TEMPLATES_PATHS']\n\n # MD_EXTENSIONS -> MARKDOWN\n if 'MD_EXTENSIONS' in settings:\n logger.warning('MD_EXTENSIONS is deprecated use MARKDOWN '\n 'instead. 
Falling back to the default.')\n settings['MARKDOWN'] = DEFAULT_CONFIG['MARKDOWN']\n\n # LESS_GENERATOR -> Webassets plugin\n # FILES_TO_COPY -> STATIC_PATHS, EXTRA_PATH_METADATA\n for old, new, doc in [\n ('LESS_GENERATOR', 'the Webassets plugin', None),\n ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',\n 'https://github.com/getpelican/pelican/'\n 'blob/master/docs/settings.rst#path-metadata'),\n ]:\n if old in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n old, new)\n if doc:\n message += ', see {} for details'.format(doc)\n logger.warning(message)\n\n # PAGINATED_DIRECT_TEMPLATES -> PAGINATED_TEMPLATES\n if 'PAGINATED_DIRECT_TEMPLATES' in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n 'PAGINATED_DIRECT_TEMPLATES', 'PAGINATED_TEMPLATES')\n logger.warning(message)\n\n # set PAGINATED_TEMPLATES\n if 'PAGINATED_TEMPLATES' not in settings:\n settings['PAGINATED_TEMPLATES'] = {\n 'tag': None, 'category': None, 'author': None}\n\n for t in settings['PAGINATED_DIRECT_TEMPLATES']:\n if t not in settings['PAGINATED_TEMPLATES']:\n settings['PAGINATED_TEMPLATES'][t] = None\n del settings['PAGINATED_DIRECT_TEMPLATES']\n\n # {SLUG,CATEGORY,TAG,AUTHOR}_SUBSTITUTIONS ->\n # {SLUG,CATEGORY,TAG,AUTHOR}_REGEX_SUBSTITUTIONS\n url_settings_url = \\\n 'http://docs.getpelican.com/en/latest/settings.html#url-settings'\n flavours = {'SLUG', 'CATEGORY', 'TAG', 'AUTHOR'}\n old_values = {f: settings[f + '_SUBSTITUTIONS']\n for f in flavours if f + '_SUBSTITUTIONS' in settings}\n new_values = {f: settings[f + '_REGEX_SUBSTITUTIONS']\n for f in flavours if f + '_REGEX_SUBSTITUTIONS' in settings}\n if old_values and new_values:\n raise Exception(\n 'Setting both {new_key} and {old_key} (or variants thereof) is '\n 'not permitted. Please move to only setting {new_key}.'\n .format(old_key='SLUG_SUBSTITUTIONS',\n new_key='SLUG_REGEX_SUBSTITUTIONS'))\n if old_values:\n message = ('{} and variants thereof are deprecated and will be '\n 'removed in the future. Please use {} and variants thereof '\n 'instead. Check {}.'\n .format('SLUG_SUBSTITUTIONS', 'SLUG_REGEX_SUBSTITUTIONS',\n url_settings_url))\n logger.warning(message)\n if old_values.get('SLUG'):\n for f in {'CATEGORY', 'TAG'}:\n if old_values.get(f):\n old_values[f] = old_values['SLUG'] + old_values[f]\n old_values['AUTHOR'] = old_values.get('AUTHOR', [])\n for f in flavours:\n if old_values.get(f) is not None:\n regex_subs = []\n # by default will replace non-alphanum characters\n replace = True\n for tpl in old_values[f]:\n try:\n src, dst, skip = tpl\n if skip:\n replace = False\n except ValueError:\n src, dst = tpl\n regex_subs.append(\n (re.escape(src), dst.replace('\\\\', r'\\\\')))\n\n if replace:\n regex_subs += [\n (r'[^\\w\\s-]', ''),\n (r'(?u)\\A\\s*', ''),\n (r'(?u)\\s*\\Z', ''),\n (r'[-\\s]+', '-'),\n ]\n else:\n regex_subs += [\n (r'(?u)\\A\\s*', ''),\n (r'(?u)\\s*\\Z', ''),\n ]\n settings[f + '_REGEX_SUBSTITUTIONS'] = regex_subs\n settings.pop(f + '_SUBSTITUTIONS', None)\n\n # `%s` -> '{slug}` or `{lang}` in FEED settings\n for key in ['TRANSLATION_FEED_ATOM',\n 'TRANSLATION_FEED_RSS'\n ]:\n if settings.get(key) and '%s' in settings[key]:\n logger.warning('%%s usage in %s is deprecated, use {lang} '\n 'instead.', key)\n try:\n settings[key] = _printf_s_to_format_field(\n settings[key], 'lang')\n except ValueError:\n logger.warning('Failed to convert %%s to {lang} for %s. 
'\n 'Falling back to default.', key)\n settings[key] = DEFAULT_CONFIG[key]\n for key in ['AUTHOR_FEED_ATOM',\n 'AUTHOR_FEED_RSS',\n 'CATEGORY_FEED_ATOM',\n 'CATEGORY_FEED_RSS',\n 'TAG_FEED_ATOM',\n 'TAG_FEED_RSS',\n ]:\n if settings.get(key) and '%s' in settings[key]:\n logger.warning('%%s usage in %s is deprecated, use {slug} '\n 'instead.', key)\n try:\n settings[key] = _printf_s_to_format_field(\n settings[key], 'slug')\n except ValueError:\n logger.warning('Failed to convert %%s to {slug} for %s. '\n 'Falling back to default.', key)\n settings[key] = DEFAULT_CONFIG[key]\n\n return settings\n\n\ndef configure_settings(settings):\n \"\"\"Provide optimizations, error checking, and warnings for the given\n settings.\n Also, specify the log messages to be ignored.\n \"\"\"\n if 'PATH' not in settings or not os.path.isdir(settings['PATH']):\n raise Exception('You need to specify a path containing the content'\n ' (see pelican --help for more information)')\n\n # specify the log messages to be ignored\n log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER'])\n LimitFilter._ignore.update(set(log_filter))\n\n # lookup the theme in \"pelican/themes\" if the given one doesn't exist\n if not os.path.isdir(settings['THEME']):\n theme_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'themes',\n settings['THEME'])\n if os.path.exists(theme_path):\n settings['THEME'] = theme_path\n else:\n raise Exception(\"Could not find the theme %s\"\n % settings['THEME'])\n\n # make paths selected for writing absolute if necessary\n settings['WRITE_SELECTED'] = [\n os.path.abspath(path) for path in\n settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])\n ]\n\n # standardize strings to lowercase strings\n for key in ['DEFAULT_LANG']:\n if key in settings:\n settings[key] = settings[key].lower()\n\n # set defaults for Jinja environment\n settings = get_jinja_environment(settings)\n\n # standardize strings to lists\n for key in ['LOCALE']:\n if key in settings and isinstance(settings[key], six.string_types):\n settings[key] = [settings[key]]\n\n # check settings that must be a particular type\n for key, types in [\n ('OUTPUT_SOURCES_EXTENSION', six.string_types),\n ('FILENAME_METADATA', six.string_types),\n ]:\n if key in settings and not isinstance(settings[key], types):\n value = settings.pop(key)\n logger.warn(\n 'Detected misconfigured %s (%s), '\n 'falling back to the default (%s)',\n key, value, DEFAULT_CONFIG[key])\n\n # try to set the different locales, fallback on the default.\n locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, str(locale_))\n break # break if it is successful\n except locale.Error:\n pass\n else:\n logger.warning(\n \"Locale could not be set. 
Check the LOCALE setting, ensuring it \"\n \"is valid and available on your system.\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl.endswith('/')):\n settings['SITEURL'] = siteurl[:-1]\n logger.warning(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't,\n # set FEED_DOMAIN to SITEURL\n if 'FEED_DOMAIN' not in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # check content caching layer and warn of incompatibilities\n if settings.get('CACHE_CONTENT', False) and \\\n settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \\\n settings.get('WITH_FUTURE_DATES', False):\n logger.warning(\n \"WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER \"\n \"set to 'generator', use 'reader' layer instead\")\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n feed_keys = [\n 'FEED_ATOM', 'FEED_RSS',\n 'FEED_ALL_ATOM', 'FEED_ALL_RSS',\n 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',\n 'AUTHOR_FEED_ATOM', 'AUTHOR_FEED_RSS',\n 'TAG_FEED_ATOM', 'TAG_FEED_RSS',\n 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',\n ]\n\n if any(settings.get(k) for k in feed_keys):\n if not settings.get('SITEURL'):\n logger.warning('Feeds generated without SITEURL set properly may'\n ' not be valid')\n\n if 'TIMEZONE' not in settings:\n logger.warning(\n 'No timezone information specified in the settings. Assuming'\n ' your timezone is UTC for feed generation. Check '\n 'http://docs.getpelican.com/en/latest/settings.html#timezone '\n 'for more information')\n\n # fix up pagination rules\n from pelican.paginator import PaginationRule\n pagination_rules = [\n PaginationRule(*r) for r in settings.get(\n 'PAGINATION_PATTERNS',\n DEFAULT_CONFIG['PAGINATION_PATTERNS'],\n )\n ]\n settings['PAGINATION_PATTERNS'] = sorted(\n pagination_rules,\n key=lambda r: r[0],\n )\n\n # Save people from accidentally setting a string rather than a list\n path_keys = (\n 'ARTICLE_EXCLUDES',\n 'DEFAULT_METADATA',\n 'DIRECT_TEMPLATES',\n 'THEME_TEMPLATES_OVERRIDES',\n 'FILES_TO_COPY',\n 'IGNORE_FILES',\n 'PAGINATED_DIRECT_TEMPLATES',\n 'PLUGINS',\n 'STATIC_EXCLUDES',\n 'STATIC_PATHS',\n 'THEME_STATIC_PATHS',\n 'ARTICLE_PATHS',\n 'PAGE_PATHS',\n )\n for PATH_KEY in filter(lambda k: k in settings, path_keys):\n if isinstance(settings[PATH_KEY], six.string_types):\n logger.warning(\"Detected misconfiguration with %s setting \"\n \"(must be a list), falling back to the default\",\n PATH_KEY)\n settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]\n\n # Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES\n mutually_exclusive = ('ARTICLE', 'PAGE')\n for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]:\n try:\n includes = settings[type_1 + '_PATHS']\n excludes = settings[type_2 + '_EXCLUDES']\n for path in includes:\n if path not in excludes:\n excludes.append(path)\n except KeyError:\n continue # setting not specified, nothing to do\n\n return settings\n", "path": "pelican/settings.py" } ]
diff --git a/pelican/settings.py b/pelican/settings.py index 4aa31afef..d79f64fe0 100644 --- a/pelican/settings.py +++ b/pelican/settings.py @@ -170,7 +170,7 @@ def load_source(name, path): 'WRITE_SELECTED': [], 'FORMATTED_FIELDS': ['summary'], 'PORT': 8000, - 'BIND': '', + 'BIND': '127.0.0.1', } PYGMENTS_RST_OPTIONS = None
spacetelescope__jwql-474
[ { "content": "\"\"\"Various utility functions for the ``jwql`` project.\n\nAuthors\n-------\n\n - Matthew Bourque\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n\n >>> import utils\n settings = get_config()\n\nReferences\n----------\n\n Filename parser modified from Joe Hunkeler:\n https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff\n\n Various documentation related to JWST filename conventions:\n - https://jwst-docs.stsci.edu/display/JDAT/File+Naming+Conventions+and+Data+Products\n - https://innerspace.stsci.edu/pages/viewpage.action?pageId=94092600\n - https://innerspace.stsci.edu/pages/viewpage.action?spaceKey=SCSB&title=JWST+Science+Data+Products\n - https://jwst-docs.stsci.edu/display/JDAT/Understanding+Associations?q=association%20candidate\n - https://jwst-pipeline.readthedocs.io/en/stable/jwst/introduction.html#pipeline-step-suffix-definitions\n - JWST TR JWST-STScI-004800, SM-12\n \"\"\"\n\nimport datetime\nimport getpass\nimport json\nimport os\nimport re\nimport shutil\n\nimport jsonschema\n\nfrom jwql.utils import permissions\nfrom jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES_SHORTHAND\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\ndef copy_files(files, out_dir):\n \"\"\"Copy a given file to a given directory. Only try to copy the file\n if it is not already present in the output directory.\n\n Parameters\n ----------\n files : list\n List of files to be copied\n\n out_dir : str\n Destination directory\n\n Returns\n -------\n success : list\n Files successfully copied (or that already existed in out_dir)\n\n failed : list\n Files that were not copied\n \"\"\"\n\n # Copy files if they do not already exist\n success = []\n failed = []\n for input_file in files:\n input_new_path = os.path.join(out_dir, os.path.basename(input_file))\n if os.path.isfile(input_new_path):\n success.append(input_new_path)\n else:\n try:\n shutil.copy2(input_file, out_dir)\n success.append(input_new_path)\n permissions.set_permissions(input_new_path)\n except:\n failed.append(input_file)\n return success, failed\n\n\ndef download_mast_data(query_results, output_dir):\n \"\"\"Example function for downloading MAST query results. 
From MAST\n website (``https://mast.stsci.edu/api/v0/pyex.html``)\n\n Parameters\n ----------\n query_results : list\n List of dictionaries returned by a MAST query.\n\n output_dir : str\n Directory into which the files will be downlaoded\n \"\"\"\n\n # Set up the https connection\n server = 'mast.stsci.edu'\n conn = httplib.HTTPSConnection(server)\n\n # Dowload the products\n print('Number of query results: {}'.format(len(query_results)))\n\n for i in range(len(query_results)):\n\n # Make full output file path\n output_file = os.path.join(output_dir, query_results[i]['filename'])\n\n print('Output file is {}'.format(output_file))\n\n # Download the data\n uri = query_results[i]['dataURI']\n\n print('uri is {}'.format(uri))\n\n conn.request(\"GET\", \"/api/v0/download/file?uri=\" + uri)\n resp = conn.getresponse()\n file_content = resp.read()\n\n # Save to file\n with open(output_file, 'wb') as file_obj:\n file_obj.write(file_content)\n\n # Check for file\n if not os.path.isfile(output_file):\n print(\"ERROR: {} failed to download.\".format(output_file))\n else:\n statinfo = os.stat(output_file)\n if statinfo.st_size > 0:\n print(\"DOWNLOAD COMPLETE: \", output_file)\n else:\n print(\"ERROR: {} file is empty.\".format(output_file))\n conn.close()\n\n\ndef ensure_dir_exists(fullpath):\n \"\"\"Creates dirs from ``fullpath`` if they do not already exist.\"\"\"\n if not os.path.exists(fullpath):\n os.makedirs(fullpath)\n permissions.set_permissions(fullpath)\n\n\ndef filename_parser(filename):\n \"\"\"Return a dictionary that contains the properties of a given\n JWST file (e.g. program ID, visit number, detector, etc.).\n\n Parameters\n ----------\n filename : str\n Path or name of JWST file to parse\n\n Returns\n -------\n filename_dict : dict\n Collection of file properties\n\n Raises\n ------\n ValueError\n When the provided file does not follow naming conventions\n \"\"\"\n\n filename = os.path.basename(filename)\n file_root_name = (len(filename.split('.')) < 2)\n\n # Stage 1 and 2 filenames\n # e.g. \"jw80500012009_01101_00012_nrcalong_uncal.fits\"\n stage_1_and_2 = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"(?P<observation>\\d{3})\"\\\n r\"(?P<visit>\\d{3})\"\\\n r\"_(?P<visit_group>\\d{2})\"\\\n r\"(?P<parallel_seq_id>\\d{1})\"\\\n r\"(?P<activity>\\w{2})\"\\\n r\"_(?P<exposure_id>\\d+)\"\\\n r\"_(?P<detector>((?!_)[\\w])+)\"\n\n # Stage 2c outlier detection filenames\n # e.g. \"jw94015002002_02108_00001_mirimage_o002_crf.fits\"\n stage_2c = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\" \\\n r\"(?P<observation>\\d{3})\" \\\n r\"(?P<visit>\\d{3})\" \\\n r\"_(?P<visit_group>\\d{2})\" \\\n r\"(?P<parallel_seq_id>\\d{1})\" \\\n r\"(?P<activity>\\w{2})\" \\\n r\"_(?P<exposure_id>\\d+)\" \\\n r\"_(?P<detector>((?!_)[\\w])+)\"\\\n r\"_(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\n\n # Stage 3 filenames with target ID\n # e.g. \"jw80600-o009_t001_miri_f1130w_i2d.fits\"\n stage_3_target_id = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<target_id>(t)\\d{3})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with source ID\n # e.g. 
\"jw80600-o009_s00001_miri_f1130w_i2d.fits\"\n stage_3_source_id = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<source_id>(s)\\d{5})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with target ID and epoch\n # e.g. \"jw80600-o009_t001-epoch1_miri_f1130w_i2d.fits\"\n stage_3_target_id_epoch = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<target_id>(t)\\d{3})\"\\\n r\"-epoch(?P<epoch>\\d{1})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with source ID and epoch\n # e.g. \"jw80600-o009_s00001-epoch1_miri_f1130w_i2d.fits\"\n stage_3_source_id_epoch = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<source_id>(s)\\d{5})\"\\\n r\"-epoch(?P<epoch>\\d{1})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Time series filenames\n # e.g. \"jw00733003001_02101_00002-seg001_nrs1_rate.fits\"\n time_series = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"(?P<observation>\\d{3})\"\\\n r\"(?P<visit>\\d{3})\"\\\n r\"_(?P<visit_group>\\d{2})\"\\\n r\"(?P<parallel_seq_id>\\d{1})\"\\\n r\"(?P<activity>\\w{2})\"\\\n r\"_(?P<exposure_id>\\d+)\"\\\n r\"-seg(?P<segment>\\d{3})\"\\\n r\"_(?P<detector>\\w+)\"\n\n # Guider filenames\n # e.g. \"jw00729011001_gs-id_1_image_cal.fits\" or\n # \"jw00799003001_gs-acq1_2019154181705_stream.fits\"\n guider = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\" \\\n r\"(?P<observation>\\d{3})\" \\\n r\"(?P<visit>\\d{3})\" \\\n r\"_gs-(?P<guider_mode>(id|acq1|acq2|track|fg))\" \\\n r\"_((?P<date_time>\\d{13})|(?P<guide_star_attempt_id>\\d{1}))\"\n\n # Build list of filename types\n filename_types = [\n stage_1_and_2,\n stage_2c,\n stage_3_target_id,\n stage_3_source_id,\n stage_3_target_id_epoch,\n stage_3_source_id_epoch,\n time_series,\n guider]\n\n filename_type_names = [\n 'stage_1_and_2',\n 'stage_2c',\n 'stage_3_target_id',\n 'stage_3_source_id',\n 'stage_3_target_id_epoch',\n 'stage_3_source_id_epoch',\n 'time_series',\n 'guider'\n ]\n\n # Try to parse the filename\n for filename_type, filename_type_name in zip(filename_types, filename_type_names):\n\n # If full filename, try using suffix\n if not file_root_name:\n filename_type += r\"_(?P<suffix>{}).*\".format('|'.join(FILE_SUFFIX_TYPES))\n # If not, make sure the provided regex matches the entire filename root\n else:\n filename_type += r\"$\"\n\n elements = re.compile(filename_type)\n jwst_file = elements.match(filename)\n\n # Stop when you find a format that matches\n if jwst_file is not None:\n name_match = filename_type_name\n break\n\n try:\n # Convert the regex match to a dictionary\n filename_dict = jwst_file.groupdict()\n\n # Add the filename type to that dict\n filename_dict['filename_type'] = name_match\n\n # Also, add the instrument if not already there\n if 'instrument' not in filename_dict.keys():\n if name_match == 'guider':\n filename_dict['instrument'] = 'fgs'\n elif 'detector' in filename_dict.keys():\n filename_dict['instrument'] = JWST_INSTRUMENT_NAMES_SHORTHAND[\n filename_dict['detector'][:3]\n ]\n\n # Raise error if unable to parse the filename\n except AttributeError:\n jdox_url = 'https://jwst-docs.stsci.edu/display/JDAT/' \\\n 'File+Naming+Conventions+and+Data+Products'\n raise ValueError(\n 
'Provided file {} does not follow JWST naming conventions. '\n 'See {} for further information.'.format(filename, jdox_url)\n )\n\n return filename_dict\n\n\ndef filesystem_path(filename):\n \"\"\"Return the full path to a given file in the filesystem\n\n Parameters\n ----------\n filename : str\n File to locate (e.g. ``jw86600006001_02101_00008_guider1_cal.fits``)\n\n Returns\n -------\n full_path : str\n Full path to the given file, including filename\n \"\"\"\n\n filesystem_base = get_config()[\"filesystem\"]\n\n # Subdirectory name is based on the proposal ID\n subdir = 'jw{}'.format(filename_parser(filename)['program_id'])\n full_path = os.path.join(filesystem_base, subdir, filename)\n\n # Check to see if the file exists\n if os.path.isfile(full_path):\n return full_path\n else:\n raise FileNotFoundError(\n '{} is not in the predicted location: {}'.format(filename, full_path)\n )\n\n\ndef get_base_url():\n \"\"\"Return the beginning part of the URL to the ``jwql`` web app\n based on which user is running the software.\n\n If the admin account is running the code, the ``base_url`` is\n assumed to be the production URL. If not, the ``base_url`` is\n assumed to be local.\n\n Returns\n -------\n base_url : str\n The beginning part of the URL to the ``jwql`` web app\n \"\"\"\n\n username = getpass.getuser()\n if username == get_config()['admin_account']:\n base_url = 'https://dljwql.stsci.edu'\n else:\n base_url = 'http://127.0.0.1:8000'\n\n return base_url\n\n\ndef get_config():\n \"\"\"Return a dictionary that holds the contents of the ``jwql``\n config file.\n\n Returns\n -------\n settings : dict\n A dictionary that holds the contents of the config file.\n \"\"\"\n config_file_location = os.path.join(__location__, 'config.json')\n\n # Make sure the file exists\n if not os.path.isfile(config_file_location):\n raise FileNotFoundError('The JWQL package requires a configuration file (config.json) '\n 'to be placed within the jwql/utils directory. '\n 'This file is missing. Please read the relevant wiki page '\n '(https://github.com/spacetelescope/jwql/wiki/'\n 'Config-file) for more information.')\n\n with open(config_file_location, 'r') as config_file_object:\n try:\n # Load it with JSON\n settings = json.load(config_file_object)\n except json.JSONDecodeError as e:\n # Raise a more helpful error if there is a formatting problem\n raise ValueError('Incorrectly formatted config.json file. '\n 'Please fix JSON formatting: {}'.format(e))\n\n # Ensure the file has all the needed entries with expected data types\n _validate_config(settings)\n\n return settings\n\n\ndef check_config_for_key(key):\n \"\"\"Check that the config.json file contains the specified key\n and that the entry is not empty\n\n Parameters\n -------\n key : str\n The configuration file key to verify\n \"\"\"\n try:\n get_config()[key]\n except KeyError:\n raise KeyError(\n 'The key `{}` is not present in config.json. Please add it.'.format(key)\n + ' See the relevant wiki page (https://github.com/spacetelescope/'\n 'jwql/wiki/Config-file) for more information.'\n )\n\n if get_config()[key] == \"\":\n raise ValueError(\n 'Please complete the `{}` field in your config.json. 
'.format(key)\n + ' See the relevant wiki page (https://github.com/spacetelescope/'\n 'jwql/wiki/Config-file) for more information.'\n )\n\n\ndef _validate_config(config_file_dict):\n \"\"\"Check that the config.json file contains all the needed entries with\n expected data types\n\n Parameters\n ----------\n config_file_dict : dict\n The configuration JSON file loaded as a dictionary\n\n Notes\n -----\n See here for more information on JSON schemas:\n https://json-schema.org/learn/getting-started-step-by-step.html\n \"\"\"\n # Define the schema for config.json\n schema = {\n \"type\": \"object\", # Must be a JSON object\n \"properties\": { # List all the possible entries and their types\n \"connection_string\": {\"type\": \"string\"},\n \"database\": {\n \"type\": \"object\",\n \"properties\": {\n \"engine\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"string\"}\n },\n \"required\": ['engine', 'name', 'user', 'password', 'host', 'port']\n },\n \"filesystem\": {\"type\": \"string\"},\n \"preview_image_filesystem\": {\"type\": \"string\"},\n \"thumbnail_filesystem\": {\"type\": \"string\"},\n \"outputs\": {\"type\": \"string\"},\n \"jwql_dir\": {\"type\": \"string\"},\n \"admin_account\": {\"type\": \"string\"},\n \"log_dir\": {\"type\": \"string\"},\n \"test_dir\": {\"type\": \"string\"},\n \"test_data\": {\"type\": \"string\"},\n \"setup_file\": {\"type\": \"string\"},\n \"auth_mast\": {\"type\": \"string\"},\n \"client_id\": {\"type\": \"string\"},\n \"client_secret\": {\"type\": \"string\"},\n \"mast_token\": {\"type\": \"string\"},\n },\n # List which entries are needed (all of them)\n \"required\": [\"connection_string\", \"database\", \"filesystem\",\n \"preview_image_filesystem\", \"thumbnail_filesystem\",\n \"outputs\", \"jwql_dir\", \"admin_account\", \"log_dir\",\n \"test_dir\", \"test_data\", \"setup_file\", \"auth_mast\",\n \"client_id\", \"client_secret\", \"mast_token\"]\n }\n\n # Test that the provided config file dict matches the schema\n try:\n jsonschema.validate(instance=config_file_dict, schema=schema)\n except jsonschema.ValidationError as e:\n raise jsonschema.ValidationError(\n 'Provided config.json does not match the ' + \\\n 'required JSON schema: {}'.format(e.message)\n )\n\n\ndef initialize_instrument_monitor(module):\n \"\"\"Configures a log file for the instrument monitor run and\n captures the start time of the monitor\n\n Parameters\n ----------\n module : str\n The module name (e.g. ``dark_monitor``)\n\n Returns\n -------\n start_time : datetime object\n The start time of the monitor\n log_file : str\n The path to where the log file is stored\n \"\"\"\n\n from jwql.utils.logging_functions import configure_logging\n\n start_time = datetime.datetime.now()\n log_file = configure_logging(module)\n\n return start_time, log_file\n\n\ndef update_monitor_table(module, start_time, log_file):\n \"\"\"Update the ``monitor`` database table with information about\n the instrument monitor run\n\n Parameters\n ----------\n module : str\n The module name (e.g. 
``dark_monitor``)\n start_time : datetime object\n The start time of the monitor\n log_file : str\n The path to where the log file is stored\n \"\"\"\n\n from jwql.database.database_interface import Monitor\n\n new_entry = {}\n new_entry['monitor_name'] = module\n new_entry['start_time'] = start_time\n new_entry['end_time'] = datetime.datetime.now()\n new_entry['log_file'] = os.path.basename(log_file)\n\n Monitor.__table__.insert().execute(new_entry)\n", "path": "jwql/utils/utils.py" } ]
[ { "content": "\"\"\"Various utility functions for the ``jwql`` project.\n\nAuthors\n-------\n\n - Matthew Bourque\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n\n >>> import utils\n settings = get_config()\n\nReferences\n----------\n\n Filename parser modified from Joe Hunkeler:\n https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff\n\n Various documentation related to JWST filename conventions:\n - https://jwst-docs.stsci.edu/display/JDAT/File+Naming+Conventions+and+Data+Products\n - https://innerspace.stsci.edu/pages/viewpage.action?pageId=94092600\n - https://innerspace.stsci.edu/pages/viewpage.action?spaceKey=SCSB&title=JWST+Science+Data+Products\n - https://jwst-docs.stsci.edu/display/JDAT/Understanding+Associations?q=association%20candidate\n - https://jwst-pipeline.readthedocs.io/en/stable/jwst/introduction.html#pipeline-step-suffix-definitions\n - JWST TR JWST-STScI-004800, SM-12\n \"\"\"\n\nimport datetime\nimport getpass\nimport json\nimport os\nimport re\nimport shutil\n\nimport jsonschema\n\nfrom jwql.utils import permissions\nfrom jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES_SHORTHAND\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\ndef copy_files(files, out_dir):\n \"\"\"Copy a given file to a given directory. Only try to copy the file\n if it is not already present in the output directory.\n\n Parameters\n ----------\n files : list\n List of files to be copied\n\n out_dir : str\n Destination directory\n\n Returns\n -------\n success : list\n Files successfully copied (or that already existed in out_dir)\n\n failed : list\n Files that were not copied\n \"\"\"\n\n # Copy files if they do not already exist\n success = []\n failed = []\n for input_file in files:\n input_new_path = os.path.join(out_dir, os.path.basename(input_file))\n if os.path.isfile(input_new_path):\n success.append(input_new_path)\n else:\n try:\n shutil.copy2(input_file, out_dir)\n success.append(input_new_path)\n permissions.set_permissions(input_new_path)\n except:\n failed.append(input_file)\n return success, failed\n\n\ndef download_mast_data(query_results, output_dir):\n \"\"\"Example function for downloading MAST query results. 
From MAST\n website (``https://mast.stsci.edu/api/v0/pyex.html``)\n\n Parameters\n ----------\n query_results : list\n List of dictionaries returned by a MAST query.\n\n output_dir : str\n Directory into which the files will be downlaoded\n \"\"\"\n\n # Set up the https connection\n server = 'mast.stsci.edu'\n conn = httplib.HTTPSConnection(server)\n\n # Dowload the products\n print('Number of query results: {}'.format(len(query_results)))\n\n for i in range(len(query_results)):\n\n # Make full output file path\n output_file = os.path.join(output_dir, query_results[i]['filename'])\n\n print('Output file is {}'.format(output_file))\n\n # Download the data\n uri = query_results[i]['dataURI']\n\n print('uri is {}'.format(uri))\n\n conn.request(\"GET\", \"/api/v0/download/file?uri=\" + uri)\n resp = conn.getresponse()\n file_content = resp.read()\n\n # Save to file\n with open(output_file, 'wb') as file_obj:\n file_obj.write(file_content)\n\n # Check for file\n if not os.path.isfile(output_file):\n print(\"ERROR: {} failed to download.\".format(output_file))\n else:\n statinfo = os.stat(output_file)\n if statinfo.st_size > 0:\n print(\"DOWNLOAD COMPLETE: \", output_file)\n else:\n print(\"ERROR: {} file is empty.\".format(output_file))\n conn.close()\n\n\ndef ensure_dir_exists(fullpath):\n \"\"\"Creates dirs from ``fullpath`` if they do not already exist.\"\"\"\n if not os.path.exists(fullpath):\n os.makedirs(fullpath)\n permissions.set_permissions(fullpath)\n\n\ndef filename_parser(filename):\n \"\"\"Return a dictionary that contains the properties of a given\n JWST file (e.g. program ID, visit number, detector, etc.).\n\n Parameters\n ----------\n filename : str\n Path or name of JWST file to parse\n\n Returns\n -------\n filename_dict : dict\n Collection of file properties\n\n Raises\n ------\n ValueError\n When the provided file does not follow naming conventions\n \"\"\"\n\n filename = os.path.basename(filename)\n file_root_name = (len(filename.split('.')) < 2)\n\n # Stage 1 and 2 filenames\n # e.g. \"jw80500012009_01101_00012_nrcalong_uncal.fits\"\n stage_1_and_2 = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"(?P<observation>\\d{3})\"\\\n r\"(?P<visit>\\d{3})\"\\\n r\"_(?P<visit_group>\\d{2})\"\\\n r\"(?P<parallel_seq_id>\\d{1})\"\\\n r\"(?P<activity>\\w{2})\"\\\n r\"_(?P<exposure_id>\\d+)\"\\\n r\"_(?P<detector>((?!_)[\\w])+)\"\n\n # Stage 2c outlier detection filenames\n # e.g. \"jw94015002002_02108_00001_mirimage_o002_crf.fits\"\n stage_2c = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\" \\\n r\"(?P<observation>\\d{3})\" \\\n r\"(?P<visit>\\d{3})\" \\\n r\"_(?P<visit_group>\\d{2})\" \\\n r\"(?P<parallel_seq_id>\\d{1})\" \\\n r\"(?P<activity>\\w{2})\" \\\n r\"_(?P<exposure_id>\\d+)\" \\\n r\"_(?P<detector>((?!_)[\\w])+)\"\\\n r\"_(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\n\n # Stage 3 filenames with target ID\n # e.g. \"jw80600-o009_t001_miri_f1130w_i2d.fits\"\n stage_3_target_id = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<target_id>(t)\\d{3})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with source ID\n # e.g. 
\"jw80600-o009_s00001_miri_f1130w_i2d.fits\"\n stage_3_source_id = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<source_id>(s)\\d{5})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with target ID and epoch\n # e.g. \"jw80600-o009_t001-epoch1_miri_f1130w_i2d.fits\"\n stage_3_target_id_epoch = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<target_id>(t)\\d{3})\"\\\n r\"-epoch(?P<epoch>\\d{1})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Stage 3 filenames with source ID and epoch\n # e.g. \"jw80600-o009_s00001-epoch1_miri_f1130w_i2d.fits\"\n stage_3_source_id_epoch = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"-(?P<ac_id>(o\\d{3}|(c|a|r)\\d{4}))\"\\\n r\"_(?P<source_id>(s)\\d{5})\"\\\n r\"-epoch(?P<epoch>\\d{1})\"\\\n r\"_(?P<instrument>(nircam|niriss|nirspec|miri|fgs))\"\\\n r\"_(?P<optical_elements>((?!_)[\\w-])+)\"\n\n # Time series filenames\n # e.g. \"jw00733003001_02101_00002-seg001_nrs1_rate.fits\"\n time_series = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\"\\\n r\"(?P<observation>\\d{3})\"\\\n r\"(?P<visit>\\d{3})\"\\\n r\"_(?P<visit_group>\\d{2})\"\\\n r\"(?P<parallel_seq_id>\\d{1})\"\\\n r\"(?P<activity>\\w{2})\"\\\n r\"_(?P<exposure_id>\\d+)\"\\\n r\"-seg(?P<segment>\\d{3})\"\\\n r\"_(?P<detector>\\w+)\"\n\n # Guider filenames\n # e.g. \"jw00729011001_gs-id_1_image_cal.fits\" or\n # \"jw00799003001_gs-acq1_2019154181705_stream.fits\"\n guider = \\\n r\"jw\" \\\n r\"(?P<program_id>\\d{5})\" \\\n r\"(?P<observation>\\d{3})\" \\\n r\"(?P<visit>\\d{3})\" \\\n r\"_gs-(?P<guider_mode>(id|acq1|acq2|track|fg))\" \\\n r\"_((?P<date_time>\\d{13})|(?P<guide_star_attempt_id>\\d{1}))\"\n\n # Build list of filename types\n filename_types = [\n stage_1_and_2,\n stage_2c,\n stage_3_target_id,\n stage_3_source_id,\n stage_3_target_id_epoch,\n stage_3_source_id_epoch,\n time_series,\n guider]\n\n filename_type_names = [\n 'stage_1_and_2',\n 'stage_2c',\n 'stage_3_target_id',\n 'stage_3_source_id',\n 'stage_3_target_id_epoch',\n 'stage_3_source_id_epoch',\n 'time_series',\n 'guider'\n ]\n\n # Try to parse the filename\n for filename_type, filename_type_name in zip(filename_types, filename_type_names):\n\n # If full filename, try using suffix\n if not file_root_name:\n filename_type += r\"_(?P<suffix>{}).*\".format('|'.join(FILE_SUFFIX_TYPES))\n # If not, make sure the provided regex matches the entire filename root\n else:\n filename_type += r\"$\"\n\n elements = re.compile(filename_type)\n jwst_file = elements.match(filename)\n\n # Stop when you find a format that matches\n if jwst_file is not None:\n name_match = filename_type_name\n break\n\n try:\n # Convert the regex match to a dictionary\n filename_dict = jwst_file.groupdict()\n\n # Add the filename type to that dict\n filename_dict['filename_type'] = name_match\n\n # Also, add the instrument if not already there\n if 'instrument' not in filename_dict.keys():\n if name_match == 'guider':\n filename_dict['instrument'] = 'fgs'\n elif 'detector' in filename_dict.keys():\n filename_dict['instrument'] = JWST_INSTRUMENT_NAMES_SHORTHAND[\n filename_dict['detector'][:3]\n ]\n\n # Raise error if unable to parse the filename\n except AttributeError:\n jdox_url = 'https://jwst-docs.stsci.edu/display/JDAT/' \\\n 'File+Naming+Conventions+and+Data+Products'\n raise ValueError(\n 
'Provided file {} does not follow JWST naming conventions. '\n 'See {} for further information.'.format(filename, jdox_url)\n )\n\n return filename_dict\n\n\ndef filesystem_path(filename):\n \"\"\"Return the full path to a given file in the filesystem\n\n Parameters\n ----------\n filename : str\n File to locate (e.g. ``jw86600006001_02101_00008_guider1_cal.fits``)\n\n Returns\n -------\n full_path : str\n Full path to the given file, including filename\n \"\"\"\n\n filesystem_base = get_config()[\"filesystem\"]\n\n # Subdirectory name is based on the proposal ID\n subdir = 'jw{}'.format(filename_parser(filename)['program_id'])\n full_path = os.path.join(filesystem_base, subdir, filename)\n\n # Check to see if the file exists\n if os.path.isfile(full_path):\n return full_path\n else:\n raise FileNotFoundError(\n '{} is not in the predicted location: {}'.format(filename, full_path)\n )\n\n\ndef get_base_url():\n \"\"\"Return the beginning part of the URL to the ``jwql`` web app\n based on which user is running the software.\n\n If the admin account is running the code, the ``base_url`` is\n assumed to be the production URL. If not, the ``base_url`` is\n assumed to be local.\n\n Returns\n -------\n base_url : str\n The beginning part of the URL to the ``jwql`` web app\n \"\"\"\n\n username = getpass.getuser()\n if username == get_config()['admin_account']:\n base_url = 'https://dljwql.stsci.edu'\n else:\n base_url = 'http://127.0.0.1:8000'\n\n return base_url\n\n\ndef get_config():\n \"\"\"Return a dictionary that holds the contents of the ``jwql``\n config file.\n\n Returns\n -------\n settings : dict\n A dictionary that holds the contents of the config file.\n \"\"\"\n config_file_location = os.path.join(__location__, 'config.json')\n\n # Make sure the file exists\n if not os.path.isfile(config_file_location):\n raise FileNotFoundError('The JWQL package requires a configuration file (config.json) '\n 'to be placed within the jwql/utils directory. '\n 'This file is missing. Please read the relevant wiki page '\n '(https://github.com/spacetelescope/jwql/wiki/'\n 'Config-file) for more information.')\n\n with open(config_file_location, 'r') as config_file_object:\n try:\n # Load it with JSON\n settings = json.load(config_file_object)\n except json.JSONDecodeError as e:\n # Raise a more helpful error if there is a formatting problem\n raise ValueError('Incorrectly formatted config.json file. '\n 'Please fix JSON formatting: {}'.format(e))\n\n # Ensure the file has all the needed entries with expected data types\n _validate_config(settings)\n\n return settings\n\n\ndef check_config_for_key(key):\n \"\"\"Check that the config.json file contains the specified key\n and that the entry is not empty\n\n Parameters\n ----------\n key : str\n The configuration file key to verify\n \"\"\"\n try:\n get_config()[key]\n except KeyError:\n raise KeyError(\n 'The key `{}` is not present in config.json. Please add it.'.format(key)\n + ' See the relevant wiki page (https://github.com/spacetelescope/'\n 'jwql/wiki/Config-file) for more information.'\n )\n\n if get_config()[key] == \"\":\n raise ValueError(\n 'Please complete the `{}` field in your config.json. 
'.format(key)\n + ' See the relevant wiki page (https://github.com/spacetelescope/'\n 'jwql/wiki/Config-file) for more information.'\n )\n\n\ndef _validate_config(config_file_dict):\n \"\"\"Check that the config.json file contains all the needed entries with\n expected data types\n\n Parameters\n ----------\n config_file_dict : dict\n The configuration JSON file loaded as a dictionary\n\n Notes\n -----\n See here for more information on JSON schemas:\n https://json-schema.org/learn/getting-started-step-by-step.html\n \"\"\"\n # Define the schema for config.json\n schema = {\n \"type\": \"object\", # Must be a JSON object\n \"properties\": { # List all the possible entries and their types\n \"connection_string\": {\"type\": \"string\"},\n \"database\": {\n \"type\": \"object\",\n \"properties\": {\n \"engine\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"string\"}\n },\n \"required\": ['engine', 'name', 'user', 'password', 'host', 'port']\n },\n \"filesystem\": {\"type\": \"string\"},\n \"preview_image_filesystem\": {\"type\": \"string\"},\n \"thumbnail_filesystem\": {\"type\": \"string\"},\n \"outputs\": {\"type\": \"string\"},\n \"jwql_dir\": {\"type\": \"string\"},\n \"admin_account\": {\"type\": \"string\"},\n \"log_dir\": {\"type\": \"string\"},\n \"test_dir\": {\"type\": \"string\"},\n \"test_data\": {\"type\": \"string\"},\n \"setup_file\": {\"type\": \"string\"},\n \"auth_mast\": {\"type\": \"string\"},\n \"client_id\": {\"type\": \"string\"},\n \"client_secret\": {\"type\": \"string\"},\n \"mast_token\": {\"type\": \"string\"},\n },\n # List which entries are needed (all of them)\n \"required\": [\"connection_string\", \"database\", \"filesystem\",\n \"preview_image_filesystem\", \"thumbnail_filesystem\",\n \"outputs\", \"jwql_dir\", \"admin_account\", \"log_dir\",\n \"test_dir\", \"test_data\", \"setup_file\", \"auth_mast\",\n \"client_id\", \"client_secret\", \"mast_token\"]\n }\n\n # Test that the provided config file dict matches the schema\n try:\n jsonschema.validate(instance=config_file_dict, schema=schema)\n except jsonschema.ValidationError as e:\n raise jsonschema.ValidationError(\n 'Provided config.json does not match the ' + \\\n 'required JSON schema: {}'.format(e.message)\n )\n\n\ndef initialize_instrument_monitor(module):\n \"\"\"Configures a log file for the instrument monitor run and\n captures the start time of the monitor\n\n Parameters\n ----------\n module : str\n The module name (e.g. ``dark_monitor``)\n\n Returns\n -------\n start_time : datetime object\n The start time of the monitor\n log_file : str\n The path to where the log file is stored\n \"\"\"\n\n from jwql.utils.logging_functions import configure_logging\n\n start_time = datetime.datetime.now()\n log_file = configure_logging(module)\n\n return start_time, log_file\n\n\ndef update_monitor_table(module, start_time, log_file):\n \"\"\"Update the ``monitor`` database table with information about\n the instrument monitor run\n\n Parameters\n ----------\n module : str\n The module name (e.g. 
``dark_monitor``)\n start_time : datetime object\n The start time of the monitor\n log_file : str\n The path to where the log file is stored\n \"\"\"\n\n from jwql.database.database_interface import Monitor\n\n new_entry = {}\n new_entry['monitor_name'] = module\n new_entry['start_time'] = start_time\n new_entry['end_time'] = datetime.datetime.now()\n new_entry['log_file'] = os.path.basename(log_file)\n\n Monitor.__table__.insert().execute(new_entry)\n", "path": "jwql/utils/utils.py" } ]
diff --git a/MANIFEST.in b/MANIFEST.in index 7756ec124..0bcf570f6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -7,5 +7,6 @@ include setup.py recursive-include notebooks * recursive-include style_guide * recursive-include typing_demo * +recursive-include jwql/database/monitor_table_definitions *.txt exclude *.pyc \ No newline at end of file diff --git a/docs/source/tests.rst b/docs/source/tests.rst index c88e45396..f0785896d 100644 --- a/docs/source/tests.rst +++ b/docs/source/tests.rst @@ -20,8 +20,8 @@ test_dark_monitor.py :members: :undoc-members: -test_edb_interface.py ---------------------- +test_edb.py +----------- .. automodule:: jwql.tests.test_edb :members: :undoc-members: @@ -33,7 +33,7 @@ test_instrument_properties.py :undoc-members: test_loading_times.py --------------------- +--------------------- .. automodule:: jwql.tests.test_loading_times :members: :undoc-members: diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index 3ef082e1a..d7bde960a 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -417,7 +417,7 @@ def check_config_for_key(key): and that the entry is not empty Parameters - ------- + ---------- key : str The configuration file key to verify """ diff --git a/requirements.txt b/requirements.txt index 848160690..1ac7102d6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,6 @@ ipython==7.7.0 jinja2==2.10.1 jsonschema==3.0.2 jwedb>=0.0.3 -jwst==0.0.0 matplotlib==3.1.1 numpy==1.17.0 numpydoc==0.9.1 @@ -18,8 +17,9 @@ psycopg2==2.8.3 pysiaf==0.3.1 python-dateutil==2.8.0 pytest==5.0.1 +pytest-cov==2.7.1 sphinx==2.1.2 sphinx-automodapi==0.11 sqlalchemy==1.3.6 stsci_rtd_theme==0.0.2 -pytest-cov==2.7.1 \ No newline at end of file +git+https://github.com/spacetelescope/jwst@stable \ No newline at end of file
weni-ai__bothub-engine-76
[ { "content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\ninstall_requires = list(\n filter(lambda x: len(x) > 0, install_requires.split('\\n')))\n\nsetup(\n name='bothub',\n version='1.7.1',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n python_requires='>=3.6',\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\ninstall_requires = list(\n filter(lambda x: len(x) > 0, install_requires.split('\\n')))\n\nsetup(\n name='bothub',\n version='1.7.2',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n python_requires='>=3.6',\n)\n", "path": "setup.py" } ]
diff --git a/docker/bothub-nginx.conf b/docker/bothub-nginx.conf index b4b557bc..a6134f57 100644 --- a/docker/bothub-nginx.conf +++ b/docker/bothub-nginx.conf @@ -20,10 +20,12 @@ server { location ~ ^/(api|docs|admin) { proxy_redirect off; proxy_pass $scheme://bothub; + proxy_set_header Host $http_host; } location @bothub { proxy_redirect off; proxy_pass $scheme://bothub; + proxy_set_header Host $http_host; } } diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 8271ab66..e28e01ac 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -7,7 +7,7 @@ services: context: .. dockerfile: docker/Dockerfile ports: - - 8000:8000 + - 80:80 environment: - SECRET_KEY - DEBUG diff --git a/setup.py b/setup.py index 0bacecfa..8e351e03 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ setup( name='bothub', - version='1.7.1', + version='1.7.2', description='bothub', packages=find_packages(), install_requires=install_requires,
engnadeau__pybotics-751
[ { "content": "\"\"\"Predefined robot models.\"\"\"\nimport numpy as np # type: ignore\n\n\ndef kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover\n \"\"\"Get KUKA LBR iiwa 7 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 340],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 400],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, 0, 400],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 126],\n ]\n )\n\n\ndef mecademic_meca500() -> np.ndarray: # pragma: no cover\n \"\"\"Get Meca500 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 135],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 135, 0, 0],\n [-np.pi / 2, 38, 0, 120],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n\n\ndef puma560() -> np.ndarray: # pragma: no cover\n \"\"\"Get PUMA560 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 0],\n [-np.pi / 2, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef ur10() -> np.ndarray: # pragma: no cover\n \"\"\"Get UR10 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 118],\n [np.pi / 2, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef abb_irb120() -> np.ndarray: # pragma: no cover\n \"\"\"Get ABB irb120 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 290],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 270, 0, 0],\n [-np.pi / 2, 70, 0, 302],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n", "path": "pybotics/predefined_models.py" } ]
[ { "content": "\"\"\"Predefined robot models.\n\nThese models correspond to the Modified Denavit–Hartenberg parameters:\nhttps://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters\n\"\"\"\nimport numpy as np # type: ignore\n\n\ndef kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover\n \"\"\"Get KUKA LBR iiwa 7 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 340],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 400],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, 0, 400],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 126],\n ]\n )\n\n\ndef mecademic_meca500() -> np.ndarray: # pragma: no cover\n \"\"\"Get Meca500 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 135],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 135, 0, 0],\n [-np.pi / 2, 38, 0, 120],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n\n\ndef puma560() -> np.ndarray: # pragma: no cover\n \"\"\"Get PUMA560 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 0],\n [-np.pi / 2, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef ur10() -> np.ndarray: # pragma: no cover\n \"\"\"Get UR10 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 118],\n [np.pi / 2, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef abb_irb120() -> np.ndarray: # pragma: no cover\n \"\"\"Get ABB irb120 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 290],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 270, 0, 0],\n [-np.pi / 2, 70, 0, 302],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n", "path": "pybotics/predefined_models.py" } ]
diff --git a/pybotics/predefined_models.py b/pybotics/predefined_models.py index daf27a26..6bd906b8 100644 --- a/pybotics/predefined_models.py +++ b/pybotics/predefined_models.py @@ -1,4 +1,8 @@ -"""Predefined robot models.""" +"""Predefined robot models. + +These models correspond to the Modified Denavit–Hartenberg parameters: +https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters +""" import numpy as np # type: ignore
searx__searx-3091
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Qwant (Web, News, Images, Videos)\n\nThis engine uses the Qwant API (https://api.qwant.com/v3). The API is\nundocumented but can be reverse engineered by reading the network log of\nhttps://www.qwant.com/ queries.\n\nThis implementation is used by different qwant engines in the settings.yml::\n\n - name: qwant\n categories: general\n ...\n - name: qwant news\n categories: news\n ...\n - name: qwant images\n categories: images\n ...\n - name: qwant videos\n categories: videos\n ...\n\n\"\"\"\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom json import loads\nfrom urllib.parse import urlencode\nfrom flask_babel import gettext\n\nfrom searx.utils import match_language\nfrom searx.exceptions import SearxEngineAPIException\nfrom searx.network import raise_for_httperror\n\n\n# about\nabout = {\n \"website\": 'https://www.qwant.com/',\n \"wikidata_id\": 'Q14657870',\n \"official_api_documentation\": None,\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\n# engine dependent config\ncategories = []\npaging = True\nsupported_languages_url = about['website']\n\ncategory_to_keyword = {\n 'general': 'web',\n 'news': 'news',\n 'images': 'images',\n 'videos': 'videos',\n}\n\n# search-url\nurl = 'https://api.qwant.com/v3/search/{keyword}?q={query}&count={count}&offset={offset}'\n\n\ndef request(query, params):\n \"\"\"Qwant search request\"\"\"\n keyword = category_to_keyword[categories[0]]\n count = 10 # web: count must be equal to 10\n\n if keyword == 'images':\n count = 50\n offset = (params['pageno'] - 1) * count\n # count + offset must be lower than 250\n offset = min(offset, 199)\n else:\n offset = (params['pageno'] - 1) * count\n # count + offset must be lower than 50\n offset = min(offset, 40)\n\n params['url'] = url.format(\n keyword=keyword,\n query=urlencode({'q': query}),\n offset=offset,\n count=count,\n )\n\n # add language tag\n if params['language'] == 'all':\n params['url'] += '&locale=en_us'\n else:\n language = match_language(\n params['language'],\n # pylint: disable=undefined-variable\n supported_languages,\n language_aliases,\n )\n params['url'] += '&locale=' + language.replace('-', '_').lower()\n\n params['raise_for_httperror'] = False\n return params\n\n\ndef response(resp):\n \"\"\"Get response from Qwant's search request\"\"\"\n # pylint: disable=too-many-locals, too-many-branches, too-many-statements\n\n keyword = category_to_keyword[categories[0]]\n results = []\n\n # load JSON result\n search_results = loads(resp.text)\n data = search_results.get('data', {})\n\n # check for an API error\n if search_results.get('status') != 'success':\n msg = \",\".join(data.get('message', ['unknown', ]))\n raise SearxEngineAPIException('API error::' + msg)\n\n # raise for other errors\n raise_for_httperror(resp)\n\n if keyword == 'web':\n # The WEB query contains a list named 'mainline'. This list can contain\n # different result types (e.g. mainline[0]['type'] returns type of the\n # result items in mainline[0]['items']\n mainline = data.get('result', {}).get('items', {}).get('mainline', {})\n else:\n # Queries on News, Images and Videos do not have a list named 'mainline'\n # in the response. 
The result items are directly in the list\n # result['items'].\n mainline = data.get('result', {}).get('items', [])\n mainline = [\n {'type': keyword, 'items': mainline},\n ]\n\n # return empty array if there are no results\n if not mainline:\n return []\n\n for row in mainline:\n\n mainline_type = row.get('type', 'web')\n if mainline_type != keyword:\n continue\n\n if mainline_type == 'ads':\n # ignore adds\n continue\n\n mainline_items = row.get('items', [])\n for item in mainline_items:\n\n title = item.get('title', None)\n res_url = item.get('url', None)\n\n if mainline_type == 'web':\n content = item['desc']\n results.append({\n 'title': title,\n 'url': res_url,\n 'content': content,\n })\n\n elif mainline_type == 'news':\n\n pub_date = item['date']\n if pub_date is not None:\n pub_date = datetime.fromtimestamp(pub_date)\n news_media = item.get('media', [])\n img_src = None\n if news_media:\n img_src = news_media[0].get('pict', {}).get('url', None)\n results.append({\n 'title': title,\n 'url': res_url,\n 'publishedDate': pub_date,\n 'img_src': img_src,\n })\n\n elif mainline_type == 'images':\n thumbnail = item['thumbnail']\n img_src = item['media']\n results.append({\n 'title': title,\n 'url': res_url,\n 'template': 'images.html',\n 'thumbnail_src': thumbnail,\n 'img_src': img_src,\n })\n\n elif mainline_type == 'videos':\n # some videos do not have a description: while qwant-video\n # returns an empty string, such video from a qwant-web query\n # miss the 'desc' key.\n d, s, c = item.get('desc'), item.get('source'), item.get('channel')\n content_parts = []\n if d:\n content_parts.append(d)\n if s:\n content_parts.append(\"%s: %s \" % (gettext(\"Source\"), s))\n if c:\n content_parts.append(\"%s: %s \" % (gettext(\"Channel\"), c))\n content = ' // '.join(content_parts)\n length = item['duration']\n if length is not None:\n length = timedelta(milliseconds=length)\n pub_date = item['date']\n if pub_date is not None:\n pub_date = datetime.fromtimestamp(pub_date)\n thumbnail = item['thumbnail']\n # from some locations (DE and others?) the s2 link do\n # response a 'Please wait ..' but does not deliver the thumbnail\n thumbnail = thumbnail.replace(\n 'https://s2.qwant.com',\n 'https://s1.qwant.com', 1\n )\n results.append({\n 'title': title,\n 'url': res_url,\n 'content': content,\n 'publishedDate': pub_date,\n 'thumbnail': thumbnail,\n 'template': 'videos.html',\n 'length': length,\n })\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n # list of regions is embedded in page as a js object\n response_text = resp.text\n response_text = response_text[response_text.find('INITIAL_PROPS'):]\n response_text = response_text[response_text.find('{'):response_text.find('</script>')]\n\n regions_json = loads(response_text)\n\n supported_languages = []\n for country, langs in regions_json['locales'].items():\n for lang in langs['langs']:\n lang_code = \"{lang}-{country}\".format(lang=lang, country=country)\n supported_languages.append(lang_code)\n\n return supported_languages\n", "path": "searx/engines/qwant.py" } ]
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Qwant (Web, News, Images, Videos)\n\nThis engine uses the Qwant API (https://api.qwant.com/v3). The API is\nundocumented but can be reverse engineered by reading the network log of\nhttps://www.qwant.com/ queries.\n\nThis implementation is used by different qwant engines in the settings.yml::\n\n - name: qwant\n categories: general\n ...\n - name: qwant news\n categories: news\n ...\n - name: qwant images\n categories: images\n ...\n - name: qwant videos\n categories: videos\n ...\n\n\"\"\"\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom json import loads\nfrom urllib.parse import urlencode\nfrom flask_babel import gettext\n\nfrom searx.utils import match_language\nfrom searx.exceptions import SearxEngineAPIException\nfrom searx.network import raise_for_httperror\n\n\n# about\nabout = {\n \"website\": 'https://www.qwant.com/',\n \"wikidata_id\": 'Q14657870',\n \"official_api_documentation\": None,\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\n# engine dependent config\ncategories = []\npaging = True\nsupported_languages_url = about['website']\n\ncategory_to_keyword = {\n 'general': 'web',\n 'news': 'news',\n 'images': 'images',\n 'videos': 'videos',\n}\n\n# search-url\nurl = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={offset}'\n\n\ndef request(query, params):\n \"\"\"Qwant search request\"\"\"\n keyword = category_to_keyword[categories[0]]\n count = 10 # web: count must be equal to 10\n\n if keyword == 'images':\n count = 50\n offset = (params['pageno'] - 1) * count\n # count + offset must be lower than 250\n offset = min(offset, 199)\n else:\n offset = (params['pageno'] - 1) * count\n # count + offset must be lower than 50\n offset = min(offset, 40)\n\n params['url'] = url.format(\n keyword=keyword,\n query=urlencode({'q': query}),\n offset=offset,\n count=count,\n )\n\n # add language tag\n if params['language'] == 'all':\n params['url'] += '&locale=en_us'\n else:\n language = match_language(\n params['language'],\n # pylint: disable=undefined-variable\n supported_languages,\n language_aliases,\n )\n params['url'] += '&locale=' + language.replace('-', '_').lower()\n\n params['raise_for_httperror'] = False\n return params\n\n\ndef response(resp):\n \"\"\"Get response from Qwant's search request\"\"\"\n # pylint: disable=too-many-locals, too-many-branches, too-many-statements\n\n keyword = category_to_keyword[categories[0]]\n results = []\n\n # load JSON result\n search_results = loads(resp.text)\n data = search_results.get('data', {})\n\n # check for an API error\n if search_results.get('status') != 'success':\n msg = \",\".join(data.get('message', ['unknown', ]))\n raise SearxEngineAPIException('API error::' + msg)\n\n # raise for other errors\n raise_for_httperror(resp)\n\n if keyword == 'web':\n # The WEB query contains a list named 'mainline'. This list can contain\n # different result types (e.g. mainline[0]['type'] returns type of the\n # result items in mainline[0]['items']\n mainline = data.get('result', {}).get('items', {}).get('mainline', {})\n else:\n # Queries on News, Images and Videos do not have a list named 'mainline'\n # in the response. 
The result items are directly in the list\n # result['items'].\n mainline = data.get('result', {}).get('items', [])\n mainline = [\n {'type': keyword, 'items': mainline},\n ]\n\n # return empty array if there are no results\n if not mainline:\n return []\n\n for row in mainline:\n\n mainline_type = row.get('type', 'web')\n if mainline_type != keyword:\n continue\n\n if mainline_type == 'ads':\n # ignore adds\n continue\n\n mainline_items = row.get('items', [])\n for item in mainline_items:\n\n title = item.get('title', None)\n res_url = item.get('url', None)\n\n if mainline_type == 'web':\n content = item['desc']\n results.append({\n 'title': title,\n 'url': res_url,\n 'content': content,\n })\n\n elif mainline_type == 'news':\n\n pub_date = item['date']\n if pub_date is not None:\n pub_date = datetime.fromtimestamp(pub_date)\n news_media = item.get('media', [])\n img_src = None\n if news_media:\n img_src = news_media[0].get('pict', {}).get('url', None)\n results.append({\n 'title': title,\n 'url': res_url,\n 'publishedDate': pub_date,\n 'img_src': img_src,\n })\n\n elif mainline_type == 'images':\n thumbnail = item['thumbnail']\n img_src = item['media']\n results.append({\n 'title': title,\n 'url': res_url,\n 'template': 'images.html',\n 'thumbnail_src': thumbnail,\n 'img_src': img_src,\n })\n\n elif mainline_type == 'videos':\n # some videos do not have a description: while qwant-video\n # returns an empty string, such video from a qwant-web query\n # miss the 'desc' key.\n d, s, c = item.get('desc'), item.get('source'), item.get('channel')\n content_parts = []\n if d:\n content_parts.append(d)\n if s:\n content_parts.append(\"%s: %s \" % (gettext(\"Source\"), s))\n if c:\n content_parts.append(\"%s: %s \" % (gettext(\"Channel\"), c))\n content = ' // '.join(content_parts)\n length = item['duration']\n if length is not None:\n length = timedelta(milliseconds=length)\n pub_date = item['date']\n if pub_date is not None:\n pub_date = datetime.fromtimestamp(pub_date)\n thumbnail = item['thumbnail']\n # from some locations (DE and others?) the s2 link do\n # response a 'Please wait ..' but does not deliver the thumbnail\n thumbnail = thumbnail.replace(\n 'https://s2.qwant.com',\n 'https://s1.qwant.com', 1\n )\n results.append({\n 'title': title,\n 'url': res_url,\n 'content': content,\n 'publishedDate': pub_date,\n 'thumbnail': thumbnail,\n 'template': 'videos.html',\n 'length': length,\n })\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n # list of regions is embedded in page as a js object\n response_text = resp.text\n response_text = response_text[response_text.find('INITIAL_PROPS'):]\n response_text = response_text[response_text.find('{'):response_text.find('</script>')]\n\n regions_json = loads(response_text)\n\n supported_languages = []\n for country, langs in regions_json['locales'].items():\n for lang in langs['langs']:\n lang_code = \"{lang}-{country}\".format(lang=lang, country=country)\n supported_languages.append(lang_code)\n\n return supported_languages\n", "path": "searx/engines/qwant.py" } ]
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index fe45b58eca..55c355efdd 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -59,7 +59,7 @@
 }
 
 # search-url
-url = 'https://api.qwant.com/v3/search/{keyword}?q={query}&count={count}&offset={offset}'
+url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={offset}'
 
 
 def request(query, params):
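The diff above only changes the URL template, but the reason is visible in request() earlier in the same record: the query part is built with urlencode({'q': query}), which already produces the full "q=..." pair, so the old template duplicated the parameter name. A small standalone sketch of the difference (the literal keyword, count and offset values below are illustrative, not taken from the engine):

from urllib.parse import urlencode

# request() builds the query part with urlencode({'q': query}),
# which already yields the complete "q=..." pair.
query = urlencode({'q': 'hedy lamarr'})  # -> 'q=hedy+lamarr'

old_url = 'https://api.qwant.com/v3/search/web?q={query}&count=10&offset=0'.format(query=query)
new_url = 'https://api.qwant.com/v3/search/web?{query}&count=10&offset=0'.format(query=query)

print(old_url)  # ...search/web?q=q=hedy+lamarr&...  (parameter name duplicated)
print(new_url)  # ...search/web?q=hedy+lamarr&...    (single, well-formed parameter)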
litestar-org__litestar-2330
[ { "content": "from enum import Enum\n\n__all__ = (\"OpenAPIFormat\", \"OpenAPIType\")\n\n\nclass OpenAPIFormat(str, Enum):\n \"\"\"Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13\"\"\"\n\n DATE = \"date\"\n DATE_TIME = \"date-time\"\n TIME = \"time\"\n DURATION = \"duration\"\n URL = \"url\"\n EMAIL = \"email\"\n IDN_EMAIL = \"idn-email\"\n HOST_NAME = \"hostname\"\n IDN_HOST_NAME = \"idn-hostname\"\n IPV4 = \"ipv4\"\n IPV6 = \"ipv6\"\n URI = \"uri\"\n URI_REFERENCE = \"uri-reference\"\n URI_TEMPLATE = \"uri-template\"\n JSON_POINTER = \"json-pointer\"\n RELATIVE_JSON_POINTER = \"relative-json-pointer\"\n IRI = \"iri-reference\"\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n\n\nclass OpenAPIType(str, Enum):\n \"\"\"An OopenAPI type.\"\"\"\n\n ARRAY = \"array\"\n BOOLEAN = \"boolean\"\n INTEGER = \"integer\"\n NULL = \"null\"\n NUMBER = \"number\"\n OBJECT = \"object\"\n STRING = \"string\"\n", "path": "litestar/openapi/spec/enums.py" } ]
[ { "content": "from enum import Enum\n\n__all__ = (\"OpenAPIFormat\", \"OpenAPIType\")\n\n\nclass OpenAPIFormat(str, Enum):\n \"\"\"Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13\"\"\"\n\n DATE = \"date\"\n DATE_TIME = \"date-time\"\n TIME = \"time\"\n DURATION = \"duration\"\n URL = \"url\"\n EMAIL = \"email\"\n IDN_EMAIL = \"idn-email\"\n HOST_NAME = \"hostname\"\n IDN_HOST_NAME = \"idn-hostname\"\n IPV4 = \"ipv4\"\n IPV6 = \"ipv6\"\n URI = \"uri\"\n URI_REFERENCE = \"uri-reference\"\n URI_TEMPLATE = \"uri-template\"\n JSON_POINTER = \"json-pointer\"\n RELATIVE_JSON_POINTER = \"relative-json-pointer\"\n IRI = \"iri-reference\"\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n BINARY = \"binary\"\n\n\nclass OpenAPIType(str, Enum):\n \"\"\"An OopenAPI type.\"\"\"\n\n ARRAY = \"array\"\n BOOLEAN = \"boolean\"\n INTEGER = \"integer\"\n NULL = \"null\"\n NUMBER = \"number\"\n OBJECT = \"object\"\n STRING = \"string\"\n", "path": "litestar/openapi/spec/enums.py" } ]
diff --git a/litestar/openapi/spec/enums.py b/litestar/openapi/spec/enums.py
index 3b7bb96093..da9adeabd1 100644
--- a/litestar/openapi/spec/enums.py
+++ b/litestar/openapi/spec/enums.py
@@ -26,6 +26,7 @@ class OpenAPIFormat(str, Enum):
     IRI_REFERENCE = "iri-reference"  # noqa: PIE796
     UUID = "uuid"
     REGEX = "regex"
+    BINARY = "binary"
 
 
 class OpenAPIType(str, Enum):
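The change above adds a single BINARY member to OpenAPIFormat. A minimal standalone sketch (re-declaring a trimmed copy of the enum rather than importing litestar) of why the str/Enum mixin matters here: the member can be dropped straight into a schema dict and serialises as its plain string value.

import json
from enum import Enum

# Trimmed, hypothetical stand-in for litestar.openapi.spec.enums.OpenAPIFormat;
# only the members needed for the illustration are declared.
class OpenAPIFormat(str, Enum):
    UUID = "uuid"
    REGEX = "regex"
    BINARY = "binary"  # the member added by this change

schema = {"type": "string", "format": OpenAPIFormat.BINARY}
# Because the enum also subclasses str, the member serialises as its plain value.
print(json.dumps(schema))  # {"type": "string", "format": "binary"}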
conda-forge__conda-smithy-1727
[ { "content": "import shutil\nimport tempfile\nimport io\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\ndef get_feedstock_name_from_meta(meta):\n \"\"\"Resolve the feedtstock name from the parsed meta.yaml.\"\"\"\n if \"feedstock-name\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"feedstock-name\"]\n elif \"parent_recipe\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n else:\n return meta.name()\n\n\ndef get_feedstock_about_from_meta(meta) -> dict:\n \"\"\"Fetch the feedtstock about from the parsed meta.yaml.\"\"\"\n # it turns out that conda_build would not preserve the feedstock about:\n # - if a subpackage does not have about, it uses the feedstock's\n # - if a subpackage has about, it's used as is\n # therefore we need to parse the yaml again just to get the about section...\n if \"parent_recipe\" in meta.meta[\"extra\"]:\n recipe_meta = os.path.join(\n meta.meta[\"extra\"][\"parent_recipe\"][\"path\"], \"meta.yaml\"\n )\n with io.open(recipe_meta, \"rt\") as fh:\n content = render_meta_yaml(\"\".join(fh))\n meta = get_yaml().load(content)\n return dict(meta[\"about\"])\n else:\n # no parent recipe for any reason, use self's about\n return dict(meta.meta[\"about\"])\n\n\ndef get_yaml():\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n # Don't use a global variable for this as a global\n # variable will make conda-smithy thread unsafe.\n yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n return yaml\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef stub_compatible_pin(*args, **kwargs):\n return f\"compatible_pin {args[0]}\"\n\n\ndef stub_subpackage_pin(*args, **kwargs):\n return f\"subpackage_pin {args[0]}\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=stub_subpackage_pin,\n pin_compatible=stub_compatible_pin,\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n mpi=\"mpi\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(forge_yaml):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... 
cfg['foo'] = 'bar'\n \"\"\"\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = get_yaml().load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n get_yaml().dump(code, Path(forge_yaml))\n\n\ndef merge_dict(src, dest):\n \"\"\"Recursive merge dictionary\"\"\"\n for key, value in src.items():\n if isinstance(value, dict):\n # get node or create one\n node = dest.setdefault(key, {})\n merge_dict(value, node)\n else:\n dest[key] = value\n\n return dest\n", "path": "conda_smithy/utils.py" } ]
[ { "content": "import shutil\nimport tempfile\nimport io\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\ndef get_feedstock_name_from_meta(meta):\n \"\"\"Resolve the feedtstock name from the parsed meta.yaml.\"\"\"\n if \"feedstock-name\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"feedstock-name\"]\n elif \"parent_recipe\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n else:\n return meta.name()\n\n\ndef get_feedstock_about_from_meta(meta) -> dict:\n \"\"\"Fetch the feedtstock about from the parsed meta.yaml.\"\"\"\n # it turns out that conda_build would not preserve the feedstock about:\n # - if a subpackage does not have about, it uses the feedstock's\n # - if a subpackage has about, it's used as is\n # therefore we need to parse the yaml again just to get the about section...\n if \"parent_recipe\" in meta.meta[\"extra\"]:\n recipe_meta = os.path.join(\n meta.meta[\"extra\"][\"parent_recipe\"][\"path\"], \"meta.yaml\"\n )\n with io.open(recipe_meta, \"rt\") as fh:\n content = render_meta_yaml(\"\".join(fh))\n meta = get_yaml().load(content)\n return dict(meta[\"about\"])\n else:\n # no parent recipe for any reason, use self's about\n return dict(meta.meta[\"about\"])\n\n\ndef get_yaml():\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n # Don't use a global variable for this as a global\n # variable will make conda-smithy thread unsafe.\n yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n return yaml\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __str__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef stub_compatible_pin(*args, **kwargs):\n return f\"compatible_pin {args[0]}\"\n\n\ndef stub_subpackage_pin(*args, **kwargs):\n return f\"subpackage_pin {args[0]}\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=stub_subpackage_pin,\n pin_compatible=stub_compatible_pin,\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n mpi=\"mpi\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(forge_yaml):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... 
cfg['foo'] = 'bar'\n \"\"\"\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = get_yaml().load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n get_yaml().dump(code, Path(forge_yaml))\n\n\ndef merge_dict(src, dest):\n \"\"\"Recursive merge dictionary\"\"\"\n for key, value in src.items():\n if isinstance(value, dict):\n # get node or create one\n node = dest.setdefault(key, {})\n merge_dict(value, node)\n else:\n dest[key] = value\n\n return dest\n", "path": "conda_smithy/utils.py" } ]
diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py
index b37870c6a..f50c14025 100644
--- a/conda_smithy/utils.py
+++ b/conda_smithy/utils.py
@@ -61,7 +61,7 @@ def tmp_directory():
 
 
 class NullUndefined(jinja2.Undefined):
-    def __unicode__(self):
+    def __str__(self):
         return self._undefined_name
 
     def __getattr__(self, name):
diff --git a/news/1726-undefined-null-str b/news/1726-undefined-null-str
new file mode 100644
index 000000000..48b275f7d
--- /dev/null
+++ b/news/1726-undefined-null-str
@@ -0,0 +1,23 @@
+**Added:**
+
+* <news item>
+
+**Changed:**
+
+* <news item>
+
+**Deprecated:**
+
+* <news item>
+
+**Removed:**
+
+* <news item>
+
+**Fixed:**
+
+* Ensure undefined Jinja variables are rendered as the variable name, restoring Python 2-like behaviour. (#1726 via #1727)
+
+**Security:**
+
+* <news item>
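The one-line change above matters because, on Python 3, Jinja2 stringifies undefined values via __str__; the old __unicode__ hook was never called, so undefined names rendered as empty strings. A minimal reproduction using the patched class (the template string below is only an example, not taken from conda-smithy):

import jinja2

class NullUndefined(jinja2.Undefined):
    def __str__(self):  # Jinja2 on Python 3 calls __str__, never __unicode__
        return self._undefined_name

    def __getattr__(self, name):
        return "{}.{}".format(self, name)

env = jinja2.Environment(undefined=NullUndefined)
template = env.from_string("{{ compiler }} and {{ environ.HOME }}")
print(template.render())  # -> "compiler and environ.HOME" instead of empty output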
ydataai__ydata-profiling-80
[ { "content": "from __future__ import division\n\nimport sys\n\nimport itertools\n\ntry:\n from StringIO import BytesIO\nexcept ImportError:\n from io import BytesIO\n\ntry:\n from urllib import quote\nexcept ImportError:\n from urllib.parse import quote\n\nimport base64\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport pandas as pd\nimport pandas_profiling.formatters as formatters, pandas_profiling.templates as templates\nfrom matplotlib import pyplot as plt\nfrom pkg_resources import resource_filename\nimport six\nimport multiprocessing\nfrom functools import partial\nfrom distutils.version import LooseVersion\n\n\ndef pretty_name(x):\n x *= 100\n if x == int(x):\n return '%.0f%%' % x\n else:\n return '%.1f%%' % x\n\n\ndef get_vartype(data):\n # TODO: Shall not be computed several times\n distinct_count=data.nunique(dropna=False)\n leng=len(data)\n if distinct_count <=1:\n return 'CONST'\n elif pd.api.types.is_bool_dtype(data):\n return 'BOOL'\n elif pd.api.types.is_numeric_dtype(data):\n return 'NUM'\n elif pd.api.types.is_datetime64_dtype(data):\n return 'DATE'\n elif distinct_count==leng:\n return 'UNIQUE'\n else:\n return 'CAT'\n\n\ndef describe_numeric_1d(series, **kwargs):\n stats = {'mean': series.mean(), 'std': series.std(), 'variance': series.var(), 'min': series.min(),\n 'max': series.max()}\n stats['range'] = stats['max'] - stats['min']\n\n for x in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):\n stats[pretty_name(x)] = series.dropna().quantile(x) # The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098\n stats['iqr'] = stats['75%'] - stats['25%']\n stats['kurtosis'] = series.kurt()\n stats['skewness'] = series.skew()\n stats['sum'] = series.sum()\n stats['mad'] = series.mad()\n stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN\n stats['type'] = \"NUM\"\n stats['n_zeros'] = (len(series) - np.count_nonzero(series))\n stats['p_zeros'] = stats['n_zeros'] / len(series)\n # Histograms\n stats['histogram'] = histogram(series, **kwargs)\n stats['mini_histogram'] = mini_histogram(series, **kwargs)\n return pd.Series(stats, name=series.name)\n\n\ndef _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):\n \"\"\"Plot an histogram from the data and return the AxesSubplot object.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot\n figsize: a tuple (width, height) in inches, default (6,4)\n The size of the figure.\n facecolor: str\n The color code.\n\n Returns\n -------\n matplotlib.AxesSubplot, The plot.\n \"\"\"\n if get_vartype(series) == 'DATE':\n # TODO: These calls should be merged\n fig = plt.figure(figsize=figsize)\n plot = fig.add_subplot(111)\n plot.set_ylabel('Frequency')\n try:\n plot.hist(series.values, facecolor=facecolor, bins=bins)\n except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead\n pass\n else:\n plot = series.plot(kind='hist', figsize=figsize,\n facecolor=facecolor,\n bins=bins) # TODO when running on server, send this off to a different thread\n return plot\n\n\ndef histogram(series, **kwargs):\n \"\"\"Plot an histogram of the data.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot.\n\n Returns\n -------\n str, The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, **kwargs)\n plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 
'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n # TODO Think about writing this to disk instead of caching them in strings\n plt.close(plot.figure)\n return result_string\n\n\ndef mini_histogram(series, **kwargs):\n \"\"\"Plot a small (mini) histogram of the data.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot.\n\n Returns\n -------\n str, The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)\n plot.axes.get_yaxis().set_visible(False)\n\n if LooseVersion(matplotlib.__version__) <= '1.5.9':\n plot.set_axis_bgcolor(\"w\")\n else:\n plot.set_facecolor(\"w\")\n\n xticks = plot.xaxis.get_major_ticks()\n for tick in xticks[1:-1]:\n tick.set_visible(False)\n tick.label.set_visible(False)\n for tick in (xticks[0], xticks[-1]):\n tick.label.set_fontsize(8)\n plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n plt.close(plot.figure)\n return result_string\n\n\ndef describe_date_1d(series):\n stats = {'min': series.min(), 'max': series.max()}\n stats['range'] = stats['max'] - stats['min']\n stats['type'] = \"DATE\"\n stats['histogram'] = histogram(series)\n stats['mini_histogram'] = mini_histogram(series)\n return pd.Series(stats, name=series.name)\n\n\ndef describe_categorical_1d(data):\n # Only run if at least 1 non-missing value\n objcounts = data.value_counts()\n top, freq = objcounts.index[0], objcounts.iloc[0]\n names = []\n result = []\n\n if get_vartype(data) == 'CAT':\n names += ['top', 'freq', 'type']\n result += [top, freq, 'CAT']\n\n return pd.Series(result, index=names, name=data.name)\n\ndef describe_boolean_1d(data):\n objcounts = data.value_counts()\n top, freq = objcounts.index[0], objcounts.iloc[0]\n # The mean of boolean is an interesting information\n mean = data.mean()\n names = []\n result = []\n names += ['top', 'freq', 'type', 'mean']\n result += [top, freq, 'BOOL', mean]\n\n return pd.Series(result, index=names, name=data.name)\n\ndef describe_constant_1d(data):\n return pd.Series(['CONST'], index=['type'], name=data.name)\n\n\ndef describe_unique_1d(data):\n return pd.Series(['UNIQUE'], index=['type'], name=data.name)\n\n\ndef describe_1d(data, **kwargs):\n leng = len(data) # number of observations in the Series\n count = data.count() # number of non-NaN observations in the Series\n\n # Replace infinite values with NaNs to avoid issues with\n # histograms later.\n data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)\n\n n_infinite = count - data.count() # number of infinte observations in the Series\n\n distinct_count = data.nunique(dropna=False) # number of unique elements in the Series\n if count > distinct_count > 1:\n mode = data.mode().iloc[0]\n else:\n mode = data[0]\n\n results_data = {'count': count,\n 'distinct_count': distinct_count,\n 'p_missing': 1 - count / leng,\n 'n_missing': leng - count,\n 'p_infinite': n_infinite / leng,\n 'n_infinite': n_infinite,\n 'is_unique': distinct_count == leng,\n 'mode': mode,\n 'p_unique': distinct_count / leng}\n try:\n # pandas 0.17 onwards\n results_data['memorysize'] = data.memory_usage()\n except:\n results_data['memorysize'] = 0\n\n result = pd.Series(results_data, name=data.name)\n\n vartype = get_vartype(data)\n if vartype == 'CONST':\n result = 
result.append(describe_constant_1d(data))\n elif vartype == 'BOOL':\n result = result.append(describe_boolean_1d(data, **kwargs))\n elif vartype == 'NUM':\n result = result.append(describe_numeric_1d(data, **kwargs))\n elif vartype == 'DATE':\n result = result.append(describe_date_1d(data, **kwargs))\n elif vartype == 'UNIQUE':\n result = result.append(describe_unique_1d(data, **kwargs))\n else:\n result = result.append(describe_categorical_1d(data))\n return result\n\n\ndef multiprocess_func(x, **kwargs):\n return x[0], describe_1d(x[1], **kwargs)\n\n\ndef describe(df, bins=10, check_correlation=True, correlation_overrides=None, pool_size=multiprocessing.cpu_count(), **kwargs):\n \"\"\"\n Generates a object containing summary statistics for a given DataFrame\n :param df: DataFrame to be analyzed\n :param bins: Number of bins in histogram\n :param check_correlation: Flag, set to False to skip correlation checks.\n :param correlation_overrides: Variable names not to be rejected because they are correlated\n :param pool_size: Number of workers in thread pool\n :return: Dictionary containing\n table: general statistics on the DataFrame\n variables: summary statistics for each variable\n freq: frequency table\n \"\"\"\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"df must be of type pandas.DataFrame\")\n if df.empty:\n raise ValueError(\"df can not be empty\")\n\n try:\n # reset matplotlib style before use\n # Fails in matplotlib 1.4.x so plot might look bad\n matplotlib.style.use(\"default\")\n except:\n pass\n\n matplotlib.style.use(resource_filename(__name__, \"pandas_profiling.mplstyle\"))\n\n if not pd.Index(np.arange(0, len(df))).equals(df.index):\n # Treat index as any other column\n df = df.reset_index()\n\n # Describe all variables in a univariate way\n pool = multiprocessing.Pool(pool_size)\n local_multiprocess_func = partial(multiprocess_func, **kwargs)\n ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}\n pool.close()\n\n # Check correlations between variable\n if check_correlation is True:\n ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9\n If x~y and y~z but not x~z, it would be better to delete only y\n Better way would be to find out which variable causes the highest increase in multicollinearity.\n '''\n corr = df.corr()\n for x, corr_x in corr.iterrows():\n if correlation_overrides and x in correlation_overrides:\n continue\n\n for y, corr in corr_x.iteritems():\n if x == y: break\n\n if corr > 0.9:\n ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])\n\n categorical_variables = [(name, data) for (name, data) in df.iteritems() if get_vartype(data)=='CAT']\n for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):\n if correlation_overrides and name1 in correlation_overrides:\n continue\n\n confusion_matrix=pd.crosstab(data1,data2)\n if confusion_matrix.values.diagonal().sum() == len(df):\n ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])\n\n # Convert ldesc to a DataFrame\n names = []\n ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)\n variable_stats.columns.names = df.columns.names\n\n # General statistics\n table_stats = {'n': len(df), 'nvar': len(df.columns)}\n table_stats['total_missing'] = 
variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])\n table_stats['n_duplicates'] = sum(df.duplicated())\n\n memsize = df.memory_usage(index=True).sum()\n table_stats['memsize'] = formatters.fmt_bytesize(memsize)\n table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])\n\n table_stats.update({k: 0 for k in (\"NUM\", \"DATE\", \"CONST\", \"CAT\", \"UNIQUE\", \"CORR\", \"RECODED\", \"BOOL\")})\n table_stats.update(dict(variable_stats.loc['type'].value_counts()))\n table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']\n\n return {'table': table_stats, 'variables': variable_stats.T, 'freq': {k: df[k].value_counts() for k in df.columns}}\n\n\ndef to_html(sample, stats_object):\n \"\"\"Generate a HTML report from summary statistics and a given sample.\n\n Parameters\n ----------\n sample: DataFrame containing the sample you want to print\n stats_object: Dictionary containing summary statistics. Should be generated with an appropriate describe() function\n\n Returns\n -------\n str, containing profile report in HTML format\n \"\"\"\n\n n_obs = stats_object['table']['n']\n\n value_formatters = formatters.value_formatters\n row_formatters = formatters.row_formatters\n\n if not isinstance(sample, pd.DataFrame):\n raise TypeError(\"sample must be of type pandas.DataFrame\")\n\n if not isinstance(stats_object, dict):\n raise TypeError(\"stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?\")\n\n if set(stats_object.keys()) != {'table', 'variables', 'freq'}:\n raise TypeError(\"stats_object badly formatted. Did you generate this using the pandas_profiling-eda.describe() function?\")\n\n def fmt(value, name):\n if pd.isnull(value):\n return \"\"\n if name in value_formatters:\n return value_formatters[name](value)\n elif isinstance(value, float):\n return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)\n else:\n if sys.version_info.major == 3:\n return str(value)\n else:\n return unicode(value)\n\n def _format_row(freq, label, max_freq, row_template, n, extra_class=''):\n width = int(freq / max_freq * 99) + 1\n if width > 20:\n label_in_bar = freq\n label_after_bar = \"\"\n else:\n label_in_bar = \"&nbsp;\"\n label_after_bar = freq\n\n return row_template.render(label=label,\n width=width,\n count=freq,\n percentage='{:2.1f}'.format(freq / n * 100),\n extra_class=extra_class,\n label_in_bar=label_in_bar,\n label_after_bar=label_after_bar)\n\n def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):\n\n freq_rows_html = u''\n\n if max_number_to_print > n:\n max_number_to_print=n\n\n if max_number_to_print < len(freqtable):\n freq_other = sum(freqtable.iloc[max_number_to_print:])\n min_freq = freqtable.values[max_number_to_print]\n else:\n freq_other = 0\n min_freq = 0\n\n freq_missing = n - sum(freqtable)\n max_freq = max(freqtable.values[0], freq_other, freq_missing)\n\n # TODO: Correctly sort missing and other\n\n for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n if freq_other > min_freq:\n freq_rows_html += _format_row(freq_other,\n \"Other values (%s)\" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,\n extra_class='other')\n\n if freq_missing > min_freq:\n freq_rows_html += _format_row(freq_missing, \"(Missing)\", max_freq, row_template, n, extra_class='missing')\n\n return 
table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)\n\n def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):\n if ascending:\n obs_to_print = freqtable.sort_index().iloc[:number_to_print]\n else:\n obs_to_print = freqtable.sort_index().iloc[-number_to_print:]\n\n freq_rows_html = ''\n max_freq = max(obs_to_print.values)\n\n for label, freq in six.iteritems(obs_to_print):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n return table_template.render(rows=freq_rows_html)\n\n # Variables\n rows_html = u\"\"\n messages = []\n\n for idx, row in stats_object['variables'].iterrows():\n\n formatted_values = {'varname': idx, 'varid': hash(idx)}\n row_classes = {}\n\n for col, value in six.iteritems(row):\n formatted_values[col] = fmt(value, col)\n\n for col in set(row.index) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](row[col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = formatters.fmt_varname(idx)))\n\n if row['type'] in {'CAT', 'BOOL'}:\n formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('mini_freq_table'), \n templates.template('mini_freq_table_row'), \n 3, \n templates.mini_freq_table_nb_col[row['type']])\n\n if row['distinct_count'] > 50:\n messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = formatters.fmt_varname(idx)))\n row_classes['distinct_count'] = \"alert\"\n else:\n row_classes['distinct_count'] = \"\"\n\n if row['type'] == 'UNIQUE':\n obs = stats_object['freq'][idx].index\n\n formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=[\"First 3 values\"]).to_html(classes=\"example_values\", index=False)\n formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=[\"Last 3 values\"]).to_html(classes=\"example_values\", index=False)\n\n if row['type'] in {'CORR', 'CONST', 'RECODED'}:\n formatted_values['varname'] = formatters.fmt_varname(idx)\n messages.append(templates.messages[row['type']].format(formatted_values))\n else:\n formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('freq_table'), templates.template('freq_table_row'), 10)\n formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)\n formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)\n\n rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)\n\n # Overview\n formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}\n\n row_classes={}\n for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](stats_object['table'][col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = formatters.fmt_varname(idx)))\n\n messages_html = u''\n for msg in messages:\n messages_html += templates.message_row.format(message=msg)\n\n overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)\n\n # Sample\n\n sample_html = 
templates.template('sample').render(sample_table_html=sample.to_html(classes=\"sample\"))\n # TODO: should be done in the template\n return templates.template('base').render({'overview_html': overview_html, 'rows_html': rows_html, 'sample_html': sample_html})\n", "path": "pandas_profiling/base.py" } ]
[ { "content": "from __future__ import division\n\nimport sys\n\nimport itertools\n\ntry:\n from StringIO import BytesIO\nexcept ImportError:\n from io import BytesIO\n\ntry:\n from urllib import quote\nexcept ImportError:\n from urllib.parse import quote\n\nimport base64\n\nimport matplotlib\n# Fix #68, this call is not needed and brings side effects in some use cases\n# matplotlib.use('Agg')\n\nimport numpy as np\nimport pandas as pd\nimport pandas_profiling.formatters as formatters, pandas_profiling.templates as templates\nfrom matplotlib import pyplot as plt\nfrom pkg_resources import resource_filename\nimport six\nimport multiprocessing\nfrom functools import partial\nfrom distutils.version import LooseVersion\n\n\ndef pretty_name(x):\n x *= 100\n if x == int(x):\n return '%.0f%%' % x\n else:\n return '%.1f%%' % x\n\n\ndef get_vartype(data):\n # TODO: Shall not be computed several times\n distinct_count=data.nunique(dropna=False)\n leng=len(data)\n if distinct_count <=1:\n return 'CONST'\n elif pd.api.types.is_bool_dtype(data):\n return 'BOOL'\n elif pd.api.types.is_numeric_dtype(data):\n return 'NUM'\n elif pd.api.types.is_datetime64_dtype(data):\n return 'DATE'\n elif distinct_count==leng:\n return 'UNIQUE'\n else:\n return 'CAT'\n\n\ndef describe_numeric_1d(series, **kwargs):\n stats = {'mean': series.mean(), 'std': series.std(), 'variance': series.var(), 'min': series.min(),\n 'max': series.max()}\n stats['range'] = stats['max'] - stats['min']\n\n for x in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):\n stats[pretty_name(x)] = series.dropna().quantile(x) # The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098\n stats['iqr'] = stats['75%'] - stats['25%']\n stats['kurtosis'] = series.kurt()\n stats['skewness'] = series.skew()\n stats['sum'] = series.sum()\n stats['mad'] = series.mad()\n stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN\n stats['type'] = \"NUM\"\n stats['n_zeros'] = (len(series) - np.count_nonzero(series))\n stats['p_zeros'] = stats['n_zeros'] / len(series)\n # Histograms\n stats['histogram'] = histogram(series, **kwargs)\n stats['mini_histogram'] = mini_histogram(series, **kwargs)\n return pd.Series(stats, name=series.name)\n\n\ndef _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):\n \"\"\"Plot an histogram from the data and return the AxesSubplot object.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot\n figsize: a tuple (width, height) in inches, default (6,4)\n The size of the figure.\n facecolor: str\n The color code.\n\n Returns\n -------\n matplotlib.AxesSubplot, The plot.\n \"\"\"\n if get_vartype(series) == 'DATE':\n # TODO: These calls should be merged\n fig = plt.figure(figsize=figsize)\n plot = fig.add_subplot(111)\n plot.set_ylabel('Frequency')\n try:\n plot.hist(series.values, facecolor=facecolor, bins=bins)\n except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead\n pass\n else:\n plot = series.plot(kind='hist', figsize=figsize,\n facecolor=facecolor,\n bins=bins) # TODO when running on server, send this off to a different thread\n return plot\n\n\ndef histogram(series, **kwargs):\n \"\"\"Plot an histogram of the data.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot.\n\n Returns\n -------\n str, The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, **kwargs)\n plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)\n 
plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n # TODO Think about writing this to disk instead of caching them in strings\n plt.close(plot.figure)\n return result_string\n\n\ndef mini_histogram(series, **kwargs):\n \"\"\"Plot a small (mini) histogram of the data.\n\n Parameters\n ----------\n series: Series, default None\n The data to plot.\n\n Returns\n -------\n str, The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)\n plot.axes.get_yaxis().set_visible(False)\n\n if LooseVersion(matplotlib.__version__) <= '1.5.9':\n plot.set_axis_bgcolor(\"w\")\n else:\n plot.set_facecolor(\"w\")\n\n xticks = plot.xaxis.get_major_ticks()\n for tick in xticks[1:-1]:\n tick.set_visible(False)\n tick.label.set_visible(False)\n for tick in (xticks[0], xticks[-1]):\n tick.label.set_fontsize(8)\n plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n plt.close(plot.figure)\n return result_string\n\n\ndef describe_date_1d(series):\n stats = {'min': series.min(), 'max': series.max()}\n stats['range'] = stats['max'] - stats['min']\n stats['type'] = \"DATE\"\n stats['histogram'] = histogram(series)\n stats['mini_histogram'] = mini_histogram(series)\n return pd.Series(stats, name=series.name)\n\n\ndef describe_categorical_1d(data):\n # Only run if at least 1 non-missing value\n objcounts = data.value_counts()\n top, freq = objcounts.index[0], objcounts.iloc[0]\n names = []\n result = []\n\n if get_vartype(data) == 'CAT':\n names += ['top', 'freq', 'type']\n result += [top, freq, 'CAT']\n\n return pd.Series(result, index=names, name=data.name)\n\ndef describe_boolean_1d(data):\n objcounts = data.value_counts()\n top, freq = objcounts.index[0], objcounts.iloc[0]\n # The mean of boolean is an interesting information\n mean = data.mean()\n names = []\n result = []\n names += ['top', 'freq', 'type', 'mean']\n result += [top, freq, 'BOOL', mean]\n\n return pd.Series(result, index=names, name=data.name)\n\ndef describe_constant_1d(data):\n return pd.Series(['CONST'], index=['type'], name=data.name)\n\n\ndef describe_unique_1d(data):\n return pd.Series(['UNIQUE'], index=['type'], name=data.name)\n\n\ndef describe_1d(data, **kwargs):\n leng = len(data) # number of observations in the Series\n count = data.count() # number of non-NaN observations in the Series\n\n # Replace infinite values with NaNs to avoid issues with\n # histograms later.\n data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)\n\n n_infinite = count - data.count() # number of infinte observations in the Series\n\n distinct_count = data.nunique(dropna=False) # number of unique elements in the Series\n if count > distinct_count > 1:\n mode = data.mode().iloc[0]\n else:\n mode = data[0]\n\n results_data = {'count': count,\n 'distinct_count': distinct_count,\n 'p_missing': 1 - count / leng,\n 'n_missing': leng - count,\n 'p_infinite': n_infinite / leng,\n 'n_infinite': n_infinite,\n 'is_unique': distinct_count == leng,\n 'mode': mode,\n 'p_unique': distinct_count / leng}\n try:\n # pandas 0.17 onwards\n results_data['memorysize'] = data.memory_usage()\n except:\n results_data['memorysize'] = 0\n\n result = pd.Series(results_data, name=data.name)\n\n vartype = get_vartype(data)\n if vartype 
== 'CONST':\n result = result.append(describe_constant_1d(data))\n elif vartype == 'BOOL':\n result = result.append(describe_boolean_1d(data, **kwargs))\n elif vartype == 'NUM':\n result = result.append(describe_numeric_1d(data, **kwargs))\n elif vartype == 'DATE':\n result = result.append(describe_date_1d(data, **kwargs))\n elif vartype == 'UNIQUE':\n result = result.append(describe_unique_1d(data, **kwargs))\n else:\n result = result.append(describe_categorical_1d(data))\n return result\n\n\ndef multiprocess_func(x, **kwargs):\n return x[0], describe_1d(x[1], **kwargs)\n\n\ndef describe(df, bins=10, check_correlation=True, correlation_overrides=None, pool_size=multiprocessing.cpu_count(), **kwargs):\n \"\"\"\n Generates a object containing summary statistics for a given DataFrame\n :param df: DataFrame to be analyzed\n :param bins: Number of bins in histogram\n :param check_correlation: Flag, set to False to skip correlation checks.\n :param correlation_overrides: Variable names not to be rejected because they are correlated\n :param pool_size: Number of workers in thread pool\n :return: Dictionary containing\n table: general statistics on the DataFrame\n variables: summary statistics for each variable\n freq: frequency table\n \"\"\"\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"df must be of type pandas.DataFrame\")\n if df.empty:\n raise ValueError(\"df can not be empty\")\n\n try:\n # reset matplotlib style before use\n # Fails in matplotlib 1.4.x so plot might look bad\n matplotlib.style.use(\"default\")\n except:\n pass\n\n matplotlib.style.use(resource_filename(__name__, \"pandas_profiling.mplstyle\"))\n\n if not pd.Index(np.arange(0, len(df))).equals(df.index):\n # Treat index as any other column\n df = df.reset_index()\n\n # Describe all variables in a univariate way\n pool = multiprocessing.Pool(pool_size)\n local_multiprocess_func = partial(multiprocess_func, **kwargs)\n ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}\n pool.close()\n\n # Check correlations between variable\n if check_correlation is True:\n ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9\n If x~y and y~z but not x~z, it would be better to delete only y\n Better way would be to find out which variable causes the highest increase in multicollinearity.\n '''\n corr = df.corr()\n for x, corr_x in corr.iterrows():\n if correlation_overrides and x in correlation_overrides:\n continue\n\n for y, corr in corr_x.iteritems():\n if x == y: break\n\n if corr > 0.9:\n ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])\n\n categorical_variables = [(name, data) for (name, data) in df.iteritems() if get_vartype(data)=='CAT']\n for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):\n if correlation_overrides and name1 in correlation_overrides:\n continue\n\n confusion_matrix=pd.crosstab(data1,data2)\n if confusion_matrix.values.diagonal().sum() == len(df):\n ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])\n\n # Convert ldesc to a DataFrame\n names = []\n ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)\n variable_stats.columns.names = df.columns.names\n\n # General statistics\n table_stats = {'n': len(df), 'nvar': len(df.columns)}\n 
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])\n table_stats['n_duplicates'] = sum(df.duplicated())\n\n memsize = df.memory_usage(index=True).sum()\n table_stats['memsize'] = formatters.fmt_bytesize(memsize)\n table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])\n\n table_stats.update({k: 0 for k in (\"NUM\", \"DATE\", \"CONST\", \"CAT\", \"UNIQUE\", \"CORR\", \"RECODED\", \"BOOL\")})\n table_stats.update(dict(variable_stats.loc['type'].value_counts()))\n table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']\n\n return {'table': table_stats, 'variables': variable_stats.T, 'freq': {k: df[k].value_counts() for k in df.columns}}\n\n\ndef to_html(sample, stats_object):\n \"\"\"Generate a HTML report from summary statistics and a given sample.\n\n Parameters\n ----------\n sample: DataFrame containing the sample you want to print\n stats_object: Dictionary containing summary statistics. Should be generated with an appropriate describe() function\n\n Returns\n -------\n str, containing profile report in HTML format\n \"\"\"\n\n n_obs = stats_object['table']['n']\n\n value_formatters = formatters.value_formatters\n row_formatters = formatters.row_formatters\n\n if not isinstance(sample, pd.DataFrame):\n raise TypeError(\"sample must be of type pandas.DataFrame\")\n\n if not isinstance(stats_object, dict):\n raise TypeError(\"stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?\")\n\n if set(stats_object.keys()) != {'table', 'variables', 'freq'}:\n raise TypeError(\"stats_object badly formatted. Did you generate this using the pandas_profiling-eda.describe() function?\")\n\n def fmt(value, name):\n if pd.isnull(value):\n return \"\"\n if name in value_formatters:\n return value_formatters[name](value)\n elif isinstance(value, float):\n return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)\n else:\n if sys.version_info.major == 3:\n return str(value)\n else:\n return unicode(value)\n\n def _format_row(freq, label, max_freq, row_template, n, extra_class=''):\n width = int(freq / max_freq * 99) + 1\n if width > 20:\n label_in_bar = freq\n label_after_bar = \"\"\n else:\n label_in_bar = \"&nbsp;\"\n label_after_bar = freq\n\n return row_template.render(label=label,\n width=width,\n count=freq,\n percentage='{:2.1f}'.format(freq / n * 100),\n extra_class=extra_class,\n label_in_bar=label_in_bar,\n label_after_bar=label_after_bar)\n\n def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):\n\n freq_rows_html = u''\n\n if max_number_to_print > n:\n max_number_to_print=n\n\n if max_number_to_print < len(freqtable):\n freq_other = sum(freqtable.iloc[max_number_to_print:])\n min_freq = freqtable.values[max_number_to_print]\n else:\n freq_other = 0\n min_freq = 0\n\n freq_missing = n - sum(freqtable)\n max_freq = max(freqtable.values[0], freq_other, freq_missing)\n\n # TODO: Correctly sort missing and other\n\n for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n if freq_other > min_freq:\n freq_rows_html += _format_row(freq_other,\n \"Other values (%s)\" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,\n extra_class='other')\n\n if freq_missing > min_freq:\n freq_rows_html += _format_row(freq_missing, \"(Missing)\", max_freq, row_template, n, 
extra_class='missing')\n\n return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)\n\n def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):\n if ascending:\n obs_to_print = freqtable.sort_index().iloc[:number_to_print]\n else:\n obs_to_print = freqtable.sort_index().iloc[-number_to_print:]\n\n freq_rows_html = ''\n max_freq = max(obs_to_print.values)\n\n for label, freq in six.iteritems(obs_to_print):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n return table_template.render(rows=freq_rows_html)\n\n # Variables\n rows_html = u\"\"\n messages = []\n\n for idx, row in stats_object['variables'].iterrows():\n\n formatted_values = {'varname': idx, 'varid': hash(idx)}\n row_classes = {}\n\n for col, value in six.iteritems(row):\n formatted_values[col] = fmt(value, col)\n\n for col in set(row.index) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](row[col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = formatters.fmt_varname(idx)))\n\n if row['type'] in {'CAT', 'BOOL'}:\n formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('mini_freq_table'), \n templates.template('mini_freq_table_row'), \n 3, \n templates.mini_freq_table_nb_col[row['type']])\n\n if row['distinct_count'] > 50:\n messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = formatters.fmt_varname(idx)))\n row_classes['distinct_count'] = \"alert\"\n else:\n row_classes['distinct_count'] = \"\"\n\n if row['type'] == 'UNIQUE':\n obs = stats_object['freq'][idx].index\n\n formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=[\"First 3 values\"]).to_html(classes=\"example_values\", index=False)\n formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=[\"Last 3 values\"]).to_html(classes=\"example_values\", index=False)\n\n if row['type'] in {'CORR', 'CONST', 'RECODED'}:\n formatted_values['varname'] = formatters.fmt_varname(idx)\n messages.append(templates.messages[row['type']].format(formatted_values))\n else:\n formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('freq_table'), templates.template('freq_table_row'), 10)\n formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)\n formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)\n\n rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)\n\n # Overview\n formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}\n\n row_classes={}\n for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](stats_object['table'][col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = formatters.fmt_varname(idx)))\n\n messages_html = u''\n for msg in messages:\n messages_html += templates.message_row.format(message=msg)\n\n overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)\n\n # 
Sample\n\n sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes=\"sample\"))\n # TODO: should be done in the template\n return templates.template('base').render({'overview_html': overview_html, 'rows_html': rows_html, 'sample_html': sample_html})\n", "path": "pandas_profiling/base.py" } ]
diff --git a/pandas_profiling/base.py b/pandas_profiling/base.py
index 63c2b25df..a87b3443e 100644
--- a/pandas_profiling/base.py
+++ b/pandas_profiling/base.py
@@ -17,7 +17,8 @@
 import base64
 
 import matplotlib
-matplotlib.use('Agg')
+# Fix #68, this call is not needed and brings side effects in some use cases
+# matplotlib.use('Agg')
 
 import numpy as np
 import pandas as pd
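The patch above only comments out matplotlib.use('Agg'). The side effect it refers to is that matplotlib.use() switches the backend for the whole process, so a library calling it at import time silently overrides whatever backend the user had selected (for example an inline notebook backend). A tiny illustration of that global effect, not taken from the project:

import matplotlib

print(matplotlib.get_backend())  # whichever backend the user/environment selected
matplotlib.use("Agg")            # the kind of module-level call the patch removes
print(matplotlib.get_backend())  # now the Agg backend, for the entire process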
hedyorg__hedy-214
[ { "content": "# coding=utf-8\nimport datetime\nimport collections\nfrom functools import wraps\nimport hedy\nimport json\nimport jsonbin\nimport logging\nimport os\nfrom os import path\nimport re\nimport requests\nimport uuid\nimport yaml\nfrom flask_commonmark import Commonmark\nfrom werkzeug.urls import url_encode\nfrom config import config\nfrom auth import auth_templates, current_user, requires_login, is_admin\nfrom utils import db_get, db_get_many, db_set, timems, type_check, object_check, db_del\n\n# app.py\nfrom flask import Flask, request, jsonify, render_template, session, abort, g, redirect\nfrom flask_compress import Compress\n\n# Hedy-specific modules\nimport courses\nimport hedyweb\n\n# Define and load all available language data\nALL_LANGUAGES = {\n 'en': 'English',\n 'nl': 'Nederlands',\n 'es': 'EspaΓ±ol',\n 'fr': 'FranΓ§ais',\n 'pt_br': 'PortuguΓͺs',\n 'de': 'Deutsch',\n}\n\nLEVEL_DEFAULTS = collections.defaultdict(courses.NoSuchDefaults)\nfor lang in ALL_LANGUAGES.keys():\n LEVEL_DEFAULTS[lang] = courses.LevelDefaults(lang)\n\nHEDY_COURSE = collections.defaultdict(courses.NoSuchCourse)\nfor lang in ALL_LANGUAGES.keys():\n HEDY_COURSE[lang] = courses.Course('hedy', lang, LEVEL_DEFAULTS[lang])\n\nSPACE_EU_COURSE = {'nl': courses.Course('space_eu', 'nl', LEVEL_DEFAULTS['nl']),\n 'en': courses.Course('space_eu', 'en', LEVEL_DEFAULTS['en']),\n 'es': courses.Course('space_eu', 'es', LEVEL_DEFAULTS['es'])\n }\n\nONLINE_MASTERS_COURSE = courses.Course('online_masters', 'nl', LEVEL_DEFAULTS['nl'])\n\nTRANSLATIONS = hedyweb.Translations()\n\n# Load main menu (do it once, can be cached)\nwith open(f'main/menu.json', 'r') as f:\n main_menu_json = json.load(f)\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] %(levelname)-8s: %(message)s')\n\napp = Flask(__name__, static_url_path='')\n\n# HTTP -> HTTPS redirect\n# https://stackoverflow.com/questions/32237379/python-flask-redirect-to-https-from-http/32238093\nif os.getenv ('REDIRECT_HTTP_TO_HTTPS'):\n @app.before_request\n def before_request():\n if request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n # We use a 302 in case we need to revert the redirect.\n return redirect(url, code=302)\n\n# Unique random key for sessions\napp.config['SECRET_KEY'] = uuid.uuid4().hex\n\nCompress(app)\nCommonmark(app)\nlogger = jsonbin.JsonBinLogger.from_env_vars()\n\nif not os.getenv('HEROKU_RELEASE_CREATED_AT'):\n logging.warning('Cannot determine release; enable Dyno metadata by running \"heroku labs:enable runtime-dyno-metadata -a <APP_NAME>\"')\n\n@app.route('/parse', methods=['POST'])\ndef parse():\n body = request.json\n if not body:\n return \"body must be an object\", 400\n if 'code' not in body:\n return \"body.code must be a string\", 400\n if 'level' not in body:\n return \"body.level must be a string\", 400\n\n code = body ['code']\n level = int(body ['level'])\n # Language should come principally from the request body,\n # but we'll fall back to browser default if it's missing for whatever\n # reason.\n lang = body.get('lang', requested_lang())\n\n # For debugging\n print(f\"got code {code}\")\n\n response = {}\n username = current_user(request) ['username'] or None\n\n # Check if user sent code\n if not code:\n response[\"Error\"] = \"no code found, please send code.\"\n # is so, parse\n else:\n try:\n hedy_errors = TRANSLATIONS.get_translations(lang, 'HedyErrorMessages')\n result = hedy.transpile(code, level)\n response[\"Code\"] = \"# coding=utf8\\n\" + result\n except 
hedy.HedyException as E:\n # some 'errors' can be fixed, for these we throw an exception, but also\n # return fixed code, so it can be ran\n if E.args[0] == \"Invalid Space\":\n error_template = hedy_errors[E.error_code]\n response[\"Code\"] = \"# coding=utf8\\n\" + E.arguments['fixed_code']\n response[\"Warning\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Parse\":\n error_template = hedy_errors[E.error_code]\n # Localize the names of characters\n # Localize the names of characters\n if 'character_found' in E.arguments:\n E.arguments['character_found'] = hedy_errors[E.arguments['character_found']]\n response[\"Error\"] = error_template.format(**E.arguments)\n else:\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n except Exception as E:\n print(f\"error transpiling {code}\")\n response[\"Error\"] = str(E)\n\n logger.log({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': level,\n 'lang': lang,\n 'code': code,\n 'server_error': response.get('Error'),\n 'version': version(),\n 'username': username\n })\n\n return jsonify(response)\n\n@app.route('/report_error', methods=['POST'])\ndef report_error():\n post_body = request.json\n\n logger.log({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': post_body.get('level'),\n 'code': post_body.get('code'),\n 'client_error': post_body.get('client_error'),\n 'version': version(),\n 'username': current_user(request) ['username'] or None\n })\n\n return 'logged'\n\ndef programs_page (request):\n username = current_user(request) ['username']\n if not username:\n return \"unauthorized\", 403\n\n lang = requested_lang()\n query_lang = request.args.get('lang') or ''\n if query_lang:\n query_lang = '?lang=' + query_lang\n\n from_user = request.args.get('user') or None\n if from_user and not is_admin (request):\n return \"unauthorized\", 403\n\n texts=TRANSLATIONS.data [lang] ['Programs']\n\n result = db_get_many ('programs', {'username': from_user or username}, True)\n programs = []\n now = timems ()\n for item in result:\n measure = texts ['minutes']\n date = round ((now - item ['date']) / 60000)\n if date > 90:\n measure = texts ['hours']\n date = round (date / 60)\n if date > 36:\n measure = texts ['days']\n\n date = round (date / 24)\n\n programs.append ({'id': item ['id'], 'code': item ['code'], 'date': texts ['ago-1'] + ' ' + str (date) + ' ' + measure + ' ' + texts ['ago-2'], 'level': item ['level'], 'name': item ['name']})\n\n return render_template('programs.html', lang=requested_lang(), menu=render_main_menu('programs'), texts=texts, auth=TRANSLATIONS.data [lang] ['Auth'], programs=programs, username=username, current_page='programs', query_lang=query_lang, from_user=from_user)\n\n# @app.route('/post/', methods=['POST'])\n# for now we do not need a post but I am leaving it in for a potential future\n\n# routing to index.html\n@app.route('/hedy', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/hedy/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/hedy/<level>/<step>', methods=['GET'])\ndef index(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/hedy'\n\n # If step is a string that has more than two characters, it must be an id of a program\n if step and type_check (step, 'str') and len (step) > 2:\n result = db_get ('programs', {'id': step})\n if not result:\n return 'No such 
program', 404\n # Allow both the owner of the program and the admin user to access the program\n user = current_user (request)\n if user ['username'] != result ['username'] and not is_admin (request):\n return 'No such program!', 404\n loaded_program = result ['code']\n # We default to step 1 to provide a meaningful default assignment\n step = 1\n else:\n loaded_program = None\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n loaded_program=loaded_program)\n\n@app.route('/onlinemasters', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/onlinemasters/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/onlinemasters/<level>/<step>', methods=['GET'])\ndef onlinemasters(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = lang = requested_lang()\n g.prefix = '/onlinemasters'\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=ONLINE_MASTERS_COURSE,\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n loaded_program=None)\n\n@app.route('/space_eu', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/space_eu/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/space_eu/<level>/<step>', methods=['GET'])\ndef space_eu(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/space_eu'\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=SPACE_EU_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n loaded_program=None)\n\n\n\n@app.route('/error_messages.js', methods=['GET'])\ndef error():\n error_messages = TRANSLATIONS.get_translations(requested_lang(), \"ClientErrorMessages\")\n return render_template(\"error_messages.js\", error_messages=json.dumps(error_messages))\n\n\n@app.errorhandler(500)\ndef internal_error(exception):\n import traceback\n print(traceback.format_exc())\n return \"<h1>500 Internal Server Error</h1>\"\n\n@app.route('/index.html')\n@app.route('/')\ndef default_landing_page():\n return main_page('start')\n\n@app.route('/<page>')\ndef main_page(page):\n if page == 'favicon.ico':\n abort(404)\n\n lang = requested_lang()\n effective_lang = lang\n\n if page in ['signup', 'login', 'my-profile', 'recover', 'reset', 'admin']:\n return auth_templates(page, lang, render_main_menu(page), request)\n\n if page == 'programs':\n return programs_page(request)\n\n # Default to English if requested language is not available\n if not path.isfile(f'main/{page}-{effective_lang}.md'):\n effective_lang = 'en'\n\n try:\n with open(f'main/{page}-{effective_lang}.md', 'r') as f:\n contents = f.read()\n except IOError:\n abort(404)\n\n front_matter, markdown = split_markdown_front_matter(contents)\n\n menu = render_main_menu(page)\n return render_template('main-page.html', mkd=markdown, lang=lang, menu=menu, username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'], **front_matter)\n\n\ndef session_id():\n \"\"\"Returns or sets the current session ID.\"\"\"\n if 'session_id' not in session:\n session['session_id'] = uuid.uuid4().hex\n return session['session_id']\n\n\ndef requested_lang():\n \"\"\"Return 
the user's requested language code.\n\n If not in the request parameters, use the browser's accept-languages\n header to do language negotiation.\n \"\"\"\n lang = request.args.get(\"lang\")\n if lang: return lang\n\n return request.accept_languages.best_match(ALL_LANGUAGES.keys(), 'en')\n\n@app.template_global()\ndef current_language():\n return make_lang_obj(requested_lang())\n\n@app.template_global()\ndef hedy_link(level_nr, assignment_nr, subpage=None, lang=None):\n \"\"\"Make a link to a Hedy page.\"\"\"\n parts = [g.prefix]\n parts.append('/' + str(level_nr))\n if str(assignment_nr) != '1' or subpage:\n parts.append('/' + str(assignment_nr if assignment_nr else '1'))\n if subpage and subpage != 'code':\n parts.append('/' + subpage)\n parts.append('?')\n parts.append('lang=' + (lang if lang else requested_lang()))\n return ''.join(parts)\n\n@app.template_global()\ndef other_languages():\n cl = requested_lang()\n return [make_lang_obj(l) for l in ALL_LANGUAGES.keys() if l != cl]\n\n\ndef make_lang_obj(lang):\n \"\"\"Make a language object for a given language.\"\"\"\n return {\n 'sym': ALL_LANGUAGES[lang],\n 'lang': lang\n }\n\n\n@app.template_global()\ndef modify_query(**new_values):\n args = request.args.copy()\n\n for key, value in new_values.items():\n args[key] = value\n\n return '{}?{}'.format(request.path, url_encode(args))\n\n\ndef no_none_sense(d):\n \"\"\"Remove all None values from a dict.\"\"\"\n return {k: v for k, v in d.items() if v is not None}\n\n\ndef version():\n \"\"\"Get the version from the Heroku environment variables.\"\"\"\n if not os.getenv('DYNO'):\n # Not on Heroku\n return 'DEV'\n\n vrz = os.getenv('HEROKU_RELEASE_CREATED_AT')\n the_date = datetime.date.fromisoformat(vrz[:10]) if vrz else datetime.date.today()\n\n commit = os.getenv('HEROKU_SLUG_COMMIT', '????')[0:6]\n return the_date.strftime('%b %d') + f' ({commit})'\n\n\ndef split_markdown_front_matter(md):\n parts = re.split('^---', md, 1, re.M)\n if len(parts) == 1:\n return {}, md\n # safe_load returns 'None' if the string is empty\n front_matter = yaml.safe_load(parts[0]) or {}\n return front_matter, parts[1]\n\n\ndef render_main_menu(current_page):\n \"\"\"Render a list of (caption, href, selected, color) from the main menu.\"\"\"\n return [dict(\n caption=item.get(requested_lang(), item.get('en', '???')),\n href='/' + item['_'],\n selected=(current_page == item['_']),\n accent_color=item.get('accent_color', 'white')\n ) for item in main_menu_json['nav']]\n\n# *** PROGRAMS ***\n\n# Not very restful to use a GET to delete something, but indeed convenient; we can do it with a single link and avoiding AJAX.\n@app.route('/programs/delete/<program_id>', methods=['GET'])\n@requires_login\ndef delete_program (user, program_id):\n result = db_get ('programs', {'id': program_id})\n if not result or result ['username'] != user ['username']:\n return \"\", 404\n db_del ('programs', {'id': program_id})\n return redirect ('/programs')\n\n@app.route('/programs', methods=['POST'])\n@requires_login\ndef save_program (user):\n\n body = request.json\n if not type_check (body, 'dict'):\n return 'body must be an object', 400\n if not object_check (body, 'code', 'str'):\n return 'code must be a string', 400\n if not object_check (body, 'name', 'str'):\n return 'name must be a string', 400\n if not object_check (body, 'level', 'int'):\n return 'level must be an integer', 400\n\n # We execute the saved program to see if it would generate an error or not\n error = None\n try:\n hedy_errors = 
TRANSLATIONS.get_translations(requested_lang(), 'HedyErrorMessages')\n result = hedy.transpile(body ['code'], body ['level'])\n except hedy.HedyException as E:\n error_template = hedy_errors[E.error_code]\n error = error_template.format(**E.arguments)\n except Exception as E:\n error = str(E)\n\n name = body ['name']\n\n # We check if a program with a name `xyz` exists in the database for the username. If it does, we exist whether `xyz (1)` exists, until we find a program `xyz (NN)` that doesn't exist yet.\n # It'd be ideal to search by username & program name, but since DynamoDB doesn't allow searching for two indexes at the same time, this would require to create a special index to that effect, which is cumbersome.\n # For now, we bring all existing programs for the user and then search within them for repeated names.\n existing = db_get_many ('programs', {'username': user ['username']}, True)\n name_counter = 0\n for program in existing:\n if re.match ('^' + re.escape (name) + '( \\(\\d+\\))*', program ['name']):\n name_counter = name_counter + 1\n if name_counter:\n name = name + ' (' + str (name_counter) + ')'\n\n db_set('programs', {\n 'id': uuid.uuid4().hex,\n 'session': session_id(),\n 'date': timems (),\n 'lang': requested_lang(),\n 'version': version(),\n 'level': body ['level'],\n 'code': body ['code'],\n 'name': name,\n 'server_error': error,\n 'username': user ['username']\n })\n\n return jsonify({})\n\n# *** AUTH ***\n\nimport auth\nauth.routes(app, requested_lang)\n\n# *** START SERVER ***\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=config ['port'])\n", "path": "app.py" } ]
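A hedged usage sketch of the `/parse` route defined in the file above. The host and port are placeholders (the app binds to `config['port']`), and the payload keys follow the validation shown in the handler: `code` and `level` are required, `lang` is optional and falls back to the browser language.

```python
# Illustrative only: exercising the /parse route shown above.
# Host/port are assumptions; the server reads its port from config['port'].
import requests

resp = requests.post(
    "http://localhost:5000/parse",
    json={
        "code": "print hello",  # program text to transpile
        "level": "1",           # the handler converts this with int()
        "lang": "en",           # optional; defaults to requested_lang()
    },
)
# On success the body carries "Code"; otherwise "Error" (and, for recoverable
# mistakes such as "Invalid Space", a "Warning" plus auto-fixed "Code").
print(resp.json())
```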
[ { "content": "# coding=utf-8\nimport datetime\nimport collections\nfrom functools import wraps\nimport hedy\nimport json\nimport jsonbin\nimport logging\nimport os\nfrom os import path\nimport re\nimport requests\nimport uuid\nimport yaml\nfrom flask_commonmark import Commonmark\nfrom werkzeug.urls import url_encode\nfrom config import config\nfrom auth import auth_templates, current_user, requires_login, is_admin\nfrom utils import db_get, db_get_many, db_set, timems, type_check, object_check, db_del\n\n# app.py\nfrom flask import Flask, request, jsonify, render_template, session, abort, g, redirect\nfrom flask_compress import Compress\n\n# Hedy-specific modules\nimport courses\nimport hedyweb\n\n# Define and load all available language data\nALL_LANGUAGES = {\n 'en': 'English',\n 'nl': 'Nederlands',\n 'es': 'EspaΓ±ol',\n 'fr': 'FranΓ§ais',\n 'pt_br': 'PortuguΓͺs',\n 'de': 'Deutsch',\n 'it': 'Italiano'\n}\n\nLEVEL_DEFAULTS = collections.defaultdict(courses.NoSuchDefaults)\nfor lang in ALL_LANGUAGES.keys():\n LEVEL_DEFAULTS[lang] = courses.LevelDefaults(lang)\n\nHEDY_COURSE = collections.defaultdict(courses.NoSuchCourse)\nfor lang in ALL_LANGUAGES.keys():\n HEDY_COURSE[lang] = courses.Course('hedy', lang, LEVEL_DEFAULTS[lang])\n\nSPACE_EU_COURSE = {'nl': courses.Course('space_eu', 'nl', LEVEL_DEFAULTS['nl']),\n 'en': courses.Course('space_eu', 'en', LEVEL_DEFAULTS['en']),\n 'es': courses.Course('space_eu', 'es', LEVEL_DEFAULTS['es'])\n }\n\nONLINE_MASTERS_COURSE = courses.Course('online_masters', 'nl', LEVEL_DEFAULTS['nl'])\n\nTRANSLATIONS = hedyweb.Translations()\n\n# Load main menu (do it once, can be cached)\nwith open(f'main/menu.json', 'r') as f:\n main_menu_json = json.load(f)\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] %(levelname)-8s: %(message)s')\n\napp = Flask(__name__, static_url_path='')\n\n# HTTP -> HTTPS redirect\n# https://stackoverflow.com/questions/32237379/python-flask-redirect-to-https-from-http/32238093\nif os.getenv ('REDIRECT_HTTP_TO_HTTPS'):\n @app.before_request\n def before_request():\n if request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n # We use a 302 in case we need to revert the redirect.\n return redirect(url, code=302)\n\n# Unique random key for sessions\napp.config['SECRET_KEY'] = uuid.uuid4().hex\n\nCompress(app)\nCommonmark(app)\nlogger = jsonbin.JsonBinLogger.from_env_vars()\n\nif not os.getenv('HEROKU_RELEASE_CREATED_AT'):\n logging.warning('Cannot determine release; enable Dyno metadata by running \"heroku labs:enable runtime-dyno-metadata -a <APP_NAME>\"')\n\n@app.route('/parse', methods=['POST'])\ndef parse():\n body = request.json\n if not body:\n return \"body must be an object\", 400\n if 'code' not in body:\n return \"body.code must be a string\", 400\n if 'level' not in body:\n return \"body.level must be a string\", 400\n\n code = body ['code']\n level = int(body ['level'])\n # Language should come principally from the request body,\n # but we'll fall back to browser default if it's missing for whatever\n # reason.\n lang = body.get('lang', requested_lang())\n\n # For debugging\n print(f\"got code {code}\")\n\n response = {}\n username = current_user(request) ['username'] or None\n\n # Check if user sent code\n if not code:\n response[\"Error\"] = \"no code found, please send code.\"\n # is so, parse\n else:\n try:\n hedy_errors = TRANSLATIONS.get_translations(lang, 'HedyErrorMessages')\n result = hedy.transpile(code, level)\n response[\"Code\"] = \"# coding=utf8\\n\" + 
result\n except hedy.HedyException as E:\n # some 'errors' can be fixed, for these we throw an exception, but also\n # return fixed code, so it can be ran\n if E.args[0] == \"Invalid Space\":\n error_template = hedy_errors[E.error_code]\n response[\"Code\"] = \"# coding=utf8\\n\" + E.arguments['fixed_code']\n response[\"Warning\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Parse\":\n error_template = hedy_errors[E.error_code]\n # Localize the names of characters\n # Localize the names of characters\n if 'character_found' in E.arguments:\n E.arguments['character_found'] = hedy_errors[E.arguments['character_found']]\n response[\"Error\"] = error_template.format(**E.arguments)\n else:\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n except Exception as E:\n print(f\"error transpiling {code}\")\n response[\"Error\"] = str(E)\n\n logger.log({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': level,\n 'lang': lang,\n 'code': code,\n 'server_error': response.get('Error'),\n 'version': version(),\n 'username': username\n })\n\n return jsonify(response)\n\n@app.route('/report_error', methods=['POST'])\ndef report_error():\n post_body = request.json\n\n logger.log({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': post_body.get('level'),\n 'code': post_body.get('code'),\n 'client_error': post_body.get('client_error'),\n 'version': version(),\n 'username': current_user(request) ['username'] or None\n })\n\n return 'logged'\n\ndef programs_page (request):\n username = current_user(request) ['username']\n if not username:\n return \"unauthorized\", 403\n\n lang = requested_lang()\n query_lang = request.args.get('lang') or ''\n if query_lang:\n query_lang = '?lang=' + query_lang\n\n from_user = request.args.get('user') or None\n if from_user and not is_admin (request):\n return \"unauthorized\", 403\n\n texts=TRANSLATIONS.data [lang] ['Programs']\n\n result = db_get_many ('programs', {'username': from_user or username}, True)\n programs = []\n now = timems ()\n for item in result:\n measure = texts ['minutes']\n date = round ((now - item ['date']) / 60000)\n if date > 90:\n measure = texts ['hours']\n date = round (date / 60)\n if date > 36:\n measure = texts ['days']\n\n date = round (date / 24)\n\n programs.append ({'id': item ['id'], 'code': item ['code'], 'date': texts ['ago-1'] + ' ' + str (date) + ' ' + measure + ' ' + texts ['ago-2'], 'level': item ['level'], 'name': item ['name']})\n\n return render_template('programs.html', lang=requested_lang(), menu=render_main_menu('programs'), texts=texts, auth=TRANSLATIONS.data [lang] ['Auth'], programs=programs, username=username, current_page='programs', query_lang=query_lang, from_user=from_user)\n\n# @app.route('/post/', methods=['POST'])\n# for now we do not need a post but I am leaving it in for a potential future\n\n# routing to index.html\n@app.route('/hedy', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/hedy/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/hedy/<level>/<step>', methods=['GET'])\ndef index(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/hedy'\n\n # If step is a string that has more than two characters, it must be an id of a program\n if step and type_check (step, 'str') and len (step) > 2:\n result = db_get ('programs', {'id': step})\n if not result:\n 
return 'No such program', 404\n # Allow both the owner of the program and the admin user to access the program\n user = current_user (request)\n if user ['username'] != result ['username'] and not is_admin (request):\n return 'No such program!', 404\n loaded_program = result ['code']\n # We default to step 1 to provide a meaningful default assignment\n step = 1\n else:\n loaded_program = None\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n loaded_program=loaded_program)\n\n@app.route('/onlinemasters', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/onlinemasters/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/onlinemasters/<level>/<step>', methods=['GET'])\ndef onlinemasters(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = lang = requested_lang()\n g.prefix = '/onlinemasters'\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=ONLINE_MASTERS_COURSE,\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n loaded_program=None)\n\n@app.route('/space_eu', methods=['GET'], defaults={'level': 1, 'step': 1})\n@app.route('/space_eu/<level>', methods=['GET'], defaults={'step': 1})\n@app.route('/space_eu/<level>/<step>', methods=['GET'])\ndef space_eu(level, step):\n session_id() # Run this for the side effect of generating a session ID\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/space_eu'\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=SPACE_EU_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n loaded_program=None)\n\n\n\n@app.route('/error_messages.js', methods=['GET'])\ndef error():\n error_messages = TRANSLATIONS.get_translations(requested_lang(), \"ClientErrorMessages\")\n return render_template(\"error_messages.js\", error_messages=json.dumps(error_messages))\n\n\n@app.errorhandler(500)\ndef internal_error(exception):\n import traceback\n print(traceback.format_exc())\n return \"<h1>500 Internal Server Error</h1>\"\n\n@app.route('/index.html')\n@app.route('/')\ndef default_landing_page():\n return main_page('start')\n\n@app.route('/<page>')\ndef main_page(page):\n if page == 'favicon.ico':\n abort(404)\n\n lang = requested_lang()\n effective_lang = lang\n\n if page in ['signup', 'login', 'my-profile', 'recover', 'reset', 'admin']:\n return auth_templates(page, lang, render_main_menu(page), request)\n\n if page == 'programs':\n return programs_page(request)\n\n # Default to English if requested language is not available\n if not path.isfile(f'main/{page}-{effective_lang}.md'):\n effective_lang = 'en'\n\n try:\n with open(f'main/{page}-{effective_lang}.md', 'r') as f:\n contents = f.read()\n except IOError:\n abort(404)\n\n front_matter, markdown = split_markdown_front_matter(contents)\n\n menu = render_main_menu(page)\n return render_template('main-page.html', mkd=markdown, lang=lang, menu=menu, username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'], **front_matter)\n\n\ndef session_id():\n \"\"\"Returns or sets the current session ID.\"\"\"\n if 'session_id' not in session:\n session['session_id'] = uuid.uuid4().hex\n return session['session_id']\n\n\ndef 
requested_lang():\n \"\"\"Return the user's requested language code.\n\n If not in the request parameters, use the browser's accept-languages\n header to do language negotiation.\n \"\"\"\n lang = request.args.get(\"lang\")\n if lang: return lang\n\n return request.accept_languages.best_match(ALL_LANGUAGES.keys(), 'en')\n\n@app.template_global()\ndef current_language():\n return make_lang_obj(requested_lang())\n\n@app.template_global()\ndef hedy_link(level_nr, assignment_nr, subpage=None, lang=None):\n \"\"\"Make a link to a Hedy page.\"\"\"\n parts = [g.prefix]\n parts.append('/' + str(level_nr))\n if str(assignment_nr) != '1' or subpage:\n parts.append('/' + str(assignment_nr if assignment_nr else '1'))\n if subpage and subpage != 'code':\n parts.append('/' + subpage)\n parts.append('?')\n parts.append('lang=' + (lang if lang else requested_lang()))\n return ''.join(parts)\n\n@app.template_global()\ndef other_languages():\n cl = requested_lang()\n return [make_lang_obj(l) for l in ALL_LANGUAGES.keys() if l != cl]\n\n\ndef make_lang_obj(lang):\n \"\"\"Make a language object for a given language.\"\"\"\n return {\n 'sym': ALL_LANGUAGES[lang],\n 'lang': lang\n }\n\n\n@app.template_global()\ndef modify_query(**new_values):\n args = request.args.copy()\n\n for key, value in new_values.items():\n args[key] = value\n\n return '{}?{}'.format(request.path, url_encode(args))\n\n\ndef no_none_sense(d):\n \"\"\"Remove all None values from a dict.\"\"\"\n return {k: v for k, v in d.items() if v is not None}\n\n\ndef version():\n \"\"\"Get the version from the Heroku environment variables.\"\"\"\n if not os.getenv('DYNO'):\n # Not on Heroku\n return 'DEV'\n\n vrz = os.getenv('HEROKU_RELEASE_CREATED_AT')\n the_date = datetime.date.fromisoformat(vrz[:10]) if vrz else datetime.date.today()\n\n commit = os.getenv('HEROKU_SLUG_COMMIT', '????')[0:6]\n return the_date.strftime('%b %d') + f' ({commit})'\n\n\ndef split_markdown_front_matter(md):\n parts = re.split('^---', md, 1, re.M)\n if len(parts) == 1:\n return {}, md\n # safe_load returns 'None' if the string is empty\n front_matter = yaml.safe_load(parts[0]) or {}\n return front_matter, parts[1]\n\n\ndef render_main_menu(current_page):\n \"\"\"Render a list of (caption, href, selected, color) from the main menu.\"\"\"\n return [dict(\n caption=item.get(requested_lang(), item.get('en', '???')),\n href='/' + item['_'],\n selected=(current_page == item['_']),\n accent_color=item.get('accent_color', 'white')\n ) for item in main_menu_json['nav']]\n\n# *** PROGRAMS ***\n\n# Not very restful to use a GET to delete something, but indeed convenient; we can do it with a single link and avoiding AJAX.\n@app.route('/programs/delete/<program_id>', methods=['GET'])\n@requires_login\ndef delete_program (user, program_id):\n result = db_get ('programs', {'id': program_id})\n if not result or result ['username'] != user ['username']:\n return \"\", 404\n db_del ('programs', {'id': program_id})\n return redirect ('/programs')\n\n@app.route('/programs', methods=['POST'])\n@requires_login\ndef save_program (user):\n\n body = request.json\n if not type_check (body, 'dict'):\n return 'body must be an object', 400\n if not object_check (body, 'code', 'str'):\n return 'code must be a string', 400\n if not object_check (body, 'name', 'str'):\n return 'name must be a string', 400\n if not object_check (body, 'level', 'int'):\n return 'level must be an integer', 400\n\n # We execute the saved program to see if it would generate an error or not\n error = None\n try:\n hedy_errors 
= TRANSLATIONS.get_translations(requested_lang(), 'HedyErrorMessages')\n result = hedy.transpile(body ['code'], body ['level'])\n except hedy.HedyException as E:\n error_template = hedy_errors[E.error_code]\n error = error_template.format(**E.arguments)\n except Exception as E:\n error = str(E)\n\n name = body ['name']\n\n # We check if a program with a name `xyz` exists in the database for the username. If it does, we exist whether `xyz (1)` exists, until we find a program `xyz (NN)` that doesn't exist yet.\n # It'd be ideal to search by username & program name, but since DynamoDB doesn't allow searching for two indexes at the same time, this would require to create a special index to that effect, which is cumbersome.\n # For now, we bring all existing programs for the user and then search within them for repeated names.\n existing = db_get_many ('programs', {'username': user ['username']}, True)\n name_counter = 0\n for program in existing:\n if re.match ('^' + re.escape (name) + '( \\(\\d+\\))*', program ['name']):\n name_counter = name_counter + 1\n if name_counter:\n name = name + ' (' + str (name_counter) + ')'\n\n db_set('programs', {\n 'id': uuid.uuid4().hex,\n 'session': session_id(),\n 'date': timems (),\n 'lang': requested_lang(),\n 'version': version(),\n 'level': body ['level'],\n 'code': body ['code'],\n 'name': name,\n 'server_error': error,\n 'username': user ['username']\n })\n\n return jsonify({})\n\n# *** AUTH ***\n\nimport auth\nauth.routes(app, requested_lang)\n\n# *** START SERVER ***\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=config ['port'])\n", "path": "app.py" } ]
diff --git a/app.py b/app.py index 6290f006f74..128b93e48d3 100644 --- a/app.py +++ b/app.py @@ -34,6 +34,7 @@ 'fr': 'FranΓ§ais', 'pt_br': 'PortuguΓͺs', 'de': 'Deutsch', + 'it': 'Italiano' } LEVEL_DEFAULTS = collections.defaultdict(courses.NoSuchDefaults) diff --git a/coursedata/course/hedy/it.yaml b/coursedata/course/hedy/it.yaml new file mode 100644 index 00000000000..69e4098749a --- /dev/null +++ b/coursedata/course/hedy/it.yaml @@ -0,0 +1,11 @@ +--- +# This file seems rather empty but the assignments all use the defaults from 'level-defaults' +course: + - level: "1" + - level: "2" + - level: "3" + - level: "4" + - level: "5" + - level: "6" + - level: "7" + diff --git a/coursedata/level-defaults/it.yaml b/coursedata/level-defaults/it.yaml new file mode 100644 index 00000000000..cd958cf9d06 --- /dev/null +++ b/coursedata/level-defaults/it.yaml @@ -0,0 +1,139 @@ +--- + 1: + intro_text: "Nel primo livello puoi usare questi comandi:" + start_code: "print ciao mondo!" + commands: + - + name: "print" + explanation: "Scrivi qualcosa con 'print'." + example: "Esempio: print Ciao benvenuta/o su Hedy!" + demo_code: "print Ciao benvenuta/o su Hedy!" + - + name: "ask" + explanation: "Chiedi qualcosa con 'ask'." + example: "Esempio: ask Qual Γ¨ il tuo colore preferito?" + demo_code: "ask Qual Γ¨ il tuo colore preferito?" + - + name: "echo" + explanation: "Ripeti qualcosa usando 'echo'." + example: "Esempio: echo Quindi il tuo colore preferito Γ¨..." + demo_code: "ask Qual Γ¨ il tuo colore preferito?\necho Quindi il tuo colore preferito Γ¨..." + 2: + intro_text: "'Print' funziona ancora come nel primo livello, invece 'ask' Γ¨ stato cambiato ed ha bisogno di dare un nome alla risposta che potrai poi scrivere con 'print'. 'Echo' non Γ¨ piΓΉ necessario. Puoi anche usare questi comandi:" + start_code: "print ciao mondo!" + commands: + - + name: "is" + explanation: "DΓ  un nomignolo da usare nel programma ad una parola" + example: "Esempio: nome is Hedy." + demo_code: "nome is Hedy\nprint benvenuta nome" + - + name: "ask" + explanation: "Chiedi qualcosa con 'ask'. Attenzione! 'ask' ora ha bisogno di un nome." + example: "Esempio: colore is ask Qual Γ¨ il tuo colore preferito?" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nprint colore Γ¨ il tuo preferito!" + - + name: "scegli_acaso" + explanation: "Scegli una parola a caso da un gruppo con 'at' e 'random'" + example: "Esempio: animali is cane, gatto, canguro." + demo_code: "animali is cane, gatto, canguro\nprint animali at random" + 3: + start_code: "print 'Ciao mondo'" + intro_text: "Nel terzo livello devi usare le virgolette per le parole che vuoi scrivere con 'print'. 'echo' non Γ¨ piΓΉ necessario, ora puoi scrivere parole con 'print'!" + commands: + - + name: "print" + explanation: "Scrivi esattamente quello che metti tra virgolette" + example: "Esempio: print 'Ciao benvenuta/o su Hedy.'" + demo_code: "print 'Ciao benvenuta/o su Hedy.'" + - + name: "is" + explanation: "Dai un nomignolo ad un testo e scrivilo con 'print' senza usare le virgolette" + example: "Esempio: nome is Hedy." + demo_code: "nome is Hedy\nprint 'Il mio nome Γ¨ ' nome" + - + name: "ask" + explanation: "Chiedi qualcosa con 'ask'. Attenzione! Per scrivere con 'print' devi usare le virgolette!" + example: "Esempio: colore is ask Qual Γ¨ il tuo colore preferito?" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nprint colore ' Γ¨ il tuo preferito!'" + 4: + start_code: "nome is ask Come ti chiami?\nif nome is Hedy print 'che bello!' 
else print 'mah'" + intro_text: "'ask' e 'print' funzionano esattamente come funzionavano nel terzo livello. Nel quarto livello si aggiunge la congiunzione 'if'!" + commands: + - + name: "print" + explanation: "Scrivi esattamente quello che metti tra virgolette" + example: "Example: print 'Ciao benvenuta/o su Hedy.'" + demo_code: "print 'Ciao benvenuta/o su Hedy.'" + - + name: "ask" + explanation: "Chiedi qualcosa con 'ask'. Attenzione! Per scrivere con 'print' devi usare le virgolette!" + example: "Esempio: colore is ask Qual Γ¨ il tuo colore preferito?" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nprint colore ' Γ¨ il tuo preferito!'" + - + name: "if" + explanation: "Fai una scelta" + example: "Esempio: if colore is verde print 'carino!' else print 'mah'" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nif colore is verde print 'carino!' else print 'mah'" + 5: + start_code: "repeat 3 times print 'Hedy Γ¨ divertente!'" + intro_text: "'ask', 'print' e 'if' funzionano esattamente come funzionavano nel quarto livello. Il quinto livello introduce il comando 'repeat x times. 'repeat x times' puΓ² essere usato per ripetere una parte di codice 'x' volte." + commands: + - + name: "print" + explanation: "Scrivi esattamente quello che metti tra virgolette" + example: "Example: print 'Ciao benvenuta/o su Hedy.'" + demo_code: "print 'Ciao benvenuta/o su Hedy.'" + - + name: "ask" + explanation: "Chiedi qualcosa con 'ask'. Attenzione! Per scrivere con 'print' devi usare le virgolette!" + example: "Esempio: colore is ask Qual Γ¨ il tuo colore preferito?" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nprint colore ' Γ¨ il tuo preferito!'" + - + name: "if" + explanation: "Fai una scelta" + example: "Esempio: if colore is verde print 'carino!' else print 'mah'" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nif colore is verde print 'carino!' else print 'mah'" + - + name: "repeat x times" + explanation: "'repeat x times' e 'if' combinati insieme" + example: "Esempio: if colore is verde repeat 3 times print 'carino!' else repeat 5 times print 'mah'" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nif colore is verde repeat 3 times print 'carino' else repeat 5 times print 'mah'" + 6: + start_code: "print '5 per 5 Γ¨ ' 5 * 5" + intro_text: "'ask', 'print', 'if' e 'repeat x times' sono esattamente come nei livelli 4 e 5. Livello 6 aggiunge qualcosa di nuovo... Ora puoi fare i calcoli. " + commands: + - + name: "print" + explanation: "Scrivi esattamente quello che metti tra virgolette" + example: "Example: print '5 per 5 Γ¨ ' 5 * 5" + demo_code: "print '5 per 5 Γ¨ ' 5 * 5" + - + name: "ask e if con calcoli" + explanation: "Chiedi di fare un calcolo e giudicalo con 'if'. Attenzione: 'Print' richiede sempre le virgolette." + example: "Esempio: risposta is ask Quanto fa 10 piΓΉ 10?" + demo_code: "risposta is ask Quanto fa 10 piΓΉ 10?\nif risposta is 20 print 'Yes!' else print 'Ops'" + - + name: "repeat" + explanation: "repeat e if combinati" + example: "Example: if colore is verde repeat 3 times print 'carino!' else repeat 5 times print 'mah'" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nif colore is verde repeat 3 times print 'carino!' else repeat 5 times print 'mah'" + 7: + start_code: "repeat 5 times\n print 'Ciao a tutti'\n print 'Questo sarΓ  scritto 5 volte'" + intro_text: "ask e print funzionano come prima ma repeat e if sono cambiati! Ora puoi usare interi blocchi di codice ma dovrai ricordarti di usare l'indentazione. 
Questo significa mettere 4 spazi all'inizio della riga. Questo funziona anche quando vuoi scrivere un blocco di una sola riga. Se usi repeat e if dovrai ricordarti di mettere l'indentazione per ognuno dei due blocchi. Leggi l'esempio per piΓΉ informazioni!" + commands: + - + name: "print" + explanation: "Scrivi qualcosa. Ricorda di usare le virgolette per scrivere parole e frasi." + example: "Esempio: print '5 volte 5 Γ¨ ' 5 * 5" + demo_code: "print '5 volte 5 Γ¨ ' 5 * 5" + - + name: "if con piΓΉ righe" + explanation: "Chiedi la risposta per una somma e controlla se Γ¨ corretta. Ora possiamo scrivere due righe." + example: "Esempio: risposta is ask Quanto fa 5 piΓΉ 5?" + demo_code: "risposta is ask Quanto fa 5 piΓΉ 5?\nif risposta Γ¨ 10\n print 'Ben fatto!'\n print 'Esatto, la risposta corretta era ' risposta\ninvece\n scrivi 'Ops!'\n scrivi 'La risposta Γ¨ ' risposta" + - + name: "if e repeat insieme" + explanation: "if e repeat insieme" + example: "Esempio: if colore is verde repeat 3 times print 'carino!' else repeat 5 times print 'mah'" + demo_code: "colore is ask Qual Γ¨ il tuo colore preferito?\nif colore is verde\n repeat 3 times\n print 'carino!'\nelse\n repeat 5 times\n print 'mah'" diff --git a/coursedata/texts/it.yaml b/coursedata/texts/it.yaml new file mode 100644 index 00000000000..e7a835f16e1 --- /dev/null +++ b/coursedata/texts/it.yaml @@ -0,0 +1,127 @@ +--- +ui: + level_title: "Livello" + step_title: "Compito" + code_title: "Codice" + docs_title: "Spiegazione" + contact: "Contatti" + video_title: "Video" + run_code_button: "Eseguire codice" + save_code_button: "Salva codice" + untitled: "Senza nome" + regress_button: "Torna a livello" + advance_button: "Vai a livello" + advance_step_button: "Vai a compito" + try_button: "Prova questo" + enter_text: "Inserisci la risposta qui..." + enter: "Invio" + assignment_header: "Compito" + show_explanation: "Mostra spiegazione" + hide_explanation: "Nascondi spiegazione" +ClientErrorMessages: + Transpile_warning: "Attenzione!" + Transpile_error: "Non possiamo eseguire il tuo programma." + Connection_error: "Non siamo riuscito a contattare il server." + Other_error: "Ops! Forse abbiamo fatto un errore." + Execute_error: "Qualcosa Γ¨ andato storto nell'esecuzione del tuo codice." +HedyErrorMessages: + Wrong Level: "Questo Γ¨ il codice Hedy corretto ma non al livello giusto. Hai scritto un codice per il livello {original_level} al livello {working_level}." + Incomplete: "Ops! Hai dimenticato un pezzo di codice! Alla linea {line_number} devi mettere del codice dopo {incomplete_command}." + Invalid: "{invalid_command} non Γ¨ un commando Hedy per il livello {level}. Intendevi {guessed_command}?" + Invalid Space: "Ops! Hai iniziato una linea con uno spazio alla linea {line_number}. Gli spazi confondono i computer, puoi rimuoverla?" + Parse: "Il codice che hai scritto non Γ¨ un codice Hedy valido. C'Γ¨ un errore alla linea {location[0]}, alla posizione {location[1]}. Hai scritto {character_found} ma questo non Γ¨ permesso." + VarUndefined: "Hai provato a scrivere {name} ma non l'hai inizializzato." + space: "uno spazio" + comma: "una virgola" + question mark: "un punto di domanda" + newline: "una nuova riga" + period: "un punto" + exclamation mark: "un punto esclamativo" + dash: "una lineetta" + star: "una stella" + single quotes: "un apostrofo" + double quotes: "virgolette" + slash: "una barra obliqua" +Auth: + create_account: "Crea account" + create_account_explanation: "Avere un account ti permette di salvare i tuoi programmi." 
+ username: "Nome utente" + email: "Email" + password: "Password" + password_repeat: "Ripeti password" + birth_year: "Anno di nascita (opzionale)" + gender: "Genere (opzionale)" + select: "Seleziona" + female: "Femmina" + male: "Maschio" + other: "Altro" + country: "Paese" + subscribe_newsletter: "Iscriviti alla newsletter" + already_account: "Hai giΓ  account?" + login_long: "Accedi al tuo account" + login: "Accedi" + no_account: "Non hai ancora un account?" + profile: "Mio profilo" + update_profile: "Aggiorna profilo" + logout: "Esci" + destroy: "Cancella account permanentemente" + current_password: "Password attuale" + new_password: "Nuova password" + repeat_new_password: "Ripeti nuova password" + change_password: "Cambia password" + are_you_sure: "Sei sicuro? Questa azione Γ¨ permanente." + please_username: "Per favore inserisci un nome utente." + username_three: "Il nome utente deve contenere almeno tre caratteri." + username_special: "Il nome utente non puΓ² contenere `:` o `@`." + please_password: "Per favore inserisci la password." + password_six: "La password deve contenere almeno sei caratteri" + repeat_match: "La password ripetuta non combacia." + valid_email: "Per favore inserisci un'email valida." + valid_year: "Per favore inserisci un anno tra il 1900 e " + ajax_error: "C'Γ¨ stato un problema, per favore riprova." + please_username_email: "Per favore inserisci un nome utente o un'email." + profile_updated: "Profilo aggiornato." + password_updated: "Password aggiornata." + signup_success: "Tutto fatto! Per favore entra nel tuo account :)." + forgot_password: "Dimenticato la tua password?" + recover_password: "Richiedi di resettare la password" + send_password_recovery: "Inviami un link per recuperare la password" + sent_password_recovery: "Dovresti ricevere presto un'email con le istruzioni per resettare la tua password." + reset_password: "Resetta password" + password_resetted: "La tua password Γ¨ stata resettata con successo. Per favore entra nel tuo account." + invalid_username_password: "Nome utente/password non validi." + invalid_username: "Nome utente non valido." + invalid_password: "Password non valida." + invalid_reset_link: "Link per resettare password invalido." + exists_email: "Questa email Γ¨ giΓ  in uso." + exists_username: "Questo nome utente Γ¨ giΓ  in uso." + email_hello: "Ciao!" + email_goodbye: "Grazie!\nIl team di Hedy" + email_welcome_verify_subject: "Benvenuta/o su Hedy" + email_welcome_verify_body: "Il tuo account Hedy Γ¨ stato creato con successo. Benvenuta\\o!\nPer favore clicca su questo link per confermare il tuo indirizzo email:" + email_change_password_subject: "La tua password di Hedy Γ¨ stata cambiata" + email_change_password_body: "La tua password di Hedy Γ¨ stata cambiata. Se ne sei a conoscenza, bene.\nSe non sei statu tu a cambiare la password, per favore contattaci immediatamente rispondendo a questa email." + email_recover_password_subject: "Resetta la tua password di Hedy" + email_recover_password_body: "Cliccando su questo link puoi resettare la tua password di Hedy. Se non hai richiesto un reset della tua password ignora questa email." + email_reset_password_subject: "La tua password di Hedy Γ¨ stata resettata" + email_reset_password_body: "La tua password di Hedy Γ¨ stata resettata con una nuova. Se ne sei a conoscenza, bene.\nSe non sei statu tu a cambiare la password, per favore contattaci immediatamente rispondendo a questa email." + # These variables are added here to make the code simpler, but conceptually they belong to the UI. 
+ program_header: "I miei programmi" + save_prompt: "Devi avere un account per salvare i tuoi programmi. Vuoi entrare nel tuo account ora?" + unsaved_changes: "Hai un programma non salvato. Vuoi uscire senza salvare?" + save_success: "Successo" + save_success_detail: "Programma salvato con successo" + answer_question: "Non puoi eseguire il programma prima di rispondere alla domanda" +Programs: + recent: "I miei programmi recenti" + level: "Livello" + minutes: "minuti" + hours: "ore" + days: "giorni" + ago-1: "" + ago-2: "fa" + open: "Apri" + delete: "Elimina" + delete_confirm: "Sei sicura\\o di voler eliminare questo programma?" + no_programs: "Non hai ancora nessun programma." + write_first: "Scrivi il tuo primo programma!"
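The diff above registers Italian by adding a single entry to `ALL_LANGUAGES` and two new yaml files under `coursedata/`. A small, hypothetical sanity check (not part of the PR) that confirms every registered language has the files this kind of change must add, using the paths introduced in the diff:

```python
# Hypothetical helper, not part of the repository: verify that every language
# in ALL_LANGUAGES has the coursedata yaml files introduced by this diff's layout.
import os

ALL_LANGUAGES = {
    'en': 'English', 'nl': 'Nederlands', 'es': 'Español',
    'fr': 'Français', 'pt_br': 'Português', 'de': 'Deutsch',
    'it': 'Italiano',  # the entry added by this diff
}

def missing_course_files(base='coursedata'):
    """Return (lang, path) pairs for registered languages lacking yaml data."""
    missing = []
    for lang in ALL_LANGUAGES:
        for path in (os.path.join(base, 'level-defaults', lang + '.yaml'),
                     os.path.join(base, 'course', 'hedy', lang + '.yaml')):
            if not os.path.isfile(path):
                missing.append((lang, path))
    return missing

if __name__ == '__main__':
    print(missing_course_files() or 'all registered languages have course data')
```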
doccano__doccano-1280
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = 'hiroki.nakayama.py@gmail.com'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0',\n 'auto-labeling-pipeline>=0.1.12'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = 'hiroki.nakayama.py@gmail.com'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0',\n 'auto-labeling-pipeline>=0.1.12',\n 'dj-rest-auth>=2.1.4'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index de5f683cf3..9001f3fff0 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,8 @@ 'seqeval>=1.2.2', 'social-auth-app-django>=4.0.0', 'whitenoise>=5.2.0', - 'auto-labeling-pipeline>=0.1.12' + 'auto-labeling-pipeline>=0.1.12', + 'dj-rest-auth>=2.1.4' ] setup(
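The change above only appends `'dj-rest-auth>=2.1.4'` to `install_requires`. A hedged sketch (not from the repository) for checking whether an environment already satisfies that pin; it relies on `importlib.metadata` (Python 3.8+) and the `packaging` library that setuptools itself depends on:

```python
# Illustrative check only: does the current environment meet the new pin?
from importlib.metadata import version, PackageNotFoundError  # Python 3.8+
from packaging.specifiers import SpecifierSet
from packaging.version import Version

def satisfies(dist_name: str, spec: str) -> bool:
    """True if the installed distribution matches the version specifier."""
    try:
        installed = Version(version(dist_name))
    except PackageNotFoundError:
        return False
    return installed in SpecifierSet(spec)

print(satisfies('dj-rest-auth', '>=2.1.4'))
```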
dask__dask-4903
[ { "content": "from __future__ import print_function, division, absolute_import\n\nimport ast\nimport os\nimport sys\nimport threading\ntry:\n import yaml\nexcept ImportError:\n yaml = None\n\nfrom .compatibility import makedirs, builtins, Mapping\n\n\nno_default = '__no_default__'\n\n\npaths = [\n os.getenv('DASK_ROOT_CONFIG', '/etc/dask'),\n os.path.join(sys.prefix, 'etc', 'dask'),\n os.path.join(os.path.expanduser('~'), '.config', 'dask'),\n os.path.join(os.path.expanduser('~'), '.dask')\n]\n\nif 'DASK_CONFIG' in os.environ:\n PATH = os.environ['DASK_CONFIG']\n paths.append(PATH)\nelse:\n PATH = os.path.join(os.path.expanduser('~'), '.config', 'dask')\n\n\nglobal_config = config = {}\n\n\nconfig_lock = threading.Lock()\n\n\ndefaults = []\n\n\ndef canonical_name(k, config):\n \"\"\"Return the canonical name for a key.\n\n Handles user choice of '-' or '_' conventions by standardizing on whichever\n version was set first. If a key already exists in either hyphen or\n underscore form, the existing version is the canonical name. If neither\n version exists the original key is used as is.\n \"\"\"\n try:\n if k in config:\n return k\n except TypeError:\n # config is not a mapping, return the same name as provided\n return k\n\n altk = k.replace('_', '-') if '_' in k else k.replace('-', '_')\n\n if altk in config:\n return altk\n\n return k\n\n\ndef update(old, new, priority='new'):\n \"\"\" Update a nested dictionary with values from another\n\n This is like dict.update except that it smoothly merges nested values\n\n This operates in-place and modifies old\n\n Parameters\n ----------\n priority: string {'old', 'new'}\n If new (default) then the new dictionary has preference.\n Otherwise the old dictionary does.\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b) # doctest: +SKIP\n {'x': 2, 'y': {'a': 2, 'b': 3}}\n\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b, priority='old') # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.merge\n \"\"\"\n for k, v in new.items():\n k = canonical_name(k, old)\n\n if isinstance(v, Mapping):\n if k not in old or old[k] is None:\n old[k] = {}\n update(old[k], v, priority=priority)\n else:\n if priority == 'new' or k not in old:\n old[k] = v\n\n return old\n\n\ndef merge(*dicts):\n \"\"\" Update a sequence of nested dictionaries\n\n This prefers the values in the latter dictionaries to those in the former\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'y': {'b': 3}}\n >>> merge(a, b) # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.update\n \"\"\"\n result = {}\n for d in dicts:\n update(result, d)\n return result\n\n\ndef collect_yaml(paths=paths):\n \"\"\" Collect configuration from yaml files\n\n This searches through a list of paths, expands to find all yaml or json\n files, and then parses each file.\n \"\"\"\n # Find all paths\n file_paths = []\n for path in paths:\n if os.path.exists(path):\n if os.path.isdir(path):\n try:\n file_paths.extend(sorted([\n os.path.join(path, p)\n for p in os.listdir(path)\n if os.path.splitext(p)[1].lower() in ('.json', '.yaml', '.yml')\n ]))\n except OSError:\n # Ignore permission errors\n pass\n else:\n file_paths.append(path)\n\n configs = []\n\n # Parse yaml files\n for path in file_paths:\n try:\n with open(path) as f:\n data = yaml.safe_load(f.read()) or {}\n configs.append(data)\n except (OSError, IOError):\n # Ignore 
permission errors\n pass\n\n return configs\n\n\ndef collect_env(env=None):\n \"\"\" Collect config from environment variables\n\n This grabs environment variables of the form \"DASK_FOO__BAR_BAZ=123\" and\n turns these into config variables of the form ``{\"foo\": {\"bar-baz\": 123}}``\n It transforms the key and value in the following way:\n\n - Lower-cases the key text\n - Treats ``__`` (double-underscore) as nested access\n - Calls ``ast.literal_eval`` on the value\n \"\"\"\n if env is None:\n env = os.environ\n d = {}\n for name, value in env.items():\n if name.startswith('DASK_'):\n varname = name[5:].lower().replace('__', '.')\n try:\n d[varname] = ast.literal_eval(value)\n except (SyntaxError, ValueError):\n d[varname] = value\n\n result = {}\n set(d, config=result)\n\n return result\n\n\ndef ensure_file(\n source,\n destination=None,\n comment=True):\n \"\"\"\n Copy file to default location if it does not already exist\n\n This tries to move a default configuration file to a default location if\n if does not already exist. It also comments out that file by default.\n\n This is to be used by downstream modules (like dask.distributed) that may\n have default configuration files that they wish to include in the default\n configuration path.\n\n Parameters\n ----------\n source : string, filename\n Source configuration file, typically within a source directory.\n destination : string, directory\n Destination directory. Configurable by ``DASK_CONFIG`` environment\n variable, falling back to ~/.config/dask.\n comment : bool, True by default\n Whether or not to comment out the config file when copying.\n \"\"\"\n if destination is None:\n destination = PATH\n\n # destination is a file and already exists, never overwrite\n if os.path.isfile(destination):\n return\n\n # If destination is not an existing file, interpret as a directory,\n # use the source basename as the filename\n directory = destination\n destination = os.path.join(directory, os.path.basename(source))\n\n try:\n if not os.path.exists(destination):\n makedirs(directory, exist_ok=True)\n\n # Atomically create destination. Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = '%s.tmp.%d' % (destination, os.getpid())\n with open(source) as f:\n lines = list(f)\n\n if comment:\n lines = ['# ' + line\n if line.strip() and not line.startswith('#')\n else line\n for line in lines]\n\n with open(tmp, 'w') as f:\n f.write(''.join(lines))\n\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n except (IOError, OSError):\n pass\n\n\nclass set(object):\n \"\"\" Temporarily set configuration values within a context manager\n\n Examples\n --------\n >>> import dask\n >>> with dask.config.set({'foo': 123}):\n ... 
pass\n\n See Also\n --------\n dask.config.get\n \"\"\"\n def __init__(self, arg=None, config=config, lock=config_lock, **kwargs):\n if arg and not kwargs:\n kwargs = arg\n\n with lock:\n self.config = config\n self.old = {}\n\n for key, value in kwargs.items():\n self._assign(key.split('.'), value, config, old=self.old)\n\n def __enter__(self):\n return self.config\n\n def __exit__(self, type, value, traceback):\n for keys, value in self.old.items():\n if value == '--delete--':\n d = self.config\n try:\n while len(keys) > 1:\n d = d[keys[0]]\n keys = keys[1:]\n del d[keys[0]]\n except KeyError:\n pass\n else:\n self._assign(keys, value, self.config)\n\n @classmethod\n def _assign(cls, keys, value, d, old=None, path=[]):\n \"\"\" Assign value into a nested configuration dictionary\n\n Optionally record the old values in old\n\n Parameters\n ----------\n keys: Sequence[str]\n The nested path of keys to assign the value, similar to toolz.put_in\n value: object\n d: dict\n The part of the nested dictionary into which we want to assign the\n value\n old: dict, optional\n If provided this will hold the old values\n path: List[str]\n Used internally to hold the path of old values\n \"\"\"\n key = canonical_name(keys[0], d)\n if len(keys) == 1:\n if old is not None:\n path_key = tuple(path + [key])\n if key in d:\n old[path_key] = d[key]\n else:\n old[path_key] = '--delete--'\n d[key] = value\n else:\n if key not in d:\n d[key] = {}\n if old is not None:\n old[tuple(path + [key])] = '--delete--'\n old = None\n cls._assign(keys[1:], value, d[key], path=path + [key], old=old)\n\n\ndef collect(paths=paths, env=None):\n \"\"\"\n Collect configuration from paths and environment variables\n\n Parameters\n ----------\n paths : List[str]\n A list of paths to search for yaml config files\n\n env : dict\n The system environment variables\n\n Returns\n -------\n config: dict\n\n See Also\n --------\n dask.config.refresh: collect configuration and update into primary config\n \"\"\"\n if env is None:\n env = os.environ\n configs = []\n\n if yaml:\n configs.extend(collect_yaml(paths=paths))\n\n configs.append(collect_env(env=env))\n\n return merge(*configs)\n\n\ndef refresh(config=config, defaults=defaults, **kwargs):\n \"\"\"\n Update configuration by re-reading yaml files and env variables\n\n This mutates the global dask.config.config, or the config parameter if\n passed in.\n\n This goes through the following stages:\n\n 1. Clearing out all old configuration\n 2. Updating from the stored defaults from downstream libraries\n (see update_defaults)\n 3. Updating from yaml files and environment variables\n\n Note that some functionality only checks configuration once at startup and\n may not change behavior, even if configuration changes. It is recommended\n to restart your python process if convenient to ensure that new\n configuration changes take place.\n\n See Also\n --------\n dask.config.collect: for parameters\n dask.config.update_defaults\n \"\"\"\n config.clear()\n\n for d in defaults:\n update(config, d, priority='old')\n\n update(config, collect(**kwargs))\n\n\ndef get(key, default=no_default, config=config):\n \"\"\"\n Get elements from global config\n\n Use '.' 
for nested access\n\n Examples\n --------\n >>> from dask import config\n >>> config.get('foo') # doctest: +SKIP\n {'x': 1, 'y': 2}\n\n >>> config.get('foo.x') # doctest: +SKIP\n 1\n\n >>> config.get('foo.x.y', default=123) # doctest: +SKIP\n 123\n\n See Also\n --------\n dask.config.set\n \"\"\"\n keys = key.split('.')\n result = config\n for k in keys:\n k = canonical_name(k, result)\n try:\n result = result[k]\n except (TypeError, IndexError, KeyError):\n if default is not no_default:\n return default\n else:\n raise\n return result\n\n\ndef rename(aliases, config=config):\n \"\"\" Rename old keys to new keys\n\n This helps migrate older configuration versions over time\n \"\"\"\n old = []\n new = {}\n for o, n in aliases.items():\n value = get(o, None, config=config)\n if value is not None:\n old.append(o)\n new[n] = value\n\n for k in old:\n del config[k] # TODO: support nested keys\n\n set(new, config=config)\n\n\ndef update_defaults(new, config=config, defaults=defaults):\n \"\"\" Add a new set of defaults to the configuration\n\n It does two things:\n\n 1. Add the defaults to a global collection to be used by refresh later\n 2. Updates the global config with the new configuration\n prioritizing older values over newer ones\n \"\"\"\n defaults.append(new)\n update(config, new, priority='old')\n\n\ndef expand_environment_variables(config):\n ''' Expand environment variables in a nested config dictionary\n\n This function will recursively search through any nested dictionaries\n and/or lists.\n\n Parameters\n ----------\n config : dict, iterable, or str\n Input object to search for environment variables\n\n Returns\n -------\n config : same type as input\n\n Examples\n --------\n >>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP\n {'x': [1, 2, 'my-username']}\n '''\n if isinstance(config, Mapping):\n return {k: expand_environment_variables(v) for k, v in config.items()}\n elif isinstance(config, str):\n return os.path.expandvars(config)\n elif isinstance(config, (list, tuple, builtins.set)):\n return type(config)([expand_environment_variables(v) for v in config])\n else:\n return config\n\n\nrefresh()\n\n\nif yaml:\n fn = os.path.join(os.path.dirname(__file__), \"dask.yaml\")\n ensure_file(source=fn)\n\n with open(fn) as f:\n _defaults = yaml.safe_load(f)\n\n update_defaults(_defaults)\n del fn, _defaults\n", "path": "dask/config.py" } ]
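A short usage sketch grounded in the docstrings of the module above: `dask.config.set` splits dotted keys into nested dictionaries, and `canonical_name` makes hyphen and underscore spellings interchangeable on lookup. The `foo.bar-baz` key is a placeholder, mirroring the docstring examples rather than any real dask setting.

```python
# Illustrative use of the config helpers defined above.
import dask

with dask.config.set({'foo.bar-baz': 123}):
    # canonical_name lets the underscore spelling find the hyphenated key
    print(dask.config.get('foo.bar_baz'))            # 123

# The context manager restores the previous state on exit
print(dask.config.get('foo.bar-baz', default=None))  # None
```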
[ { "content": "from __future__ import print_function, division, absolute_import\n\nimport ast\nimport os\nimport sys\nimport threading\ntry:\n import yaml\nexcept ImportError:\n yaml = None\n\nfrom .compatibility import makedirs, builtins, Mapping\n\n\nno_default = '__no_default__'\n\n\npaths = [\n os.getenv('DASK_ROOT_CONFIG', '/etc/dask'),\n os.path.join(sys.prefix, 'etc', 'dask'),\n os.path.join(os.path.expanduser('~'), '.config', 'dask'),\n os.path.join(os.path.expanduser('~'), '.dask')\n]\n\nif 'DASK_CONFIG' in os.environ:\n PATH = os.environ['DASK_CONFIG']\n paths.append(PATH)\nelse:\n PATH = os.path.join(os.path.expanduser('~'), '.config', 'dask')\n\n\nglobal_config = config = {}\n\n\nconfig_lock = threading.Lock()\n\n\ndefaults = []\n\n\ndef canonical_name(k, config):\n \"\"\"Return the canonical name for a key.\n\n Handles user choice of '-' or '_' conventions by standardizing on whichever\n version was set first. If a key already exists in either hyphen or\n underscore form, the existing version is the canonical name. If neither\n version exists the original key is used as is.\n \"\"\"\n try:\n if k in config:\n return k\n except TypeError:\n # config is not a mapping, return the same name as provided\n return k\n\n altk = k.replace('_', '-') if '_' in k else k.replace('-', '_')\n\n if altk in config:\n return altk\n\n return k\n\n\ndef update(old, new, priority='new'):\n \"\"\" Update a nested dictionary with values from another\n\n This is like dict.update except that it smoothly merges nested values\n\n This operates in-place and modifies old\n\n Parameters\n ----------\n priority: string {'old', 'new'}\n If new (default) then the new dictionary has preference.\n Otherwise the old dictionary does.\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b) # doctest: +SKIP\n {'x': 2, 'y': {'a': 2, 'b': 3}}\n\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b, priority='old') # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.merge\n \"\"\"\n for k, v in new.items():\n k = canonical_name(k, old)\n\n if isinstance(v, Mapping):\n if k not in old or old[k] is None:\n old[k] = {}\n update(old[k], v, priority=priority)\n else:\n if priority == 'new' or k not in old:\n old[k] = v\n\n return old\n\n\ndef merge(*dicts):\n \"\"\" Update a sequence of nested dictionaries\n\n This prefers the values in the latter dictionaries to those in the former\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'y': {'b': 3}}\n >>> merge(a, b) # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.update\n \"\"\"\n result = {}\n for d in dicts:\n update(result, d)\n return result\n\n\ndef collect_yaml(paths=paths):\n \"\"\" Collect configuration from yaml files\n\n This searches through a list of paths, expands to find all yaml or json\n files, and then parses each file.\n \"\"\"\n # Find all paths\n file_paths = []\n for path in paths:\n if os.path.exists(path):\n if os.path.isdir(path):\n try:\n file_paths.extend(sorted([\n os.path.join(path, p)\n for p in os.listdir(path)\n if os.path.splitext(p)[1].lower() in ('.json', '.yaml', '.yml')\n ]))\n except OSError:\n # Ignore permission errors\n pass\n else:\n file_paths.append(path)\n\n configs = []\n\n # Parse yaml files\n for path in file_paths:\n try:\n with open(path) as f:\n data = yaml.safe_load(f.read()) or {}\n configs.append(data)\n except (OSError, IOError):\n # Ignore 
permission errors\n pass\n\n return configs\n\n\ndef collect_env(env=None):\n \"\"\" Collect config from environment variables\n\n This grabs environment variables of the form \"DASK_FOO__BAR_BAZ=123\" and\n turns these into config variables of the form ``{\"foo\": {\"bar-baz\": 123}}``\n It transforms the key and value in the following way:\n\n - Lower-cases the key text\n - Treats ``__`` (double-underscore) as nested access\n - Calls ``ast.literal_eval`` on the value\n \"\"\"\n if env is None:\n env = os.environ\n d = {}\n for name, value in env.items():\n if name.startswith('DASK_'):\n varname = name[5:].lower().replace('__', '.')\n try:\n d[varname] = ast.literal_eval(value)\n except (SyntaxError, ValueError):\n d[varname] = value\n\n result = {}\n set(d, config=result)\n\n return result\n\n\ndef ensure_file(\n source,\n destination=None,\n comment=True):\n \"\"\"\n Copy file to default location if it does not already exist\n\n This tries to move a default configuration file to a default location if\n if does not already exist. It also comments out that file by default.\n\n This is to be used by downstream modules (like dask.distributed) that may\n have default configuration files that they wish to include in the default\n configuration path.\n\n Parameters\n ----------\n source : string, filename\n Source configuration file, typically within a source directory.\n destination : string, directory\n Destination directory. Configurable by ``DASK_CONFIG`` environment\n variable, falling back to ~/.config/dask.\n comment : bool, True by default\n Whether or not to comment out the config file when copying.\n \"\"\"\n if destination is None:\n destination = PATH\n\n # destination is a file and already exists, never overwrite\n if os.path.isfile(destination):\n return\n\n # If destination is not an existing file, interpret as a directory,\n # use the source basename as the filename\n directory = destination\n destination = os.path.join(directory, os.path.basename(source))\n\n try:\n if not os.path.exists(destination):\n makedirs(directory, exist_ok=True)\n\n # Atomically create destination. Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = '%s.tmp.%d' % (destination, os.getpid())\n with open(source) as f:\n lines = list(f)\n\n if comment:\n lines = ['# ' + line\n if line.strip() and not line.startswith('#')\n else line\n for line in lines]\n\n with open(tmp, 'w') as f:\n f.write(''.join(lines))\n\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n except (IOError, OSError):\n pass\n\n\nclass set(object):\n \"\"\" Temporarily set configuration values within a context manager\n\n Examples\n --------\n >>> import dask\n >>> with dask.config.set({'foo': 123}):\n ... 
pass\n\n See Also\n --------\n dask.config.get\n \"\"\"\n def __init__(self, arg=None, config=config, lock=config_lock, **kwargs):\n if arg and not kwargs:\n kwargs = arg\n\n with lock:\n self.config = config\n self.old = {}\n\n for key, value in kwargs.items():\n self._assign(key.split('.'), value, config, old=self.old)\n\n def __enter__(self):\n return self.config\n\n def __exit__(self, type, value, traceback):\n for keys, value in self.old.items():\n if value == '--delete--':\n d = self.config\n try:\n while len(keys) > 1:\n d = d[keys[0]]\n keys = keys[1:]\n del d[keys[0]]\n except KeyError:\n pass\n else:\n self._assign(keys, value, self.config)\n\n @classmethod\n def _assign(cls, keys, value, d, old=None, path=[]):\n \"\"\" Assign value into a nested configuration dictionary\n\n Optionally record the old values in old\n\n Parameters\n ----------\n keys: Sequence[str]\n The nested path of keys to assign the value, similar to toolz.put_in\n value: object\n d: dict\n The part of the nested dictionary into which we want to assign the\n value\n old: dict, optional\n If provided this will hold the old values\n path: List[str]\n Used internally to hold the path of old values\n \"\"\"\n key = canonical_name(keys[0], d)\n if len(keys) == 1:\n if old is not None:\n path_key = tuple(path + [key])\n if key in d:\n old[path_key] = d[key]\n else:\n old[path_key] = '--delete--'\n d[key] = value\n else:\n if key not in d:\n d[key] = {}\n if old is not None:\n old[tuple(path + [key])] = '--delete--'\n old = None\n cls._assign(keys[1:], value, d[key], path=path + [key], old=old)\n\n\ndef collect(paths=paths, env=None):\n \"\"\"\n Collect configuration from paths and environment variables\n\n Parameters\n ----------\n paths : List[str]\n A list of paths to search for yaml config files\n\n env : dict\n The system environment variables\n\n Returns\n -------\n config: dict\n\n See Also\n --------\n dask.config.refresh: collect configuration and update into primary config\n \"\"\"\n if env is None:\n env = os.environ\n configs = []\n\n if yaml:\n configs.extend(collect_yaml(paths=paths))\n\n configs.append(collect_env(env=env))\n\n return merge(*configs)\n\n\ndef refresh(config=config, defaults=defaults, **kwargs):\n \"\"\"\n Update configuration by re-reading yaml files and env variables\n\n This mutates the global dask.config.config, or the config parameter if\n passed in.\n\n This goes through the following stages:\n\n 1. Clearing out all old configuration\n 2. Updating from the stored defaults from downstream libraries\n (see update_defaults)\n 3. Updating from yaml files and environment variables\n\n Note that some functionality only checks configuration once at startup and\n may not change behavior, even if configuration changes. It is recommended\n to restart your python process if convenient to ensure that new\n configuration changes take place.\n\n See Also\n --------\n dask.config.collect: for parameters\n dask.config.update_defaults\n \"\"\"\n config.clear()\n\n for d in defaults:\n update(config, d, priority='old')\n\n update(config, collect(**kwargs))\n\n\ndef get(key, default=no_default, config=config):\n \"\"\"\n Get elements from global config\n\n Use '.' 
for nested access\n\n Examples\n --------\n >>> from dask import config\n >>> config.get('foo') # doctest: +SKIP\n {'x': 1, 'y': 2}\n\n >>> config.get('foo.x') # doctest: +SKIP\n 1\n\n >>> config.get('foo.x.y', default=123) # doctest: +SKIP\n 123\n\n See Also\n --------\n dask.config.set\n \"\"\"\n keys = key.split('.')\n result = config\n for k in keys:\n k = canonical_name(k, result)\n try:\n result = result[k]\n except (TypeError, IndexError, KeyError):\n if default is not no_default:\n return default\n else:\n raise\n return result\n\n\ndef rename(aliases, config=config):\n \"\"\" Rename old keys to new keys\n\n This helps migrate older configuration versions over time\n \"\"\"\n old = []\n new = {}\n for o, n in aliases.items():\n value = get(o, None, config=config)\n if value is not None:\n old.append(o)\n new[n] = value\n\n for k in old:\n del config[canonical_name(k, config)] # TODO: support nested keys\n\n set(new, config=config)\n\n\ndef update_defaults(new, config=config, defaults=defaults):\n \"\"\" Add a new set of defaults to the configuration\n\n It does two things:\n\n 1. Add the defaults to a global collection to be used by refresh later\n 2. Updates the global config with the new configuration\n prioritizing older values over newer ones\n \"\"\"\n defaults.append(new)\n update(config, new, priority='old')\n\n\ndef expand_environment_variables(config):\n ''' Expand environment variables in a nested config dictionary\n\n This function will recursively search through any nested dictionaries\n and/or lists.\n\n Parameters\n ----------\n config : dict, iterable, or str\n Input object to search for environment variables\n\n Returns\n -------\n config : same type as input\n\n Examples\n --------\n >>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP\n {'x': [1, 2, 'my-username']}\n '''\n if isinstance(config, Mapping):\n return {k: expand_environment_variables(v) for k, v in config.items()}\n elif isinstance(config, str):\n return os.path.expandvars(config)\n elif isinstance(config, (list, tuple, builtins.set)):\n return type(config)([expand_environment_variables(v) for v in config])\n else:\n return config\n\n\nrefresh()\n\n\nif yaml:\n fn = os.path.join(os.path.dirname(__file__), \"dask.yaml\")\n ensure_file(source=fn)\n\n with open(fn) as f:\n _defaults = yaml.safe_load(f)\n\n update_defaults(_defaults)\n del fn, _defaults\n", "path": "dask/config.py" } ]
diff --git a/dask/config.py b/dask/config.py index 3785e6f9873..918e1cb921f 100644 --- a/dask/config.py +++ b/dask/config.py @@ -448,7 +448,7 @@ def rename(aliases, config=config): new[n] = value for k in old: - del config[k] # TODO: support nested keys + del config[canonical_name(k, config)] # TODO: support nested keys set(new, config=config) diff --git a/dask/tests/test_config.py b/dask/tests/test_config.py index 1f6f4f8919d..a5d7a33d0cf 100644 --- a/dask/tests/test_config.py +++ b/dask/tests/test_config.py @@ -318,7 +318,7 @@ def test_ensure_file_defaults_to_DASK_CONFIG_directory(tmpdir): def test_rename(): - aliases = {'foo-bar': 'foo.bar'} + aliases = {'foo_bar': 'foo.bar'} config = {'foo-bar': 123} rename(aliases, config=config) assert config == {'foo': {'bar': 123}}
statsmodels__statsmodels-1001
[ { "content": "\"\"\"\nMuch of the build system code was adapted from work done by the pandas\ndevelopers [1], which was in turn based on work done in pyzmq [2] and lxml [3].\n\n[1] http://pandas.pydata.org\n[2] http://zeromq.github.io/pyzmq/\n[3] http://lxml.de/\n\"\"\"\n\nimport os\nfrom os.path import splitext, basename, join as pjoin\nimport sys\nimport subprocess\nimport re\n\n# may need to work around setuptools bug by providing a fake Pyrex\ntry:\n import Cython\n sys.path.insert(0, pjoin(os.path.dirname(__file__), \"fake_pyrex\"))\nexcept ImportError:\n pass\n\n# try bootstrapping setuptools if it doesn't exist\ntry:\n import pkg_resources\n try:\n pkg_resources.require(\"setuptools>=0.6c5\")\n except pkg_resources.VersionConflict:\n from ez_setup import use_setuptools\n use_setuptools(version=\"0.6c5\")\n from setuptools import setup, Command, find_packages\n _have_setuptools = True\nexcept ImportError:\n # no setuptools installed\n from distutils.core import setup, Command\n _have_setuptools = False\n\nsetuptools_kwargs = {}\nif sys.version_info[0] >= 3:\n setuptools_kwargs = {'use_2to3': True,\n 'zip_safe': False,\n #'use_2to3_exclude_fixers': [],\n }\n if not _have_setuptools:\n sys.exit(\"need setuptools/distribute for Py3k\"\n \"\\n$ pip install distribute\")\n\nelse:\n setuptools_kwargs = {\n 'install_requires': [],\n 'zip_safe': False,\n }\n\n if not _have_setuptools:\n setuptools_kwargs = {}\n\ncurdir = os.path.abspath(os.path.dirname(__file__))\nREADME = open(pjoin(curdir, \"README.txt\")).read()\nCHANGES = open(pjoin(curdir, \"CHANGES.txt\")).read()\n\nDISTNAME = 'statsmodels'\nDESCRIPTION = 'Statistical computations and models for use with SciPy'\nLONG_DESCRIPTION = README + '\\n\\n' + CHANGES\nMAINTAINER = 'Skipper Seabold, Josef Perktold'\nMAINTAINER_EMAIL ='pystatsmodels@googlegroups.com'\nURL = 'http://statsmodels.sourceforge.net/'\nLICENSE = 'BSD License'\nDOWNLOAD_URL = ''\n\nfrom distutils.extension import Extension\nfrom distutils.command.build import build\nfrom distutils.command.sdist import sdist\nfrom distutils.command.build_ext import build_ext as _build_ext\n\ntry:\n from Cython.Distutils import build_ext as _build_ext\n # from Cython.Distutils import Extension # to get pyrex debugging symbols\n cython = True\nexcept ImportError:\n cython = False\n\n\nclass build_ext(_build_ext):\n def build_extensions(self):\n numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')\n\n for ext in self.extensions:\n if (hasattr(ext, 'include_dirs') and\n not numpy_incl in ext.include_dirs):\n ext.include_dirs.append(numpy_incl)\n _build_ext.build_extensions(self)\n\n\ndef strip_rc(version):\n return re.sub(r\"rc\\d+$\", \"\", version)\n\ndef check_dependency_versions(min_versions):\n \"\"\"\n Don't let setuptools do this. 
It's rude.\n\n Just makes sure it can import the packages and if not, stops the build\n process.\n \"\"\"\n from distutils.version import StrictVersion\n try:\n from numpy.version import short_version as npversion\n except ImportError:\n raise ImportError(\"statsmodels requires numpy\")\n try:\n from scipy.version import short_version as spversion\n except ImportError:\n try: # scipy 0.7.0\n from scipy.version import version as spversion\n except ImportError:\n raise ImportError(\"statsmodels requires scipy\")\n try:\n from pandas.version import version as pversion\n except ImportError:\n raise ImportError(\"statsmodels requires pandas\")\n try:\n from patsy import __version__ as patsy_version\n except ImportError:\n raise ImportError(\"statsmodels requires patsy. http://patsy.readthedocs.org\")\n\n try:\n assert StrictVersion(strip_rc(npversion)) >= min_versions['numpy']\n except AssertionError:\n raise ImportError(\"Numpy version is %s. Requires >= %s\" %\n (npversion, min_versions['numpy']))\n try:\n assert StrictVersion(strip_rc(spversion)) >= min_versions['scipy']\n except AssertionError:\n raise ImportError(\"Scipy version is %s. Requires >= %s\" %\n (spversion, min_versions['scipy']))\n try:\n #NOTE: not sure how robust this regex is but it at least allows\n # double digit version numbering\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", pversion).group()\n assert StrictVersion(pversion) >= min_versions['pandas']\n except AssertionError:\n raise ImportError(\"Pandas version is %s. Requires >= %s\" %\n (pversion, min_versions['pandas']))\n\n try: # patsy dev looks like 0.1.0+dev\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", patsy_version).group()\n assert StrictVersion(pversion) >= min_versions['patsy']\n except AssertionError:\n raise ImportError(\"Patsy version is %s. 
Requires >= %s\" %\n (pversion, min_versions[\"patsy\"]))\n\n\nMAJ = 0\nMIN = 5\nREV = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJ,MIN,REV)\n\nclassifiers = [ 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Topic :: Scientific/Engineering']\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\" \".join(cmd), stdout = subprocess.PIPE, env=env,\n shell=True).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\ndef write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):\n cnt = \"\\n\".join([\"\",\n \"# THIS FILE IS GENERATED FROM SETUP.PY\",\n \"short_version = '%(version)s'\",\n \"version = '%(version)s'\",\n \"full_version = '%(full_version)s'\",\n \"git_revision = '%(git_revision)s'\",\n \"release = %(isrelease)s\", \"\",\n \"if not release:\",\n \" version = full_version\"])\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of numpy.version messes up the build under Python 3.\n FULLVERSION = VERSION\n dowrite = True\n if os.path.exists('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists(filename):\n # must be a source distribution, use existing version file\n try:\n from statsmodels.version import git_revision as GIT_REVISION\n except ImportError:\n dowrite = False\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev-' + GIT_REVISION[:7]\n\n\n if dowrite:\n try:\n a = open(filename, 'w')\n a.write(cnt % {'version': VERSION,\n 'full_version' : FULLVERSION,\n 'git_revision' : GIT_REVISION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\ntry:\n from distutils.command.build_py import build_py_2to3 as build_py\nexcept ImportError:\n # 2.x\n from distutils.command.build_py import build_py\n\n\nclass CleanCommand(Command):\n \"\"\"Custom distutils command to clean the .so and .pyc files.\"\"\"\n\n user_options = [(\"all\", \"a\", \"\")]\n\n def initialize_options(self):\n self.all = True\n self._clean_me = []\n self._clean_trees = []\n self._clean_exclude = [\"bspline_ext.c\",\n \"bspline_impl.c\"]\n\n for root, dirs, files in list(os.walk('statsmodels')):\n for f in files:\n if f in self._clean_exclude:\n continue\n if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',\n '.pyo',\n '.pyd', '.c', '.orig'):\n self._clean_me.append(pjoin(root, f))\n for d in dirs:\n if d == '__pycache__':\n self._clean_trees.append(pjoin(root, d))\n\n for d in ('build',):\n if os.path.exists(d):\n self._clean_trees.append(d)\n\n def finalize_options(self):\n pass\n\n def run(self):\n for clean_me in self._clean_me:\n try:\n os.unlink(clean_me)\n except Exception:\n pass\n for clean_tree in self._clean_trees:\n try:\n import shutil\n shutil.rmtree(clean_tree)\n except Exception:\n pass\n\n\nclass 
CheckSDist(sdist):\n \"\"\"Custom sdist that ensures Cython has compiled all pyx files to c.\"\"\"\n\n _pyxfiles = ['statsmodels/nonparametric/linbin.pyx',\n 'statsmodels/nonparametric/_smoothers_lowess.pyx',\n 'statsmodels/tsa/kalmanf/kalman_loglike.pyx']\n\n def initialize_options(self):\n sdist.initialize_options(self)\n\n '''\n self._pyxfiles = []\n for root, dirs, files in os.walk('statsmodels'):\n for f in files:\n if f.endswith('.pyx'):\n self._pyxfiles.append(pjoin(root, f))\n '''\n\n def run(self):\n if 'cython' in cmdclass:\n self.run_command('cython')\n else:\n for pyxfile in self._pyxfiles:\n cfile = pyxfile[:-3] + 'c'\n msg = \"C-source file '%s' not found.\" % (cfile) +\\\n \" Run 'setup.py cython' before sdist.\"\n assert os.path.isfile(cfile), msg\n sdist.run(self)\n\n\nclass CheckingBuildExt(build_ext):\n \"\"\"Subclass build_ext to get clearer report if Cython is necessary.\"\"\"\n\n def check_cython_extensions(self, extensions):\n for ext in extensions:\n for src in ext.sources:\n if not os.path.exists(src):\n raise Exception(\"\"\"Cython-generated file '%s' not found.\n Cython is required to compile statsmodels from a development branch.\n Please install Cython or download a source release of statsmodels.\n \"\"\" % src)\n\n def build_extensions(self):\n self.check_cython_extensions(self.extensions)\n build_ext.build_extensions(self)\n\n\nclass CythonCommand(build_ext):\n \"\"\"Custom distutils command subclassed from Cython.Distutils.build_ext\n to compile pyx->c, and stop there. All this does is override the\n C-compile method build_extension() with a no-op.\"\"\"\n def build_extension(self, ext):\n pass\n\n\nclass DummyBuildSrc(Command):\n \"\"\" numpy's build_src command interferes with Cython's build_ext.\n \"\"\"\n user_options = []\n\n def initialize_options(self):\n self.py_modules_dict = {}\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\ncmdclass = {'clean': CleanCommand,\n 'build': build,\n 'sdist': CheckSDist}\n\nif cython:\n suffix = \".pyx\"\n cmdclass[\"build_ext\"] = CheckingBuildExt\n cmdclass[\"cython\"] = CythonCommand\nelse:\n suffix = \".c\"\n cmdclass[\"build_src\"] = DummyBuildSrc\n cmdclass[\"build_ext\"] = CheckingBuildExt\n\nlib_depends = []\n\ndef srcpath(name=None, suffix='.pyx', subdir='src'):\n return pjoin('statsmodels', subdir, name + suffix)\n\nif suffix == \".pyx\":\n lib_depends = [srcpath(f, suffix=\".pyx\") for f in lib_depends]\nelse:\n lib_depends = []\n\ncommon_include = []\n\n# some linux distros require it\nlibraries = ['m'] if 'win32' not in sys.platform else []\n\next_data = dict(\n kalman_loglike = {\"pyxfile\" : \"tsa/kalmanf/kalman_loglike\",\n \"depends\" : [],\n \"sources\" : []},\n\n linbin = {\"pyxfile\" : \"nonparametric/linbin\",\n \"depends\" : [],\n \"sources\" : []},\n _smoothers_lowess = {\"pyxfile\" : \"nonparametric/_smoothers_lowess\",\n \"depends\" : [],\n \"sources\" : []}\n )\n\ndef pxd(name):\n return os.path.abspath(pjoin('pandas', name + '.pxd'))\n\nextensions = []\nfor name, data in ext_data.items():\n sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]\n pxds = [pxd(x) for x in data.get('pxdfiles', [])]\n destdir = \".\".join(os.path.dirname(data[\"pyxfile\"]).split(\"/\"))\n if suffix == '.pyx' and pxds:\n sources.extend(pxds)\n\n sources.extend(data.get('sources', []))\n\n include = data.get('include', common_include)\n\n obj = Extension('statsmodels.%s.%s' % (destdir, name),\n sources=sources,\n depends=data.get('depends', []),\n include_dirs=include)\n\n 
extensions.append(obj)\n\nif suffix == '.pyx' and 'setuptools' in sys.modules:\n # undo dumb setuptools bug clobbering .pyx sources back to .c\n for ext in extensions:\n if ext.sources[0].endswith('.c'):\n root, _ = os.path.splitext(ext.sources[0])\n ext.sources[0] = root + suffix\n\nif _have_setuptools:\n setuptools_kwargs[\"test_suite\"] = \"nose.collector\"\n\ntry:\n from os.path import relpath\nexcept ImportError: # python 2.5\n\n def relpath(path, start=os.curdir):\n \"\"\"Return a relative version of a path\"\"\"\n if not path:\n raise ValueError(\"no path specified\")\n start_list = os.path.abspath(start).split(os.path.sep)\n path_list = os.path.abspath(path).split(os.path.sep)\n # Work out how much of the filepath is shared by start and path.\n i = len(os.path.commonprefix([start_list, path_list]))\n rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]\n if not rel_list:\n return os.curdir\n return pjoin(*rel_list)\n\ndef get_data_files():\n sep = os.path.sep\n # install the datasets\n data_files = {}\n root = pjoin(curdir, \"statsmodels\", \"datasets\")\n for i in os.listdir(root):\n if i is \"tests\":\n continue\n path = pjoin(root, i)\n if os.path.isdir(path):\n data_files.update({relpath(path).replace(sep, \".\") : [\"*.csv\",\n \"*.dta\"]})\n # add all the tests and results files\n for r, ds, fs in os.walk(pjoin(curdir, \"statsmodels\")):\n if r.endswith('results') and 'sandbox' not in r:\n data_files.update({relpath(r).replace(sep, \".\") : [\"*.csv\",\n \"*.txt\"]})\n\n return data_files\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.unlink('MANIFEST')\n\n min_versions = {\n 'numpy' : '1.4.0',\n 'scipy' : '0.7.0',\n 'pandas' : '0.7.1',\n 'patsy' : '0.1.0',\n }\n if sys.version_info[0] == 3 and sys.version_info[1] >= 3:\n # 3.3 needs numpy 1.7+\n min_versions.update({\"numpy\" : \"1.7.0b2\"})\n\n check_dependency_versions(min_versions)\n write_version_py()\n\n # this adds *.csv and *.dta files in datasets folders\n # and *.csv and *.txt files in test/results folders\n package_data = get_data_files()\n packages = find_packages()\n packages.append(\"statsmodels.tsa.vector_ar.data\")\n\n package_data[\"statsmodels.datasets.tests\"].append(\"*.zip\")\n package_data[\"statsmodels.iolib.tests.results\"].append(\"*.dta\")\n package_data[\"statsmodels.stats.tests.results\"].append(\"*.json\")\n package_data[\"statsmodels.tsa.vector_ar.tests.results\"].append(\"*.npz\")\n # data files that don't follow the tests/results pattern. should fix.\n package_data.update({\"statsmodels.stats.tests\" : [\"*.txt\"]})\n # the next two are in the sdist, but I don't manage to get them installed\n package_data.update({\"statsmodels.stats.libqstrung\" :\n [\"*.r\", \"*.txt\", \"*.dat\"]})\n package_data.update({\"statsmodels.stats.libqstrung.tests\" :\n [\"*.csv\", \"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n # Why are we installing this stuff?\n\n #TODO: deal with this. 
Not sure if it ever worked for bdists\n #('docs/build/htmlhelp/statsmodelsdoc.chm',\n # 'statsmodels/statsmodelsdoc.chm')\n\n setup(name = DISTNAME,\n version = VERSION,\n maintainer = MAINTAINER,\n ext_modules = extensions,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n long_description = LONG_DESCRIPTION,\n classifiers = classifiers,\n platforms = 'any',\n cmdclass = cmdclass,\n packages = packages,\n package_data = package_data,\n **setuptools_kwargs)\n", "path": "setup.py" } ]
[ { "content": "\"\"\"\nMuch of the build system code was adapted from work done by the pandas\ndevelopers [1], which was in turn based on work done in pyzmq [2] and lxml [3].\n\n[1] http://pandas.pydata.org\n[2] http://zeromq.github.io/pyzmq/\n[3] http://lxml.de/\n\"\"\"\n\nimport os\nfrom os.path import splitext, basename, join as pjoin\nimport sys\nimport subprocess\nimport re\n\n# may need to work around setuptools bug by providing a fake Pyrex\ntry:\n import Cython\n sys.path.insert(0, pjoin(os.path.dirname(__file__), \"fake_pyrex\"))\nexcept ImportError:\n pass\n\n# try bootstrapping setuptools if it doesn't exist\ntry:\n import pkg_resources\n try:\n pkg_resources.require(\"setuptools>=0.6c5\")\n except pkg_resources.VersionConflict:\n from ez_setup import use_setuptools\n use_setuptools(version=\"0.6c5\")\n from setuptools import setup, Command, find_packages\n _have_setuptools = True\nexcept ImportError:\n # no setuptools installed\n from distutils.core import setup, Command\n _have_setuptools = False\n\nsetuptools_kwargs = {}\nif sys.version_info[0] >= 3:\n setuptools_kwargs = {'use_2to3': True,\n 'zip_safe': False,\n #'use_2to3_exclude_fixers': [],\n }\n if not _have_setuptools:\n sys.exit(\"need setuptools/distribute for Py3k\"\n \"\\n$ pip install distribute\")\n\nelse:\n setuptools_kwargs = {\n 'install_requires': [],\n 'zip_safe': False,\n }\n\n if not _have_setuptools:\n setuptools_kwargs = {}\n\ncurdir = os.path.abspath(os.path.dirname(__file__))\nREADME = open(pjoin(curdir, \"README.txt\")).read()\nCHANGES = open(pjoin(curdir, \"CHANGES.txt\")).read()\n\nDISTNAME = 'statsmodels'\nDESCRIPTION = 'Statistical computations and models for use with SciPy'\nLONG_DESCRIPTION = README + '\\n\\n' + CHANGES\nMAINTAINER = 'Skipper Seabold, Josef Perktold'\nMAINTAINER_EMAIL ='pystatsmodels@googlegroups.com'\nURL = 'http://statsmodels.sourceforge.net/'\nLICENSE = 'BSD License'\nDOWNLOAD_URL = ''\n\nfrom distutils.extension import Extension\nfrom distutils.command.build import build\nfrom distutils.command.sdist import sdist\nfrom distutils.command.build_ext import build_ext as _build_ext\n\ntry:\n from Cython.Distutils import build_ext as _build_ext\n # from Cython.Distutils import Extension # to get pyrex debugging symbols\n cython = True\nexcept ImportError:\n cython = False\n\n\nclass build_ext(_build_ext):\n def build_extensions(self):\n numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')\n\n for ext in self.extensions:\n if (hasattr(ext, 'include_dirs') and\n not numpy_incl in ext.include_dirs):\n ext.include_dirs.append(numpy_incl)\n _build_ext.build_extensions(self)\n\n\ndef strip_rc(version):\n return re.sub(r\"rc\\d+$\", \"\", version)\n\ndef check_dependency_versions(min_versions):\n \"\"\"\n Don't let setuptools do this. 
It's rude.\n\n Just makes sure it can import the packages and if not, stops the build\n process.\n \"\"\"\n from distutils.version import StrictVersion\n try:\n from numpy.version import short_version as npversion\n except ImportError:\n raise ImportError(\"statsmodels requires numpy\")\n try:\n from scipy.version import short_version as spversion\n except ImportError:\n try: # scipy 0.7.0\n from scipy.version import version as spversion\n except ImportError:\n raise ImportError(\"statsmodels requires scipy\")\n try:\n from pandas.version import version as pversion\n except ImportError:\n raise ImportError(\"statsmodels requires pandas\")\n try:\n from patsy import __version__ as patsy_version\n except ImportError:\n raise ImportError(\"statsmodels requires patsy. http://patsy.readthedocs.org\")\n\n try:\n assert StrictVersion(strip_rc(npversion)) >= min_versions['numpy']\n except AssertionError:\n raise ImportError(\"Numpy version is %s. Requires >= %s\" %\n (npversion, min_versions['numpy']))\n try:\n assert StrictVersion(strip_rc(spversion)) >= min_versions['scipy']\n except AssertionError:\n raise ImportError(\"Scipy version is %s. Requires >= %s\" %\n (spversion, min_versions['scipy']))\n try:\n #NOTE: not sure how robust this regex is but it at least allows\n # double digit version numbering\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", pversion).group()\n assert StrictVersion(pversion) >= min_versions['pandas']\n except AssertionError:\n raise ImportError(\"Pandas version is %s. Requires >= %s\" %\n (pversion, min_versions['pandas']))\n\n try: # patsy dev looks like 0.1.0+dev\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", patsy_version).group()\n assert StrictVersion(pversion) >= min_versions['patsy']\n except AssertionError:\n raise ImportError(\"Patsy version is %s. 
Requires >= %s\" %\n (pversion, min_versions[\"patsy\"]))\n\n\nMAJ = 0\nMIN = 5\nREV = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJ,MIN,REV)\n\nclassifiers = [ 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Topic :: Scientific/Engineering']\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\" \".join(cmd), stdout = subprocess.PIPE, env=env,\n shell=True).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\ndef write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):\n cnt = \"\\n\".join([\"\",\n \"# THIS FILE IS GENERATED FROM SETUP.PY\",\n \"short_version = '%(version)s'\",\n \"version = '%(version)s'\",\n \"full_version = '%(full_version)s'\",\n \"git_revision = '%(git_revision)s'\",\n \"release = %(isrelease)s\", \"\",\n \"if not release:\",\n \" version = full_version\"])\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of numpy.version messes up the build under Python 3.\n FULLVERSION = VERSION\n dowrite = True\n if os.path.exists('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists(filename):\n # must be a source distribution, use existing version file\n try:\n from statsmodels.version import git_revision as GIT_REVISION\n except ImportError:\n dowrite = False\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev-' + GIT_REVISION[:7]\n\n\n if dowrite:\n try:\n a = open(filename, 'w')\n a.write(cnt % {'version': VERSION,\n 'full_version' : FULLVERSION,\n 'git_revision' : GIT_REVISION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\ntry:\n from distutils.command.build_py import build_py_2to3 as build_py\nexcept ImportError:\n # 2.x\n from distutils.command.build_py import build_py\n\n\nclass CleanCommand(Command):\n \"\"\"Custom distutils command to clean the .so and .pyc files.\"\"\"\n\n user_options = [(\"all\", \"a\", \"\")]\n\n def initialize_options(self):\n self.all = True\n self._clean_me = []\n self._clean_trees = []\n self._clean_exclude = [\"bspline_ext.c\",\n \"bspline_impl.c\"]\n\n for root, dirs, files in list(os.walk('statsmodels')):\n for f in files:\n if f in self._clean_exclude:\n continue\n if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',\n '.pyo',\n '.pyd', '.c', '.orig'):\n self._clean_me.append(pjoin(root, f))\n for d in dirs:\n if d == '__pycache__':\n self._clean_trees.append(pjoin(root, d))\n\n for d in ('build',):\n if os.path.exists(d):\n self._clean_trees.append(d)\n\n def finalize_options(self):\n pass\n\n def run(self):\n for clean_me in self._clean_me:\n try:\n os.unlink(clean_me)\n except Exception:\n pass\n for clean_tree in self._clean_trees:\n try:\n import shutil\n shutil.rmtree(clean_tree)\n except Exception:\n pass\n\n\nclass 
CheckSDist(sdist):\n \"\"\"Custom sdist that ensures Cython has compiled all pyx files to c.\"\"\"\n\n _pyxfiles = ['statsmodels/nonparametric/linbin.pyx',\n 'statsmodels/nonparametric/_smoothers_lowess.pyx',\n 'statsmodels/tsa/kalmanf/kalman_loglike.pyx']\n\n def initialize_options(self):\n sdist.initialize_options(self)\n\n '''\n self._pyxfiles = []\n for root, dirs, files in os.walk('statsmodels'):\n for f in files:\n if f.endswith('.pyx'):\n self._pyxfiles.append(pjoin(root, f))\n '''\n\n def run(self):\n if 'cython' in cmdclass:\n self.run_command('cython')\n else:\n for pyxfile in self._pyxfiles:\n cfile = pyxfile[:-3] + 'c'\n msg = \"C-source file '%s' not found.\" % (cfile) +\\\n \" Run 'setup.py cython' before sdist.\"\n assert os.path.isfile(cfile), msg\n sdist.run(self)\n\n\nclass CheckingBuildExt(build_ext):\n \"\"\"Subclass build_ext to get clearer report if Cython is necessary.\"\"\"\n\n def check_cython_extensions(self, extensions):\n for ext in extensions:\n for src in ext.sources:\n if not os.path.exists(src):\n raise Exception(\"\"\"Cython-generated file '%s' not found.\n Cython is required to compile statsmodels from a development branch.\n Please install Cython or download a source release of statsmodels.\n \"\"\" % src)\n\n def build_extensions(self):\n self.check_cython_extensions(self.extensions)\n build_ext.build_extensions(self)\n\n\nclass CythonCommand(build_ext):\n \"\"\"Custom distutils command subclassed from Cython.Distutils.build_ext\n to compile pyx->c, and stop there. All this does is override the\n C-compile method build_extension() with a no-op.\"\"\"\n def build_extension(self, ext):\n pass\n\n\nclass DummyBuildSrc(Command):\n \"\"\" numpy's build_src command interferes with Cython's build_ext.\n \"\"\"\n user_options = []\n\n def initialize_options(self):\n self.py_modules_dict = {}\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\ncmdclass = {'clean': CleanCommand,\n 'build': build,\n 'sdist': CheckSDist}\n\nif cython:\n suffix = \".pyx\"\n cmdclass[\"build_ext\"] = CheckingBuildExt\n cmdclass[\"cython\"] = CythonCommand\nelse:\n suffix = \".c\"\n cmdclass[\"build_src\"] = DummyBuildSrc\n cmdclass[\"build_ext\"] = CheckingBuildExt\n\nlib_depends = []\n\ndef srcpath(name=None, suffix='.pyx', subdir='src'):\n return pjoin('statsmodels', subdir, name + suffix)\n\nif suffix == \".pyx\":\n lib_depends = [srcpath(f, suffix=\".pyx\") for f in lib_depends]\nelse:\n lib_depends = []\n\ncommon_include = []\n\n# some linux distros require it\nlibraries = ['m'] if 'win32' not in sys.platform else []\n\next_data = dict(\n kalman_loglike = {\"pyxfile\" : \"tsa/kalmanf/kalman_loglike\",\n \"depends\" : [],\n \"sources\" : []},\n\n linbin = {\"pyxfile\" : \"nonparametric/linbin\",\n \"depends\" : [],\n \"sources\" : []},\n _smoothers_lowess = {\"pyxfile\" : \"nonparametric/_smoothers_lowess\",\n \"depends\" : [],\n \"sources\" : []}\n )\n\ndef pxd(name):\n return os.path.abspath(pjoin('pandas', name + '.pxd'))\n\nextensions = []\nfor name, data in ext_data.items():\n sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]\n pxds = [pxd(x) for x in data.get('pxdfiles', [])]\n destdir = \".\".join(os.path.dirname(data[\"pyxfile\"]).split(\"/\"))\n if suffix == '.pyx' and pxds:\n sources.extend(pxds)\n\n sources.extend(data.get('sources', []))\n\n include = data.get('include', common_include)\n\n obj = Extension('statsmodels.%s.%s' % (destdir, name),\n sources=sources,\n depends=data.get('depends', []),\n include_dirs=include)\n\n 
extensions.append(obj)\n\nif suffix == '.pyx' and 'setuptools' in sys.modules:\n # undo dumb setuptools bug clobbering .pyx sources back to .c\n for ext in extensions:\n if ext.sources[0].endswith('.c'):\n root, _ = os.path.splitext(ext.sources[0])\n ext.sources[0] = root + suffix\n\nif _have_setuptools:\n setuptools_kwargs[\"test_suite\"] = \"nose.collector\"\n\ntry:\n from os.path import relpath\nexcept ImportError: # python 2.5\n\n def relpath(path, start=os.curdir):\n \"\"\"Return a relative version of a path\"\"\"\n if not path:\n raise ValueError(\"no path specified\")\n start_list = os.path.abspath(start).split(os.path.sep)\n path_list = os.path.abspath(path).split(os.path.sep)\n # Work out how much of the filepath is shared by start and path.\n i = len(os.path.commonprefix([start_list, path_list]))\n rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]\n if not rel_list:\n return os.curdir\n return pjoin(*rel_list)\n\ndef get_data_files():\n sep = os.path.sep\n # install the datasets\n data_files = {}\n root = pjoin(curdir, \"statsmodels\", \"datasets\")\n for i in os.listdir(root):\n if i is \"tests\":\n continue\n path = pjoin(root, i)\n if os.path.isdir(path):\n data_files.update({relpath(path).replace(sep, \".\") : [\"*.csv\",\n \"*.dta\"]})\n # add all the tests and results files\n for r, ds, fs in os.walk(pjoin(curdir, \"statsmodels\")):\n if r.endswith('results') and 'sandbox' not in r:\n data_files.update({relpath(r).replace(sep, \".\") : [\"*.csv\",\n \"*.txt\"]})\n\n return data_files\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.unlink('MANIFEST')\n\n min_versions = {\n 'numpy' : '1.4.0',\n 'scipy' : '0.7.0',\n 'pandas' : '0.7.1',\n 'patsy' : '0.1.0',\n }\n if sys.version_info[0] == 3 and sys.version_info[1] >= 3:\n # 3.3 needs numpy 1.7+\n min_versions.update({\"numpy\" : \"1.7.0b2\"})\n\n check_dependency_versions(min_versions)\n write_version_py()\n\n # this adds *.csv and *.dta files in datasets folders\n # and *.csv and *.txt files in test/results folders\n package_data = get_data_files()\n packages = find_packages()\n packages.append(\"statsmodels.tsa.vector_ar.data\")\n\n package_data[\"statsmodels.datasets.tests\"].append(\"*.zip\")\n package_data[\"statsmodels.iolib.tests.results\"].append(\"*.dta\")\n package_data[\"statsmodels.stats.tests.results\"].append(\"*.json\")\n package_data[\"statsmodels.tsa.vector_ar.tests.results\"].append(\"*.npz\")\n # data files that don't follow the tests/results pattern. should fix.\n package_data.update({\"statsmodels.stats.tests\" : [\"*.txt\"]})\n # the next two are in the sdist, but I don't manage to get them installed\n package_data.update({\"statsmodels.stats.libqstrung\" :\n [\"*.r\", \"*.txt\", \"*.dat\"]})\n package_data.update({\"statsmodels.stats.libqstrung.tests\" :\n [\"*.csv\", \"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n # Why are we installing this stuff?\n\n #TODO: deal with this. 
Not sure if it ever worked for bdists\n #('docs/build/htmlhelp/statsmodelsdoc.chm',\n # 'statsmodels/statsmodelsdoc.chm')\n\n setup(name = DISTNAME,\n version = VERSION,\n maintainer = MAINTAINER,\n ext_modules = extensions,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n long_description = LONG_DESCRIPTION,\n classifiers = classifiers,\n platforms = 'any',\n cmdclass = cmdclass,\n packages = packages,\n package_data = package_data,\n include_package_data=True,\n **setuptools_kwargs)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 86b66cfb8cc..bf5b48f8091 100644 --- a/setup.py +++ b/setup.py @@ -520,4 +520,5 @@ def get_data_files(): cmdclass = cmdclass, packages = packages, package_data = package_data, + include_package_data=True, **setuptools_kwargs)
liberapay__liberapay.com-1785
[ { "content": "from collections import defaultdict, namedtuple, OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom decimal import Decimal, ROUND_FLOOR, ROUND_HALF_UP, ROUND_UP\nimport re\n\nfrom babel.numbers import get_currency_precision\nfrom mangopay.utils import Money\nfrom markupsafe import Markup\nfrom pando.utils import utc\n\n\ndef ordered_set(keys):\n return OrderedDict((k, None) for k in keys)\n\n\ndef check_bits(bits):\n assert len(set(bits)) == len(bits) # no duplicates\n assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit\n\n\nEvent = namedtuple('Event', 'name bit title')\n\n\nclass Fees(namedtuple('Fees', ('var', 'fix'))):\n VAT = Decimal('0.17') # 17% (Luxembourg rate)\n VAT_1 = VAT + 1\n\n @property\n def with_vat(self):\n r = (self.var * self.VAT_1 * 100, self.fix * self.VAT_1)\n return r[0] if not r[1] else r[1].round_up() if not r[0] else r\n\n\ndef to_precision(x, precision, rounding=ROUND_HALF_UP):\n \"\"\"Round `x` to keep only `precision` of its most significant digits.\n\n >>> to_precision(Decimal('0.0086820'), 2)\n Decimal('0.0087')\n >>> to_precision(Decimal('13567.89'), 3)\n Decimal('13600')\n >>> to_precision(Decimal('0.000'), 4)\n Decimal('0')\n \"\"\"\n if x == 0:\n return Decimal(0)\n log10 = x.log10().to_integral(ROUND_FLOOR)\n # round\n factor = Decimal(10) ** (log10 + 1)\n r = (x / factor).quantize(Decimal(10) ** -precision, rounding=rounding) * factor\n # remove trailing zeros\n r = r.quantize(Decimal(10) ** (log10 - precision + 1))\n return r\n\n\ndef convert_symbolic_amount(amount, target_currency, precision=2, rounding=ROUND_HALF_UP):\n from liberapay.website import website\n rate = website.currency_exchange_rates[('EUR', target_currency)]\n minimum = Money.MINIMUMS[target_currency].amount\n return max(\n to_precision(amount * rate, precision, rounding).quantize(minimum, rounding),\n minimum\n )\n\n\nclass MoneyAutoConvertDict(defaultdict):\n\n def __init__(self, *args, **kw):\n super(MoneyAutoConvertDict, self).__init__(None, *args, **kw)\n\n def __missing__(self, currency):\n r = Money(convert_symbolic_amount(self['EUR'].amount, currency, 1), currency)\n self[currency] = r\n return r\n\n\nStandardTip = namedtuple('StandardTip', 'label weekly monthly yearly')\n\n\n_ = lambda a: a\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_.\")\n\nAVATAR_QUERY = '?s=160&d=404'\nAVATAR_SOURCES = (\n 'libravatar bitbucket facebook github gitlab google mastodon pleroma twitch twitter youtube'\n).split()\n\nBASE64URL_CHARS = set('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_')\n\nBIRTHDAY = date(2015, 5, 22)\n\nCARD_BRANDS = {\n 'amex': 'American Express',\n 'diners': 'Diners Club',\n 'discover': 'Discover',\n 'jcb': 'JCB',\n 'mastercard': 'Mastercard',\n 'unionpay': 'UnionPay',\n 'visa': 'Visa',\n 'unknown': '',\n}\n\nCURRENCIES = ordered_set([\n 'EUR', 'USD',\n 'AUD', 'BGN', 'BRL', 'CAD', 'CHF', 'CNY', 'CZK', 'DKK', 'GBP', 'HKD', 'HRK',\n 'HUF', 'IDR', 'ILS', 'INR', 'ISK', 'JPY', 'KRW', 'MXN', 'MYR', 'NOK', 'NZD',\n 'PHP', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'THB', 'TRY', 'ZAR'\n])\n\nD_CENT = Decimal('0.01')\nD_MAX = Decimal('999999999999.99')\nD_ZERO = Decimal('0.00')\n\nclass _DonationLimits(defaultdict):\n def __missing__(self, currency):\n minimum = Money.MINIMUMS[currency].amount\n eur_weekly_amounts = DONATION_LIMITS_EUR_USD['weekly']\n converted_weekly_amounts = (\n convert_symbolic_amount(eur_weekly_amounts[0], currency),\n 
convert_symbolic_amount(eur_weekly_amounts[1], currency)\n )\n r = {\n 'weekly': tuple(Money(x, currency) for x in converted_weekly_amounts),\n 'monthly': tuple(\n Money((x * Decimal(52) / Decimal(12)).quantize(minimum, rounding=ROUND_UP), currency)\n for x in converted_weekly_amounts\n ),\n 'yearly': tuple(Money(x * Decimal(52), currency) for x in converted_weekly_amounts),\n }\n self[currency] = r\n return r\n\nDONATION_LIMITS_WEEKLY_EUR_USD = (Decimal('0.01'), Decimal('100.00'))\nDONATION_LIMITS_EUR_USD = {\n 'weekly': DONATION_LIMITS_WEEKLY_EUR_USD,\n 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)\n for x in DONATION_LIMITS_WEEKLY_EUR_USD),\n 'yearly': tuple(x * Decimal(52) for x in DONATION_LIMITS_WEEKLY_EUR_USD),\n}\nDONATION_LIMITS = _DonationLimits(None, {\n 'EUR': {k: (Money(v[0], 'EUR'), Money(v[1], 'EUR')) for k, v in DONATION_LIMITS_EUR_USD.items()},\n 'USD': {k: (Money(v[0], 'USD'), Money(v[1], 'USD')) for k, v in DONATION_LIMITS_EUR_USD.items()},\n})\n\nDOMAIN_RE = re.compile(r'''\n ^\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+\n [a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'''\n # This is the regexp used by MangoPay (as of February 2017).\n # It rejects some valid but exotic addresses.\n # https://en.wikipedia.org/wiki/Email_address\n ^\n [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*\n @\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nEPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\nEUROZONE = set(\"AT BE CY DE EE ES FI FR GR IE IT LT LU LV MT NL PT SI SK\".split())\nSEPA = EUROZONE | set(\"AD BG CH CZ DK GB GI HR HU IS LI MC NO PL RO SE VA\".split())\n\nEVENTS = [\n Event('income', 1, _(\"Every week as long as I am receiving donations\")),\n Event('donate_reminder', 2, _(\"When it's time to renew my donations\")),\n Event('pledgee_joined', 16, _(\"When someone I pledge to joins Liberapay\")),\n Event('team_invite', 32, _(\"When someone invites me to join a team\")),\n Event('payin_failed', 2**11, _(\"When a payment I initiated fails\")),\n Event('payin_succeeded', 2**12, _(\"When a payment I initiated succeeds\")),\n Event('payin_refund_initiated', 2**13, _(\"When money is being refunded back to me\")),\n Event('upcoming_debit', 2**14, _(\"When an automatic donation renewal payment is upcoming\")),\n Event('missing_route', 2**15, _(\"When I no longer have any valid payment instrument\")),\n Event('renewal_aborted', 2**16, _(\"When a donation renewal payment has been aborted\")),\n]\ncheck_bits([e.bit for e in EVENTS])\nEVENTS = OrderedDict((e.name, e) for e in EVENTS)\nEVENTS_S = ' '.join(EVENTS.keys())\n\n# https://www.mangopay.com/pricing/\nFEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), 0) # 0.5%\nFEE_PAYIN_CARD = {\n 'EUR': Fees(Decimal('0.018'), Money('0.18', 'EUR')), # 1.8% + €0.18\n 'USD': Fees(Decimal('0.025'), Money('0.30', 'USD')), # 2.5% + $0.30\n}\nFEE_PAYIN_DIRECT_DEBIT = {\n 'EUR': Fees(0, Money('0.50', 'EUR')), # €0.50\n 'GBP': Fees(0, Money('0.50', 'GBP')), # Β£0.50\n}\nFEE_PAYOUT = {\n 'EUR': {\n 'domestic': (SEPA, Fees(0, 0)),\n 'foreign': Fees(0, 0),\n },\n 'GBP': {\n 'domestic': ({'GB'}, Fees(0, Money('0.45', 'GBP'))),\n 'foreign': Fees(0, Money('1.90', 'GBP')),\n },\n 'USD': {\n '*': Fees(0, Money('3.00', 'USD')),\n },\n}\nFEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 
3%\n\nHTML_A = Markup('<a href=\"%s\">%s</a>')\n\nIDENTITY_FIELDS = set(\"\"\"\n birthdate headquarters_address name nationality occupation organization_name\n postal_address\n\"\"\".split())\n\nINVOICE_DOC_MAX_SIZE = 5000000\nINVOICE_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'png']\nINVOICE_DOCS_LIMIT = 25\n\nINVOICE_NATURES = {\n 'expense': _(\"Expense Report\"),\n}\n\nINVOICE_STATUSES = {\n 'pre': _(\"Draft\"),\n 'new': _(\"Sent (awaiting approval)\"),\n 'retracted': _(\"Retracted\"),\n 'accepted': _(\"Accepted (awaiting payment)\"),\n 'paid': _(\"Paid\"),\n 'rejected': _(\"Rejected\"),\n}\n\n# https://docs.mangopay.com/api-references/kyc-rules/\nKYC_DOC_MAX_SIZE = 7000000\nKYC_DOC_MAX_SIZE_MB = int(KYC_DOC_MAX_SIZE / 1000000)\nKYC_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'gif', 'png']\nKYC_DOCS_EXTS_STR = ', '.join(KYC_DOCS_EXTS)\nKYC_INCOME_THRESHOLDS = [(i, Money(a, 'EUR')) for i, a in (\n (1, 18000),\n (2, 30000),\n (3, 50000),\n (4, 80000),\n (5, 120000),\n (6, 120000),\n)]\nKYC_PAYIN_YEARLY_THRESHOLD = Money('2500', 'EUR')\nKYC_PAYOUT_YEARLY_THRESHOLD = Money('1000', 'EUR')\n\nLAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)\n\nPARTICIPANT_KINDS = {\n 'individual': _(\"Individual\"),\n 'organization': _(\"Organization\"),\n 'group': _(\"Team\"),\n}\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\nPAYIN_BANK_WIRE_MIN = {k: Money('2.00', k) for k in ('EUR', 'USD')} # fee β‰ˆ 0.99%\nPAYIN_BANK_WIRE_TARGET = {k: Money('5.00', k) for k in ('EUR', 'USD')} # fee β‰ˆ 0.6%\nPAYIN_BANK_WIRE_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\nPAYIN_CARD_MIN = {\n 'EUR': Money('15.00', 'EUR'), # fee β‰ˆ 3.5%\n 'USD': Money('20.00', 'USD'), # fee β‰ˆ 4.58%\n}\nPAYIN_CARD_TARGET = {\n 'EUR': Money('92.00', 'EUR'), # fee β‰ˆ 2.33%\n 'USD': Money('95.00', 'USD'), # fee β‰ˆ 3.27%\n}\nPAYIN_CARD_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\nPAYIN_DIRECT_DEBIT_COUNTRIES = {\n # https://support.gocardless.com/hc/en-gb/articles/115005758445\n 'EUR': EUROZONE | set(\"MC SM\".split()),\n}\nPAYIN_DIRECT_DEBIT_MIN_EUR_GBP = Decimal('15.00') # fee β‰ˆ 3.78%\nPAYIN_DIRECT_DEBIT_MIN = {\n 'EUR': Money(PAYIN_DIRECT_DEBIT_MIN_EUR_GBP, 'EUR'),\n 'GBP': Money(PAYIN_DIRECT_DEBIT_MIN_EUR_GBP, 'GBP'),\n}\nPAYIN_DIRECT_DEBIT_TARGET_EUR_GBP = Decimal('99.00') # fee β‰ˆ 0.59%\nPAYIN_DIRECT_DEBIT_TARGET = {\n 'EUR': Money(PAYIN_DIRECT_DEBIT_TARGET_EUR_GBP, 'EUR'),\n 'GBP': Money(PAYIN_DIRECT_DEBIT_TARGET_EUR_GBP, 'GBP'),\n}\nPAYIN_DIRECT_DEBIT_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\n\nPAYIN_AMOUNTS = {\n 'paypal': {\n 'min_acceptable': MoneyAutoConvertDict({ # fee > 10%\n 'EUR': Money('2.00', 'EUR'),\n 'USD': Money('2.00', 'USD'),\n }),\n 'min_recommended': MoneyAutoConvertDict({ # fee < 8%\n 'EUR': Money('10.00', 'EUR'),\n 'USD': Money('12.00', 'USD'),\n }),\n 'low_fee': MoneyAutoConvertDict({ # fee < 6%\n 'EUR': Money('40.00', 'EUR'),\n 'USD': Money('48.00', 'USD'),\n }),\n 'max_acceptable': MoneyAutoConvertDict({\n 'EUR': Money('5000.00', 'EUR'),\n 'USD': Money('5000.00', 'USD'),\n }),\n },\n 'stripe': {\n 'min_acceptable': MoneyAutoConvertDict({ # fee > 10%\n 'EUR': Money('2.00', 'EUR'),\n 'USD': Money('2.00', 'USD'),\n }),\n 'min_recommended': MoneyAutoConvertDict({ # fee < 8%\n 'EUR': Money('10.00', 'EUR'),\n 'USD': Money('12.00', 'USD'),\n }),\n 'low_fee': MoneyAutoConvertDict({ # fee < 6%\n 'EUR': Money('40.00', 'EUR'),\n 'USD': Money('48.00', 'USD'),\n }),\n 'max_acceptable': MoneyAutoConvertDict({\n 'EUR': Money('5000.00', 'EUR'),\n 'USD': Money('5000.00', 'USD'),\n }),\n 
},\n}\n\nPAYMENT_METHODS = {\n 'mango-ba': _(\"Direct Debit\"),\n 'mango-bw': _(\"Bank Wire\"),\n 'mango-cc': _(\"Credit Card\"),\n 'paypal': \"PayPal\",\n 'stripe-card': _(\"Credit/Debit Card\"),\n 'stripe-sdd': _(\"Direct Debit\"),\n}\nPAYMENT_SLUGS = {\n 'mango-ba': 'direct-debit',\n 'mango-bw': 'bankwire',\n 'mango-cc': 'card',\n}\n\nPAYOUT_COUNTRIES = {\n 'paypal': set(\"\"\"\n AD AE AG AI AL AM AN AO AR AT AU AW AZ BA BB BE BF BG BH BI BJ BM BN BO\n BR BS BT BW BY BZ C2 CA CD CG CH CI CK CL CM CO CR CV CY CZ DE DJ DK DM\n DO DZ EC EE EG ER ES ET FI FJ FK FM FO FR GA GD GE GF GI GL GM GN GP GR\n GT GW GY HK HN HR HU ID IE IL IN IS IT JM JO JP KE KG KH KI KM KN KR KW\n KY KZ LA LC LI LK LS LT LU LV MA MC MD ME MG MH MK ML MN MQ MR MS MT MU\n MV MW MX MY MZ NA NC NE NF NG NI NL NO NP NR NU NZ OM PA PE PF PG PH PL\n PM PN PT PW PY QA RE RO RS RU RW SA SB SC SE SG SH SI SJ SK SL SM SN SO\n SR ST SV SZ TC TD TG TH TJ TM TN TO TT TT TT TT TV TW TZ UA UG GB US UY\n VA VC VE VG VN VU WF WS YE YT ZA ZM ZW\n PR\n \"\"\".split()), # https://www.paypal.com/us/webapps/mpp/country-worldwide\n\n 'stripe': set(\"\"\"\n AT AU BE BG CA CH CY CZ DE DK EE ES FI FR GB GR HK IE IT JP LT LU LV MT\n MX MY NL NO NZ PL PT RO SE SG SI SK US\n PR\n \"\"\".split()), # https://stripe.com/global\n}\n\n# https://developer.paypal.com/docs/api/reference/currency-codes/\nPAYPAL_CURRENCIES = set(\"\"\"\n AUD CAD CHF CZK DKK EUR GBP HKD HUF ILS JPY MXN NOK NZD PHP PLN RUB SEK SGD\n THB TWD USD\n\"\"\".split())\n\nPERIOD_CONVERSION_MAP = {\n ('weekly', 'weekly'): Decimal(1),\n ('monthly', 'weekly'): Decimal(12) / Decimal(52),\n ('yearly', 'weekly'): Decimal(1) / Decimal(52),\n ('weekly', 'monthly'): Decimal(52) / Decimal(12),\n ('monthly', 'monthly'): Decimal(1),\n ('yearly', 'monthly'): Decimal(1) / Decimal(12),\n ('weekly', 'yearly'): Decimal(52),\n ('monthly', 'yearly'): Decimal(12),\n ('yearly', 'yearly'): Decimal(1),\n}\n\nPERIOD_CONVERSION_RATES = {\n 'weekly': Decimal(1),\n 'monthly': Decimal(12) / Decimal(52),\n 'yearly': Decimal(1) / Decimal(52),\n}\n\nPOSTAL_ADDRESS_KEYS = (\n 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'\n)\nPOSTAL_ADDRESS_KEYS_LIBERAPAY = (\n 'country', 'region', 'city', 'postal_code', 'local_address'\n)\nPOSTAL_ADDRESS_KEYS_STRIPE = (\n 'line1', 'line2', 'city', 'state', 'postal_code', 'country'\n)\n\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', (_(\"Hide total giving from others.\"), False)),\n ('hide_receiving', (_(\"Hide total receiving from others.\"), False)),\n ('hide_from_search', (_(\"Hide this profile from search results on Liberapay.\"), True)),\n ('profile_noindex', (_(\"Tell web search engines not to index this profile.\"), True)),\n ('hide_from_lists', (_(\"Prevent this profile from being listed on Liberapay.\"), True)),\n])\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nPRIVILEGES = dict(admin=1, run_payday=2)\ncheck_bits(list(PRIVILEGES.values()))\n\nPROFILE_VISIBILITY_ATTRS = ('profile_noindex', 'hide_from_lists', 'hide_from_search')\n\nPUBLIC_NAME_MAX_SIZE = 64\n\nQUARANTINE = timedelta(weeks=0)\n\nRATE_LIMITS = {\n 'add_email.source': (5, 60*60*24), # 5 per day\n 'add_email.target': (2, 60*60*24), # 2 per day\n 'admin.http-unsafe': (10, 60*60*24), # 10 per day\n 'change_currency': (4, 60*60*24*7), # 4 per week\n 'change_password': (7, 60*60*24*7), # 7 per week\n 'change_username': (7, 60*60*24*7), # 7 per week\n 'check_password': (25, 60*60*24*7), # 25 per week\n 'elsewhere-lookup.ip-addr': (5, 20), # 5 per 20 seconds\n 
'email.bypass_error': (2, 60*60*24*7), # 2 per week\n 'email.unblacklist.source': (5, 60*60*24*7), # 5 per week\n 'email.unblacklist.target': (3, 60*60*24*7), # 3 per week\n 'http-query.ip-addr': (10, 10), # 10 per 10 seconds\n 'http-query.user': (10, 10), # 10 per 10 seconds\n 'http-unsafe.ip-addr': (10, 10), # 10 per 10 seconds\n 'http-unsafe.user': (10, 10), # 10 per 10 seconds\n 'insert_identity': (7, 60*60*24*7), # 7 per week\n 'log-in.country': (10, 60), # 10 per minute per country\n 'log-in.email': (10, 60*60*24), # 10 per day\n 'log-in.email.not-verified': (2, 60*60*24), # 2 per day\n 'log-in.email.verified': (10, 60*60*24), # 10 per day\n 'log-in.ip-addr': (5, 5*60), # 5 per 5 minutes per IP address\n 'log-in.password': (3, 60*60), # 3 per hour\n 'make_team': (5, 60*60*24*7), # 5 per week\n 'payin.from-user': (15, 60*60*24*7), # 15 per week\n 'payin.from-ip-addr': (15, 60*60*24*7), # 15 per week\n 'refetch_elsewhere_data': (1, 60*60*24*7), # retry after one week\n 'refetch_repos': (1, 60*60*24), # retry after one day\n 'sign-up.email': (1, 5*60), # this is used to detect near-simultaneous requests,\n # so 5 minutes should be plenty enough\n 'sign-up.ip-addr': (5, 60*60), # 5 per hour per IP address\n 'sign-up.ip-net': (15, 60*60), # 15 per hour per IP network\n 'sign-up.country': (5, 5*60), # 5 per 5 minutes per country\n 'sign-up.ip-version': (15, 5*60), # 15 per 5 minutes per IP version\n}\n\nSAFE_METHODS = {'GET', 'HEAD', 'OPTIONS'}\n\nSESSION = 'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\ndef make_standard_tip(label, weekly, currency):\n precision = get_currency_precision(currency)\n minimum = D_CENT if precision == 2 else Decimal(10) ** (-precision)\n return StandardTip(\n label,\n Money(weekly, currency),\n Money((weekly / PERIOD_CONVERSION_RATES['monthly']).quantize(minimum), currency),\n Money((weekly / PERIOD_CONVERSION_RATES['yearly']).quantize(minimum), currency),\n )\n\n\nclass _StandardTips(defaultdict):\n def __missing__(self, currency):\n r = [\n make_standard_tip(\n label, convert_symbolic_amount(weekly, currency), currency\n ) for label, weekly in STANDARD_TIPS_EUR_USD\n ]\n self[currency] = r\n return r\n\n\nSTANDARD_TIPS_EUR_USD = (\n (_(\"Symbolic\"), Decimal('0.01')),\n (_(\"Small\"), Decimal('0.25')),\n (_(\"Medium\"), Decimal('1.00')),\n (_(\"Large\"), Decimal('5.00')),\n (_(\"Maximum\"), DONATION_LIMITS_EUR_USD['weekly'][1]),\n)\nSTANDARD_TIPS = _StandardTips(None, {\n 'EUR': [make_standard_tip(label, weekly, 'EUR') for label, weekly in STANDARD_TIPS_EUR_USD],\n 'USD': [make_standard_tip(label, weekly, 'USD') for label, weekly in STANDARD_TIPS_EUR_USD],\n})\n\nSUMMARY_MAX_SIZE = 100\n\nTAKE_THROTTLING_THRESHOLD = MoneyAutoConvertDict(\n {k: Money('1.00', k) for k in ('EUR', 'USD')}\n)\n\nUSERNAME_MAX_SIZE = 32\nUSERNAME_SUFFIX_BLACKLIST = set('.txt .html .htm .json .xml'.split())\n\ndel _\n", "path": "liberapay/constants.py" } ]
[ { "content": "from collections import defaultdict, namedtuple, OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom decimal import Decimal, ROUND_FLOOR, ROUND_HALF_UP, ROUND_UP\nimport re\n\nfrom babel.numbers import get_currency_precision\nfrom mangopay.utils import Money\nfrom markupsafe import Markup\nfrom pando.utils import utc\n\n\ndef ordered_set(keys):\n return OrderedDict((k, None) for k in keys)\n\n\ndef check_bits(bits):\n assert len(set(bits)) == len(bits) # no duplicates\n assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit\n\n\nEvent = namedtuple('Event', 'name bit title')\n\n\nclass Fees(namedtuple('Fees', ('var', 'fix'))):\n VAT = Decimal('0.17') # 17% (Luxembourg rate)\n VAT_1 = VAT + 1\n\n @property\n def with_vat(self):\n r = (self.var * self.VAT_1 * 100, self.fix * self.VAT_1)\n return r[0] if not r[1] else r[1].round_up() if not r[0] else r\n\n\ndef to_precision(x, precision, rounding=ROUND_HALF_UP):\n \"\"\"Round `x` to keep only `precision` of its most significant digits.\n\n >>> to_precision(Decimal('0.0086820'), 2)\n Decimal('0.0087')\n >>> to_precision(Decimal('13567.89'), 3)\n Decimal('13600')\n >>> to_precision(Decimal('0.000'), 4)\n Decimal('0')\n \"\"\"\n if x == 0:\n return Decimal(0)\n log10 = x.log10().to_integral(ROUND_FLOOR)\n # round\n factor = Decimal(10) ** (log10 + 1)\n r = (x / factor).quantize(Decimal(10) ** -precision, rounding=rounding) * factor\n # remove trailing zeros\n r = r.quantize(Decimal(10) ** (log10 - precision + 1))\n return r\n\n\ndef convert_symbolic_amount(amount, target_currency, precision=2, rounding=ROUND_HALF_UP):\n from liberapay.website import website\n rate = website.currency_exchange_rates[('EUR', target_currency)]\n minimum = Money.MINIMUMS[target_currency].amount\n return max(\n to_precision(amount * rate, precision, rounding).quantize(minimum, rounding),\n minimum\n )\n\n\nclass MoneyAutoConvertDict(defaultdict):\n\n def __init__(self, *args, **kw):\n super(MoneyAutoConvertDict, self).__init__(None, *args, **kw)\n\n def __missing__(self, currency):\n r = Money(convert_symbolic_amount(self['EUR'].amount, currency, 1), currency)\n self[currency] = r\n return r\n\n\nStandardTip = namedtuple('StandardTip', 'label weekly monthly yearly')\n\n\n_ = lambda a: a\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_.\")\n\nAVATAR_QUERY = '?s=160&d=404'\nAVATAR_SOURCES = (\n 'libravatar bitbucket facebook github gitlab google mastodon pleroma twitch twitter youtube'\n).split()\n\nBASE64URL_CHARS = set('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_')\n\nBIRTHDAY = date(2015, 5, 22)\n\nCARD_BRANDS = {\n 'amex': 'American Express',\n 'diners': 'Diners Club',\n 'discover': 'Discover',\n 'jcb': 'JCB',\n 'mastercard': 'Mastercard',\n 'unionpay': 'UnionPay',\n 'visa': 'Visa',\n 'unknown': '',\n}\n\nCURRENCIES = ordered_set([\n 'EUR', 'USD',\n 'AUD', 'BGN', 'BRL', 'CAD', 'CHF', 'CNY', 'CZK', 'DKK', 'GBP', 'HKD', 'HRK',\n 'HUF', 'IDR', 'ILS', 'INR', 'ISK', 'JPY', 'KRW', 'MXN', 'MYR', 'NOK', 'NZD',\n 'PHP', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'THB', 'TRY', 'ZAR'\n])\n\nD_CENT = Decimal('0.01')\nD_MAX = Decimal('999999999999.99')\nD_ZERO = Decimal('0.00')\n\nclass _DonationLimits(defaultdict):\n def __missing__(self, currency):\n minimum = Money.MINIMUMS[currency].amount\n eur_weekly_amounts = DONATION_LIMITS_EUR_USD['weekly']\n converted_weekly_amounts = (\n convert_symbolic_amount(eur_weekly_amounts[0], currency),\n 
convert_symbolic_amount(eur_weekly_amounts[1], currency)\n )\n r = {\n 'weekly': tuple(Money(x, currency) for x in converted_weekly_amounts),\n 'monthly': tuple(\n Money((x * Decimal(52) / Decimal(12)).quantize(minimum, rounding=ROUND_UP), currency)\n for x in converted_weekly_amounts\n ),\n 'yearly': tuple(Money(x * Decimal(52), currency) for x in converted_weekly_amounts),\n }\n self[currency] = r\n return r\n\nDONATION_LIMITS_WEEKLY_EUR_USD = (Decimal('0.01'), Decimal('100.00'))\nDONATION_LIMITS_EUR_USD = {\n 'weekly': DONATION_LIMITS_WEEKLY_EUR_USD,\n 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)\n for x in DONATION_LIMITS_WEEKLY_EUR_USD),\n 'yearly': tuple(x * Decimal(52) for x in DONATION_LIMITS_WEEKLY_EUR_USD),\n}\nDONATION_LIMITS = _DonationLimits(None, {\n 'EUR': {k: (Money(v[0], 'EUR'), Money(v[1], 'EUR')) for k, v in DONATION_LIMITS_EUR_USD.items()},\n 'USD': {k: (Money(v[0], 'USD'), Money(v[1], 'USD')) for k, v in DONATION_LIMITS_EUR_USD.items()},\n})\n\nDOMAIN_RE = re.compile(r'''\n ^\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+\n [a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'''\n # This is the regexp used by MangoPay (as of February 2017).\n # It rejects some valid but exotic addresses.\n # https://en.wikipedia.org/wiki/Email_address\n ^\n [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*\n @\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nEPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\nEUROZONE = set(\"AT BE CY DE EE ES FI FR GR IE IT LT LU LV MT NL PT SI SK\".split())\nSEPA = EUROZONE | set(\"AD BG CH CZ DK GB GI HR HU IS LI MC NO PL RO SE VA\".split())\n\nEVENTS = [\n Event('income', 1, _(\"Every week as long as I am receiving donations\")),\n Event('donate_reminder', 2, _(\"When it's time to renew my donations\")),\n Event('pledgee_joined', 16, _(\"When someone I pledge to joins Liberapay\")),\n Event('team_invite', 32, _(\"When someone invites me to join a team\")),\n Event('payin_failed', 2**11, _(\"When a payment I initiated fails\")),\n Event('payin_succeeded', 2**12, _(\"When a payment I initiated succeeds\")),\n Event('payin_refund_initiated', 2**13, _(\"When money is being refunded back to me\")),\n Event('upcoming_debit', 2**14, _(\"When an automatic donation renewal payment is upcoming\")),\n Event('missing_route', 2**15, _(\"When I no longer have any valid payment instrument\")),\n Event('renewal_aborted', 2**16, _(\"When a donation renewal payment has been aborted\")),\n]\ncheck_bits([e.bit for e in EVENTS])\nEVENTS = OrderedDict((e.name, e) for e in EVENTS)\nEVENTS_S = ' '.join(EVENTS.keys())\n\n# https://www.mangopay.com/pricing/\nFEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), 0) # 0.5%\nFEE_PAYIN_CARD = {\n 'EUR': Fees(Decimal('0.018'), Money('0.18', 'EUR')), # 1.8% + €0.18\n 'USD': Fees(Decimal('0.025'), Money('0.30', 'USD')), # 2.5% + $0.30\n}\nFEE_PAYIN_DIRECT_DEBIT = {\n 'EUR': Fees(0, Money('0.50', 'EUR')), # €0.50\n 'GBP': Fees(0, Money('0.50', 'GBP')), # £0.50\n}\nFEE_PAYOUT = {\n 'EUR': {\n 'domestic': (SEPA, Fees(0, 0)),\n 'foreign': Fees(0, 0),\n },\n 'GBP': {\n 'domestic': ({'GB'}, Fees(0, Money('0.45', 'GBP'))),\n 'foreign': Fees(0, Money('1.90', 'GBP')),\n },\n 'USD': {\n '*': Fees(0, Money('3.00', 'USD')),\n },\n}\nFEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%
\n\nHTML_A = Markup('<a href=\"%s\">%s</a>')\n\nIDENTITY_FIELDS = set(\"\"\"\n birthdate headquarters_address name nationality occupation organization_name\n postal_address\n\"\"\".split())\n\nINVOICE_DOC_MAX_SIZE = 5000000\nINVOICE_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'png']\nINVOICE_DOCS_LIMIT = 25\n\nINVOICE_NATURES = {\n 'expense': _(\"Expense Report\"),\n}\n\nINVOICE_STATUSES = {\n 'pre': _(\"Draft\"),\n 'new': _(\"Sent (awaiting approval)\"),\n 'retracted': _(\"Retracted\"),\n 'accepted': _(\"Accepted (awaiting payment)\"),\n 'paid': _(\"Paid\"),\n 'rejected': _(\"Rejected\"),\n}\n\n# https://docs.mangopay.com/api-references/kyc-rules/\nKYC_DOC_MAX_SIZE = 7000000\nKYC_DOC_MAX_SIZE_MB = int(KYC_DOC_MAX_SIZE / 1000000)\nKYC_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'gif', 'png']\nKYC_DOCS_EXTS_STR = ', '.join(KYC_DOCS_EXTS)\nKYC_INCOME_THRESHOLDS = [(i, Money(a, 'EUR')) for i, a in (\n (1, 18000),\n (2, 30000),\n (3, 50000),\n (4, 80000),\n (5, 120000),\n (6, 120000),\n)]\nKYC_PAYIN_YEARLY_THRESHOLD = Money('2500', 'EUR')\nKYC_PAYOUT_YEARLY_THRESHOLD = Money('1000', 'EUR')\n\nLAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)\n\nPARTICIPANT_KINDS = {\n 'individual': _(\"Individual\"),\n 'organization': _(\"Organization\"),\n 'group': _(\"Team\"),\n}\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\nPAYIN_BANK_WIRE_MIN = {k: Money('2.00', k) for k in ('EUR', 'USD')} # fee ≈ 0.99%\nPAYIN_BANK_WIRE_TARGET = {k: Money('5.00', k) for k in ('EUR', 'USD')} # fee ≈ 0.6%\nPAYIN_BANK_WIRE_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\nPAYIN_CARD_MIN = {\n 'EUR': Money('15.00', 'EUR'), # fee ≈ 3.5%\n 'USD': Money('20.00', 'USD'), # fee ≈ 4.58%\n}\nPAYIN_CARD_TARGET = {\n 'EUR': Money('92.00', 'EUR'), # fee ≈ 2.33%\n 'USD': Money('95.00', 'USD'), # fee ≈ 3.27%\n}\nPAYIN_CARD_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\nPAYIN_DIRECT_DEBIT_COUNTRIES = {\n # https://support.gocardless.com/hc/en-gb/articles/115005758445\n 'EUR': EUROZONE | set(\"MC SM\".split()),\n}\nPAYIN_DIRECT_DEBIT_MIN_EUR_GBP = Decimal('15.00') # fee ≈ 3.78%\nPAYIN_DIRECT_DEBIT_MIN = {\n 'EUR': Money(PAYIN_DIRECT_DEBIT_MIN_EUR_GBP, 'EUR'),\n 'GBP': Money(PAYIN_DIRECT_DEBIT_MIN_EUR_GBP, 'GBP'),\n}\nPAYIN_DIRECT_DEBIT_TARGET_EUR_GBP = Decimal('99.00') # fee ≈ 0.59%\nPAYIN_DIRECT_DEBIT_TARGET = {\n 'EUR': Money(PAYIN_DIRECT_DEBIT_TARGET_EUR_GBP, 'EUR'),\n 'GBP': Money(PAYIN_DIRECT_DEBIT_TARGET_EUR_GBP, 'GBP'),\n}\nPAYIN_DIRECT_DEBIT_MAX = {k: Money('2500.00', k) for k in ('EUR', 'USD')}\n\nPAYIN_AMOUNTS = {\n 'paypal': {\n 'min_acceptable': MoneyAutoConvertDict({ # fee > 10%\n 'EUR': Money('2.00', 'EUR'),\n 'USD': Money('2.00', 'USD'),\n }),\n 'min_recommended': MoneyAutoConvertDict({ # fee < 8%\n 'EUR': Money('10.00', 'EUR'),\n 'USD': Money('12.00', 'USD'),\n }),\n 'low_fee': MoneyAutoConvertDict({ # fee < 6%\n 'EUR': Money('40.00', 'EUR'),\n 'USD': Money('48.00', 'USD'),\n }),\n 'max_acceptable': MoneyAutoConvertDict({\n 'EUR': Money('5000.00', 'EUR'),\n 'USD': Money('5000.00', 'USD'),\n }),\n },\n 'stripe': {\n 'min_acceptable': MoneyAutoConvertDict({ # fee > 10%\n 'EUR': Money('2.00', 'EUR'),\n 'USD': Money('2.00', 'USD'),\n }),\n 'min_recommended': MoneyAutoConvertDict({ # fee < 8%\n 'EUR': Money('10.00', 'EUR'),\n 'USD': Money('12.00', 'USD'),\n }),\n 'low_fee': MoneyAutoConvertDict({ # fee < 6%\n 'EUR': Money('40.00', 'EUR'),\n 'USD': Money('48.00', 'USD'),\n }),\n 'max_acceptable': MoneyAutoConvertDict({\n 'EUR': Money('5000.00', 'EUR'),\n 'USD': Money('5000.00', 'USD'),\n }),\n 
},\n}\n\nPAYIN_SETTLEMENT_DELAYS = {\n 'stripe-sdd': timedelta(days=6),\n}\n\nPAYMENT_METHODS = {\n 'mango-ba': _(\"Direct Debit\"),\n 'mango-bw': _(\"Bank Wire\"),\n 'mango-cc': _(\"Credit Card\"),\n 'paypal': \"PayPal\",\n 'stripe-card': _(\"Credit/Debit Card\"),\n 'stripe-sdd': _(\"Direct Debit\"),\n}\nPAYMENT_SLUGS = {\n 'mango-ba': 'direct-debit',\n 'mango-bw': 'bankwire',\n 'mango-cc': 'card',\n}\n\nPAYOUT_COUNTRIES = {\n 'paypal': set(\"\"\"\n AD AE AG AI AL AM AN AO AR AT AU AW AZ BA BB BE BF BG BH BI BJ BM BN BO\n BR BS BT BW BY BZ C2 CA CD CG CH CI CK CL CM CO CR CV CY CZ DE DJ DK DM\n DO DZ EC EE EG ER ES ET FI FJ FK FM FO FR GA GD GE GF GI GL GM GN GP GR\n GT GW GY HK HN HR HU ID IE IL IN IS IT JM JO JP KE KG KH KI KM KN KR KW\n KY KZ LA LC LI LK LS LT LU LV MA MC MD ME MG MH MK ML MN MQ MR MS MT MU\n MV MW MX MY MZ NA NC NE NF NG NI NL NO NP NR NU NZ OM PA PE PF PG PH PL\n PM PN PT PW PY QA RE RO RS RU RW SA SB SC SE SG SH SI SJ SK SL SM SN SO\n SR ST SV SZ TC TD TG TH TJ TM TN TO TT TT TT TT TV TW TZ UA UG GB US UY\n VA VC VE VG VN VU WF WS YE YT ZA ZM ZW\n PR\n \"\"\".split()), # https://www.paypal.com/us/webapps/mpp/country-worldwide\n\n 'stripe': set(\"\"\"\n AT AU BE BG CA CH CY CZ DE DK EE ES FI FR GB GR HK IE IT JP LT LU LV MT\n MX MY NL NO NZ PL PT RO SE SG SI SK US\n PR\n \"\"\".split()), # https://stripe.com/global\n}\n\n# https://developer.paypal.com/docs/api/reference/currency-codes/\nPAYPAL_CURRENCIES = set(\"\"\"\n AUD CAD CHF CZK DKK EUR GBP HKD HUF ILS JPY MXN NOK NZD PHP PLN RUB SEK SGD\n THB TWD USD\n\"\"\".split())\n\nPERIOD_CONVERSION_MAP = {\n ('weekly', 'weekly'): Decimal(1),\n ('monthly', 'weekly'): Decimal(12) / Decimal(52),\n ('yearly', 'weekly'): Decimal(1) / Decimal(52),\n ('weekly', 'monthly'): Decimal(52) / Decimal(12),\n ('monthly', 'monthly'): Decimal(1),\n ('yearly', 'monthly'): Decimal(1) / Decimal(12),\n ('weekly', 'yearly'): Decimal(52),\n ('monthly', 'yearly'): Decimal(12),\n ('yearly', 'yearly'): Decimal(1),\n}\n\nPERIOD_CONVERSION_RATES = {\n 'weekly': Decimal(1),\n 'monthly': Decimal(12) / Decimal(52),\n 'yearly': Decimal(1) / Decimal(52),\n}\n\nPOSTAL_ADDRESS_KEYS = (\n 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'\n)\nPOSTAL_ADDRESS_KEYS_LIBERAPAY = (\n 'country', 'region', 'city', 'postal_code', 'local_address'\n)\nPOSTAL_ADDRESS_KEYS_STRIPE = (\n 'line1', 'line2', 'city', 'state', 'postal_code', 'country'\n)\n\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', (_(\"Hide total giving from others.\"), False)),\n ('hide_receiving', (_(\"Hide total receiving from others.\"), False)),\n ('hide_from_search', (_(\"Hide this profile from search results on Liberapay.\"), True)),\n ('profile_noindex', (_(\"Tell web search engines not to index this profile.\"), True)),\n ('hide_from_lists', (_(\"Prevent this profile from being listed on Liberapay.\"), True)),\n])\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nPRIVILEGES = dict(admin=1, run_payday=2)\ncheck_bits(list(PRIVILEGES.values()))\n\nPROFILE_VISIBILITY_ATTRS = ('profile_noindex', 'hide_from_lists', 'hide_from_search')\n\nPUBLIC_NAME_MAX_SIZE = 64\n\nQUARANTINE = timedelta(weeks=0)\n\nRATE_LIMITS = {\n 'add_email.source': (5, 60*60*24), # 5 per day\n 'add_email.target': (2, 60*60*24), # 2 per day\n 'admin.http-unsafe': (10, 60*60*24), # 10 per day\n 'change_currency': (4, 60*60*24*7), # 4 per week\n 'change_password': (7, 60*60*24*7), # 7 per week\n 'change_username': (7, 60*60*24*7), # 7 per week\n 'check_password': (25, 60*60*24*7), # 25 per week\n 
'elsewhere-lookup.ip-addr': (5, 20), # 5 per 20 seconds\n 'email.bypass_error': (2, 60*60*24*7), # 2 per week\n 'email.unblacklist.source': (5, 60*60*24*7), # 5 per week\n 'email.unblacklist.target': (3, 60*60*24*7), # 3 per week\n 'http-query.ip-addr': (10, 10), # 10 per 10 seconds\n 'http-query.user': (10, 10), # 10 per 10 seconds\n 'http-unsafe.ip-addr': (10, 10), # 10 per 10 seconds\n 'http-unsafe.user': (10, 10), # 10 per 10 seconds\n 'insert_identity': (7, 60*60*24*7), # 7 per week\n 'log-in.country': (10, 60), # 10 per minute per country\n 'log-in.email': (10, 60*60*24), # 10 per day\n 'log-in.email.not-verified': (2, 60*60*24), # 2 per day\n 'log-in.email.verified': (10, 60*60*24), # 10 per day\n 'log-in.ip-addr': (5, 5*60), # 5 per 5 minutes per IP address\n 'log-in.password': (3, 60*60), # 3 per hour\n 'make_team': (5, 60*60*24*7), # 5 per week\n 'payin.from-user': (15, 60*60*24*7), # 15 per week\n 'payin.from-ip-addr': (15, 60*60*24*7), # 15 per week\n 'refetch_elsewhere_data': (1, 60*60*24*7), # retry after one week\n 'refetch_repos': (1, 60*60*24), # retry after one day\n 'sign-up.email': (1, 5*60), # this is used to detect near-simultaneous requests,\n # so 5 minutes should be plenty enough\n 'sign-up.ip-addr': (5, 60*60), # 5 per hour per IP address\n 'sign-up.ip-net': (15, 60*60), # 15 per hour per IP network\n 'sign-up.country': (5, 5*60), # 5 per 5 minutes per country\n 'sign-up.ip-version': (15, 5*60), # 15 per 5 minutes per IP version\n}\n\nSAFE_METHODS = {'GET', 'HEAD', 'OPTIONS'}\n\nSESSION = 'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\ndef make_standard_tip(label, weekly, currency):\n precision = get_currency_precision(currency)\n minimum = D_CENT if precision == 2 else Decimal(10) ** (-precision)\n return StandardTip(\n label,\n Money(weekly, currency),\n Money((weekly / PERIOD_CONVERSION_RATES['monthly']).quantize(minimum), currency),\n Money((weekly / PERIOD_CONVERSION_RATES['yearly']).quantize(minimum), currency),\n )\n\n\nclass _StandardTips(defaultdict):\n def __missing__(self, currency):\n r = [\n make_standard_tip(\n label, convert_symbolic_amount(weekly, currency), currency\n ) for label, weekly in STANDARD_TIPS_EUR_USD\n ]\n self[currency] = r\n return r\n\n\nSTANDARD_TIPS_EUR_USD = (\n (_(\"Symbolic\"), Decimal('0.01')),\n (_(\"Small\"), Decimal('0.25')),\n (_(\"Medium\"), Decimal('1.00')),\n (_(\"Large\"), Decimal('5.00')),\n (_(\"Maximum\"), DONATION_LIMITS_EUR_USD['weekly'][1]),\n)\nSTANDARD_TIPS = _StandardTips(None, {\n 'EUR': [make_standard_tip(label, weekly, 'EUR') for label, weekly in STANDARD_TIPS_EUR_USD],\n 'USD': [make_standard_tip(label, weekly, 'USD') for label, weekly in STANDARD_TIPS_EUR_USD],\n})\n\nSUMMARY_MAX_SIZE = 100\n\nTAKE_THROTTLING_THRESHOLD = MoneyAutoConvertDict(\n {k: Money('1.00', k) for k in ('EUR', 'USD')}\n)\n\nUSERNAME_MAX_SIZE = 32\nUSERNAME_SUFFIX_BLACKLIST = set('.txt .html .htm .json .xml'.split())\n\ndel _\n", "path": "liberapay/constants.py" } ]
diff --git a/emails/payin_sdd_created.spt b/emails/payin_sdd_created.spt index 0df2053df7..712ae3ffbc 100644 --- a/emails/payin_sdd_created.spt +++ b/emails/payin_sdd_created.spt @@ -28,3 +28,8 @@ descriptor=statement_descriptor ) }}</p> % endif + +<p>{{ _( + "Processing this kind of payment takes {timedelta} on average.", + timedelta=constants.PAYIN_SETTLEMENT_DELAYS['stripe-sdd'] +) }}</p> diff --git a/liberapay/constants.py b/liberapay/constants.py index d12b4176dd..bdc77897ff 100644 --- a/liberapay/constants.py +++ b/liberapay/constants.py @@ -330,6 +330,10 @@ def __missing__(self, currency): }, } +PAYIN_SETTLEMENT_DELAYS = { + 'stripe-sdd': timedelta(days=6), +} + PAYMENT_METHODS = { 'mango-ba': _("Direct Debit"), 'mango-bw': _("Bank Wire"), diff --git a/simplates/confirm.spt b/simplates/confirm.spt index 56b4be70be..4050c6e6ce 100644 --- a/simplates/confirm.spt +++ b/simplates/confirm.spt @@ -7,8 +7,9 @@ <form action="" method="POST"> % include "templates/form-repost.html" <input type="hidden" name="confirmed" value="true" /> - <div class="alert alert-{{ cls }}">{{ msg }}</div> + <div class="alert alert-{{ cls }}">{{ escape(msg).replace('\n', '<br>'|safe) }}</div> <a class="btn btn-default" href="{{ response.sanitize_untrusted_url(back_to) if back_to else '' }}">{{ _("Cancel") }}</a> + &nbsp;&nbsp; <button class="btn btn-{{ cls }}">{{ _("Confirm") }}</button> </form> % endblock diff --git a/tests/py/test_settings.py b/tests/py/test_settings.py index ac19e23c21..8b4e31dac4 100644 --- a/tests/py/test_settings.py +++ b/tests/py/test_settings.py @@ -52,12 +52,26 @@ def test_team_participant_doesnt_show_up_on_explore_teams(self): class TestUsername(Harness): + def test_participant_can_set_username(self): + alice = self.make_participant(None) + r = self.client.POST( + f'/~{alice.id}/edit/username', {'username': 'alice'}, + auth_as=alice, raise_immediately=False + ) + assert r.code == 302 + assert r.headers[b'Location'].startswith(b'/alice/edit/username') + alice = alice.refetch() + assert alice.username == 'alice' + def change_username(self, new_username, auth_as='alice'): if auth_as: auth_as = self.make_participant(auth_as) - r = self.client.POST('/alice/edit/username', {'username': new_username}, - auth_as=auth_as, raise_immediately=False) + r = self.client.POST( + '/alice/edit/username', + {'username': new_username, 'confirmed': 'true'}, + auth_as=auth_as, raise_immediately=False, + ) return r def test_participant_can_change_their_username(self): @@ -111,6 +125,10 @@ def test_change_team_name(self): team.add_member(bob) r = self.client.POST('/team/edit/username', {'username': 'Team'}, auth_as=alice, raise_immediately=False) + assert r.code == 200 + assert ">Confirm</button>" in r.text + r = self.client.POST('/team/edit/username', {'username': 'Team', 'confirmed': 'true'}, + auth_as=alice, raise_immediately=False) assert r.code == 302 assert r.headers[b'Location'].startswith(b'/Team/edit/username') team = team.refetch() diff --git a/www/%username/edit/username.spt b/www/%username/edit/username.spt index b94515d2b8..e979a48f92 100644 --- a/www/%username/edit/username.spt +++ b/www/%username/edit/username.spt @@ -8,6 +8,18 @@ errors = [] if request.method == 'POST': new_username = request.body['username'].strip() if new_username != participant.username: + if participant.username[0] != '~' and request.body.get('confirmed') != 'true': + msg = _( + "When you modify your username, the web address of your profile " + "changes accordingly. 
A redirect from the old URL to the new one " + "is put in place, but any user can remove it by claiming your old " + "username, so you should update all the links to your profile that " + "you've placed on other websites, unless those links contain your " + "immutable account ID ({account_id}) instead of your username.\n\n" + "Are you sure you want to change your username?" + , account_id=participant.id + ) + raise response.render('simplates/confirm.spt', state, cls='warning', msg=msg) try: participant.change_username(new_username, recorder=user) except UsernameError as r: @@ -52,13 +64,8 @@ subhead = _("Username") _("Allowed characters: latin alphanumerics, dots (.), dashes (-), and underscores (_).") }}</p> </div> - % if _username - <div class="alert alert-warning">{{ - _("Have you linked to your Liberapay profile from other websites? Be sure to update those links!") - }}</div> - % endif - <h4 class="no-margin-top">{{ _("Name (optional)") }}</h4> + <h4>{{ _("Name (optional)") }}</h4> <p>{{ _( "A name to show alongside your username on your public profile page. " "It doesn't have to be your legal name." diff --git a/www/%username/giving/pay/stripe/%payin_id.spt b/www/%username/giving/pay/stripe/%payin_id.spt index 1d68c6a195..31d5a0736b 100644 --- a/www/%username/giving/pay/stripe/%payin_id.spt +++ b/www/%username/giving/pay/stripe/%payin_id.spt @@ -245,6 +245,10 @@ title = _("Funding your donations") <p><a class="btn btn-primary btn-lg" href="{{ payer.path('giving/pay') }}?retry={{ payin.id }}">{{ _("Try again") }}</a></p> % elif status == 'pending' <div class="alert alert-info">{{ _("The payment has been initiated.") }}</div> + <p>{{ _( + "Processing this kind of payment takes {timedelta} on average.", + timedelta=constants.PAYIN_SETTLEMENT_DELAYS['stripe-sdd'] + ) }}</p> <p>{{ _( "A receipt will be available once the payment has been successfully processed. You can see all your payments and receipts in {link_start}your account's ledger{link_end}.", link_start='<a href="%s">'|safe % payer.path('ledger/'), diff --git a/www/about/contact.spt b/www/about/contact.spt index 07f448df09..3843232c47 100644 --- a/www/about/contact.spt +++ b/www/about/contact.spt @@ -4,11 +4,20 @@ title = _("Contact") % extends "templates/layouts/about.html" % block content - <h4>{{ _("To contact us privately:") }}</h4> + <h4>{{ _("To contact the Liberapay team privately:") }}</h4> <p><a class="btn btn-primary" data-email="support&#64;liberapay.com" >{{ _("Send us an email") }}</a></p> - <p>{{ _("We are currently able to answer in English and French.") }}</p> + <p>{{ _( + "If your message isn't written in English or French, then it will be translated " + "automatically by a machine. Our reply will be translated to your language " + "the same way." + ) }}</p> <p>{{ _("We currently don't have a phone number.") }}</p> + <p class="text-warning">{{ glyphicon("minus-sign") }} {{ _( + "Do not contact us if you are trying to reach one of our users. We do not " + "relay messages, and we cannot reveal the identity or contact details of " + "our users unless you have a valid court order." + ) }}</p> <br> <h4>{{ _("To report a problem or make a suggestion in English:") }}</h4> diff --git a/www/about/privacy.spt b/www/about/privacy.spt index 468b4693a8..7d094537c9 100644 --- a/www/about/privacy.spt +++ b/www/about/privacy.spt @@ -12,7 +12,14 @@ title = _("Privacy") "actually need, and we don't sell it to anyone." 
) }}</p> -<h3>{{ _("Cookies") }}</h3> +<h3>{{ _("Cookies") }} + <small title="{{ _( + 'A cookie is a piece of information sent by a website to your browser, ' + 'stored on your machine, and resent by your browser to that same ' + 'website in every subsequent request.' + ) }}" + data-toggle="tooltip" data-placement="bottom">{{ glyphicon("question-sign") }}</small> +</h3> <p>{{ _( "The liberapay.com website only sets technical cookies which are required to "
cocotb__cocotb-3179
[ { "content": "# Copyright cocotb contributors\n# Licensed under the Revised BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-3-Clause\nimport glob\nimport os\nimport shutil\nimport sys\nfrom contextlib import suppress\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nimport nox\n\n# Sessions run by default if nox is called without further arguments.\nnox.options.sessions = [\"dev_test\"]\n\ntest_deps = [\"pytest\"]\ncoverage_deps = [\"coverage\", \"pytest-cov\"]\n# gcovr 5.1 has an issue parsing some gcov files, so pin to 5.0. See\n# https://github.com/gcovr/gcovr/issues/596\n# When using gcovr 5.0, deprecated jinja2.Markup was removed in 3.1, so an\n# Exception is raised during html report generation.\n# See https://github.com/gcovr/gcovr/pull/576\n# These issues are fixed on gcovr master branch, so next release should work.\ncoverage_report_deps = [\"coverage\", \"jinja2<3.1\", \"gcovr==5.0\"]\n\ndev_deps = [\n \"black\",\n \"isort\",\n \"mypy\",\n \"pre-commit\",\n \"nox\",\n \"flake8\",\n \"clang-format\",\n]\n\n#\n# Helpers for use within this file.\n#\n\n\ndef simulator_support_matrix() -> List[Tuple[str, str, str]]:\n \"\"\"\n Get a list of supported simulator/toplevel-language/GPI-interface tuples.\n \"\"\"\n\n # Simulators with support for VHDL through VHPI, and Verilog through VPI.\n standard = [\n (sim, toplevel_lang, gpi_interface)\n for sim in (\"activehdl\", \"rivierapro\", \"xcelium\")\n for toplevel_lang in (\"verilog\", \"vhdl\")\n for gpi_interface in (\"vpi\", \"vhpi\")\n if (toplevel_lang, gpi_interface) in ((\"verilog\", \"vpi\"), (\"vhdl\", \"vhpi\"))\n ]\n\n # Special-case simulators.\n special = [\n (\"cvc\", \"verilog\", \"vpi\"),\n (\"ghdl\", \"vhdl\", \"vpi\"),\n (\"icarus\", \"verilog\", \"vpi\"),\n (\"questa\", \"verilog\", \"vpi\"),\n (\"questa\", \"vhdl\", \"fli\"),\n (\"questa\", \"vhdl\", \"vhpi\"),\n (\"verilator\", \"verilog\", \"vpi\"),\n (\"vcs\", \"verilog\", \"vpi\"),\n ]\n\n return standard + special\n\n\ndef env_vars_for_test(\n sim: Optional[str], toplevel_lang: Optional[str], gpi_interface: Optional[str]\n) -> Dict[str, str]:\n \"\"\"Prepare the environment variables controlling the test run.\"\"\"\n e = {}\n if sim is not None:\n e[\"SIM\"] = sim\n\n if os.getenv(\"TOPLEVEL_LANG\") is not None:\n e[\"HDL_TOPLEVEL_LANG\"] = os.getenv(\"TOPLEVEL_LANG\")\n\n if toplevel_lang is not None:\n e[\"TOPLEVEL_LANG\"] = toplevel_lang\n e[\"HDL_TOPLEVEL_LANG\"] = toplevel_lang\n\n assert not (toplevel_lang == \"verilog\" and gpi_interface != \"vpi\")\n if toplevel_lang == \"vhdl\" and gpi_interface is not None:\n e[\"VHDL_GPI_INTERFACE\"] = gpi_interface\n\n return e\n\n\ndef stringify_dict(d: Dict[str, str]) -> str:\n return \", \".join(f\"{k}={v}\" for k, v in d.items())\n\n\ndef configure_env_for_dev_build(session: nox.session) -> None:\n \"\"\"Set environment variables for a development build.\n\n - Enable coverage collection.\n - Build with more aggressive error checking.\n \"\"\"\n session.env[\"CFLAGS\"] = \"-Werror -Wno-deprecated-declarations -g --coverage\"\n session.env[\"COCOTB_LIBRARY_COVERAGE\"] = \"1\"\n session.env[\"CXXFLAGS\"] = \"-Werror\"\n session.env[\"LDFLAGS\"] = \"--coverage\"\n\n\n#\n# Development pipeline\n#\n# - Use nox to build an sdist; no separate build step is required.\n# - Run tests against the installed sdist.\n# - Collect coverage.\n#\n\n\n@nox.session\ndef dev_build(session: nox.Session) -> None:\n session.warn(\"No building is necessary for development 
sessions.\")\n\n\n@nox.session\ndef dev_test(session: nox.Session) -> None:\n \"\"\"Run all development tests as configured through environment variables.\"\"\"\n\n dev_test_sim(session, sim=None, toplevel_lang=None, gpi_interface=None)\n dev_test_nosim(session)\n dev_coverage_combine(session)\n\n\n@nox.session\n@nox.parametrize(\"sim,toplevel_lang,gpi_interface\", simulator_support_matrix())\ndef dev_test_sim(\n session: nox.Session,\n sim: Optional[str],\n toplevel_lang: Optional[str],\n gpi_interface: Optional[str],\n) -> None:\n \"\"\"Test a development version of cocotb against a simulator.\"\"\"\n\n configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps, *coverage_deps)\n\n # Editable installs break C/C++ coverage collection; don't use them.\n # C/C++ coverage collection requires that the object files produced by the\n # compiler are not moved around, otherwise the gcno and gcda files produced\n # at compile and runtime, respectively, are located in the wrong\n # directories. Depending on the version of the Python install machinery\n # editable builds are done in a directory in /tmp, which is removed after\n # the build completes, taking all gcno files with them, as well as the path\n # to place the gcda files.\n session.run(\"pip\", \"install\", \".\")\n\n env = env_vars_for_test(sim, toplevel_lang, gpi_interface)\n config_str = stringify_dict(env)\n\n # Remove a potentially existing coverage file from a previous run for the\n # same test configuration. Use a filename *not* starting with `.coverage.`,\n # as coverage.py assumes ownership over these files and deleted them at\n # will.\n coverage_file = Path(f\".cov.test.sim-{sim}-{toplevel_lang}-{gpi_interface}\")\n with suppress(FileNotFoundError):\n coverage_file.unlink()\n\n session.log(f\"Running 'make test' against a simulator {config_str}\")\n session.run(\"make\", \"clean\", \"test\", external=True, env=env)\n\n session.log(f\"Running simulator-specific tests against a simulator {config_str}\")\n session.run(\n \"pytest\",\n \"-v\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n \"-k\",\n \"simulator_required\",\n env=env,\n )\n Path(\".coverage\").rename(\".coverage.pytest\")\n\n session.log(f\"All tests passed with configuration {config_str}!\")\n\n # Combine coverage produced during the test runs, and place it in a file\n # with a name specific to this invocation of dev_test_sim().\n coverage_files = glob.glob(\"**/.coverage.cocotb\", recursive=True)\n if not coverage_files:\n session.error(\n \"No coverage files found. Something went wrong during the test execution.\"\n )\n coverage_files.append(\".coverage.pytest\")\n session.run(\"coverage\", \"combine\", \"--append\", *coverage_files)\n Path(\".coverage\").rename(coverage_file)\n\n session.log(f\"Stored Python coverage for this test run in {coverage_file}.\")\n\n\n@nox.session\ndef dev_test_nosim(session: nox.Session) -> None:\n \"\"\"Run the simulator-agnostic tests against a cocotb development version.\"\"\"\n\n configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps, *coverage_deps)\n session.run(\"pip\", \"install\", \"-e\", \".\")\n\n # Remove a potentially existing coverage file from a previous run for the\n # same test configuration. 
Use a filename *not* starting with `.coverage.`,\n # as coverage.py assumes ownership over these files and deleted them at\n # will.\n coverage_file = Path(\".cov.test.nosim\")\n with suppress(FileNotFoundError):\n coverage_file.unlink()\n\n # Run pytest with the default configuration in setup.cfg.\n session.log(\"Running simulator-agnostic tests with pytest\")\n session.run(\n \"pytest\",\n \"-v\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n \"-k\",\n \"not simulator_required\",\n )\n\n # Run pytest for files which can only be tested in the source tree, not in\n # the installed binary (otherwise we get an \"import file mismatch\" error\n # from pytest).\n session.log(\"Running simulator-agnostic tests in the source tree with pytest\")\n pytest_sourcetree = [\n \"cocotb/utils.py\",\n \"cocotb/binary.py\",\n \"cocotb/types/\",\n \"cocotb/_sim_versions.py\",\n ]\n session.run(\n \"pytest\",\n \"-v\",\n \"--doctest-modules\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n # Append to the .coverage file created in the previous pytest\n # invocation in this session.\n \"--cov-append\",\n \"-k\",\n \"not simulator_required\",\n *pytest_sourcetree,\n )\n\n session.log(\"All tests passed!\")\n\n # Rename the .coverage file to make it unique to the session.\n Path(\".coverage\").rename(coverage_file)\n\n session.log(f\"Stored Python coverage for this test run in {coverage_file}.\")\n\n\n@nox.session\ndef dev_coverage_combine(session: nox.Session) -> None:\n \"\"\"Combine coverage from previous dev_* runs into a .coverage file.\"\"\"\n session.run(\"pip\", \"install\", *coverage_report_deps)\n\n coverage_files = glob.glob(\"**/.cov.test.*\", recursive=True)\n session.run(\"coverage\", \"combine\", *coverage_files)\n assert Path(\".coverage\").is_file()\n\n session.log(\"Wrote combined coverage database for all tests to '.coverage'.\")\n\n session.notify(\"dev_coverage_report\")\n\n\n@nox.session\ndef dev_coverage_report(session: nox.Session) -> None:\n \"\"\"Report coverage results.\"\"\"\n session.run(\"pip\", \"install\", *coverage_report_deps)\n\n # Produce Cobertura XML coverage reports.\n session.log(\"Producing Python and C/C++ coverage in Cobertura XML format\")\n\n coverage_python_xml = Path(\".python_coverage.xml\")\n session.run(\"coverage\", \"xml\", \"-o\", str(coverage_python_xml))\n assert coverage_python_xml.is_file()\n\n coverage_cpp_xml = Path(\".cpp_coverage.xml\")\n session.run(\n \"gcovr\",\n \"--xml\",\n \"--output\",\n str(coverage_cpp_xml),\n \".\",\n )\n assert coverage_cpp_xml.is_file()\n\n session.log(\n f\"Cobertura XML files written to {str(coverage_cpp_xml)!r} (C/C++) and {str(coverage_python_xml)!r} (Python)\"\n )\n\n # Report human-readable coverage.\n session.log(\"Python coverage\")\n session.run(\"coverage\", \"report\")\n\n session.log(\"Library coverage\")\n session.run(\"gcovr\", \"--print-summary\", \"--txt\")\n\n\n#\n# Release pipeline.\n#\n# - Clean out the dist directory.\n# - Build wheels (release builds).\n# - Install cocotb from wheel.\n# - Run tests against cocotb installed from the wheel.\n#\n# The release pipeline does not collect coverage, and does not run doctests.\n#\n\n# Directory containing the distribution artifacts (sdist and bdist).\ndist_dir = \"dist\"\n\n\n@nox.session\ndef release_clean(session: nox.Session) -> None:\n \"\"\"Remove all build artifacts from the dist directory.\"\"\"\n shutil.rmtree(dist_dir, 
ignore_errors=True)\n\n\n@nox.session\ndef release_build(session: nox.Session) -> None:\n \"\"\"Build a release (sdist and bdist).\"\"\"\n session.notify(\"release_build_bdist\")\n session.notify(\"release_build_sdist\")\n\n\n@nox.session\ndef release_build_bdist(session: nox.Session) -> None:\n \"\"\"Build a binary distribution (wheels) on the current operating system.\"\"\"\n\n # Pin a version to ensure reproducible builds.\n session.run(\"pip\", \"install\", \"cibuildwheel==2.11.2\")\n\n # cibuildwheel only auto-detects the platform if it runs on a CI server.\n # Do the auto-detect manually to enable local runs.\n if sys.platform.startswith(\"linux\"):\n platform = \"linux\"\n elif sys.platform == \"darwin\":\n platform = \"macos\"\n elif sys.platform == \"win32\":\n platform = \"windows\"\n else:\n session.error(f\"Unknown platform: {sys.platform!r}\")\n\n session.log(\"Building binary distribution (wheels)\")\n session.run(\n \"cibuildwheel\",\n \"--platform\",\n platform,\n \"--output-dir\",\n dist_dir,\n )\n\n session.log(\n f\"Binary distribution in release mode for {platform!r} built into {dist_dir!r}\"\n )\n\n\n@nox.session\ndef release_build_sdist(session: nox.Session) -> None:\n \"\"\"Build the source distribution.\"\"\"\n\n session.run(\"pip\", \"install\", \"build\")\n\n session.log(\"Building source distribution (sdist)\")\n session.run(\"python\", \"-m\", \"build\", \"--sdist\", \"--outdir\", dist_dir, \".\")\n\n session.log(f\"Source distribution in release mode built into {dist_dir!r}\")\n\n\n@nox.session\ndef release_test_sdist(session: nox.Session) -> None:\n \"\"\"Build and install the sdist.\"\"\"\n\n # Find the sdist to install.\n sdists = list(Path(dist_dir).glob(\"cocotb-*.tar.gz\"))\n if len(sdists) == 0:\n session.error(\n f\"No *.tar.gz sdist file found in {dist_dir!r} \"\n f\"Run the 'release_build' session first.\"\n )\n if len(sdists) > 1:\n session.error(\n f\"More than one potential sdist found in the {dist_dir!r} \"\n f\"directory. Run the 'release_clean' session first!\"\n )\n sdist_path = sdists[0]\n assert sdist_path.is_file()\n\n session.log(\"Installing cocotb from sdist, which includes the build step\")\n session.run(\n \"pip\",\n \"install\",\n str(sdist_path),\n )\n\n session.log(\"Running cocotb-config as basic installation smoke test\")\n session.run(\"cocotb-config\", \"--version\")\n\n\ndef release_install(session: nox.Session) -> None:\n \"\"\"Helper: Install cocotb from wheels and also install test dependencies.\"\"\"\n\n # We have to disable the use of the PyPi index when installing cocotb to\n # guarantee that the wheels in dist are being used. 
But without an index\n # pip cannot find the dependencies, which need to be installed from PyPi.\n # Work around that by explicitly installing the dependencies first from\n # PyPi, and then installing cocotb itself from the local dist directory.\n\n session.log(\"Installing cocotb dependencies from PyPi\")\n session.run(\"pip\", \"install\", \"find_libpython\")\n\n session.log(f\"Installing cocotb from wheels in {dist_dir!r}\")\n session.run(\n \"pip\",\n \"install\",\n \"--force-reinstall\",\n \"--only-binary\",\n \"cocotb\",\n \"--no-index\",\n \"--no-dependencies\",\n \"--find-links\",\n dist_dir,\n \"cocotb\",\n )\n\n session.log(\"Running cocotb-config as basic installation smoke test\")\n session.run(\"cocotb-config\", \"--version\")\n\n session.log(\"Installing test dependencies\")\n session.run(\"pip\", \"install\", *test_deps)\n\n\n@nox.session\n@nox.parametrize(\"sim,toplevel_lang,gpi_interface\", simulator_support_matrix())\ndef release_test_sim(\n session: nox.Session, sim: str, toplevel_lang: str, gpi_interface: str\n) -> None:\n \"\"\"Test a release version of cocotb against a simulator.\"\"\"\n\n release_install(session)\n\n env = env_vars_for_test(sim, toplevel_lang, gpi_interface)\n config_str = stringify_dict(env)\n\n session.log(f\"Running tests against a simulator: {config_str}\")\n session.run(\"make\", \"clean\", \"test\", external=True, env=env)\n\n session.log(f\"Running simulator-specific tests against a simulator {config_str}\")\n session.run(\n \"pytest\",\n \"-v\",\n \"-k\",\n \"simulator_required\",\n )\n\n session.log(f\"All tests passed with configuration {config_str}!\")\n\n\n@nox.session\ndef release_test_nosim(session: nox.Session) -> None:\n \"\"\"Run the simulator-agnostic tests against a cocotb release.\"\"\"\n\n release_install(session)\n\n session.log(\"Running simulator-agnostic tests\")\n session.run(\n \"pytest\",\n \"-v\",\n \"-k\",\n \"not simulator_required\",\n )\n\n session.log(\"All tests passed!\")\n\n\n@nox.session\ndef docs(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to build the HTML docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\", \"./documentation/source\", str(outdir), \"--color\", \"-b\", \"html\"\n )\n index = (outdir / \"index.html\").resolve().as_uri()\n session.log(f\"Documentation is available at {index}\")\n\n\n@nox.session\ndef docs_linkcheck(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to linkcheck the docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\",\n \"./documentation/source\",\n str(outdir),\n \"--color\",\n \"-b\",\n \"linkcheck\",\n )\n\n\n@nox.session\ndef docs_spelling(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to spellcheck the docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\",\n \"./documentation/source\",\n str(outdir),\n \"--color\",\n \"-b\",\n \"spelling\",\n )\n\n\n@nox.session(reuse_venv=True)\ndef dev(session: nox.Session) -> None:\n \"\"\"Build a development environment and optionally run a command given as extra args\"\"\"\n\n 
configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps)\n session.run(\"pip\", \"install\", *dev_deps)\n session.run(\"pip\", \"install\", \"-e\", \".\")\n if session.posargs:\n session.run(*session.posargs, external=True)\n", "path": "noxfile.py" } ]
[ { "content": "# Copyright cocotb contributors\n# Licensed under the Revised BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-3-Clause\nimport glob\nimport os\nimport shutil\nimport sys\nfrom contextlib import suppress\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nimport nox\n\n# Sessions run by default if nox is called without further arguments.\nnox.options.sessions = [\"dev_test\"]\n\ntest_deps = [\"pytest\"]\ncoverage_deps = [\"coverage\", \"pytest-cov\"]\n# gcovr 5.1 has an issue parsing some gcov files, so pin to 5.0. See\n# https://github.com/gcovr/gcovr/issues/596\n# When using gcovr 5.0, deprecated jinja2.Markup was removed in 3.1, so an\n# Exception is raised during html report generation.\n# See https://github.com/gcovr/gcovr/pull/576\n# These issues are fixed on gcovr master branch, so next release should work.\ncoverage_report_deps = [\"coverage\", \"jinja2<3.1\", \"gcovr==5.0\"]\n\ndev_deps = [\n \"black\",\n \"isort\",\n \"mypy\",\n \"pre-commit\",\n \"nox\",\n \"flake8\",\n \"clang-format\",\n]\n\n#\n# Helpers for use within this file.\n#\n\n\ndef simulator_support_matrix() -> List[Tuple[str, str, str]]:\n \"\"\"\n Get a list of supported simulator/toplevel-language/GPI-interface tuples.\n \"\"\"\n\n # Simulators with support for VHDL through VHPI, and Verilog through VPI.\n standard = [\n (sim, toplevel_lang, gpi_interface)\n for sim in (\"activehdl\", \"rivierapro\", \"xcelium\")\n for toplevel_lang in (\"verilog\", \"vhdl\")\n for gpi_interface in (\"vpi\", \"vhpi\")\n if (toplevel_lang, gpi_interface) in ((\"verilog\", \"vpi\"), (\"vhdl\", \"vhpi\"))\n ]\n\n # Special-case simulators.\n special = [\n (\"cvc\", \"verilog\", \"vpi\"),\n (\"ghdl\", \"vhdl\", \"vpi\"),\n (\"icarus\", \"verilog\", \"vpi\"),\n (\"questa\", \"verilog\", \"vpi\"),\n (\"questa\", \"vhdl\", \"fli\"),\n (\"questa\", \"vhdl\", \"vhpi\"),\n (\"verilator\", \"verilog\", \"vpi\"),\n (\"vcs\", \"verilog\", \"vpi\"),\n ]\n\n return standard + special\n\n\ndef env_vars_for_test(\n sim: Optional[str], toplevel_lang: Optional[str], gpi_interface: Optional[str]\n) -> Dict[str, str]:\n \"\"\"Prepare the environment variables controlling the test run.\"\"\"\n e = {}\n if sim is not None:\n e[\"SIM\"] = sim\n\n if os.getenv(\"TOPLEVEL_LANG\") is not None:\n e[\"HDL_TOPLEVEL_LANG\"] = os.getenv(\"TOPLEVEL_LANG\")\n\n if toplevel_lang is not None:\n e[\"TOPLEVEL_LANG\"] = toplevel_lang\n e[\"HDL_TOPLEVEL_LANG\"] = toplevel_lang\n\n assert not (toplevel_lang == \"verilog\" and gpi_interface != \"vpi\")\n if toplevel_lang == \"vhdl\" and gpi_interface is not None:\n e[\"VHDL_GPI_INTERFACE\"] = gpi_interface\n\n return e\n\n\ndef stringify_dict(d: Dict[str, str]) -> str:\n return \", \".join(f\"{k}={v}\" for k, v in d.items())\n\n\ndef configure_env_for_dev_build(session: nox.session) -> None:\n \"\"\"Set environment variables for a development build.\n\n - Enable coverage collection.\n - Build with more aggressive error checking.\n \"\"\"\n session.env[\"CFLAGS\"] = \"-Werror -Wno-deprecated-declarations -g --coverage\"\n session.env[\"COCOTB_LIBRARY_COVERAGE\"] = \"1\"\n session.env[\"CXXFLAGS\"] = \"-Werror\"\n session.env[\"LDFLAGS\"] = \"--coverage\"\n\n\n#\n# Development pipeline\n#\n# - Use nox to build an sdist; no separate build step is required.\n# - Run tests against the installed sdist.\n# - Collect coverage.\n#\n\n\n@nox.session\ndef dev_build(session: nox.Session) -> None:\n session.warn(\"No building is necessary for development 
sessions.\")\n\n\n@nox.session\ndef dev_test(session: nox.Session) -> None:\n \"\"\"Run all development tests as configured through environment variables.\"\"\"\n\n dev_test_sim(session, sim=None, toplevel_lang=None, gpi_interface=None)\n dev_test_nosim(session)\n dev_coverage_combine(session)\n\n\n@nox.session\n@nox.parametrize(\"sim,toplevel_lang,gpi_interface\", simulator_support_matrix())\ndef dev_test_sim(\n session: nox.Session,\n sim: Optional[str],\n toplevel_lang: Optional[str],\n gpi_interface: Optional[str],\n) -> None:\n \"\"\"Test a development version of cocotb against a simulator.\"\"\"\n\n configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps, *coverage_deps)\n\n # Editable installs break C/C++ coverage collection; don't use them.\n # C/C++ coverage collection requires that the object files produced by the\n # compiler are not moved around, otherwise the gcno and gcda files produced\n # at compile and runtime, respectively, are located in the wrong\n # directories. Depending on the version of the Python install machinery\n # editable builds are done in a directory in /tmp, which is removed after\n # the build completes, taking all gcno files with them, as well as the path\n # to place the gcda files.\n session.run(\"pip\", \"install\", \".\")\n\n env = env_vars_for_test(sim, toplevel_lang, gpi_interface)\n config_str = stringify_dict(env)\n\n # Remove a potentially existing coverage file from a previous run for the\n # same test configuration. Use a filename *not* starting with `.coverage.`,\n # as coverage.py assumes ownership over these files and deleted them at\n # will.\n coverage_file = Path(f\".cov.test.sim-{sim}-{toplevel_lang}-{gpi_interface}\")\n with suppress(FileNotFoundError):\n coverage_file.unlink()\n\n session.log(f\"Running 'make test' against a simulator {config_str}\")\n session.run(\"make\", \"clean\", \"test\", external=True, env=env)\n\n session.log(f\"Running simulator-specific tests against a simulator {config_str}\")\n session.run(\n \"pytest\",\n \"-v\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n \"-k\",\n \"simulator_required\",\n env=env,\n )\n Path(\".coverage\").rename(\".coverage.pytest\")\n\n session.log(f\"All tests passed with configuration {config_str}!\")\n\n # Combine coverage produced during the test runs, and place it in a file\n # with a name specific to this invocation of dev_test_sim().\n coverage_files = glob.glob(\"**/.coverage.cocotb\", recursive=True)\n if not coverage_files:\n session.error(\n \"No coverage files found. Something went wrong during the test execution.\"\n )\n coverage_files.append(\".coverage.pytest\")\n session.run(\"coverage\", \"combine\", \"--append\", *coverage_files)\n Path(\".coverage\").rename(coverage_file)\n\n session.log(f\"Stored Python coverage for this test run in {coverage_file}.\")\n\n\n@nox.session\ndef dev_test_nosim(session: nox.Session) -> None:\n \"\"\"Run the simulator-agnostic tests against a cocotb development version.\"\"\"\n\n configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps, *coverage_deps)\n session.run(\"pip\", \"install\", \"-e\", \".\")\n\n # Remove a potentially existing coverage file from a previous run for the\n # same test configuration. 
Use a filename *not* starting with `.coverage.`,\n # as coverage.py assumes ownership over these files and deleted them at\n # will.\n coverage_file = Path(\".cov.test.nosim\")\n with suppress(FileNotFoundError):\n coverage_file.unlink()\n\n # Run pytest with the default configuration in setup.cfg.\n session.log(\"Running simulator-agnostic tests with pytest\")\n session.run(\n \"pytest\",\n \"-v\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n \"-k\",\n \"not simulator_required\",\n )\n\n # Run pytest for files which can only be tested in the source tree, not in\n # the installed binary (otherwise we get an \"import file mismatch\" error\n # from pytest).\n session.log(\"Running simulator-agnostic tests in the source tree with pytest\")\n pytest_sourcetree = [\n \"cocotb/utils.py\",\n \"cocotb/binary.py\",\n \"cocotb/types/\",\n \"cocotb/_sim_versions.py\",\n ]\n session.run(\n \"pytest\",\n \"-v\",\n \"--doctest-modules\",\n \"--cov=cocotb\",\n \"--cov-branch\",\n # Don't display coverage report here\n \"--cov-report=\",\n # Append to the .coverage file created in the previous pytest\n # invocation in this session.\n \"--cov-append\",\n \"-k\",\n \"not simulator_required\",\n *pytest_sourcetree,\n )\n\n session.log(\"All tests passed!\")\n\n # Rename the .coverage file to make it unique to the session.\n Path(\".coverage\").rename(coverage_file)\n\n session.log(f\"Stored Python coverage for this test run in {coverage_file}.\")\n\n\n@nox.session\ndef dev_coverage_combine(session: nox.Session) -> None:\n \"\"\"Combine coverage from previous dev_* runs into a .coverage file.\"\"\"\n session.run(\"pip\", \"install\", *coverage_report_deps)\n\n coverage_files = glob.glob(\"**/.cov.test.*\", recursive=True)\n session.run(\"coverage\", \"combine\", *coverage_files)\n assert Path(\".coverage\").is_file()\n\n session.log(\"Wrote combined coverage database for all tests to '.coverage'.\")\n\n session.notify(\"dev_coverage_report\")\n\n\n@nox.session\ndef dev_coverage_report(session: nox.Session) -> None:\n \"\"\"Report coverage results.\"\"\"\n session.run(\"pip\", \"install\", *coverage_report_deps)\n\n # Produce Cobertura XML coverage reports.\n session.log(\"Producing Python and C/C++ coverage in Cobertura XML format\")\n\n coverage_python_xml = Path(\".python_coverage.xml\")\n session.run(\"coverage\", \"xml\", \"-o\", str(coverage_python_xml))\n assert coverage_python_xml.is_file()\n\n coverage_cpp_xml = Path(\".cpp_coverage.xml\")\n session.run(\n \"gcovr\",\n \"--xml\",\n \"--output\",\n str(coverage_cpp_xml),\n \".\",\n )\n assert coverage_cpp_xml.is_file()\n\n session.log(\n f\"Cobertura XML files written to {str(coverage_cpp_xml)!r} (C/C++) and {str(coverage_python_xml)!r} (Python)\"\n )\n\n # Report human-readable coverage.\n session.log(\"Python coverage\")\n session.run(\"coverage\", \"report\")\n\n session.log(\"Library coverage\")\n session.run(\"gcovr\", \"--print-summary\", \"--txt\")\n\n\n#\n# Release pipeline.\n#\n# - Clean out the dist directory.\n# - Build wheels (release builds).\n# - Install cocotb from wheel.\n# - Run tests against cocotb installed from the wheel.\n#\n# The release pipeline does not collect coverage, and does not run doctests.\n#\n\n# Directory containing the distribution artifacts (sdist and bdist).\ndist_dir = \"dist\"\n\n\n@nox.session\ndef release_clean(session: nox.Session) -> None:\n \"\"\"Remove all build artifacts from the dist directory.\"\"\"\n shutil.rmtree(dist_dir, 
ignore_errors=True)\n\n\n@nox.session\ndef release_build(session: nox.Session) -> None:\n \"\"\"Build a release (sdist and bdist).\"\"\"\n session.notify(\"release_build_bdist\")\n session.notify(\"release_build_sdist\")\n\n\n@nox.session\ndef release_build_bdist(session: nox.Session) -> None:\n \"\"\"Build a binary distribution (wheels) on the current operating system.\"\"\"\n\n # Pin a version to ensure reproducible builds.\n session.run(\"pip\", \"install\", \"cibuildwheel==2.11.2\")\n\n # cibuildwheel only auto-detects the platform if it runs on a CI server.\n # Do the auto-detect manually to enable local runs.\n if sys.platform.startswith(\"linux\"):\n platform = \"linux\"\n elif sys.platform == \"darwin\":\n platform = \"macos\"\n elif sys.platform == \"win32\":\n platform = \"windows\"\n else:\n session.error(f\"Unknown platform: {sys.platform!r}\")\n\n session.log(\"Building binary distribution (wheels)\")\n session.run(\n \"cibuildwheel\",\n \"--platform\",\n platform,\n \"--output-dir\",\n dist_dir,\n )\n\n session.log(\n f\"Binary distribution in release mode for {platform!r} built into {dist_dir!r}\"\n )\n\n\n@nox.session\ndef release_build_sdist(session: nox.Session) -> None:\n \"\"\"Build the source distribution.\"\"\"\n\n session.run(\"pip\", \"install\", \"build\")\n\n session.log(\"Building source distribution (sdist)\")\n session.run(\"python\", \"-m\", \"build\", \"--sdist\", \"--outdir\", dist_dir, \".\")\n\n session.log(f\"Source distribution in release mode built into {dist_dir!r}\")\n\n\n@nox.session\ndef release_test_sdist(session: nox.Session) -> None:\n \"\"\"Build and install the sdist.\"\"\"\n\n # Find the sdist to install.\n sdists = list(Path(dist_dir).glob(\"cocotb-*.tar.gz\"))\n if len(sdists) == 0:\n session.error(\n f\"No *.tar.gz sdist file found in {dist_dir!r} \"\n f\"Run the 'release_build' session first.\"\n )\n if len(sdists) > 1:\n session.error(\n f\"More than one potential sdist found in the {dist_dir!r} \"\n f\"directory. Run the 'release_clean' session first!\"\n )\n sdist_path = sdists[0]\n assert sdist_path.is_file()\n\n session.log(\"Installing cocotb from sdist, which includes the build step\")\n session.run(\n \"pip\",\n \"install\",\n str(sdist_path),\n )\n\n session.log(\"Running cocotb-config as basic installation smoke test\")\n session.run(\"cocotb-config\", \"--version\")\n\n\ndef release_install(session: nox.Session) -> None:\n \"\"\"Helper: Install cocotb from wheels and also install test dependencies.\"\"\"\n\n # We have to disable the use of the PyPi index when installing cocotb to\n # guarantee that the wheels in dist are being used. 
But without an index\n # pip cannot find the dependencies, which need to be installed from PyPi.\n # Work around that by explicitly installing the dependencies first from\n # PyPi, and then installing cocotb itself from the local dist directory.\n\n session.log(\"Installing cocotb dependencies from PyPi\")\n session.run(\"pip\", \"install\", \"find_libpython\")\n\n session.log(f\"Installing cocotb from wheels in {dist_dir!r}\")\n session.run(\n \"pip\",\n \"install\",\n \"--force-reinstall\",\n \"--only-binary\",\n \"cocotb\",\n \"--no-index\",\n \"--no-dependencies\",\n \"--find-links\",\n dist_dir,\n \"cocotb\",\n )\n\n session.log(\"Running cocotb-config as basic installation smoke test\")\n session.run(\"cocotb-config\", \"--version\")\n\n session.log(\"Installing test dependencies\")\n session.run(\"pip\", \"install\", *test_deps)\n\n\n@nox.session\n@nox.parametrize(\"sim,toplevel_lang,gpi_interface\", simulator_support_matrix())\ndef release_test_sim(\n session: nox.Session, sim: str, toplevel_lang: str, gpi_interface: str\n) -> None:\n \"\"\"Test a release version of cocotb against a simulator.\"\"\"\n\n release_install(session)\n\n env = env_vars_for_test(sim, toplevel_lang, gpi_interface)\n config_str = stringify_dict(env)\n\n session.log(f\"Running tests against a simulator: {config_str}\")\n session.run(\"make\", \"clean\", \"test\", external=True, env=env)\n\n session.log(f\"Running simulator-specific tests against a simulator {config_str}\")\n session.run(\n \"pytest\",\n \"-v\",\n \"-k\",\n \"simulator_required\",\n env=env,\n )\n\n session.log(f\"All tests passed with configuration {config_str}!\")\n\n\n@nox.session\ndef release_test_nosim(session: nox.Session) -> None:\n \"\"\"Run the simulator-agnostic tests against a cocotb release.\"\"\"\n\n release_install(session)\n\n session.log(\"Running simulator-agnostic tests\")\n session.run(\n \"pytest\",\n \"-v\",\n \"-k\",\n \"not simulator_required\",\n )\n\n session.log(\"All tests passed!\")\n\n\n@nox.session\ndef docs(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to build the HTML docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\", \"./documentation/source\", str(outdir), \"--color\", \"-b\", \"html\"\n )\n index = (outdir / \"index.html\").resolve().as_uri()\n session.log(f\"Documentation is available at {index}\")\n\n\n@nox.session\ndef docs_linkcheck(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to linkcheck the docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\",\n \"./documentation/source\",\n str(outdir),\n \"--color\",\n \"-b\",\n \"linkcheck\",\n )\n\n\n@nox.session\ndef docs_spelling(session: nox.Session) -> None:\n \"\"\"invoke sphinx-build to spellcheck the docs\"\"\"\n session.run(\"pip\", \"install\", \"-r\", \"documentation/requirements.txt\")\n session.run(\"pip\", \"install\", \"-e\", \".\")\n outdir = session.cache_dir / \"docs_out\"\n session.run(\n \"sphinx-build\",\n \"./documentation/source\",\n str(outdir),\n \"--color\",\n \"-b\",\n \"spelling\",\n )\n\n\n@nox.session(reuse_venv=True)\ndef dev(session: nox.Session) -> None:\n \"\"\"Build a development environment and optionally run a command given as extra args\"\"\"\n\n 
configure_env_for_dev_build(session)\n\n session.run(\"pip\", \"install\", *test_deps)\n session.run(\"pip\", \"install\", *dev_deps)\n session.run(\"pip\", \"install\", \"-e\", \".\")\n if session.posargs:\n session.run(*session.posargs, external=True)\n", "path": "noxfile.py" } ]
diff --git a/noxfile.py b/noxfile.py index 022efe917a..5bf6a8ff17 100644 --- a/noxfile.py +++ b/noxfile.py @@ -465,6 +465,7 @@ def release_test_sim( "-v", "-k", "simulator_required", + env=env, ) session.log(f"All tests passed with configuration {config_str}!")
wright-group__WrightTools-522
[ { "content": "\"\"\"WrightTools init.\"\"\"\n# flake8: noqa\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport sys as _sys\n\nfrom .__version__ import *\nfrom . import artists\nfrom . import collection\nfrom . import data\nfrom . import diagrams\nfrom . import fit\nfrom . import kit\nfrom . import units\nfrom . import exceptions\n\nfrom ._open import *\nfrom .collection._collection import *\nfrom .data._data import *\n\n\n# --- rcparams ------------------------------------------------------------------------------------\n\n\nif int(_sys.version.split('.')[0]) > 2:\n artists.apply_rcparams('fast')\n", "path": "WrightTools/__init__.py" } ]
[ { "content": "\"\"\"WrightTools init.\"\"\"\n# flake8: noqa\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport sys as _sys\n\nfrom .__version__ import *\nfrom . import artists\nfrom . import collection\nfrom . import data\nfrom . import diagrams\nfrom . import kit\nfrom . import units\nfrom . import exceptions\n\nfrom ._open import *\nfrom .collection._collection import *\nfrom .data._data import *\n\n\n# --- rcparams ------------------------------------------------------------------------------------\n\n\nif int(_sys.version.split('.')[0]) > 2:\n artists.apply_rcparams('fast')\n", "path": "WrightTools/__init__.py" } ]
diff --git a/WrightTools/__init__.py b/WrightTools/__init__.py index 68566ae89..f1d8121d0 100644 --- a/WrightTools/__init__.py +++ b/WrightTools/__init__.py @@ -12,7 +12,6 @@ from . import collection from . import data from . import diagrams -from . import fit from . import kit from . import units from . import exceptions
python-poetry__poetry-979
[ { "content": "import os\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --no-root : Do not install the root package (your project). }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation import Installer\n from poetry.masonry.builders import SdistBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n from poetry.utils._compat import decode\n from poetry.utils.env import NullEnv\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if not self.option(\"no-root\"):\n return 0\n\n try:\n builder = SdistBuilder(self.poetry, NullEnv(), NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <info>{}</info> (<comment>{}</comment>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n setup = self.poetry.file.parent / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self.line(\"<warning>A setup.py file already exists. Using it.</warning>\")\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n self.env.run(\"pip\", \"install\", \"-e\", str(setup.parent), \"--no-deps\")\n finally:\n if not has_setup:\n os.remove(str(setup))\n", "path": "poetry/console/commands/install.py" } ]
[ { "content": "import os\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --no-root : Do not install the root package (your project). }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation import Installer\n from poetry.masonry.builders import SdistBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n from poetry.utils._compat import decode\n from poetry.utils.env import NullEnv\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if self.option(\"no-root\"):\n return 0\n\n try:\n builder = SdistBuilder(self.poetry, NullEnv(), NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <info>{}</info> (<comment>{}</comment>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n setup = self.poetry.file.parent / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self.line(\"<warning>A setup.py file already exists. Using it.</warning>\")\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n self.env.run(\"pip\", \"install\", \"-e\", str(setup.parent), \"--no-deps\")\n finally:\n if not has_setup:\n os.remove(str(setup))\n", "path": "poetry/console/commands/install.py" } ]
diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py index e5c41e0708b..d65755e2108 100644 --- a/poetry/console/commands/install.py +++ b/poetry/console/commands/install.py @@ -56,7 +56,7 @@ def handle(self): if return_code != 0: return return_code - if not self.option("no-root"): + if self.option("no-root"): return 0 try:
mit-ll-responsible-ai__hydra-zen-97
[ { "content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"ryan.soklaski@ll.mit.edu\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"ryan.soklaski@ll.mit.edu\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n package_data={\"hydra_zen\": [\"py.typed\"]}\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index f4020d2c9..bc8c70b85 100644 --- a/setup.py +++ b/setup.py @@ -68,4 +68,5 @@ python_requires=">=3.6", packages=find_packages(where="src", exclude=["tests", "tests.*"]), package_dir={"": "src"}, + package_data={"hydra_zen": ["py.typed"]} ) diff --git a/src/hydra_zen/py.typed b/src/hydra_zen/py.typed new file mode 100644 index 000000000..e69de29bb
nipy__nipype-3385
[ { "content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nThe maths module provides higher-level interfaces to some of the operations\nthat can be performed with the fslmaths command-line program.\n\"\"\"\nimport os\nimport numpy as np\n\nfrom ..base import TraitedSpec, File, traits, InputMultiPath, isdefined\nfrom .base import FSLCommand, FSLCommandInputSpec\n\n\nclass MathsInput(FSLCommandInputSpec):\n\n in_file = File(\n position=2, argstr=\"%s\", exists=True, mandatory=True, desc=\"image to operate on\"\n )\n out_file = File(\n genfile=True, position=-2, argstr=\"%s\", desc=\"image to write\", hash_files=False\n )\n _dtypes = [\"float\", \"char\", \"int\", \"short\", \"double\", \"input\"]\n internal_datatype = traits.Enum(\n *_dtypes,\n position=1,\n argstr=\"-dt %s\",\n desc=(\"datatype to use for calculations \" \"(default is float)\")\n )\n output_datatype = traits.Enum(\n *_dtypes,\n position=-1,\n argstr=\"-odt %s\",\n desc=(\"datatype to use for output (default \" \"uses input type)\")\n )\n\n nan2zeros = traits.Bool(\n position=3, argstr=\"-nan\", desc=\"change NaNs to zeros before doing anything\"\n )\n\n\nclass MathsOutput(TraitedSpec):\n\n out_file = File(exists=True, desc=\"image written after calculations\")\n\n\nclass MathsCommand(FSLCommand):\n\n _cmd = \"fslmaths\"\n input_spec = MathsInput\n output_spec = MathsOutput\n _suffix = \"_maths\"\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(self.inputs.out_file):\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=self._suffix\n )\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n\n\nclass ChangeDataTypeInput(MathsInput):\n\n _dtypes = [\"float\", \"char\", \"int\", \"short\", \"double\", \"input\"]\n output_datatype = traits.Enum(\n *_dtypes, position=-1, argstr=\"-odt %s\", mandatory=True, desc=\"output data type\"\n )\n\n\nclass ChangeDataType(MathsCommand):\n \"\"\"Use fslmaths to change the datatype of an image.\"\"\"\n\n input_spec = ChangeDataTypeInput\n _suffix = \"_chdt\"\n\n\nclass ThresholdInputSpec(MathsInput):\n\n thresh = traits.Float(\n mandatory=True, position=4, argstr=\"%s\", desc=\"threshold value\"\n )\n direction = traits.Enum(\n \"below\",\n \"above\",\n usedefault=True,\n desc=\"zero-out either below or above thresh value\",\n )\n use_robust_range = traits.Bool(\n desc=\"interpret thresh as percentage (0-100) of robust range\"\n )\n use_nonzero_voxels = traits.Bool(\n desc=\"use nonzero voxels to calculate robust range\",\n requires=[\"use_robust_range\"],\n )\n\n\nclass Threshold(MathsCommand):\n \"\"\"Use fslmaths to apply a threshold to an image in a variety of ways.\"\"\"\n\n input_spec = ThresholdInputSpec\n _suffix = \"_thresh\"\n\n def _format_arg(self, name, spec, value):\n if name == \"thresh\":\n arg = \"-\"\n _si = self.inputs\n if self.inputs.direction == \"above\":\n arg += \"u\"\n arg += \"thr\"\n if isdefined(_si.use_robust_range) and _si.use_robust_range:\n if isdefined(_si.use_nonzero_voxels) and _si.use_nonzero_voxels:\n arg += \"P\"\n else:\n arg += \"p\"\n arg += \" %.10f\" % value\n return arg\n return super(Threshold, self)._format_arg(name, spec, value)\n\n\nclass StdImageInput(MathsInput):\n\n dimension = 
traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sstd\",\n position=4,\n desc=\"dimension to standard deviate across\",\n )\n\n\nclass StdImage(MathsCommand):\n \"\"\"Use fslmaths to generate a standard deviation in an image across a given\n dimension.\n \"\"\"\n\n input_spec = StdImageInput\n _suffix = \"_std\"\n\n\nclass MeanImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smean\",\n position=4,\n desc=\"dimension to mean across\",\n )\n\n\nclass MeanImage(MathsCommand):\n \"\"\"Use fslmaths to generate a mean image across a given dimension.\"\"\"\n\n input_spec = MeanImageInput\n _suffix = \"_mean\"\n\n\nclass MaxImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smax\",\n position=4,\n desc=\"dimension to max across\",\n )\n\n\nclass MaxImage(MathsCommand):\n \"\"\"Use fslmaths to generate a max image across a given dimension.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl.maths import MaxImage\n >>> maxer = MaxImage()\n >>> maxer.inputs.in_file = \"functional.nii\" # doctest: +SKIP\n >>> maxer.dimension = \"T\"\n >>> maxer.cmdline # doctest: +SKIP\n 'fslmaths functional.nii -Tmax functional_max.nii'\n\n \"\"\"\n\n input_spec = MaxImageInput\n _suffix = \"_max\"\n\n\nclass PercentileImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sperc\",\n position=4,\n desc=\"dimension to percentile across\",\n )\n perc = traits.Range(\n low=0,\n high=100,\n argstr=\"%f\",\n position=5,\n desc=(\"nth percentile (0-100) of FULL RANGE \" \"across dimension\"),\n )\n\n\nclass PercentileImage(MathsCommand):\n \"\"\"Use fslmaths to generate a percentile image across a given dimension.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl.maths import MaxImage\n >>> percer = PercentileImage()\n >>> percer.inputs.in_file = \"functional.nii\" # doctest: +SKIP\n >>> percer.dimension = \"T\"\n >>> percer.perc = 90\n >>> percer.cmdline # doctest: +SKIP\n 'fslmaths functional.nii -Tperc 90 functional_perc.nii'\n\n \"\"\"\n\n input_spec = PercentileImageInput\n _suffix = \"_perc\"\n\n\nclass MaxnImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smaxn\",\n position=4,\n desc=\"dimension to index max across\",\n )\n\n\nclass MaxnImage(MathsCommand):\n \"\"\"Use fslmaths to generate an image of index of max across\n a given dimension.\n\n \"\"\"\n\n input_spec = MaxnImageInput\n _suffix = \"_maxn\"\n\n\nclass MinImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smin\",\n position=4,\n desc=\"dimension to min across\",\n )\n\n\nclass MinImage(MathsCommand):\n \"\"\"Use fslmaths to generate a minimum image across a given dimension.\"\"\"\n\n input_spec = MinImageInput\n _suffix = \"_min\"\n\n\nclass MedianImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smedian\",\n position=4,\n desc=\"dimension to median across\",\n )\n\n\nclass MedianImage(MathsCommand):\n \"\"\"Use fslmaths to generate a median image across a given dimension.\"\"\"\n\n input_spec = MedianImageInput\n _suffix = \"_median\"\n\n\nclass AR1ImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sar1\",\n position=4,\n 
desc=(\"dimension to find AR(1) coefficient\" \"across\"),\n )\n\n\nclass AR1Image(MathsCommand):\n \"\"\"Use fslmaths to generate an AR1 coefficient image across a\n given dimension. (Should use -odt float and probably demean first)\n\n \"\"\"\n\n input_spec = AR1ImageInput\n _suffix = \"_ar1\"\n\n\nclass IsotropicSmoothInput(MathsInput):\n\n fwhm = traits.Float(\n mandatory=True,\n xor=[\"sigma\"],\n position=4,\n argstr=\"-s %.5f\",\n desc=\"fwhm of smoothing kernel [mm]\",\n )\n sigma = traits.Float(\n mandatory=True,\n xor=[\"fwhm\"],\n position=4,\n argstr=\"-s %.5f\",\n desc=\"sigma of smoothing kernel [mm]\",\n )\n\n\nclass IsotropicSmooth(MathsCommand):\n \"\"\"Use fslmaths to spatially smooth an image with a gaussian kernel.\"\"\"\n\n input_spec = IsotropicSmoothInput\n _suffix = \"_smooth\"\n\n def _format_arg(self, name, spec, value):\n if name == \"fwhm\":\n sigma = float(value) / np.sqrt(8 * np.log(2))\n return spec.argstr % sigma\n return super(IsotropicSmooth, self)._format_arg(name, spec, value)\n\n\nclass ApplyMaskInput(MathsInput):\n\n mask_file = File(\n exists=True,\n mandatory=True,\n argstr=\"-mas %s\",\n position=4,\n desc=\"binary image defining mask space\",\n )\n\n\nclass ApplyMask(MathsCommand):\n \"\"\"Use fslmaths to apply a binary mask to another image.\"\"\"\n\n input_spec = ApplyMaskInput\n _suffix = \"_masked\"\n\n\nclass KernelInput(MathsInput):\n\n kernel_shape = traits.Enum(\n \"3D\",\n \"2D\",\n \"box\",\n \"boxv\",\n \"gauss\",\n \"sphere\",\n \"file\",\n argstr=\"-kernel %s\",\n position=4,\n desc=\"kernel shape to use\",\n )\n kernel_size = traits.Float(\n argstr=\"%.4f\",\n position=5,\n xor=[\"kernel_file\"],\n desc=(\n \"kernel size - voxels for box/boxv, mm \" \"for sphere, mm sigma for gauss\"\n ),\n )\n kernel_file = File(\n exists=True,\n argstr=\"%s\",\n position=5,\n xor=[\"kernel_size\"],\n desc=\"use external file for kernel\",\n )\n\n\nclass DilateInput(KernelInput):\n\n operation = traits.Enum(\n \"mean\",\n \"modal\",\n \"max\",\n argstr=\"-dil%s\",\n position=6,\n mandatory=True,\n desc=\"filtering operation to perfoem in dilation\",\n )\n\n\nclass DilateImage(MathsCommand):\n \"\"\"Use fslmaths to perform a spatial dilation of an image.\"\"\"\n\n input_spec = DilateInput\n _suffix = \"_dil\"\n\n def _format_arg(self, name, spec, value):\n if name == \"operation\":\n return spec.argstr % dict(mean=\"M\", modal=\"D\", max=\"F\")[value]\n return super(DilateImage, self)._format_arg(name, spec, value)\n\n\nclass ErodeInput(KernelInput):\n\n minimum_filter = traits.Bool(\n argstr=\"%s\",\n position=6,\n usedefault=True,\n default_value=False,\n desc=(\"if true, minimum filter rather than \" \"erosion by zeroing-out\"),\n )\n\n\nclass ErodeImage(MathsCommand):\n \"\"\"Use fslmaths to perform a spatial erosion of an image.\"\"\"\n\n input_spec = ErodeInput\n _suffix = \"_ero\"\n\n def _format_arg(self, name, spec, value):\n if name == \"minimum_filter\":\n if value:\n return \"-eroF\"\n return \"-ero\"\n return super(ErodeImage, self)._format_arg(name, spec, value)\n\n\nclass SpatialFilterInput(KernelInput):\n\n operation = traits.Enum(\n \"mean\",\n \"median\",\n \"meanu\",\n argstr=\"-f%s\",\n position=6,\n mandatory=True,\n desc=\"operation to filter with\",\n )\n\n\nclass SpatialFilter(MathsCommand):\n \"\"\"Use fslmaths to spatially filter an image.\"\"\"\n\n input_spec = SpatialFilterInput\n _suffix = \"_filt\"\n\n\nclass UnaryMathsInput(MathsInput):\n\n operation = traits.Enum(\n \"exp\",\n \"log\",\n \"sin\",\n \"cos\",\n \"tan\",\n 
\"asin\",\n \"acos\",\n \"atan\",\n \"sqr\",\n \"sqrt\",\n \"recip\",\n \"abs\",\n \"bin\",\n \"binv\",\n \"fillh\",\n \"fillh26\",\n \"index\",\n \"edge\",\n \"nan\",\n \"nanm\",\n \"rand\",\n \"randn\",\n \"range\",\n argstr=\"-%s\",\n position=4,\n mandatory=True,\n desc=\"operation to perform\",\n )\n\n\nclass UnaryMaths(MathsCommand):\n \"\"\"Use fslmaths to perorm a variety of mathematical operations on an image.\"\"\"\n\n input_spec = UnaryMathsInput\n\n def _list_outputs(self):\n self._suffix = \"_\" + self.inputs.operation\n return super(UnaryMaths, self)._list_outputs()\n\n\nclass BinaryMathsInput(MathsInput):\n\n operation = traits.Enum(\n \"add\",\n \"sub\",\n \"mul\",\n \"div\",\n \"rem\",\n \"max\",\n \"min\",\n mandatory=True,\n argstr=\"-%s\",\n position=4,\n desc=\"operation to perform\",\n )\n operand_file = File(\n exists=True,\n argstr=\"%s\",\n mandatory=True,\n position=5,\n xor=[\"operand_value\"],\n desc=\"second image to perform operation with\",\n )\n operand_value = traits.Float(\n argstr=\"%.8f\",\n mandatory=True,\n position=5,\n xor=[\"operand_file\"],\n desc=\"value to perform operation with\",\n )\n\n\nclass BinaryMaths(MathsCommand):\n \"\"\"Use fslmaths to perform mathematical operations using a second image or\n a numeric value.\n\n \"\"\"\n\n input_spec = BinaryMathsInput\n\n\nclass MultiImageMathsInput(MathsInput):\n\n op_string = traits.String(\n position=4,\n argstr=\"%s\",\n mandatory=True,\n desc=(\"python formatted string of operations \" \"to perform\"),\n )\n operand_files = InputMultiPath(\n File(exists=True),\n mandatory=True,\n desc=(\"list of file names to plug into op \" \"string\"),\n )\n\n\nclass MultiImageMaths(MathsCommand):\n \"\"\"Use fslmaths to perform a sequence of mathematical operations.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl import MultiImageMaths\n >>> maths = MultiImageMaths()\n >>> maths.inputs.in_file = \"functional.nii\"\n >>> maths.inputs.op_string = \"-add %s -mul -1 -div %s\"\n >>> maths.inputs.operand_files = [\"functional2.nii\", \"functional3.nii\"]\n >>> maths.inputs.out_file = \"functional4.nii\"\n >>> maths.cmdline\n 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii'\n\n \"\"\"\n\n input_spec = MultiImageMathsInput\n\n def _format_arg(self, name, spec, value):\n if name == \"op_string\":\n return value % tuple(self.inputs.operand_files)\n return super(MultiImageMaths, self)._format_arg(name, spec, value)\n\n\nclass TemporalFilterInput(MathsInput):\n\n lowpass_sigma = traits.Float(\n -1,\n argstr=\"%.6f\",\n position=5,\n usedefault=True,\n desc=\"lowpass filter sigma (in volumes)\",\n )\n highpass_sigma = traits.Float(\n -1,\n argstr=\"-bptf %.6f\",\n position=4,\n usedefault=True,\n desc=\"highpass filter sigma (in volumes)\",\n )\n\n\nclass TemporalFilter(MathsCommand):\n \"\"\"Use fslmaths to apply a low, high, or bandpass temporal filter to a\n timeseries.\n\n \"\"\"\n\n input_spec = TemporalFilterInput\n _suffix = \"_filt\"\n", "path": "nipype/interfaces/fsl/maths.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nThe maths module provides higher-level interfaces to some of the operations\nthat can be performed with the fslmaths command-line program.\n\"\"\"\nimport os\nimport numpy as np\n\nfrom ..base import TraitedSpec, File, traits, InputMultiPath, isdefined\nfrom .base import FSLCommand, FSLCommandInputSpec\n\n\nclass MathsInput(FSLCommandInputSpec):\n\n in_file = File(\n position=2, argstr=\"%s\", exists=True, mandatory=True, desc=\"image to operate on\"\n )\n out_file = File(\n genfile=True, position=-2, argstr=\"%s\", desc=\"image to write\", hash_files=False\n )\n _dtypes = [\"float\", \"char\", \"int\", \"short\", \"double\", \"input\"]\n internal_datatype = traits.Enum(\n *_dtypes,\n position=1,\n argstr=\"-dt %s\",\n desc=(\"datatype to use for calculations \" \"(default is float)\")\n )\n output_datatype = traits.Enum(\n *_dtypes,\n position=-1,\n argstr=\"-odt %s\",\n desc=(\"datatype to use for output (default \" \"uses input type)\")\n )\n\n nan2zeros = traits.Bool(\n position=3, argstr=\"-nan\", desc=\"change NaNs to zeros before doing anything\"\n )\n\n\nclass MathsOutput(TraitedSpec):\n\n out_file = File(desc=\"image written after calculations\")\n\n\nclass MathsCommand(FSLCommand):\n\n _cmd = \"fslmaths\"\n input_spec = MathsInput\n output_spec = MathsOutput\n _suffix = \"_maths\"\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(self.inputs.out_file):\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=self._suffix\n )\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n\n\nclass ChangeDataTypeInput(MathsInput):\n\n _dtypes = [\"float\", \"char\", \"int\", \"short\", \"double\", \"input\"]\n output_datatype = traits.Enum(\n *_dtypes, position=-1, argstr=\"-odt %s\", mandatory=True, desc=\"output data type\"\n )\n\n\nclass ChangeDataType(MathsCommand):\n \"\"\"Use fslmaths to change the datatype of an image.\"\"\"\n\n input_spec = ChangeDataTypeInput\n _suffix = \"_chdt\"\n\n\nclass ThresholdInputSpec(MathsInput):\n\n thresh = traits.Float(\n mandatory=True, position=4, argstr=\"%s\", desc=\"threshold value\"\n )\n direction = traits.Enum(\n \"below\",\n \"above\",\n usedefault=True,\n desc=\"zero-out either below or above thresh value\",\n )\n use_robust_range = traits.Bool(\n desc=\"interpret thresh as percentage (0-100) of robust range\"\n )\n use_nonzero_voxels = traits.Bool(\n desc=\"use nonzero voxels to calculate robust range\",\n requires=[\"use_robust_range\"],\n )\n\n\nclass Threshold(MathsCommand):\n \"\"\"Use fslmaths to apply a threshold to an image in a variety of ways.\"\"\"\n\n input_spec = ThresholdInputSpec\n _suffix = \"_thresh\"\n\n def _format_arg(self, name, spec, value):\n if name == \"thresh\":\n arg = \"-\"\n _si = self.inputs\n if self.inputs.direction == \"above\":\n arg += \"u\"\n arg += \"thr\"\n if isdefined(_si.use_robust_range) and _si.use_robust_range:\n if isdefined(_si.use_nonzero_voxels) and _si.use_nonzero_voxels:\n arg += \"P\"\n else:\n arg += \"p\"\n arg += \" %.10f\" % value\n return arg\n return super(Threshold, self)._format_arg(name, spec, value)\n\n\nclass StdImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n 
\"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sstd\",\n position=4,\n desc=\"dimension to standard deviate across\",\n )\n\n\nclass StdImage(MathsCommand):\n \"\"\"Use fslmaths to generate a standard deviation in an image across a given\n dimension.\n \"\"\"\n\n input_spec = StdImageInput\n _suffix = \"_std\"\n\n\nclass MeanImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smean\",\n position=4,\n desc=\"dimension to mean across\",\n )\n\n\nclass MeanImage(MathsCommand):\n \"\"\"Use fslmaths to generate a mean image across a given dimension.\"\"\"\n\n input_spec = MeanImageInput\n _suffix = \"_mean\"\n\n\nclass MaxImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smax\",\n position=4,\n desc=\"dimension to max across\",\n )\n\n\nclass MaxImage(MathsCommand):\n \"\"\"Use fslmaths to generate a max image across a given dimension.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl.maths import MaxImage\n >>> maxer = MaxImage()\n >>> maxer.inputs.in_file = \"functional.nii\" # doctest: +SKIP\n >>> maxer.dimension = \"T\"\n >>> maxer.cmdline # doctest: +SKIP\n 'fslmaths functional.nii -Tmax functional_max.nii'\n\n \"\"\"\n\n input_spec = MaxImageInput\n _suffix = \"_max\"\n\n\nclass PercentileImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sperc\",\n position=4,\n desc=\"dimension to percentile across\",\n )\n perc = traits.Range(\n low=0,\n high=100,\n argstr=\"%f\",\n position=5,\n desc=(\"nth percentile (0-100) of FULL RANGE \" \"across dimension\"),\n )\n\n\nclass PercentileImage(MathsCommand):\n \"\"\"Use fslmaths to generate a percentile image across a given dimension.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl.maths import MaxImage\n >>> percer = PercentileImage()\n >>> percer.inputs.in_file = \"functional.nii\" # doctest: +SKIP\n >>> percer.dimension = \"T\"\n >>> percer.perc = 90\n >>> percer.cmdline # doctest: +SKIP\n 'fslmaths functional.nii -Tperc 90 functional_perc.nii'\n\n \"\"\"\n\n input_spec = PercentileImageInput\n _suffix = \"_perc\"\n\n\nclass MaxnImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smaxn\",\n position=4,\n desc=\"dimension to index max across\",\n )\n\n\nclass MaxnImage(MathsCommand):\n \"\"\"Use fslmaths to generate an image of index of max across\n a given dimension.\n\n \"\"\"\n\n input_spec = MaxnImageInput\n _suffix = \"_maxn\"\n\n\nclass MinImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smin\",\n position=4,\n desc=\"dimension to min across\",\n )\n\n\nclass MinImage(MathsCommand):\n \"\"\"Use fslmaths to generate a minimum image across a given dimension.\"\"\"\n\n input_spec = MinImageInput\n _suffix = \"_min\"\n\n\nclass MedianImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%smedian\",\n position=4,\n desc=\"dimension to median across\",\n )\n\n\nclass MedianImage(MathsCommand):\n \"\"\"Use fslmaths to generate a median image across a given dimension.\"\"\"\n\n input_spec = MedianImageInput\n _suffix = \"_median\"\n\n\nclass AR1ImageInput(MathsInput):\n\n dimension = traits.Enum(\n \"T\",\n \"X\",\n \"Y\",\n \"Z\",\n usedefault=True,\n argstr=\"-%sar1\",\n position=4,\n desc=(\"dimension to find 
AR(1) coefficient\" \"across\"),\n )\n\n\nclass AR1Image(MathsCommand):\n \"\"\"Use fslmaths to generate an AR1 coefficient image across a\n given dimension. (Should use -odt float and probably demean first)\n\n \"\"\"\n\n input_spec = AR1ImageInput\n _suffix = \"_ar1\"\n\n\nclass IsotropicSmoothInput(MathsInput):\n\n fwhm = traits.Float(\n mandatory=True,\n xor=[\"sigma\"],\n position=4,\n argstr=\"-s %.5f\",\n desc=\"fwhm of smoothing kernel [mm]\",\n )\n sigma = traits.Float(\n mandatory=True,\n xor=[\"fwhm\"],\n position=4,\n argstr=\"-s %.5f\",\n desc=\"sigma of smoothing kernel [mm]\",\n )\n\n\nclass IsotropicSmooth(MathsCommand):\n \"\"\"Use fslmaths to spatially smooth an image with a gaussian kernel.\"\"\"\n\n input_spec = IsotropicSmoothInput\n _suffix = \"_smooth\"\n\n def _format_arg(self, name, spec, value):\n if name == \"fwhm\":\n sigma = float(value) / np.sqrt(8 * np.log(2))\n return spec.argstr % sigma\n return super(IsotropicSmooth, self)._format_arg(name, spec, value)\n\n\nclass ApplyMaskInput(MathsInput):\n\n mask_file = File(\n exists=True,\n mandatory=True,\n argstr=\"-mas %s\",\n position=4,\n desc=\"binary image defining mask space\",\n )\n\n\nclass ApplyMask(MathsCommand):\n \"\"\"Use fslmaths to apply a binary mask to another image.\"\"\"\n\n input_spec = ApplyMaskInput\n _suffix = \"_masked\"\n\n\nclass KernelInput(MathsInput):\n\n kernel_shape = traits.Enum(\n \"3D\",\n \"2D\",\n \"box\",\n \"boxv\",\n \"gauss\",\n \"sphere\",\n \"file\",\n argstr=\"-kernel %s\",\n position=4,\n desc=\"kernel shape to use\",\n )\n kernel_size = traits.Float(\n argstr=\"%.4f\",\n position=5,\n xor=[\"kernel_file\"],\n desc=(\n \"kernel size - voxels for box/boxv, mm \" \"for sphere, mm sigma for gauss\"\n ),\n )\n kernel_file = File(\n exists=True,\n argstr=\"%s\",\n position=5,\n xor=[\"kernel_size\"],\n desc=\"use external file for kernel\",\n )\n\n\nclass DilateInput(KernelInput):\n\n operation = traits.Enum(\n \"mean\",\n \"modal\",\n \"max\",\n argstr=\"-dil%s\",\n position=6,\n mandatory=True,\n desc=\"filtering operation to perfoem in dilation\",\n )\n\n\nclass DilateImage(MathsCommand):\n \"\"\"Use fslmaths to perform a spatial dilation of an image.\"\"\"\n\n input_spec = DilateInput\n _suffix = \"_dil\"\n\n def _format_arg(self, name, spec, value):\n if name == \"operation\":\n return spec.argstr % dict(mean=\"M\", modal=\"D\", max=\"F\")[value]\n return super(DilateImage, self)._format_arg(name, spec, value)\n\n\nclass ErodeInput(KernelInput):\n\n minimum_filter = traits.Bool(\n argstr=\"%s\",\n position=6,\n usedefault=True,\n default_value=False,\n desc=(\"if true, minimum filter rather than \" \"erosion by zeroing-out\"),\n )\n\n\nclass ErodeImage(MathsCommand):\n \"\"\"Use fslmaths to perform a spatial erosion of an image.\"\"\"\n\n input_spec = ErodeInput\n _suffix = \"_ero\"\n\n def _format_arg(self, name, spec, value):\n if name == \"minimum_filter\":\n if value:\n return \"-eroF\"\n return \"-ero\"\n return super(ErodeImage, self)._format_arg(name, spec, value)\n\n\nclass SpatialFilterInput(KernelInput):\n\n operation = traits.Enum(\n \"mean\",\n \"median\",\n \"meanu\",\n argstr=\"-f%s\",\n position=6,\n mandatory=True,\n desc=\"operation to filter with\",\n )\n\n\nclass SpatialFilter(MathsCommand):\n \"\"\"Use fslmaths to spatially filter an image.\"\"\"\n\n input_spec = SpatialFilterInput\n _suffix = \"_filt\"\n\n\nclass UnaryMathsInput(MathsInput):\n\n operation = traits.Enum(\n \"exp\",\n \"log\",\n \"sin\",\n \"cos\",\n \"tan\",\n \"asin\",\n \"acos\",\n 
\"atan\",\n \"sqr\",\n \"sqrt\",\n \"recip\",\n \"abs\",\n \"bin\",\n \"binv\",\n \"fillh\",\n \"fillh26\",\n \"index\",\n \"edge\",\n \"nan\",\n \"nanm\",\n \"rand\",\n \"randn\",\n \"range\",\n argstr=\"-%s\",\n position=4,\n mandatory=True,\n desc=\"operation to perform\",\n )\n\n\nclass UnaryMaths(MathsCommand):\n \"\"\"Use fslmaths to perorm a variety of mathematical operations on an image.\"\"\"\n\n input_spec = UnaryMathsInput\n\n def _list_outputs(self):\n self._suffix = \"_\" + self.inputs.operation\n return super(UnaryMaths, self)._list_outputs()\n\n\nclass BinaryMathsInput(MathsInput):\n\n operation = traits.Enum(\n \"add\",\n \"sub\",\n \"mul\",\n \"div\",\n \"rem\",\n \"max\",\n \"min\",\n mandatory=True,\n argstr=\"-%s\",\n position=4,\n desc=\"operation to perform\",\n )\n operand_file = File(\n exists=True,\n argstr=\"%s\",\n mandatory=True,\n position=5,\n xor=[\"operand_value\"],\n desc=\"second image to perform operation with\",\n )\n operand_value = traits.Float(\n argstr=\"%.8f\",\n mandatory=True,\n position=5,\n xor=[\"operand_file\"],\n desc=\"value to perform operation with\",\n )\n\n\nclass BinaryMaths(MathsCommand):\n \"\"\"Use fslmaths to perform mathematical operations using a second image or\n a numeric value.\n\n \"\"\"\n\n input_spec = BinaryMathsInput\n\n\nclass MultiImageMathsInput(MathsInput):\n\n op_string = traits.String(\n position=4,\n argstr=\"%s\",\n mandatory=True,\n desc=(\"python formatted string of operations \" \"to perform\"),\n )\n operand_files = InputMultiPath(\n File(exists=True),\n mandatory=True,\n desc=(\"list of file names to plug into op \" \"string\"),\n )\n\n\nclass MultiImageMaths(MathsCommand):\n \"\"\"Use fslmaths to perform a sequence of mathematical operations.\n\n Examples\n --------\n >>> from nipype.interfaces.fsl import MultiImageMaths\n >>> maths = MultiImageMaths()\n >>> maths.inputs.in_file = \"functional.nii\"\n >>> maths.inputs.op_string = \"-add %s -mul -1 -div %s\"\n >>> maths.inputs.operand_files = [\"functional2.nii\", \"functional3.nii\"]\n >>> maths.inputs.out_file = \"functional4.nii\"\n >>> maths.cmdline\n 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii'\n\n \"\"\"\n\n input_spec = MultiImageMathsInput\n\n def _format_arg(self, name, spec, value):\n if name == \"op_string\":\n return value % tuple(self.inputs.operand_files)\n return super(MultiImageMaths, self)._format_arg(name, spec, value)\n\n\nclass TemporalFilterInput(MathsInput):\n\n lowpass_sigma = traits.Float(\n -1,\n argstr=\"%.6f\",\n position=5,\n usedefault=True,\n desc=\"lowpass filter sigma (in volumes)\",\n )\n highpass_sigma = traits.Float(\n -1,\n argstr=\"-bptf %.6f\",\n position=4,\n usedefault=True,\n desc=\"highpass filter sigma (in volumes)\",\n )\n\n\nclass TemporalFilter(MathsCommand):\n \"\"\"Use fslmaths to apply a low, high, or bandpass temporal filter to a\n timeseries.\n\n \"\"\"\n\n input_spec = TemporalFilterInput\n _suffix = \"_filt\"\n", "path": "nipype/interfaces/fsl/maths.py" } ]
diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index 9e05b4d102..f3276024b7 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -41,7 +41,7 @@ class MathsInput(FSLCommandInputSpec): class MathsOutput(TraitedSpec): - out_file = File(exists=True, desc="image written after calculations") + out_file = File(desc="image written after calculations") class MathsCommand(FSLCommand):
tensorflow__addons-897
[ { "content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Useful extra functionality for TensorFlow maintained by SIG-addons.\"\"\"\n\n# Local project imports\nfrom tensorflow_addons import activations\nfrom tensorflow_addons import callbacks\nfrom tensorflow_addons import image\nfrom tensorflow_addons import layers\nfrom tensorflow_addons import losses\nfrom tensorflow_addons import metrics\nfrom tensorflow_addons import optimizers\nfrom tensorflow_addons import rnn\nfrom tensorflow_addons import seq2seq\nfrom tensorflow_addons import text\n\nfrom tensorflow_addons.version import __version__\n\n# Cleanup symbols to avoid polluting namespace.\ndel absolute_import\ndel division\ndel print_function\n", "path": "tensorflow_addons/__init__.py" } ]
[ { "content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Useful extra functionality for TensorFlow maintained by SIG-addons.\"\"\"\n\n# Local project imports\nfrom tensorflow_addons import activations\nfrom tensorflow_addons import callbacks\nfrom tensorflow_addons import image\nfrom tensorflow_addons import layers\nfrom tensorflow_addons import losses\nfrom tensorflow_addons import metrics\nfrom tensorflow_addons import optimizers\nfrom tensorflow_addons import rnn\nfrom tensorflow_addons import seq2seq\nfrom tensorflow_addons import text\n\nfrom tensorflow_addons.version import __version__\n", "path": "tensorflow_addons/__init__.py" } ]
diff --git a/tensorflow_addons/__init__.py b/tensorflow_addons/__init__.py index 254ee1f09b..0f26412c06 100644 --- a/tensorflow_addons/__init__.py +++ b/tensorflow_addons/__init__.py @@ -27,8 +27,3 @@ from tensorflow_addons import text from tensorflow_addons.version import __version__ - -# Cleanup symbols to avoid polluting namespace. -del absolute_import -del division -del print_function
python-discord__bot-1404
[ { "content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects duplicated messages sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n )\n )\n\n total_duplicated = len(relevant_messages)\n\n if total_duplicated > config['max']:\n return (\n f\"sent {total_duplicated} duplicated messages in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/duplicates.py" } ]
[ { "content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects duplicated messages sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n and msg.content\n )\n )\n\n total_duplicated = len(relevant_messages)\n\n if total_duplicated > config['max']:\n return (\n f\"sent {total_duplicated} duplicated messages in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/duplicates.py" } ]
diff --git a/bot/rules/duplicates.py b/bot/rules/duplicates.py index 455764b535..8e4fbc12df 100644 --- a/bot/rules/duplicates.py +++ b/bot/rules/duplicates.py @@ -13,6 +13,7 @@ async def apply( if ( msg.author == last_message.author and msg.content == last_message.content + and msg.content ) ) diff --git a/config-default.yml b/config-default.yml index 25bbcc3c5b..beaf89f2cc 100644 --- a/config-default.yml +++ b/config-default.yml @@ -371,7 +371,7 @@ anti_spam: rules: attachments: interval: 10 - max: 9 + max: 6 burst: interval: 10
ethereum__consensus-specs-1102
[ { "content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls_stub import *\n\n\"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n code_lines.append(\"def int_to_bytes%d(x): return x.to_bytes(%d, 'little')\" % (i, i))\n\n code_lines.append(\"\"\"\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py" } ]
[ { "content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls import *\n\n\"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n code_lines.append(\"def int_to_bytes%d(x): return x.to_bytes(%d, 'little')\" % (i, i))\n\n code_lines.append(\"\"\"\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py" } ]
diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index da5845951d..26b0e5a8a6 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -13,7 +13,7 @@ def build_phase0_spec(sourcefile, outfile): Tuple, ) from eth2spec.utils.minimal_ssz import * -from eth2spec.utils.bls_stub import * +from eth2spec.utils.bls import * """) for i in (1, 2, 3, 4, 8, 32, 48, 96): diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e56fd976cc..46c811fedb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1756,7 +1756,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: amount = deposit.data.amount validator_pubkeys = [v.pubkey for v in state.validator_registry] if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) + # Verify the deposit signature (proof of possession). + # Invalid signatures are allowed by the deposit contract, and hence included on-chain, but must not be processed. if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): return diff --git a/test_generators/README.md b/test_generators/README.md index 43bf7af031..309a64bd92 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -58,7 +58,7 @@ It's recommended to extend the base-generator. Create a `requirements.txt` in the root of your generator directory: ``` -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers ../../test_libs/config_helpers ../../test_libs/pyspec diff --git a/test_generators/bls/requirements.txt b/test_generators/bls/requirements.txt index 8a933d41ca..329a4ce152 100644 --- a/test_generators/bls/requirements.txt +++ b/test_generators/bls/requirements.txt @@ -1,3 +1,3 @@ py-ecc==1.6.0 -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers diff --git a/test_generators/operations/requirements.txt b/test_generators/operations/requirements.txt index 8f9bede8f3..595cee69cd 100644 --- a/test_generators/operations/requirements.txt +++ b/test_generators/operations/requirements.txt @@ -1,4 +1,4 @@ -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers ../../test_libs/config_helpers ../../test_libs/pyspec \ No newline at end of file diff --git a/test_generators/operations/suite_creator.py b/test_generators/operations/suite_creator.py index d27e3efc19..caff0c7db9 100644 --- a/test_generators/operations/suite_creator.py +++ b/test_generators/operations/suite_creator.py @@ -15,7 +15,7 @@ def generate_from_tests(pkg): for name in fn_names: tfn = getattr(pkg, name) try: - out.append(tfn(generator_mode=True)) + out.append(tfn(generator_mode=True, bls_active=True)) except AssertionError: print("ERROR: failed to generate vector from test: %s (pkg: %s)" % (name, pkg.__name__)) return out @@ -34,6 +34,6 @@ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: forks=["phase0"], config=config_name, runner="operations", - handler=config_name, + handler=operation_name, test_cases=get_cases())) return suite_definition diff --git a/test_generators/shuffling/requirements.txt b/test_generators/shuffling/requirements.txt index 8f9bede8f3..595cee69cd 100644 --- a/test_generators/shuffling/requirements.txt +++ b/test_generators/shuffling/requirements.txt @@ -1,4 +1,4 @@ -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers ../../test_libs/config_helpers ../../test_libs/pyspec \ No newline at end of file diff --git a/test_generators/ssz_generic/requirements.txt 
b/test_generators/ssz_generic/requirements.txt index 94afc9d91b..dcdb0824ff 100644 --- a/test_generators/ssz_generic/requirements.txt +++ b/test_generators/ssz_generic/requirements.txt @@ -1,4 +1,4 @@ -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers ../../test_libs/config_helpers ssz==0.1.0a2 diff --git a/test_generators/ssz_static/requirements.txt b/test_generators/ssz_static/requirements.txt index 8f9bede8f3..595cee69cd 100644 --- a/test_generators/ssz_static/requirements.txt +++ b/test_generators/ssz_static/requirements.txt @@ -1,4 +1,4 @@ -eth-utils==1.4.1 +eth-utils==1.6.0 ../../test_libs/gen_helpers ../../test_libs/config_helpers ../../test_libs/pyspec \ No newline at end of file diff --git a/test_libs/config_helpers/requirements.txt b/test_libs/config_helpers/requirements.txt index e441a474b8..f2f208c3fb 100644 --- a/test_libs/config_helpers/requirements.txt +++ b/test_libs/config_helpers/requirements.txt @@ -1 +1 @@ -ruamel.yaml==0.15.87 +ruamel.yaml==0.15.96 diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py index 90ad94ee44..9f0ea06419 100644 --- a/test_libs/config_helpers/setup.py +++ b/test_libs/config_helpers/setup.py @@ -4,6 +4,6 @@ name='config_helpers', packages=['preset_loader'], install_requires=[ - "ruamel.yaml==0.15.87" + "ruamel.yaml==0.15.96" ] ) diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt index 3d6a39458e..557cae6317 100644 --- a/test_libs/gen_helpers/requirements.txt +++ b/test_libs/gen_helpers/requirements.txt @@ -1,2 +1,2 @@ -ruamel.yaml==0.15.87 -eth-utils==1.4.1 +ruamel.yaml==0.15.96 +eth-utils==1.6.0 diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index 5de27a6dbe..29cf04fd19 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -4,7 +4,7 @@ name='gen_helpers', packages=['gen_base'], install_requires=[ - "ruamel.yaml==0.15.87", - "eth-utils==1.4.1" + "ruamel.yaml==0.15.96", + "eth-utils==1.6.0" ] ) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py index 248b04ef4e..0dae852f0b 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py @@ -1,22 +1,23 @@ from copy import deepcopy import eth2spec.phase0.spec as spec - -from eth2spec.phase0.state_transition import ( - state_transition, -) from eth2spec.phase0.spec import ( get_current_epoch, process_attestation ) -from eth2spec.test.helpers import ( - build_empty_block_for_next_slot, +from eth2spec.phase0.state_transition import ( + state_transition_to, +) +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.attestations import ( get_valid_attestation, + sign_attestation, +) +from eth2spec.test.helpers.state import ( next_epoch, next_slot, ) - -from eth2spec.test.context import spec_state_test, expect_assertion_error +from eth2spec.test.helpers.block import apply_empty_block def run_attestation_processing(state, attestation, valid=True): @@ -56,7 +57,7 @@ def run_attestation_processing(state, attestation, valid=True): @spec_state_test def test_success(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=True) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY yield from run_attestation_processing(state, attestation) @@ -64,17 
+65,25 @@ def test_success(state): @spec_state_test def test_success_previous_epoch(state): - attestation = get_valid_attestation(state) - block = build_empty_block_for_next_slot(state) - block.slot = state.slot + spec.SLOTS_PER_EPOCH - state_transition(state, block) + attestation = get_valid_attestation(state, signed=True) + next_epoch(state) + apply_empty_block(state) yield from run_attestation_processing(state, attestation) +@always_bls +@spec_state_test +def test_invalid_attestation_signature(state): + attestation = get_valid_attestation(state, signed=False) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + yield from run_attestation_processing(state, attestation, False) + + @spec_state_test def test_before_inclusion_delay(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=True) # do not increment slot to allow for inclusion delay yield from run_attestation_processing(state, attestation, False) @@ -82,11 +91,10 @@ def test_before_inclusion_delay(state): @spec_state_test def test_after_epoch_slots(state): - attestation = get_valid_attestation(state) - block = build_empty_block_for_next_slot(state) + attestation = get_valid_attestation(state, signed=True) # increment past latest inclusion slot - block.slot = state.slot + spec.SLOTS_PER_EPOCH + 1 - state_transition(state, block) + state_transition_to(state, state.slot + spec.SLOTS_PER_EPOCH + 1) + apply_empty_block(state) yield from run_attestation_processing(state, attestation, False) @@ -97,7 +105,7 @@ def test_old_source_epoch(state): state.finalized_epoch = 2 state.previous_justified_epoch = 3 state.current_justified_epoch = 4 - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, signed=False) # test logic sanity check: make sure the attestation is pointing to oldest known source epoch assert attestation.data.source_epoch == state.previous_justified_epoch @@ -105,36 +113,44 @@ def test_old_source_epoch(state): # Now go beyond that, it will be invalid attestation.data.source_epoch -= 1 + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_wrong_shard(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.data.shard += 1 + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_new_source_epoch(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.data.source_epoch += 1 + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_source_root_is_target_root(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.data.source_root = attestation.data.target_root + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @@ -149,7 +165,7 @@ def test_invalid_current_source_root(state): state.current_justified_epoch = 4 state.current_justified_root = b'\xff' * 32 - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + attestation 
= get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY # Test logic sanity checks: @@ -159,56 +175,81 @@ def test_invalid_current_source_root(state): # Make attestation source root invalid: should be previous justified, not current one attestation.data.source_root = state.current_justified_root + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_bad_source_root(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.data.source_root = b'\x42' * 32 + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_non_zero_crosslink_data_root(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.data.crosslink_data_root = b'\x42' * 32 + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_bad_previous_crosslink(state): next_epoch(state) - attestation = get_valid_attestation(state) + apply_empty_block(state) + + attestation = get_valid_attestation(state, signed=True) for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): next_slot(state) + apply_empty_block(state) state.current_crosslinks[attestation.data.shard].epoch += 10 yield from run_attestation_processing(state, attestation, False) +@spec_state_test +def test_inconsistent_bitfields(state): + attestation = get_valid_attestation(state, signed=False) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00' + + sign_attestation(state, attestation) + + yield from run_attestation_processing(state, attestation, False) + + @spec_state_test def test_non_empty_custody_bitfield(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + + sign_attestation(state, attestation) yield from run_attestation_processing(state, attestation, False) @spec_state_test def test_empty_aggregation_bitfield(state): - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=False) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) + sign_attestation(state, attestation) + yield from run_attestation_processing(state, attestation, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py index 957b9a9f07..28e2322772 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py @@ -3,14 +3,15 @@ get_beacon_proposer_index, process_attester_slashing, ) -from eth2spec.test.helpers import ( +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.attestations import sign_indexed_attestation +from eth2spec.test.helpers.attester_slashings import 
get_valid_attester_slashing +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.state import ( get_balance, - get_valid_attester_slashing, next_epoch, ) -from eth2spec.test.context import spec_state_test, expect_assertion_error - def run_attester_slashing_processing(state, attester_slashing, valid=True): """ @@ -45,24 +46,22 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - # lost whistleblower reward - assert ( - get_balance(state, slashed_index) < - pre_slashed_balance - ) - - # gained whistleblower reward - assert ( - get_balance(state, proposer_index) > - pre_proposer_balance - ) + if slashed_index != proposer_index: + # lost whistleblower reward + assert get_balance(state, slashed_index) < pre_slashed_balance + # gained whistleblower reward + assert get_balance(state, proposer_index) > pre_proposer_balance + else: + # gained rewards for all slashings, which may include others. And only lost that of themselves. + # Netto at least 0, if more people where slashed, a balance increase. + assert get_balance(state, slashed_index) >= pre_slashed_balance yield 'post', state @spec_state_test def test_success_double(state): - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) yield from run_attester_slashing_processing(state, attester_slashing) @@ -70,37 +69,64 @@ def test_success_double(state): @spec_state_test def test_success_surround(state): next_epoch(state) + apply_empty_block(state) + state.current_justified_epoch += 1 - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) # set attestion1 to surround attestation 2 attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1 + sign_indexed_attestation(state, attester_slashing.attestation_1) + yield from run_attester_slashing_processing(state, attester_slashing) +@always_bls +@spec_state_test +def test_invalid_sig_1(state): + attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) + yield from run_attester_slashing_processing(state, attester_slashing, False) + + +@always_bls +@spec_state_test +def test_invalid_sig_2(state): + attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=False) + yield from run_attester_slashing_processing(state, attester_slashing, False) + + +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(state): + attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=False) + yield from run_attester_slashing_processing(state, attester_slashing, False) + + @spec_state_test def test_same_data(state): - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) attester_slashing.attestation_1.data = attester_slashing.attestation_2.data + sign_indexed_attestation(state, attester_slashing.attestation_1) yield from run_attester_slashing_processing(state, attester_slashing, False) @spec_state_test def test_no_double_or_surround(state): - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = 
get_valid_attester_slashing(state, signed_1=False, signed_2=True) attester_slashing.attestation_1.data.target_epoch += 1 + sign_indexed_attestation(state, attester_slashing.attestation_1) yield from run_attester_slashing_processing(state, attester_slashing, False) @spec_state_test def test_participants_already_slashed(state): - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) # set all indices to slashed attestation_1 = attester_slashing.attestation_1 @@ -113,10 +139,11 @@ def test_participants_already_slashed(state): @spec_state_test def test_custody_bit_0_and_1(state): - attester_slashing = get_valid_attester_slashing(state) + attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) attester_slashing.attestation_1.custody_bit_1_indices = ( attester_slashing.attestation_1.custody_bit_0_indices ) + sign_indexed_attestation(state, attester_slashing.attestation_1) yield from run_attester_slashing_processing(state, attester_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py index a176f79587..28e215a3a0 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py @@ -6,12 +6,12 @@ advance_slot, process_block_header, ) -from eth2spec.test.helpers import ( +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, - next_slot, + sign_block ) - -from eth2spec.test.context import spec_state_test, expect_assertion_error +from eth2spec.test.helpers.state import next_slot def prepare_state_for_header_processing(state): @@ -42,23 +42,32 @@ def run_block_header_processing(state, block, valid=True): @spec_state_test -def test_success(state): - block = build_empty_block_for_next_slot(state) +def test_success_block_header(state): + block = build_empty_block_for_next_slot(state, signed=True) yield from run_block_header_processing(state, block) +@always_bls +@spec_state_test +def test_invalid_sig_block_header(state): + block = build_empty_block_for_next_slot(state, signed=False) + yield from run_block_header_processing(state, block, valid=False) + + @spec_state_test -def test_invalid_slot(state): - block = build_empty_block_for_next_slot(state) +def test_invalid_slot_block_header(state): + block = build_empty_block_for_next_slot(state, signed=False) block.slot = state.slot + 2 # invalid slot + sign_block(state, block) yield from run_block_header_processing(state, block, valid=False) @spec_state_test def test_invalid_previous_block_root(state): - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.previous_block_root = b'\12' * 32 # invalid prev root + sign_block(state, block) yield from run_block_header_processing(state, block, valid=False) @@ -73,6 +82,6 @@ def test_proposer_slashed(state): # set proposer to slashed state.validator_registry[proposer_index].slashed = True - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=True) yield from run_block_header_processing(state, block, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py 
b/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py index fe2dae6a84..b520c809f8 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py @@ -1,21 +1,12 @@ import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import process_deposit +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.deposits import prepare_state_and_deposit, sign_deposit_data +from eth2spec.test.helpers.state import get_balance +from eth2spec.test.helpers.keys import privkeys -from eth2spec.phase0.spec import ( - ZERO_HASH, - process_deposit, -) -from eth2spec.test.helpers import ( - get_balance, - build_deposit, - prepare_state_and_deposit, - privkeys, - pubkeys, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error - - -def run_deposit_processing(state, deposit, validator_index, valid=True): +def run_deposit_processing(state, deposit, validator_index, valid=True, effective=True): """ Run ``process_deposit``, yielding: - pre-state ('pre') @@ -43,47 +34,76 @@ def run_deposit_processing(state, deposit, validator_index, valid=True): yield 'post', state - if validator_index < pre_validator_count: - # top-up + if not effective: assert len(state.validator_registry) == pre_validator_count assert len(state.balances) == pre_validator_count + if validator_index < pre_validator_count: + assert get_balance(state, validator_index) == pre_balance else: - # new validator - assert len(state.validator_registry) == pre_validator_count + 1 - assert len(state.balances) == pre_validator_count + 1 + if validator_index < pre_validator_count: + # top-up + assert len(state.validator_registry) == pre_validator_count + assert len(state.balances) == pre_validator_count + else: + # new validator + assert len(state.validator_registry) == pre_validator_count + 1 + assert len(state.balances) == pre_validator_count + 1 + assert get_balance(state, validator_index) == pre_balance + deposit.data.amount assert state.deposit_index == state.latest_eth1_data.deposit_count - assert get_balance(state, validator_index) == pre_balance + deposit.data.amount @spec_state_test -def test_success(state): +def test_new_deposit(state): # fresh deposit = next validator index = validator appended to registry validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) yield from run_deposit_processing(state, deposit, validator_index) +@always_bls +@spec_state_test +def test_invalid_sig_new_deposit(state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validator_registry) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=False) + yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=False) + + @spec_state_test def test_success_top_up(state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) yield from run_deposit_processing(state, deposit, validator_index) +@always_bls +@spec_state_test +def test_invalid_sig_top_up(state): + validator_index = 0 + amount = 
spec.MAX_EFFECTIVE_BALANCE // 4 + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=False) + + # invalid signatures, in top-ups, are allowed! + yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True) + + @spec_state_test def test_wrong_index(state): validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=False) # mess up deposit_index deposit.index = state.deposit_index + 1 + sign_deposit_data(state, deposit.data, privkeys[validator_index]) + yield from run_deposit_processing(state, deposit, validator_index, valid=False) @@ -94,9 +114,11 @@ def test_wrong_index(state): def test_bad_merkle_proof(state): validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=False) # mess up merkle branch deposit.proof[-1] = spec.ZERO_HASH + sign_deposit_data(state, deposit.data, privkeys[validator_index]) + yield from run_deposit_processing(state, deposit, validator_index, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py index 609c97ce6d..07ccc25f1c 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py @@ -3,12 +3,11 @@ get_current_epoch, process_proposer_slashing, ) -from eth2spec.test.helpers import ( - get_balance, - get_valid_proposer_slashing, -) - -from eth2spec.test.context import spec_state_test, expect_assertion_error +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.block_header import sign_block_header +from eth2spec.test.helpers.keys import privkeys +from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing +from eth2spec.test.helpers.state import get_balance def run_proposer_slashing_processing(state, proposer_slashing, valid=True): @@ -48,14 +47,35 @@ def run_proposer_slashing_processing(state, proposer_slashing, valid=True): @spec_state_test def test_success(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) yield from run_proposer_slashing_processing(state, proposer_slashing) +@always_bls +@spec_state_test +def test_invalid_sig_1(state): + proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=True) + yield from run_proposer_slashing_processing(state, proposer_slashing, False) + + +@always_bls +@spec_state_test +def test_invalid_sig_2(state): + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) + yield from run_proposer_slashing_processing(state, proposer_slashing, False) + + +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(state): + proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=False) + yield from run_proposer_slashing_processing(state, proposer_slashing, False) + + @spec_state_test def test_invalid_proposer_index(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = 
get_valid_proposer_slashing(state, signed_1=True, signed_2=True) # Index just too high (by 1) proposer_slashing.proposer_index = len(state.validator_registry) @@ -64,17 +84,18 @@ def test_invalid_proposer_index(state): @spec_state_test def test_epochs_are_different(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) # set slots to be in different epochs proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH + sign_block_header(state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index]) yield from run_proposer_slashing_processing(state, proposer_slashing, False) @spec_state_test def test_headers_are_same(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) # set headers to be the same proposer_slashing.header_2 = proposer_slashing.header_1 @@ -84,7 +105,7 @@ def test_headers_are_same(state): @spec_state_test def test_proposer_is_not_activated(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) # set proposer to be not active yet state.validator_registry[proposer_slashing.proposer_index].activation_epoch = get_current_epoch(state) + 1 @@ -94,7 +115,7 @@ def test_proposer_is_not_activated(state): @spec_state_test def test_proposer_is_slashed(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) # set proposer to slashed state.validator_registry[proposer_slashing.proposer_index].slashed = True @@ -104,8 +125,10 @@ def test_proposer_is_slashed(state): @spec_state_test def test_proposer_is_withdrawn(state): - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) + # move 1 epoch into future, to allow for past withdrawable epoch + state.slot += spec.SLOTS_PER_EPOCH # set proposer withdrawable_epoch in past current_epoch = get_current_epoch(state) proposer_index = proposer_slashing.proposer_index diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py index 10d2ccede0..e5f52e209f 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py @@ -1,17 +1,14 @@ import eth2spec.phase0.spec as spec - from eth2spec.phase0.spec import ( get_active_validator_indices, get_beacon_proposer_index, get_current_epoch, process_transfer, ) -from eth2spec.test.helpers import ( - get_valid_transfer, - next_epoch, -) - -from eth2spec.test.context import spec_state_test, expect_assertion_error +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.transfers import get_valid_transfer def run_transfer_processing(state, transfer, valid=True): @@ -48,7 +45,7 @@ def run_transfer_processing(state, transfer, valid=True): @spec_state_test def test_success_non_activated(state): - transfer = get_valid_transfer(state) + transfer = get_valid_transfer(state, signed=True) # un-activate so validator can transfer 
state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH @@ -58,8 +55,9 @@ def test_success_non_activated(state): @spec_state_test def test_success_withdrawable(state): next_epoch(state) + apply_empty_block(state) - transfer = get_valid_transfer(state) + transfer = get_valid_transfer(state, signed=True) # withdrawable_epoch in past so can transfer state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1 @@ -71,7 +69,7 @@ def test_success_withdrawable(state): def test_success_active_above_max_effective(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) yield from run_transfer_processing(state, transfer) @@ -80,24 +78,34 @@ def test_success_active_above_max_effective(state): def test_success_active_above_max_effective_fee(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) yield from run_transfer_processing(state, transfer) +@always_bls +@spec_state_test +def test_invalid_signature(state): + transfer = get_valid_transfer(state, signed=False) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(state, transfer, False) + + @spec_state_test def test_active_but_transfer_past_effective_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE // 32 state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0, signed=True) yield from run_transfer_processing(state, transfer, False) @spec_state_test def test_incorrect_slot(state): - transfer = get_valid_transfer(state, slot=state.slot+1) + transfer = get_valid_transfer(state, slot=state.slot + 1, signed=True) # un-activate so validator can transfer state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH @@ -108,7 +116,7 @@ def test_incorrect_slot(state): def test_insufficient_balance_for_fee(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) # un-activate so validator can transfer state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH @@ -120,7 +128,7 @@ def test_insufficient_balance_for_fee(state): def test_insufficient_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, 
signed=True) # un-activate so validator can transfer state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH @@ -132,7 +140,7 @@ def test_insufficient_balance(state): def test_no_dust_sender(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] balance = state.balances[sender_index] - transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0, signed=True) # un-activate so validator can transfer state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH @@ -144,7 +152,7 @@ def test_no_dust_sender(state): def test_no_dust_recipient(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) state.balances[transfer.recipient] = 0 # un-activate so validator can transfer @@ -155,7 +163,7 @@ def test_no_dust_recipient(state): @spec_state_test def test_invalid_pubkey(state): - transfer = get_valid_transfer(state) + transfer = get_valid_transfer(state, signed=True) state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH # un-activate so validator can transfer diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py index be0ef1e7a4..fe33fb6318 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py +++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py @@ -1,17 +1,13 @@ import eth2spec.phase0.spec as spec - from eth2spec.phase0.spec import ( get_active_validator_indices, get_churn_limit, get_current_epoch, process_voluntary_exit, ) -from eth2spec.test.helpers import ( - build_voluntary_exit, - pubkey_to_privkey, -) - -from eth2spec.test.context import spec_state_test, expect_assertion_error +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.helpers.keys import pubkey_to_privkey +from eth2spec.test.helpers.voluntary_exits import build_voluntary_exit, sign_voluntary_exit def run_voluntary_exit_processing(state, voluntary_exit, valid=True): @@ -51,16 +47,26 @@ def test_success(state): validator_index = get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - voluntary_exit = build_voluntary_exit( - state, - current_epoch, - validator_index, - privkey, - ) + voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey, signed=True) yield from run_voluntary_exit_processing(state, voluntary_exit) +@always_bls +@spec_state_test +def test_invalid_signature(state): + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + + voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey, signed=False) + + yield from 
run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
 @spec_state_test
 def test_success_exit_queue(state):
     # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
@@ -80,6 +86,7 @@ def test_success_exit_queue(state):
             current_epoch,
             index,
             privkey,
+            signed=True,
         ))

     # Now run all the exits
@@ -96,6 +103,7 @@
         current_epoch,
         validator_index,
         privkey,
+        signed=True,
     )

     # This is the interesting part of the test: on a pre-state with a full exit queue,
@@ -122,8 +130,10 @@ def test_validator_exit_in_future(state):
         current_epoch,
         validator_index,
         privkey,
+        signed=False,
     )
     voluntary_exit.epoch += 1
+    sign_voluntary_exit(state, voluntary_exit, privkey)

     yield from run_voluntary_exit_processing(state, voluntary_exit, False)
@@ -142,8 +152,10 @@ def test_validator_invalid_validator_index(state):
         current_epoch,
         validator_index,
         privkey,
+        signed=False,
     )
     voluntary_exit.validator_index = len(state.validator_registry)
+    sign_voluntary_exit(state, voluntary_exit, privkey)

     yield from run_voluntary_exit_processing(state, voluntary_exit, False)
@@ -162,6 +174,7 @@ def test_validator_not_active(state):
         current_epoch,
         validator_index,
         privkey,
+        signed=True,
    )

     yield from run_voluntary_exit_processing(state, voluntary_exit, False)
@@ -184,6 +197,7 @@ def test_validator_already_exited(state):
         current_epoch,
         validator_index,
         privkey,
+        signed=True,
     )

     yield from run_voluntary_exit_processing(state, voluntary_exit, False)
@@ -200,6 +214,7 @@ def test_validator_not_active_long_enough(state):
         current_epoch,
         validator_index,
         privkey,
+        signed=True,
     )

     assert (
diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py
index afabd4a57e..a484cc995f 100644
--- a/test_libs/pyspec/eth2spec/test/context.py
+++ b/test_libs/pyspec/eth2spec/test/context.py
@@ -1,16 +1,27 @@
 from eth2spec.phase0 import spec
+from eth2spec.utils import bls

-from .helpers import create_genesis_state
+from .helpers.genesis import create_genesis_state

-from .utils import spectest, with_args
+from .utils import spectest, with_args, with_tags


 # Provides a genesis state as first argument to the function decorated with this
-with_state = with_args(lambda: [create_genesis_state(spec.SLOTS_PER_EPOCH * 8, list())])
+with_state = with_args(lambda: [create_genesis_state(spec.SLOTS_PER_EPOCH * 8)])
+
+
+# BLS is turned off by default *for performance purposes during TESTING*.
+# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
+# - Some tests are marked as BLS-requiring, and ignore this setting.
+#     (tests that express differences caused by BLS, e.g. invalid signatures being rejected)
+# - Some other tests are marked as BLS-ignoring, and ignore this setting.
+#     (tests that are heavily performance impacted / require unsigned state transitions)
+# - Most tests respect the BLS setting.
+DEFAULT_BLS_ACTIVE = False


 # shorthand for decorating @with_state @spectest()
 def spec_state_test(fn):
-    return with_state(spectest()(fn))
+    return with_state(bls_switch(spectest()(fn)))


 def expect_assertion_error(fn):
@@ -25,3 +36,47 @@
             pass
     if bad:
         raise AssertionError('expected an assertion error, but got none.')
+
+
+# Tags a test to be ignoring BLS for it to pass.
+bls_ignored = with_tags({'bls_ignored': True})
+
+
+def never_bls(fn):
+    """
+    Decorator to apply on ``bls_switch`` decorator to force BLS de-activation. Useful to mark tests as BLS-ignorant.
+    """
+    def entry(*args, **kw):
+        # override bls setting
+        kw['bls_active'] = False
+        return fn(*args, **kw)
+    return bls_ignored(entry)
+
+
+# Tags a test to be requiring BLS for it to pass.
+bls_required = with_tags({'bls_required': True})
+
+
+def always_bls(fn):
+    """
+    Decorator to apply on ``bls_switch`` decorator to force BLS activation. Useful to mark tests as BLS-dependent.
+    """
+    def entry(*args, **kw):
+        # override bls setting
+        kw['bls_active'] = True
+        return fn(*args, **kw)
+    return bls_required(entry)
+
+
+def bls_switch(fn):
+    """
+    Decorator to make a function execute with BLS ON, or BLS off.
+    Based on an optional bool argument ``bls_active``, passed to the function at runtime.
+    """
+    def entry(*args, **kw):
+        old_state = bls.bls_active
+        bls.bls_active = kw.pop('bls_active', DEFAULT_BLS_ACTIVE)
+        out = fn(*args, **kw)
+        bls.bls_active = old_state
+        return out
+    return entry
diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
index 203978d29b..688bb54ac8 100644
--- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
+++ b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
@@ -1,25 +1,27 @@
 from copy import deepcopy

 import eth2spec.phase0.spec as spec
-
-from eth2spec.phase0.state_transition import (
-    state_transition,
-)
 from eth2spec.phase0.spec import (
     cache_state,
     get_crosslink_deltas,
     process_crosslinks,
 )
-from eth2spec.test.helpers import (
+from eth2spec.phase0.state_transition import (
+    state_transition,
+)
+from eth2spec.test.context import spec_state_test
+from eth2spec.test.helpers.state import (
+    next_epoch,
+    next_slot
+)
+from eth2spec.test.helpers.block import apply_empty_block, sign_block
+from eth2spec.test.helpers.attestations import (
     add_attestation_to_state,
     build_empty_block_for_next_slot,
     fill_aggregate_attestation,
     get_crosslink_committee,
     get_valid_attestation,
-    next_epoch,
-    next_slot,
 )
-from eth2spec.test.context import spec_state_test


 def run_process_crosslinks(state, valid=True):
@@ -31,8 +33,9 @@
     """
     # transition state to slot before state transition
     slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
-    block = build_empty_block_for_next_slot(state)
+    block = build_empty_block_for_next_slot(state, signed=False)
     block.slot = slot
+    sign_block(state, block)
     state_transition(state, block)

     # cache state before epoch transition
@@ -55,7 +58,7 @@ def test_no_attestations(state):
 def test_single_crosslink_update_from_current_epoch(state):
     next_epoch(state)

-    attestation = get_valid_attestation(state)
+    attestation = get_valid_attestation(state, signed=True)

     fill_aggregate_attestation(state, attestation)
     add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
@@ -75,7 +78,7 @@ def test_single_crosslink_update_from_previous_epoch(state):
     next_epoch(state)

-    attestation = get_valid_attestation(state)
+    attestation = get_valid_attestation(state, signed=True)

     fill_aggregate_attestation(state, attestation)
     add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH)
@@ -103,7 +106,7 @@ def test_double_late_crosslink(state):
     next_epoch(state)
     state.slot += 4

-    attestation_1 = get_valid_attestation(state)
+    attestation_1 = get_valid_attestation(state, signed=True)

     fill_aggregate_attestation(state,
attestation_1) # add attestation_1 in the next epoch @@ -111,10 +114,12 @@ def test_double_late_crosslink(state): add_attestation_to_state(state, attestation_1, state.slot + 1) for slot in range(spec.SLOTS_PER_EPOCH): - attestation_2 = get_valid_attestation(state) + attestation_2 = get_valid_attestation(state, signed=True) if attestation_2.data.shard == attestation_1.data.shard: break next_slot(state) + apply_empty_block(state) + fill_aggregate_attestation(state, attestation_2) # add attestation_2 in the next epoch after attestation_1 has @@ -126,7 +131,7 @@ def test_double_late_crosslink(state): assert len(state.current_epoch_attestations) == 0 crosslink_deltas = get_crosslink_deltas(state) - + yield from run_process_crosslinks(state) shard = attestation_2.data.shard diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py index 970c309429..2086f4ef2d 100644 --- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py +++ b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py @@ -4,10 +4,7 @@ get_current_epoch, is_active_validator, ) -from eth2spec.test.helpers import ( - next_epoch, -) - +from eth2spec.test.helpers.state import next_epoch from eth2spec.test.context import spec_state_test diff --git a/test_libs/pyspec/eth2spec/test/helpers.py b/test_libs/pyspec/eth2spec/test/helpers.py deleted file mode 100644 index 8eb6d88913..0000000000 --- a/test_libs/pyspec/eth2spec/test/helpers.py +++ /dev/null @@ -1,445 +0,0 @@ -from copy import deepcopy - -from py_ecc import bls - -from eth2spec.phase0.state_transition import ( - state_transition, -) -import eth2spec.phase0.spec as spec -from eth2spec.utils.minimal_ssz import signing_root -from eth2spec.phase0.spec import ( - # constants - ZERO_HASH, - # SSZ - Attestation, - AttestationData, - AttestationDataAndCustodyBit, - AttesterSlashing, - BeaconBlock, - BeaconBlockHeader, - Deposit, - DepositData, - Eth1Data, - ProposerSlashing, - Transfer, - VoluntaryExit, - # functions - convert_to_indexed, - get_active_validator_indices, - get_attesting_indices, - get_block_root, - get_block_root_at_slot, - get_crosslink_committee, - get_current_epoch, - get_domain, - get_epoch_start_slot, - get_genesis_beacon_state, - get_previous_epoch, - get_shard_delta, - hash_tree_root, - slot_to_epoch, - verify_merkle_branch, - hash, -) -from eth2spec.utils.merkle_minimal import ( - calc_merkle_tree_from_leaves, - get_merkle_proof, - get_merkle_root, -) - - -privkeys = [i + 1 for i in range(1024)] -pubkeys = [bls.privtopub(privkey) for privkey in privkeys] -pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} - - -def get_balance(state, index): - return state.balances[index] - - -def set_bitfield_bit(bitfield, i): - """ - Set the bit in ``bitfield`` at position ``i`` to ``1``. 
- """ - byte_index = i // 8 - bit_index = i % 8 - return ( - bitfield[:byte_index] + - bytes([bitfield[byte_index] | (1 << bit_index)]) + - bitfield[byte_index+1:] - ) - - -def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): - if not deposit_data_leaves: - deposit_data_leaves = [] - signature = b'\x33' * 96 - - deposit_data_list = [] - for i in range(num_validators): - pubkey = pubkeys[i] - deposit_data = DepositData( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - amount=spec.MAX_EFFECTIVE_BALANCE, - signature=signature, - ) - item = deposit_data.hash_tree_root() - deposit_data_leaves.append(item) - tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) - root = get_merkle_root((tuple(deposit_data_leaves))) - proof = list(get_merkle_proof(tree, item_index=i)) - assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root) - deposit_data_list.append(deposit_data) - - genesis_validator_deposits = [] - for i in range(num_validators): - genesis_validator_deposits.append(Deposit( - proof=list(get_merkle_proof(tree, item_index=i)), - index=i, - data=deposit_data_list[i] - )) - return genesis_validator_deposits, root - - -def create_genesis_state(num_validators, deposit_data_leaves=None): - initial_deposits, deposit_root = create_mock_genesis_validator_deposits( - num_validators, - deposit_data_leaves, - ) - return get_genesis_beacon_state( - initial_deposits, - genesis_time=0, - genesis_eth1_data=Eth1Data( - deposit_root=deposit_root, - deposit_count=len(initial_deposits), - block_hash=spec.ZERO_HASH, - ), - ) - - -def build_empty_block_for_next_slot(state): - empty_block = BeaconBlock() - empty_block.slot = state.slot + 1 - empty_block.body.eth1_data.deposit_count = state.deposit_index - previous_block_header = deepcopy(state.latest_block_header) - if previous_block_header.state_root == spec.ZERO_HASH: - previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = signing_root(previous_block_header) - return empty_block - - -def build_deposit_data(state, pubkey, privkey, amount): - deposit_data = DepositData( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - amount=amount, - ) - signature = bls.sign( - message_hash=signing_root(deposit_data), - privkey=privkey, - domain=get_domain( - state, - spec.DOMAIN_DEPOSIT, - ) - ) - deposit_data.signature = signature - return deposit_data - - -def build_attestation_data(state, slot, shard): - assert state.slot >= slot - - if slot == state.slot: - block_root = build_empty_block_for_next_slot(state).previous_block_root - else: - block_root = get_block_root_at_slot(state, slot) - - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - if slot < current_epoch_start_slot: - epoch_boundary_root = get_block_root(state, get_previous_epoch(state)) - elif slot == current_epoch_start_slot: - epoch_boundary_root = block_root - else: - epoch_boundary_root = get_block_root(state, get_current_epoch(state)) - - if slot < current_epoch_start_slot: - justified_epoch = state.previous_justified_epoch - justified_block_root = state.previous_justified_root - else: - justified_epoch = state.current_justified_epoch - justified_block_root = state.current_justified_root - - crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else 
state.previous_crosslinks - return AttestationData( - shard=shard, - beacon_block_root=block_root, - source_epoch=justified_epoch, - source_root=justified_block_root, - target_epoch=slot_to_epoch(slot), - target_root=epoch_boundary_root, - crosslink_data_root=spec.ZERO_HASH, - previous_crosslink_root=hash_tree_root(crosslinks[shard]), - ) - - -def build_voluntary_exit(state, epoch, validator_index, privkey): - voluntary_exit = VoluntaryExit( - epoch=epoch, - validator_index=validator_index, - ) - voluntary_exit.signature = bls.sign( - message_hash=signing_root(voluntary_exit), - privkey=privkey, - domain=get_domain( - state=state, - domain_type=spec.DOMAIN_VOLUNTARY_EXIT, - message_epoch=epoch, - ) - ) - - return voluntary_exit - - -def build_deposit(state, - deposit_data_leaves, - pubkey, - privkey, - amount): - deposit_data = build_deposit_data(state, pubkey, privkey, amount) - - item = deposit_data.hash_tree_root() - index = len(deposit_data_leaves) - deposit_data_leaves.append(item) - tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) - root = get_merkle_root((tuple(deposit_data_leaves))) - proof = list(get_merkle_proof(tree, item_index=index)) - assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) - - deposit = Deposit( - proof=list(proof), - index=index, - data=deposit_data, - ) - - return deposit, root, deposit_data_leaves - - -def get_valid_proposer_slashing(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[-1] - privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - slot = state.slot - - header_1 = BeaconBlockHeader( - slot=slot, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - block_body_root=ZERO_HASH, - ) - header_2 = deepcopy(header_1) - header_2.previous_block_root = b'\x02' * 32 - header_2.slot = slot + 1 - - domain = get_domain( - state=state, - domain_type=spec.DOMAIN_BEACON_PROPOSER, - ) - header_1.signature = bls.sign( - message_hash=signing_root(header_1), - privkey=privkey, - domain=domain, - ) - header_2.signature = bls.sign( - message_hash=signing_root(header_2), - privkey=privkey, - domain=domain, - ) - - return ProposerSlashing( - proposer_index=validator_index, - header_1=header_1, - header_2=header_2, - ) - - -def get_valid_attester_slashing(state): - attestation_1 = get_valid_attestation(state) - attestation_2 = deepcopy(attestation_1) - attestation_2.data.target_root = b'\x01' * 32 - - return AttesterSlashing( - attestation_1=convert_to_indexed(state, attestation_1), - attestation_2=convert_to_indexed(state, attestation_2), - ) - - -def get_valid_attestation(state, slot=None): - if slot is None: - slot = state.slot - - if slot_to_epoch(slot) == get_current_epoch(state): - shard = (state.latest_start_shard + slot) % spec.SLOTS_PER_EPOCH - else: - previous_shard_delta = get_shard_delta(state, get_previous_epoch(state)) - shard = (state.latest_start_shard - previous_shard_delta + slot) % spec.SHARD_COUNT - - attestation_data = build_attestation_data(state, slot, shard) - - crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard) - - committee_size = len(crosslink_committee) - bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\xC0' + b'\x00' * (bitfield_length - 1) - custody_bitfield = b'\x00' * bitfield_length - attestation = Attestation( - aggregation_bitfield=aggregation_bitfield, - data=attestation_data, - 
custody_bitfield=custody_bitfield, - ) - participants = get_attesting_indices( - state, - attestation.data, - attestation.aggregation_bitfield, - ) - assert len(participants) == 2 - - signatures = [] - for validator_index in participants: - privkey = privkeys[validator_index] - signatures.append( - get_attestation_signature( - state, - attestation.data, - privkey - ) - ) - - attestation.aggregation_signature = bls.aggregate_signatures(signatures) - return attestation - - -def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None): - if slot is None: - slot = state.slot - current_epoch = get_current_epoch(state) - if sender_index is None: - sender_index = get_active_validator_indices(state, current_epoch)[-1] - recipient_index = get_active_validator_indices(state, current_epoch)[0] - transfer_pubkey = pubkeys[-1] - transfer_privkey = privkeys[-1] - - if fee is None: - fee = get_balance(state, sender_index) // 32 - if amount is None: - amount = get_balance(state, sender_index) - fee - - transfer = Transfer( - sender=sender_index, - recipient=recipient_index, - amount=amount, - fee=fee, - slot=slot, - pubkey=transfer_pubkey, - signature=ZERO_HASH, - ) - transfer.signature = bls.sign( - message_hash=signing_root(transfer), - privkey=transfer_privkey, - domain=get_domain( - state=state, - domain_type=spec.DOMAIN_TRANSFER, - message_epoch=get_current_epoch(state), - ) - ) - - # ensure withdrawal_credentials reproducable - state.validator_registry[transfer.sender].withdrawal_credentials = ( - spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] - ) - - return transfer - - -def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): - message_hash = AttestationDataAndCustodyBit( - data=attestation_data, - custody_bit=custody_bit, - ).hash_tree_root() - - return bls.sign( - message_hash=message_hash, - privkey=privkey, - domain=get_domain( - state=state, - domain_type=spec.DOMAIN_ATTESTATION, - message_epoch=attestation_data.target_epoch, - ) - ) - - -def fill_aggregate_attestation(state, attestation): - crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard) - for i in range(len(crosslink_committee)): - attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) - - -def add_attestation_to_state(state, attestation, slot): - block = build_empty_block_for_next_slot(state) - block.slot = slot - block.body.attestations.append(attestation) - state_transition(state, block) - - -def next_slot(state): - """ - Transition to the next slot via an empty block. - Return the empty block that triggered the transition. - """ - block = build_empty_block_for_next_slot(state) - state_transition(state, block) - return block - - -def next_epoch(state): - """ - Transition to the start slot of the next epoch via an empty block. - Return the empty block that triggered the transition. - """ - block = build_empty_block_for_next_slot(state) - block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - state_transition(state, block) - return block - - -def get_state_root(state, slot) -> bytes: - """ - Return the state root at a recent ``slot``. - """ - assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT - return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT] - - -def prepare_state_and_deposit(state, validator_index, amount): - """ - Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount. 
- """ - pre_validator_count = len(state.validator_registry) - # fill previous deposits with zero-hash - deposit_data_leaves = [ZERO_HASH] * pre_validator_count - - pubkey = pubkeys[validator_index] - privkey = privkeys[validator_index] - deposit, root, deposit_data_leaves = build_deposit( - state, - deposit_data_leaves, - pubkey, - privkey, - amount, - ) - - state.latest_eth1_data.deposit_root = root - state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - return deposit diff --git a/test_libs/pyspec/eth2spec/test/helpers/__init__.py b/test_libs/pyspec/eth2spec/test/helpers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py new file mode 100644 index 0000000000..e9b863463c --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -0,0 +1,146 @@ +from typing import List + +# Access constants from spec pkg reference. +import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import ( + Attestation, + AttestationData, + AttestationDataAndCustodyBit, + get_epoch_start_slot, get_block_root, get_current_epoch, get_previous_epoch, slot_to_epoch, + get_crosslink_committee, get_domain, IndexedAttestation, get_attesting_indices, BeaconState, get_block_root_at_slot, + get_epoch_start_shard, get_epoch_committee_count) +from eth2spec.phase0.state_transition import ( + state_transition, state_transition_to +) +from eth2spec.test.helpers.bitfields import set_bitfield_bit +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures +from eth2spec.utils.minimal_ssz import hash_tree_root + + +def build_attestation_data(state, slot, shard): + assert state.slot >= slot + + if slot == state.slot: + block_root = build_empty_block_for_next_slot(state).previous_block_root + else: + block_root = get_block_root_at_slot(state, slot) + + current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + if slot < current_epoch_start_slot: + epoch_boundary_root = get_block_root(state, get_previous_epoch(state)) + elif slot == current_epoch_start_slot: + epoch_boundary_root = block_root + else: + epoch_boundary_root = get_block_root(state, get_current_epoch(state)) + + if slot < current_epoch_start_slot: + justified_epoch = state.previous_justified_epoch + justified_block_root = state.previous_justified_root + else: + justified_epoch = state.current_justified_epoch + justified_block_root = state.current_justified_root + + crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch( + state) else state.previous_crosslinks + return AttestationData( + shard=shard, + beacon_block_root=block_root, + source_epoch=justified_epoch, + source_root=justified_block_root, + target_epoch=slot_to_epoch(slot), + target_root=epoch_boundary_root, + crosslink_data_root=spec.ZERO_HASH, + previous_crosslink_root=hash_tree_root(crosslinks[shard]), + ) + + +def get_valid_attestation(state, slot=None, signed=False): + if slot is None: + slot = state.slot + + epoch = slot_to_epoch(slot) + epoch_start_shard = get_epoch_start_shard(state, epoch) + committees_per_slot = get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH + shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT + + attestation_data = build_attestation_data(state, slot, shard) + + 
crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard) + + committee_size = len(crosslink_committee) + bitfield_length = (committee_size + 7) // 8 + aggregation_bitfield = b'\x00' * bitfield_length + custody_bitfield = b'\x00' * bitfield_length + attestation = Attestation( + aggregation_bitfield=aggregation_bitfield, + data=attestation_data, + custody_bitfield=custody_bitfield, + ) + fill_aggregate_attestation(state, attestation) + if signed: + sign_attestation(state, attestation) + return attestation + + +def sign_aggregate_attestation(state: BeaconState, data: AttestationData, participants: List[int]): + signatures = [] + for validator_index in participants: + privkey = privkeys[validator_index] + signatures.append( + get_attestation_signature( + state, + data, + privkey + ) + ) + + return bls_aggregate_signatures(signatures) + + +def sign_indexed_attestation(state, indexed_attestation: IndexedAttestation): + participants = indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices + indexed_attestation.signature = sign_aggregate_attestation(state, indexed_attestation.data, participants) + + +def sign_attestation(state, attestation: Attestation): + participants = get_attesting_indices( + state, + attestation.data, + attestation.aggregation_bitfield, + ) + + attestation.signature = sign_aggregate_attestation(state, attestation.data, participants) + + +def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): + message_hash = AttestationDataAndCustodyBit( + data=attestation_data, + custody_bit=custody_bit, + ).hash_tree_root() + + return bls_sign( + message_hash=message_hash, + privkey=privkey, + domain=get_domain( + state=state, + domain_type=spec.DOMAIN_ATTESTATION, + message_epoch=attestation_data.target_epoch, + ) + ) + + +def fill_aggregate_attestation(state, attestation): + crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard) + for i in range(len(crosslink_committee)): + attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) + + +def add_attestation_to_state(state, attestation, slot): + block = build_empty_block_for_next_slot(state, signed=False) + block.slot = slot + block.body.attestations.append(attestation) + state_transition_to(state, block.slot) + sign_block(state, block) + state_transition(state, block) diff --git a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py new file mode 100644 index 0000000000..d19b41dfec --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py @@ -0,0 +1,19 @@ +from copy import deepcopy + +from eth2spec.phase0.spec import AttesterSlashing, convert_to_indexed +from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation + + +def get_valid_attester_slashing(state, signed_1=False, signed_2=False): + attestation_1 = get_valid_attestation(state, signed=signed_1) + + attestation_2 = deepcopy(attestation_1) + attestation_2.data.target_root = b'\x01' * 32 + + if signed_2: + sign_attestation(state, attestation_2) + + return AttesterSlashing( + attestation_1=convert_to_indexed(state, attestation_1), + attestation_2=convert_to_indexed(state, attestation_2), + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py new file mode 100644 index 0000000000..7c25d073ab --- /dev/null +++ 
b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py @@ -0,0 +1,11 @@ +def set_bitfield_bit(bitfield, i): + """ + Set the bit in ``bitfield`` at position ``i`` to ``1``. + """ + byte_index = i // 8 + bit_index = i % 8 + return ( + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index + 1:] + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py new file mode 100644 index 0000000000..81c5e9ef5b --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/block.py @@ -0,0 +1,77 @@ +from copy import deepcopy + +from eth2spec.phase0 import spec +from eth2spec.phase0.spec import get_beacon_proposer_index, slot_to_epoch, get_domain, BeaconBlock +from eth2spec.phase0.state_transition import state_transition, state_transition_to +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import bls_sign, only_with_bls +from eth2spec.utils.minimal_ssz import signing_root, hash_tree_root + + +# Fully ignore the function if BLS is off, beacon-proposer index calculation is slow. +@only_with_bls() +def sign_block(state, block, proposer_index=None): + assert state.slot <= block.slot + + if proposer_index is None: + if block.slot == state.slot: + proposer_index = get_beacon_proposer_index(state) + else: + if slot_to_epoch(state.slot) + 1 > slot_to_epoch(block.slot): + print("warning: block slot far away, and no proposer index manually given." + " Signing block is slow due to transition for proposer index calculation.") + # use stub state to get proposer index of future slot + stub_state = deepcopy(state) + state_transition_to(stub_state, block.slot) + proposer_index = get_beacon_proposer_index(stub_state) + + privkey = privkeys[proposer_index] + + block.body.randao_reveal = bls_sign( + privkey=privkey, + message_hash=hash_tree_root(slot_to_epoch(block.slot)), + domain=get_domain( + state, + message_epoch=slot_to_epoch(block.slot), + domain_type=spec.DOMAIN_RANDAO, + ) + ) + block.signature = bls_sign( + message_hash=signing_root(block), + privkey=privkey, + domain=get_domain( + state, + spec.DOMAIN_BEACON_PROPOSER, + slot_to_epoch(block.slot))) + + +def apply_empty_block(state): + """ + Transition via an empty block (on current slot, assuming no block has been applied yet). + :return: the empty block that triggered the transition. + """ + block = build_empty_block(state, signed=True) + state_transition(state, block) + return block + + +def build_empty_block(state, slot=None, signed=False): + if slot is None: + slot = state.slot + empty_block = BeaconBlock() + empty_block.slot = slot + empty_block.body.eth1_data.deposit_count = state.deposit_index + previous_block_header = deepcopy(state.latest_block_header) + if previous_block_header.state_root == spec.ZERO_HASH: + previous_block_header.state_root = state.hash_tree_root() + empty_block.previous_block_root = signing_root(previous_block_header) + + if signed: + sign_block(state, empty_block) + + return empty_block + + +def build_empty_block_for_next_slot(state, signed=False): + return build_empty_block(state, state.slot + 1, signed=signed) + diff --git a/test_libs/pyspec/eth2spec/test/helpers/block_header.py b/test_libs/pyspec/eth2spec/test/helpers/block_header.py new file mode 100644 index 0000000000..9aba62d37d --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/block_header.py @@ -0,0 +1,18 @@ +# Access constants from spec pkg reference. 
+import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import get_domain +from eth2spec.utils.bls import bls_sign +from eth2spec.utils.minimal_ssz import signing_root + + +def sign_block_header(state, header, privkey): + domain = get_domain( + state=state, + domain_type=spec.DOMAIN_BEACON_PROPOSER, + ) + header.signature = bls_sign( + message_hash=signing_root(header), + privkey=privkey, + domain=domain, + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py new file mode 100644 index 0000000000..c5deb124e6 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py @@ -0,0 +1,81 @@ +# Access constants from spec pkg reference. +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import get_domain, DepositData, verify_merkle_branch, Deposit, ZERO_HASH +from eth2spec.test.helpers.keys import pubkeys, privkeys +from eth2spec.utils.bls import bls_sign +from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof +from eth2spec.utils.minimal_ssz import signing_root + + +def build_deposit_data(state, pubkey, privkey, amount, signed=False): + deposit_data = DepositData( + pubkey=pubkey, + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:], + amount=amount, + ) + if signed: + sign_deposit_data(state, deposit_data, privkey) + return deposit_data + + +def sign_deposit_data(state, deposit_data, privkey): + signature = bls_sign( + message_hash=signing_root(deposit_data), + privkey=privkey, + domain=get_domain( + state, + spec.DOMAIN_DEPOSIT, + ) + ) + deposit_data.signature = signature + + +def build_deposit(state, + deposit_data_leaves, + pubkey, + privkey, + amount, + signed): + deposit_data = build_deposit_data(state, pubkey, privkey, amount, signed) + + item = deposit_data.hash_tree_root() + index = len(deposit_data_leaves) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + root = get_merkle_root((tuple(deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=index)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + + deposit = Deposit( + proof=list(proof), + index=index, + data=deposit_data, + ) + + return deposit, root, deposit_data_leaves + + +def prepare_state_and_deposit(state, validator_index, amount, signed=False): + """ + Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount. + """ + pre_validator_count = len(state.validator_registry) + # fill previous deposits with zero-hash + deposit_data_leaves = [ZERO_HASH] * pre_validator_count + + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + deposit, root, deposit_data_leaves = build_deposit( + state, + deposit_data_leaves, + pubkey, + privkey, + amount, + signed + ) + + state.latest_eth1_data.deposit_root = root + state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + return deposit diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py new file mode 100644 index 0000000000..01011cacd0 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -0,0 +1,51 @@ +# Access constants from spec pkg reference. 
+import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import Eth1Data, ZERO_HASH, get_active_validator_indices +from eth2spec.test.helpers.keys import pubkeys +from eth2spec.utils.minimal_ssz import hash_tree_root + + +def build_mock_validator(i: int, balance: int): + pubkey = pubkeys[i] + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:] + return spec.Validator( + pubkey=pubkeys[i], + withdrawal_credentials=withdrawal_credentials, + activation_eligibility_epoch=spec.FAR_FUTURE_EPOCH, + activation_epoch=spec.FAR_FUTURE_EPOCH, + exit_epoch=spec.FAR_FUTURE_EPOCH, + withdrawable_epoch=spec.FAR_FUTURE_EPOCH, + effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE) + ) + + +def create_genesis_state(num_validators): + deposit_root = b'\x42' * 32 + + state = spec.BeaconState( + genesis_time=0, + deposit_index=num_validators, + latest_eth1_data=Eth1Data( + deposit_root=deposit_root, + deposit_count=num_validators, + block_hash=ZERO_HASH, + )) + + # We "hack" in the initial validators, + # as it is much faster than creating and processing genesis deposits for every single test case. + state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators + state.validator_registry = [build_mock_validator(i, state.balances[i]) for i in range(num_validators)] + + # Process genesis activations + for validator in state.validator_registry: + if validator.effective_balance >= spec.MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = spec.GENESIS_EPOCH + validator.activation_epoch = spec.GENESIS_EPOCH + + genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, spec.GENESIS_EPOCH)) + for index in range(spec.LATEST_ACTIVE_INDEX_ROOTS_LENGTH): + state.latest_active_index_roots[index] = genesis_active_index_root + + return state diff --git a/test_libs/pyspec/eth2spec/test/helpers/keys.py b/test_libs/pyspec/eth2spec/test/helpers/keys.py new file mode 100644 index 0000000000..f47cd7c10b --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/keys.py @@ -0,0 +1,6 @@ +from py_ecc import bls +from eth2spec.phase0 import spec + +privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 16)] +pubkeys = [bls.privtopub(privkey) for privkey in privkeys] +pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py new file mode 100644 index 0000000000..dfb8895dc2 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -0,0 +1,35 @@ +from copy import deepcopy + +from eth2spec.phase0.spec import ( + get_current_epoch, get_active_validator_indices, BeaconBlockHeader, ProposerSlashing +) +from eth2spec.test.helpers.block_header import sign_block_header +from eth2spec.test.helpers.keys import pubkey_to_privkey + + +def get_valid_proposer_slashing(state, signed_1=False, signed_2=False): + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state, current_epoch)[-1] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + slot = state.slot + + header_1 = BeaconBlockHeader( + slot=slot, + previous_block_root=b'\x33' * 32, + state_root=b'\x44' * 32, + block_body_root=b'\x55' * 32, + ) + header_2 = deepcopy(header_1) + header_2.previous_block_root = b'\x99' * 32 + header_2.slot = slot + 1 + + if signed_1: + 
sign_block_header(state, header_1, privkey) + if signed_2: + sign_block_header(state, header_2, privkey) + + return ProposerSlashing( + proposer_index=validator_index, + header_1=header_1, + header_2=header_2, + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py new file mode 100644 index 0000000000..e720a9709f --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -0,0 +1,31 @@ +# Access constants from spec pkg reference. +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.state_transition import state_transition_to + + +def get_balance(state, index): + return state.balances[index] + + +def next_slot(state): + """ + Transition to the next slot. + """ + state_transition_to(state, state.slot + 1) + + +def next_epoch(state): + """ + Transition to the start slot of the next epoch + """ + slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + state_transition_to(state, slot) + + +def get_state_root(state, slot) -> bytes: + """ + Return the state root at a recent ``slot``. + """ + assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT + return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT] diff --git a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py new file mode 100644 index 0000000000..2045f48ad6 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/transfers.py @@ -0,0 +1,55 @@ +# Access constants from spec pkg reference. +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import get_current_epoch, get_active_validator_indices, Transfer, get_domain +from eth2spec.test.helpers.keys import pubkeys, privkeys +from eth2spec.test.helpers.state import get_balance +from eth2spec.utils.bls import bls_sign +from eth2spec.utils.minimal_ssz import signing_root + + +def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None, signed=False): + if slot is None: + slot = state.slot + current_epoch = get_current_epoch(state) + if sender_index is None: + sender_index = get_active_validator_indices(state, current_epoch)[-1] + recipient_index = get_active_validator_indices(state, current_epoch)[0] + transfer_pubkey = pubkeys[-1] + transfer_privkey = privkeys[-1] + + if fee is None: + fee = get_balance(state, sender_index) // 32 + if amount is None: + amount = get_balance(state, sender_index) - fee + + transfer = Transfer( + sender=sender_index, + recipient=recipient_index, + amount=amount, + fee=fee, + slot=slot, + pubkey=transfer_pubkey, + ) + if signed: + sign_transfer(state, transfer, transfer_privkey) + + # ensure withdrawal_credentials reproducible + state.validator_registry[transfer.sender].withdrawal_credentials = ( + spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] + ) + + return transfer + + +def sign_transfer(state, transfer, privkey): + transfer.signature = bls_sign( + message_hash=signing_root(transfer), + privkey=privkey, + domain=get_domain( + state=state, + domain_type=spec.DOMAIN_TRANSFER, + message_epoch=get_current_epoch(state), + ) + ) + return transfer diff --git a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py new file mode 100644 index 0000000000..54376d694b --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py @@ -0,0 +1,28 @@ +# Access constants from spec pkg reference. 
+import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import VoluntaryExit, get_domain +from eth2spec.utils.bls import bls_sign +from eth2spec.utils.minimal_ssz import signing_root + + +def build_voluntary_exit(state, epoch, validator_index, privkey, signed=False): + voluntary_exit = VoluntaryExit( + epoch=epoch, + validator_index=validator_index, + ) + if signed: + sign_voluntary_exit(state, voluntary_exit, privkey) + return voluntary_exit + + +def sign_voluntary_exit(state, voluntary_exit, privkey): + voluntary_exit.signature = bls_sign( + message_hash=signing_root(voluntary_exit), + privkey=privkey, + domain=get_domain( + state=state, + domain_type=spec.DOMAIN_VOLUNTARY_EXIT, + message_epoch=voluntary_exit.epoch, + ) + ) diff --git a/test_libs/pyspec/eth2spec/test/test_finality.py b/test_libs/pyspec/eth2spec/test/test_finality.py index 16bf24a4e6..56f65eca9a 100644 --- a/test_libs/pyspec/eth2spec/test/test_finality.py +++ b/test_libs/pyspec/eth2spec/test/test_finality.py @@ -1,21 +1,18 @@ from copy import deepcopy import eth2spec.phase0.spec as spec - from eth2spec.phase0.state_transition import ( state_transition, ) -from .helpers import ( - build_empty_block_for_next_slot, - fill_aggregate_attestation, +from .context import spec_state_test, never_bls +from .helpers.state import next_epoch +from .helpers.block import build_empty_block_for_next_slot, apply_empty_block +from .helpers.attestations import ( get_current_epoch, get_epoch_start_slot, get_valid_attestation, - next_epoch, ) -from .context import spec_state_test - def check_finality(state, prev_state, @@ -55,13 +52,11 @@ def next_epoch_with_attestations(state, slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)): cur_attestation = get_valid_attestation(post_state, slot_to_attest) - fill_aggregate_attestation(post_state, cur_attestation) block.body.attestations.append(cur_attestation) if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 prev_attestation = get_valid_attestation(post_state, slot_to_attest) - fill_aggregate_attestation(post_state, prev_attestation) block.body.attestations.append(prev_attestation) state_transition(post_state, block) @@ -70,6 +65,7 @@ def next_epoch_with_attestations(state, return state, blocks, post_state +@never_bls @spec_state_test def test_finality_rule_4(state): yield 'pre', state @@ -97,11 +93,14 @@ def test_finality_rule_4(state): yield 'post', state +@never_bls @spec_state_test def test_finality_rule_1(state): # get past first two epochs that finality does not run on next_epoch(state) + apply_empty_block(state) next_epoch(state) + apply_empty_block(state) yield 'pre', state @@ -124,11 +123,14 @@ def test_finality_rule_1(state): yield 'post', state +@never_bls @spec_state_test def test_finality_rule_2(state): # get past first two epochs that finality does not run on next_epoch(state) + apply_empty_block(state) next_epoch(state) + apply_empty_block(state) yield 'pre', state @@ -153,6 +155,7 @@ def test_finality_rule_2(state): yield 'post', state +@never_bls @spec_state_test def test_finality_rule_3(state): """ @@ -161,7 +164,9 @@ def test_finality_rule_3(state): """ # get past first two epochs that finality does not run on next_epoch(state) + apply_empty_block(state) next_epoch(state) + apply_empty_block(state) yield 'pre', state diff --git a/test_libs/pyspec/eth2spec/test/test_sanity.py b/test_libs/pyspec/eth2spec/test/test_sanity.py index 1951415acb..6d65cc7f4b 
100644 --- a/test_libs/pyspec/eth2spec/test/test_sanity.py +++ b/test_libs/pyspec/eth2spec/test/test_sanity.py @@ -1,7 +1,7 @@ from copy import deepcopy -from py_ecc import bls import eth2spec.phase0.spec as spec +from eth2spec.utils.bls import bls_sign from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( @@ -19,20 +19,22 @@ from eth2spec.phase0.state_transition import ( state_transition, ) -from .helpers import ( +from .helpers.state import ( get_balance, - build_empty_block_for_next_slot, - get_state_root, - get_valid_attestation, - get_valid_attester_slashing, - get_valid_proposer_slashing, - get_valid_transfer, - prepare_state_and_deposit, + get_state_root +) +from .helpers.transfers import get_valid_transfer +from .helpers.block import build_empty_block_for_next_slot, sign_block +from .helpers.keys import ( privkeys, pubkeys, ) +from .helpers.attester_slashings import get_valid_attester_slashing +from .helpers.proposer_slashings import get_valid_proposer_slashing +from .helpers.attestations import get_valid_attestation +from .helpers.deposits import prepare_state_and_deposit -from .context import spec_state_test +from .context import spec_state_test, never_bls @spec_state_test @@ -49,6 +51,7 @@ def test_slot_transition(state): assert get_state_root(state, pre_slot) == pre_root +@never_bls @spec_state_test def test_empty_block_transition(state): pre_slot = state.slot @@ -56,7 +59,7 @@ def test_empty_block_transition(state): yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=True) yield 'blocks', [block], [spec.BeaconBlock] state_transition(state, block) @@ -66,13 +69,15 @@ def test_empty_block_transition(state): assert get_block_root_at_slot(state, pre_slot) == block.previous_block_root +@never_bls @spec_state_test def test_skipped_slots(state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.slot += 3 + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] state_transition(state, block) @@ -88,8 +93,9 @@ def test_empty_epoch_transition(state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.slot += spec.SLOTS_PER_EPOCH + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] state_transition(state, block) @@ -100,30 +106,31 @@ def test_empty_epoch_transition(state): assert get_block_root_at_slot(state, slot) == block.previous_block_root -@spec_state_test -def test_empty_epoch_transition_not_finalizing(state): - # copy for later balance lookups. - pre_state = deepcopy(state) - yield 'pre', state - - block = build_empty_block_for_next_slot(state) - block.slot += spec.SLOTS_PER_EPOCH * 5 - yield 'blocks', [block], [spec.BeaconBlock] - - state_transition(state, block) - yield 'post', state - - assert state.slot == block.slot - assert state.finalized_epoch < get_current_epoch(state) - 4 - for index in range(len(state.validator_registry)): - assert get_balance(state, index) < get_balance(pre_state, index) +# @spec_state_test +# def test_empty_epoch_transition_not_finalizing(state): +# # copy for later balance lookups. 
+# pre_state = deepcopy(state) +# yield 'pre', state +# +# block = build_empty_block_for_next_slot(state, signed=False) +# block.slot += spec.SLOTS_PER_EPOCH * 5 +# sign_block(state, block, proposer_index=0) +# yield 'blocks', [block], [spec.BeaconBlock] +# +# state_transition(state, block) +# yield 'post', state +# +# assert state.slot == block.slot +# assert state.finalized_epoch < get_current_epoch(state) - 4 +# for index in range(len(state.validator_registry)): +# assert get_balance(state, index) < get_balance(pre_state, index) @spec_state_test def test_proposer_slashing(state): # copy for later balance lookups. pre_state = deepcopy(state) - proposer_slashing = get_valid_proposer_slashing(state) + proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) validator_index = proposer_slashing.proposer_index assert not state.validator_registry[validator_index].slashed @@ -133,8 +140,9 @@ def test_proposer_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.body.proposer_slashings.append(proposer_slashing) + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] state_transition(state, block) @@ -154,8 +162,9 @@ def test_attester_slashing(state): # copy for later balance lookups. pre_state = deepcopy(state) - attester_slashing = get_valid_attester_slashing(state) - validator_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) + validator_index = (attester_slashing.attestation_1.custody_bit_0_indices + + attester_slashing.attestation_1.custody_bit_1_indices)[0] assert not state.validator_registry[validator_index].slashed @@ -164,8 +173,9 @@ def test_attester_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.body.attester_slashings.append(attester_slashing) + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] state_transition(state, block) @@ -195,12 +205,13 @@ def test_deposit_in_block(state): validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.body.deposits.append(deposit) + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] @@ -225,8 +236,9 @@ def test_deposit_top_up(state): yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.body.deposits.append(deposit) + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] @@ -244,23 +256,24 @@ def test_attestation(state): yield 'pre', state - attestation = get_valid_attestation(state) + attestation = get_valid_attestation(state, signed=True) # Add to state via block transition pre_current_attestations_len = len(state.current_epoch_attestations) - attestation_block = build_empty_block_for_next_slot(state) + attestation_block = build_empty_block_for_next_slot(state, signed=False) attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation_block.body.attestations.append(attestation) + sign_block(state, 
attestation_block) state_transition(state, attestation_block) assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 - # Epoch transition should move to previous_epoch_attestations pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations) - epoch_block = build_empty_block_for_next_slot(state) + epoch_block = build_empty_block_for_next_slot(state, signed=False) epoch_block.slot += spec.SLOTS_PER_EPOCH + sign_block(state, epoch_block) state_transition(state, epoch_block) yield 'blocks', [attestation_block, epoch_block], [spec.BeaconBlock] @@ -286,7 +299,7 @@ def test_voluntary_exit(state): epoch=get_current_epoch(state), validator_index=validator_index, ) - voluntary_exit.signature = bls.sign( + voluntary_exit.signature = bls_sign( message_hash=signing_root(voluntary_exit), privkey=privkeys[validator_index], domain=get_domain( @@ -296,15 +309,17 @@ def test_voluntary_exit(state): ) # Add to state via block transition - initiate_exit_block = build_empty_block_for_next_slot(state) + initiate_exit_block = build_empty_block_for_next_slot(state, signed=False) initiate_exit_block.body.voluntary_exits.append(voluntary_exit) + sign_block(state, initiate_exit_block) state_transition(state, initiate_exit_block) assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # Process within epoch transition - exit_block = build_empty_block_for_next_slot(state) + exit_block = build_empty_block_for_next_slot(state, signed=False) exit_block.slot += spec.SLOTS_PER_EPOCH + sign_block(state, exit_block) state_transition(state, exit_block) yield 'blocks', [initiate_exit_block, exit_block], [spec.BeaconBlock] @@ -321,7 +336,7 @@ def test_transfer(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = get_balance(state, sender_index) - transfer = get_valid_transfer(state, state.slot + 1, sender_index, amount) + transfer = get_valid_transfer(state, state.slot + 1, sender_index, amount, signed=True) recipient_index = transfer.recipient pre_transfer_recipient_balance = get_balance(state, recipient_index) @@ -331,8 +346,9 @@ def test_transfer(state): yield 'pre', state # Add to state via block transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.body.transfers.append(transfer) + sign_block(state, block) yield 'blocks', [block], [spec.BeaconBlock] @@ -358,8 +374,9 @@ def test_balance_driven_status_transitions(state): yield 'pre', state # trigger epoch transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=False) block.slot += spec.SLOTS_PER_EPOCH + sign_block(state, block) state_transition(state, block) yield 'blocks', [block], [spec.BeaconBlock] @@ -375,7 +392,7 @@ def test_historical_batch(state): yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(state, signed=True) state_transition(state, block) yield 'blocks', [block], [spec.BeaconBlock] @@ -386,28 +403,28 @@ def test_historical_batch(state): assert len(state.historical_roots) == pre_historical_roots_len + 1 -@spec_state_test -def test_eth1_data_votes(state): - yield 'pre', state - - expected_votes = 0 - assert len(state.eth1_data_votes) == expected_votes - - blocks = [] - for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1): - block = build_empty_block_for_next_slot(state) - state_transition(state, block) - expected_votes += 1 - assert 
len(state.eth1_data_votes) == expected_votes - blocks.append(block) - - block = build_empty_block_for_next_slot(state) - blocks.append(block) - - state_transition(state, block) - - yield 'blocks', [block], [spec.BeaconBlock] - yield 'post', state - - assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 - assert len(state.eth1_data_votes) == 1 +# @spec_state_test +# def test_eth1_data_votes(state): +# yield 'pre', state +# +# expected_votes = 0 +# assert len(state.eth1_data_votes) == expected_votes +# +# blocks = [] +# for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1): +# block = build_empty_block_for_next_slot(state, signed=False) +# state_transition(state, block) +# expected_votes += 1 +# assert len(state.eth1_data_votes) == expected_votes +# blocks.append(block) +# +# block = build_empty_block_for_next_slot(state, signed=False) +# blocks.append(block) +# +# state_transition(state, block) +# +# yield 'blocks', [block], [spec.BeaconBlock] +# yield 'post', state +# +# assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 +# assert len(state.eth1_data_votes) == 1 diff --git a/test_libs/pyspec/eth2spec/test/utils.py b/test_libs/pyspec/eth2spec/test/utils.py index b19d4df595..c1d4241099 100644 --- a/test_libs/pyspec/eth2spec/test/utils.py +++ b/test_libs/pyspec/eth2spec/test/utils.py @@ -1,3 +1,4 @@ +from typing import Dict, Any, Callable, Iterable from eth2spec.debug.encode import encode @@ -40,10 +41,34 @@ def entry(*args, **kw): return runner -def with_args(create_args): +def with_tags(tags: Dict[str, Any]): + """ + Decorator factory, adds tags (key, value) pairs to the output of the function. + Useful to build test-vector annotations with. + This decorator is applied after the ``spectest`` decorator is applied. + :param tags: dict of tags + :return: Decorator. + """ + def runner(fn): + def entry(*args, **kw): + fn_out = fn(*args, **kw) + # do not add tags if the function is not returning a dict at all (i.e. not in generator mode) + if fn_out is None: + return fn_out + return {**tags, **fn_out} + return entry + return runner + + +def with_args(create_args: Callable[[], Iterable[Any]]): + """ + Decorator factory, adds given extra arguments to the decorated function. + :param create_args: function to create arguments with. + :return: Decorator. + """ def runner(fn): # this wraps the function, to hide that the function actually yielding data. def entry(*args, **kw): - return fn(*(create_args() + list(args)), **kw) + return fn(*(list(create_args()) + list(args)), **kw) return entry return runner diff --git a/test_libs/pyspec/eth2spec/utils/bls.py b/test_libs/pyspec/eth2spec/utils/bls.py new file mode 100644 index 0000000000..52f1fed632 --- /dev/null +++ b/test_libs/pyspec/eth2spec/utils/bls.py @@ -0,0 +1,46 @@ +from py_ecc import bls + +# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing. +bls_active = True + +STUB_SIGNATURE = b'\x11' * 96 +STUB_PUBKEY = b'\x22' * 48 + + +def only_with_bls(alt_return=None): + """ + Decorator factory to make a function only run when BLS is active. Otherwise return the default. 
+ """ + def runner(fn): + def entry(*args, **kw): + if bls_active: + return fn(*args, **kw) + else: + return alt_return + return entry + return runner + + +@only_with_bls(alt_return=True) +def bls_verify(pubkey, message_hash, signature, domain): + return bls.verify(message_hash=message_hash, pubkey=pubkey, signature=signature, domain=domain) + + +@only_with_bls(alt_return=True) +def bls_verify_multiple(pubkeys, message_hashes, signature, domain): + return bls.verify_multiple(pubkeys, message_hashes, signature, domain) + + +@only_with_bls(alt_return=STUB_PUBKEY) +def bls_aggregate_pubkeys(pubkeys): + return bls.aggregate_pubkeys(pubkeys) + + +@only_with_bls(alt_return=STUB_SIGNATURE) +def bls_aggregate_signatures(signatures): + return bls.aggregate_signatures(signatures) + + +@only_with_bls(alt_return=STUB_SIGNATURE) +def bls_sign(message_hash, privkey, domain): + return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain) diff --git a/test_libs/pyspec/eth2spec/utils/bls_stub.py b/test_libs/pyspec/eth2spec/utils/bls_stub.py deleted file mode 100644 index 108c4ef710..0000000000 --- a/test_libs/pyspec/eth2spec/utils/bls_stub.py +++ /dev/null @@ -1,12 +0,0 @@ - - -def bls_verify(pubkey, message_hash, signature, domain): - return True - - -def bls_verify_multiple(pubkeys, message_hashes, signature, domain): - return True - - -def bls_aggregate_pubkeys(pubkeys): - return b'\x42' * 96
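The last hunks of this diff add eth2spec/utils/bls.py, whose only_with_bls is a decorator factory: the wrapped BLS call runs only while the module-level bls_active flag is true, and otherwise a stub value is returned so signature-free test runs stay fast. Below is a minimal standalone sketch of that flag-gated pattern; every name in it (feature_active, STUB_RESULT, expensive_sign) is a placeholder, not spec code.

# Minimal sketch of a flag-gated decorator factory, in the spirit of only_with_bls.
# All names here are illustrative placeholders.
feature_active = True
STUB_RESULT = b'\x11' * 96


def only_when_active(alt_return=None):
    """Decorator factory: run the wrapped function only when the flag is on."""
    def decorator(fn):
        def wrapper(*args, **kwargs):
            if feature_active:
                return fn(*args, **kwargs)
            return alt_return  # skip the expensive call, hand back the stub
        return wrapper
    return decorator


@only_when_active(alt_return=STUB_RESULT)
def expensive_sign(message: bytes) -> bytes:
    # Stand-in for a real BLS signature computation.
    return b'\x22' * 96


# With feature_active = True the real body runs; set it to False and the
# decorated call returns STUB_RESULT without executing the body at all.
assert expensive_sign(b'msg') == b'\x22' * 96

Because the flag is read inside the wrapper at call time, flipping it at runtime immediately changes whether the real body executes, which is what lets the test suite toggle BLS on and off globally.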
pex-tool__pex-630
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.5.3'\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.0'\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index dc4fc3874..27fe3afbe 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,15 @@ Release Notes ============= +1.6.0 +----- + +* Fix pex force local to handle PEP 420. (#613) + `PR #613 <https://github.com/pantsbuild/pex/pull/613>`_ + +* Vendor ``setuptools`` and ``wheel``. (#624) + `PR #624 <https://github.com/pantsbuild/pex/pull/624>`_ + 1.5.3 ----- diff --git a/pex/version.py b/pex/version.py index 832d0f00b..bd843a23e 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '1.5.3' +__version__ = '1.6.0'
pex-tool__pex-916
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.5'\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.6'\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 80c591fe1..a9984d053 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,14 +4,23 @@ Release Notes 2.1.5 ----- +* Don't delete the root __init__.py when devendoring. (#915) + `PR #915 <https://github.com/pantsbuild/pex/pull/915>`_ + +* Remove unused Interpreter.clear_cache. (#911) + `PR #911 <https://github.com/pantsbuild/pex/pull/911>`_ + +2.1.5 +----- + * Silence pip warnings about Python 2.7. (#908) - `PR #908 <https://github.com/pantsbuild/pexpull/908>`_ + `PR #908 <https://github.com/pantsbuild/pex/pull/908>`_ * Kill `Pip.spawn_install_wheel` `overwrite` arg. (#907) - `PR #907 <https://github.com/pantsbuild/pexpull/907>`_ + `PR #907 <https://github.com/pantsbuild/pex/pull/907>`_ * Show pex-root from env as default in help output (#901) - `PR #901 <https://github.com/pantsbuild/pexpull/901>`_ + `PR #901 <https://github.com/pantsbuild/pex/pull/901>`_ 2.1.4 ----- diff --git a/pex/version.py b/pex/version.py index 3e0c53016..87300b586 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.5' +__version__ = '2.1.6'
google-research__text-to-text-transfer-transformer-480
[ { "content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Separate file for storing the current version of T5.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n__version__ = '0.7.0'\n", "path": "t5/version.py" } ]
[ { "content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Separate file for storing the current version of T5.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n__version__ = '0.7.1'\n", "path": "t5/version.py" } ]
diff --git a/t5/version.py b/t5/version.py index 5e0f7011..37363532 100644 --- a/t5/version.py +++ b/t5/version.py @@ -18,4 +18,4 @@ Stored in a separate file so that setup.py can reference the version without pulling in all the dependencies in __init__.py. """ -__version__ = '0.7.0' +__version__ = '0.7.1'
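The version.py docstring in this record explains that the version string lives in its own module so setup.py can reference it without importing the package and all of its dependencies. One common way to do that is to execute the version file directly; the sketch below is illustrative only and is not taken from the actual t5 setup.py.

# Illustrative sketch: read __version__ from a standalone version module
# without importing the package (and thus without pulling in its dependencies).
# The path "t5/version.py" follows the file shown above; the rest is assumption.
import os

version_globals = {}
version_path = os.path.join(os.path.dirname(__file__), "t5", "version.py")
with open(version_path) as fh:
    exec(fh.read(), version_globals)

__version__ = version_globals["__version__"]  # e.g. '0.7.1' after this bump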
comic__grand-challenge.org-3379
[ { "content": "from django.conf import settings\nfrom django.urls import include, path\nfrom django.views.generic import TemplateView\n\nfrom grandchallenge.challenges.views import ChallengeUpdate\n\nurlpatterns = [\n path(\n \"robots.txt\",\n TemplateView.as_view(\n template_name=\"robots.txt\", content_type=\"text/plain\"\n ),\n name=\"subdomain_robots_txt\",\n ),\n path(\n \"evaluation/\",\n include(\"grandchallenge.evaluation.urls\", namespace=\"evaluation\"),\n ),\n path(\"teams/\", include(\"grandchallenge.teams.urls\", namespace=\"teams\")),\n path(\n \"participants/\",\n include(\"grandchallenge.participants.urls\", namespace=\"participants\"),\n ),\n path(\"admins/\", include(\"grandchallenge.admins.urls\", namespace=\"admins\")),\n path(\"update/\", ChallengeUpdate.as_view(), name=\"challenge-update\"),\n path(\"summernote/\", include(\"django_summernote.urls\")),\n path(\"\", include(\"grandchallenge.pages.urls\", namespace=\"pages\")),\n]\n\nif settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:\n import debug_toolbar\n\n urlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls))\n ] + urlpatterns\n", "path": "app/config/urls/challenge_subdomain.py" } ]
[ { "content": "from django.conf import settings\nfrom django.urls import include, path\nfrom django.views.generic import TemplateView\n\nfrom grandchallenge.challenges.views import ChallengeUpdate\n\nhandler500 = \"grandchallenge.core.views.handler500\"\n\n\nurlpatterns = [\n path(\n \"robots.txt\",\n TemplateView.as_view(\n template_name=\"robots.txt\", content_type=\"text/plain\"\n ),\n name=\"subdomain_robots_txt\",\n ),\n path(\n \"evaluation/\",\n include(\"grandchallenge.evaluation.urls\", namespace=\"evaluation\"),\n ),\n path(\"teams/\", include(\"grandchallenge.teams.urls\", namespace=\"teams\")),\n path(\n \"participants/\",\n include(\"grandchallenge.participants.urls\", namespace=\"participants\"),\n ),\n path(\"admins/\", include(\"grandchallenge.admins.urls\", namespace=\"admins\")),\n path(\"update/\", ChallengeUpdate.as_view(), name=\"challenge-update\"),\n path(\"summernote/\", include(\"django_summernote.urls\")),\n path(\"\", include(\"grandchallenge.pages.urls\", namespace=\"pages\")),\n]\n\nif settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:\n import debug_toolbar\n\n urlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls))\n ] + urlpatterns\n", "path": "app/config/urls/challenge_subdomain.py" } ]
diff --git a/app/config/urls/challenge_subdomain.py b/app/config/urls/challenge_subdomain.py index 54939b55a9..1d4d14e63a 100644 --- a/app/config/urls/challenge_subdomain.py +++ b/app/config/urls/challenge_subdomain.py @@ -4,6 +4,9 @@ from grandchallenge.challenges.views import ChallengeUpdate +handler500 = "grandchallenge.core.views.handler500" + + urlpatterns = [ path( "robots.txt",
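This record's diff registers a custom server-error handler by assigning the dotted path "grandchallenge.core.views.handler500" to a module-level handler500 in the challenge-subdomain URLconf; Django resolves that string to a view whenever an unhandled exception produces a 500 on that URLconf. A minimal sketch of what such a view could look like follows; the template name is a placeholder and this is not the project's actual implementation.

# Hypothetical sketch of a handler500 view. Django calls handler500 with only
# the request (no exception argument) and expects a response with status 500.
from django.shortcuts import render


def handler500(request, *args, **kwargs):
    # Render a project-specific error template; "500.html" is a placeholder name.
    return render(request, "500.html", status=500)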
encode__httpx-1199
[ { "content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n Β· ConnectTimeout\n Β· ReadTimeout\n Β· WriteTimeout\n Β· PoolTimeout\n - NetworkError\n Β· ConnectError\n Β· ReadError\n Β· WriteError\n Β· CloseError\n - ProtocolError\n Β· LocalProtocolError\n Β· RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* InvalidURL\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n ```\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that occur at the level of the Transport API.\n\n All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while establishing a proxy connection.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to 
include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass InvalidURL(Exception):\n \"\"\"\n URL is improperly formed or cannot be parsed.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\n@contextlib.contextmanager\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: 
typing.Any,\n) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from None # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py" } ]
[ { "content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n Β· ConnectTimeout\n Β· ReadTimeout\n Β· WriteTimeout\n Β· PoolTimeout\n - NetworkError\n Β· ConnectError\n Β· ReadError\n Β· WriteError\n Β· CloseError\n - ProtocolError\n Β· LocalProtocolError\n Β· RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* InvalidURL\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n ```\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that occur at the level of the Transport API.\n\n All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while establishing a proxy connection.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to 
include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass InvalidURL(Exception):\n \"\"\"\n URL is improperly formed or cannot be parsed.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\n@contextlib.contextmanager\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: 
typing.Any,\n) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from exc # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py" } ]
diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py index 9a46d7d24a..4d6837778a 100644 --- a/httpx/_exceptions.py +++ b/httpx/_exceptions.py @@ -356,7 +356,7 @@ def map_exceptions( raise message = str(exc) - raise mapped_exc(message, **kwargs) from None # type: ignore + raise mapped_exc(message, **kwargs) from exc # type: ignore HTTPCORE_EXC_MAP = {
astronomer__astro-sdk-1401
[ { "content": "from airflow.configuration import conf\nfrom airflow.decorators.base import get_unique_task_id\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.sql.operators.append import AppendOperator, append\nfrom astro.sql.operators.cleanup import CleanupOperator, cleanup\nfrom astro.sql.operators.dataframe import DataframeOperator, dataframe\nfrom astro.sql.operators.drop import DropTableOperator, drop_table\nfrom astro.sql.operators.export_file import ExportFileOperator, export_file\nfrom astro.sql.operators.load_file import LoadFileOperator, load_file\nfrom astro.sql.operators.merge import MergeOperator, merge\nfrom astro.sql.operators.raw_sql import RawSQLOperator, run_raw_sql\nfrom astro.sql.operators.transform import TransformOperator, transform, transform_file\nfrom astro.table import Metadata, Table\n\n__all__ = [\n \"AppendOperator\",\n \"append\",\n \"CleanupOperator\",\n \"cleanup\",\n \"DataframeOperator\",\n \"dataframe\",\n \"DropTableOperator\",\n \"drop_table\",\n \"ExportFileOperator\",\n \"export_file\",\n \"LoadFileOperator\",\n \"load_file\",\n \"MergeOperator\",\n \"merge\",\n \"Metadata\",\n \"run_raw_sql\",\n \"Table\",\n \"TransformOperator\",\n \"transform_file\",\n \"transform\",\n]\n\n\ndef get_value_list(sql: str, conn_id: str, **kwargs) -> XComArg:\n \"\"\"\n Execute a sql statement and return the result.\n By default, the response size is less than equal to value of ``max_map_length`` conf.\n You can call a callable handler to alter the response by default it call ``fetchall`` on database result set.\n\n\n :param sql: sql query to execute.\n If the sql query will return huge number of row then it can overload the XCOM.\n also, If you are using output of this method to expand a task using dynamic task map then\n it can create lots of parallel task. So it is advisable to limit your sql query statement.\n :param conn_id: Airflow connection id. This connection id will be used to identify the database client\n and connect with it at runtime\n \"\"\"\n handler = kwargs.get(\"handler\") or (lambda result_set: result_set.fetchall())\n max_map_length = int(conf.get(section=\"core\", key=\"max_map_length\"))\n op_kwargs = {\n \"handler\": handler,\n \"response_limit\": max_map_length,\n }\n task_id = kwargs.get(\"task_id\") or get_unique_task_id(\n \"get_value_list\", dag=kwargs.get(\"dag\"), task_group=kwargs.get(\"task_group\")\n )\n kwargs.update({\"task_id\": task_id})\n return RawSQLOperator(\n sql=sql, conn_id=conn_id, op_kwargs=op_kwargs, python_callable=(lambda *args: None), **kwargs\n ).output\n", "path": "python-sdk/src/astro/sql/__init__.py" } ]
[ { "content": "from airflow.configuration import conf\nfrom airflow.decorators.base import get_unique_task_id\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.sql.operators.append import AppendOperator, append\nfrom astro.sql.operators.cleanup import CleanupOperator, cleanup\nfrom astro.sql.operators.dataframe import DataframeOperator, dataframe\nfrom astro.sql.operators.drop import DropTableOperator, drop_table\nfrom astro.sql.operators.export_file import ExportFileOperator, export_file\nfrom astro.sql.operators.load_file import LoadFileOperator, load_file\nfrom astro.sql.operators.merge import MergeOperator, merge\nfrom astro.sql.operators.raw_sql import RawSQLOperator, run_raw_sql\nfrom astro.sql.operators.transform import TransformOperator, transform, transform_file\nfrom astro.table import Metadata, Table\n\n__all__ = [\n \"AppendOperator\",\n \"append\",\n \"CleanupOperator\",\n \"cleanup\",\n \"DataframeOperator\",\n \"dataframe\",\n \"DropTableOperator\",\n \"drop_table\",\n \"ExportFileOperator\",\n \"export_file\",\n \"get_value_list\",\n \"LoadFileOperator\",\n \"load_file\",\n \"MergeOperator\",\n \"merge\",\n \"Metadata\",\n \"run_raw_sql\",\n \"Table\",\n \"TransformOperator\",\n \"transform_file\",\n \"transform\",\n]\n\n\ndef get_value_list(sql: str, conn_id: str, **kwargs) -> XComArg:\n \"\"\"\n Execute a sql statement and return the result.\n By default, the response size is less than equal to value of ``max_map_length`` conf.\n You can call a callable handler to alter the response by default it call ``fetchall`` on database result set.\n\n\n :param sql: sql query to execute.\n If the sql query will return huge number of row then it can overload the XCOM.\n also, If you are using output of this method to expand a task using dynamic task map then\n it can create lots of parallel task. So it is advisable to limit your sql query statement.\n :param conn_id: Airflow connection id. This connection id will be used to identify the database client\n and connect with it at runtime\n \"\"\"\n handler = kwargs.get(\"handler\") or (lambda result_set: result_set.fetchall())\n max_map_length = int(conf.get(section=\"core\", key=\"max_map_length\"))\n op_kwargs = {\n \"handler\": handler,\n \"response_limit\": max_map_length,\n }\n task_id = kwargs.get(\"task_id\") or get_unique_task_id(\n \"get_value_list\", dag=kwargs.get(\"dag\"), task_group=kwargs.get(\"task_group\")\n )\n kwargs.update({\"task_id\": task_id})\n return RawSQLOperator(\n sql=sql, conn_id=conn_id, op_kwargs=op_kwargs, python_callable=(lambda *args: None), **kwargs\n ).output\n", "path": "python-sdk/src/astro/sql/__init__.py" } ]
diff --git a/python-sdk/docs/astro/files/operators/get_file_list.rst b/python-sdk/docs/astro/files/operators/get_file_list.rst index 540e9dd6de..c210ca283d 100644 --- a/python-sdk/docs/astro/files/operators/get_file_list.rst +++ b/python-sdk/docs/astro/files/operators/get_file_list.rst @@ -1,8 +1,8 @@ .. _get_file_list: -============= -get_file_list -============= +==================================================== +:py:func:`get_file_list <astro.files.get_file_list>` +==================================================== When to use the ``get_file_list`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/append.rst b/python-sdk/docs/astro/sql/operators/append.rst index ca26662103..360ac29352 100644 --- a/python-sdk/docs/astro/sql/operators/append.rst +++ b/python-sdk/docs/astro/sql/operators/append.rst @@ -1,8 +1,8 @@ .. _append_operator: -================ -append operator -================ +====================================================== +:py:mod:`append operator <astro.sql.operators.append>` +====================================================== When to use the ``append`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``append`` operator allows you to append data from a source table to a target table. diff --git a/python-sdk/docs/astro/sql/operators/cleanup.rst b/python-sdk/docs/astro/sql/operators/cleanup.rst index ebe4dfc714..3c4556dd24 100644 --- a/python-sdk/docs/astro/sql/operators/cleanup.rst +++ b/python-sdk/docs/astro/sql/operators/cleanup.rst @@ -1,6 +1,6 @@ -====================================== -cleanup operator -====================================== +======================================================== +:py:mod:`cleanup operator <astro.sql.operators.cleanup>` +======================================================== .. _cleanup_operator: diff --git a/python-sdk/docs/astro/sql/operators/dataframe.rst b/python-sdk/docs/astro/sql/operators/dataframe.rst index 9d532ec7a0..ad4fda0f93 100644 --- a/python-sdk/docs/astro/sql/operators/dataframe.rst +++ b/python-sdk/docs/astro/sql/operators/dataframe.rst @@ -1,8 +1,8 @@ .. dataframe_operator: -====================================== -dataframe operator -====================================== +============================================================ +:py:mod:`dataframe operator <astro.sql.operators.dataframe>` +============================================================ When to use the ``dataframe`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/drop_table.rst b/python-sdk/docs/astro/sql/operators/drop_table.rst index 00347172aa..74606cfc56 100644 --- a/python-sdk/docs/astro/sql/operators/drop_table.rst +++ b/python-sdk/docs/astro/sql/operators/drop_table.rst @@ -1,8 +1,8 @@ .. _drop_table_operator: -====================================== -drop table operator -====================================== +======================================================== +:py:mod:`drop table operator <astro.sql.operators.drop>` +======================================================== When to use the ``drop_table`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/export.rst b/python-sdk/docs/astro/sql/operators/export.rst index d7e863f8ca..734f93f324 100644 --- a/python-sdk/docs/astro/sql/operators/export.rst +++ b/python-sdk/docs/astro/sql/operators/export.rst @@ -1,8 +1,8 @@ .. 
_export_file: -==================== -export_file operator -==================== +================================================================ +:py:mod:`export_file operator <astro.sql.operators.export_file>` +================================================================ .. _export_file_operator: diff --git a/python-sdk/docs/astro/sql/operators/get_value_list.rst b/python-sdk/docs/astro/sql/operators/get_value_list.rst index b17731af26..d1770e7311 100644 --- a/python-sdk/docs/astro/sql/operators/get_value_list.rst +++ b/python-sdk/docs/astro/sql/operators/get_value_list.rst @@ -1,8 +1,8 @@ .. _get_value_list: -============== -get_value_list -============== +==================================================== +:py:func:`get_value_list <astro.sql.get_value_list>` +==================================================== When to use the ``get_value_list`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/load_file.rst b/python-sdk/docs/astro/sql/operators/load_file.rst index 6cdc6b6d21..d06584907f 100644 --- a/python-sdk/docs/astro/sql/operators/load_file.rst +++ b/python-sdk/docs/astro/sql/operators/load_file.rst @@ -1,8 +1,8 @@ .. _load_file: -================== -load_file operator -================== +============================================================ +:py:mod:`load_file operator <astro.sql.operators.load_file>` +============================================================ When to use the ``load_file`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/merge.rst b/python-sdk/docs/astro/sql/operators/merge.rst index b184469fed..08f3f96d89 100644 --- a/python-sdk/docs/astro/sql/operators/merge.rst +++ b/python-sdk/docs/astro/sql/operators/merge.rst @@ -1,8 +1,8 @@ .. _merge_operator: -=============== -merge operator -=============== +==================================================== +:py:mod:`merge operator <astro.sql.operators.merge>` +==================================================== When to use the ``merge`` operator ---------------------------------- diff --git a/python-sdk/docs/astro/sql/operators/raw_sql.rst b/python-sdk/docs/astro/sql/operators/raw_sql.rst index e7ad1974c5..170318e2bb 100644 --- a/python-sdk/docs/astro/sql/operators/raw_sql.rst +++ b/python-sdk/docs/astro/sql/operators/raw_sql.rst @@ -1,8 +1,8 @@ .. _run_raw_sql: -==================== -run_raw_sql operator -==================== +============================================================ +:py:mod:`run_raw_sql operator <astro.sql.operators.raw_sql>` +============================================================ When to use the ``run_raw_sql`` operator ----------------------------------------- diff --git a/python-sdk/docs/astro/sql/operators/transform.rst b/python-sdk/docs/astro/sql/operators/transform.rst index 32aca41053..b1e9b7851c 100644 --- a/python-sdk/docs/astro/sql/operators/transform.rst +++ b/python-sdk/docs/astro/sql/operators/transform.rst @@ -1,8 +1,8 @@ .. 
_transform_operator: -================== -transform operator -================== +============================================================ +:py:mod:`transform operator <astro.sql.operators.transform>` +============================================================ When to use the ``transform`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/docs/astro/sql/operators/transform_file.rst b/python-sdk/docs/astro/sql/operators/transform_file.rst index e3f7a947cc..127d2dc32a 100644 --- a/python-sdk/docs/astro/sql/operators/transform_file.rst +++ b/python-sdk/docs/astro/sql/operators/transform_file.rst @@ -1,8 +1,8 @@ .. _transform_file_operator: -======================= -transform_file operator -======================= +================================================================================= +:py:func:`transform_file operator <astro.sql.operators.transform.transform_file>` +================================================================================= When to use the ``transform_file`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python-sdk/src/astro/sql/__init__.py b/python-sdk/src/astro/sql/__init__.py index c8b12fc510..ffa1daef35 100644 --- a/python-sdk/src/astro/sql/__init__.py +++ b/python-sdk/src/astro/sql/__init__.py @@ -24,6 +24,7 @@ "drop_table", "ExportFileOperator", "export_file", + "get_value_list", "LoadFileOperator", "load_file", "MergeOperator",
pex-tool__pex-991
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.11'\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.12'\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 0e4721e85..33f1e7aa8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,20 @@ Release Notes ============= +2.1.12 +------ + +A patch release to deploy the PEX_EXTRA_SYS_PATH feature. + +* A PEX_EXTRA_SYS_PATH runtime variable. (#989) + `PR #989 <https://github.com/pantsbuild/pex/pull/989>`_ + +* Fix typos (#986) + `PR #986 <https://github.com/pantsbuild/pex/pull/986>`_ + +* Update link to avoid a redirect (#982) + `PR #982 <https://github.com/pantsbuild/pex/pull/982>`_ + 2.1.11 ------ diff --git a/pex/version.py b/pex/version.py index 92e3f8b02..b9a600b4b 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.11' +__version__ = '2.1.12'
TOMToolkit__tom_base-196
[ { "content": "from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='tomtoolkit',\n version='1.1.0',\n description='The TOM Toolkit and base modules',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://tomtoolkit.github.io',\n author='TOM Toolkit Project',\n author_email='ariba@lco.global',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],\n packages=find_packages(),\n install_requires=[\n 'django',\n 'django-bootstrap4',\n 'django-extensions',\n 'django-filter',\n 'django-contrib-comments',\n 'django-gravatar2',\n 'django-crispy-forms',\n 'django-guardian',\n 'numpy',\n 'python-dateutil',\n 'requests',\n 'astroquery',\n 'astropy',\n 'astroplan',\n 'plotly',\n 'matplotlib',\n 'pillow',\n 'fits2image',\n 'specutils',\n ],\n extras_require={\n 'test': ['factory_boy']\n },\n include_package_data=True,\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='tomtoolkit',\n version='1.1.0',\n description='The TOM Toolkit and base modules',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://tomtoolkit.github.io',\n author='TOM Toolkit Project',\n author_email='ariba@lco.global',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],\n packages=find_packages(),\n install_requires=[\n 'django',\n 'django-bootstrap4',\n 'django-extensions',\n 'django-filter',\n 'django-contrib-comments',\n 'django-gravatar2',\n 'django-crispy-forms',\n 'django-guardian',\n 'numpy',\n 'python-dateutil',\n 'requests',\n 'astroquery',\n 'astropy',\n 'astroplan',\n 'plotly',\n 'matplotlib',\n 'pillow',\n 'fits2image',\n 'specutils',\n \"dataclasses; python_version < '3.7'\",\n ],\n extras_require={\n 'test': ['factory_boy']\n },\n include_package_data=True,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 5d79293e3..8f9909b0c 100644 --- a/setup.py +++ b/setup.py @@ -46,6 +46,7 @@ 'pillow', 'fits2image', 'specutils', + "dataclasses; python_version < '3.7'", ], extras_require={ 'test': ['factory_boy']
pex-tool__pex-1590
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.63\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.64\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 0c01ae34d..0256851ef 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,14 @@ Release Notes ============= +2.1.64 +------ + +This release brings support for mac universal2 wheels. + +* Update vendored Pip to 386a54f0. (#1589) + `PR #1589 <https://github.com/pantsbuild/pex/pull/1589>`_ + 2.1.63 ------ diff --git a/pex/version.py b/pex/version.py index 7fb3f0947..6c3aca67d 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.63" +__version__ = "2.1.64"
pex-tool__pex-1692
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.73\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.74\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index b2b079c4a..9ac3f714a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,19 @@ Release Notes ============= +2.1.74 +------ + +This release fixes multiplatform ``--lock`` resolves for sdists that are +built to multiple platform specific wheels and it also introduces +support for VCS requirements in locks. + +* Add support for locking VCS requirements. (#1687) + `PR #1684 <https://github.com/pantsbuild/pex/pull/1687>`_ + +* Fix ``--lock`` for multiplatform via sdists. (#1689) + `PR #1684 <https://github.com/pantsbuild/pex/pull/1689>`_ + 2.1.73 ------ diff --git a/pex/version.py b/pex/version.py index a1e0ffe02..4b0fb2bc9 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.73" +__version__ = "2.1.74"
liqd__a4-meinberlin-382
[ { "content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\n\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.modules import models as module_models\n\nfrom . import validators\n\n\nclass Poll(module_models.Item):\n comments = GenericRelation(comment_models.Comment,\n related_query_name='poll',\n object_id_field='object_pk')\n\n\nclass Question(models.Model):\n label = models.CharField(max_length=255)\n weight = models.SmallIntegerField()\n\n poll = models.ForeignKey(\n 'Poll',\n on_delete=models.CASCADE,\n related_name='questions'\n )\n\n def user_choices_list(self, user):\n if not user.is_authenticated():\n return []\n\n return self.choices\\\n .filter(votes__creator=user)\\\n .values_list('id', flat=True)\n\n def __str__(self):\n return self.label\n\n class Meta:\n ordering = ['weight']\n\n\nclass ChoiceQuerySet(models.QuerySet):\n\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'votes'\n )\n )\n\n\nclass Choice(models.Model):\n label = models.CharField(max_length=255)\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='choices',\n )\n\n objects = ChoiceQuerySet.as_manager()\n\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n\n\nclass Vote(UserGeneratedContentModel):\n choice = models.ForeignKey(\n 'Choice',\n on_delete=models.CASCADE,\n related_name='votes'\n )\n\n def validate_unique(self, exclude=None):\n super(Vote, self).validate_unique(exclude)\n validators.single_vote_per_user(self.creator,\n self.choice.question,\n self.pk)\n\n # Make Vote instances behave like items for rule checking\n @property\n def module(self):\n self.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.choice)\n", "path": "apps/polls/models.py" } ]
[ { "content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\n\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.modules import models as module_models\n\nfrom . import validators\n\n\nclass Poll(module_models.Item):\n comments = GenericRelation(comment_models.Comment,\n related_query_name='poll',\n object_id_field='object_pk')\n\n\nclass Question(models.Model):\n label = models.CharField(max_length=255)\n weight = models.SmallIntegerField()\n\n poll = models.ForeignKey(\n 'Poll',\n on_delete=models.CASCADE,\n related_name='questions'\n )\n\n def user_choices_list(self, user):\n if not user.is_authenticated():\n return []\n\n return self.choices\\\n .filter(votes__creator=user)\\\n .values_list('id', flat=True)\n\n def __str__(self):\n return self.label\n\n class Meta:\n ordering = ['weight']\n\n\nclass ChoiceQuerySet(models.QuerySet):\n\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'votes'\n )\n )\n\n\nclass Choice(models.Model):\n label = models.CharField(max_length=255)\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='choices',\n )\n\n objects = ChoiceQuerySet.as_manager()\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n\n\nclass Vote(UserGeneratedContentModel):\n choice = models.ForeignKey(\n 'Choice',\n on_delete=models.CASCADE,\n related_name='votes'\n )\n\n def validate_unique(self, exclude=None):\n super(Vote, self).validate_unique(exclude)\n validators.single_vote_per_user(self.creator,\n self.choice.question,\n self.pk)\n\n # Make Vote instances behave like items for rule checking\n @property\n def module(self):\n self.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.choice)\n", "path": "apps/polls/models.py" } ]
diff --git a/apps/polls/models.py b/apps/polls/models.py index 49dc2bd7a2..a2dcda5069 100644 --- a/apps/polls/models.py +++ b/apps/polls/models.py @@ -60,6 +60,9 @@ class Choice(models.Model): objects = ChoiceQuerySet.as_manager() + class Meta: + ordering = ['id'] + def __str__(self): return '%s @%s' % (self.label, self.question)
pex-tool__pex-1112
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.20\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.21\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 33aae1286..c7101cdd1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,27 @@ Release Notes ============= +2.1.21 +------ + +* Fix ``iter_compatible_interpreters`` with ``path``. (#1110) + `PR #1110 <https://github.com/pantsbuild/pex/pull/1110>`_ + +* Fix ``Requires-Python`` environment marker mapping. (#1105) + `PR #1105 <https://github.com/pantsbuild/pex/pull/1105>`_ + +* Fix spurious ``InstalledDistribution`` env markers. (#1104) + `PR #1104 <https://github.com/pantsbuild/pex/pull/1104>`_ + +* Deprecate ``-R``/``--resources-directory``. (#1103) + `PR #1103 <https://github.com/pantsbuild/pex/pull/1103>`_ + +* Fix ResourceWarning for unclosed ``/dev/null``. (#1102) + `PR #1102 <https://github.com/pantsbuild/pex/pull/1102>`_ + +* Fix runtime vendoring bytecode compilation races. (#1099) + `PR #1099 <https://github.com/pantsbuild/pex/pull/1099>`_ + 2.1.20 ------ diff --git a/pex/version.py b/pex/version.py index dcc67e836..8b44b5e28 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.20" +__version__ = "2.1.21"
pex-tool__pex-1720
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.78\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 1dbea305d..66a74f5dd 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,19 @@ Release Notes ============= +2.1.79 +------ + +This release fixes ``--lock`` resolving for certain cases where extras +are involved as well as introducing support for generating and consuming +portable ``--find-links`` locks using ``-path-mapping``. + +* Fix ``--lock`` resolver extras handling. (#1719) + `PR #1719 <https://github.com/pantsbuild/pex/pull/1719>`_ + +* Support canonicalizing absolute paths in locks. (#1716) + `PR #1712 <https://github.com/pantsbuild/pex/pull/1716>`_ + 2.1.78 ------ diff --git a/pex/version.py b/pex/version.py index ee199b36a..2c3681cd8 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.78" +__version__ = "2.1.79"
pex-tool__pex-1516
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.54\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.55\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 72f6b28d5..2f660f69b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,23 @@ Release Notes ============= +2.1.55 +------ + +This release brings official support for Python 3.10 as well as fixing +https://pex.readthedocs.io doc generation and fixing help for +``pex-tools`` / ``PEX_TOOLS=1 ./my.pex`` pex tools invocations that have +too few arguments. + +* Add official support for Python 3.10 (#1512) + `PR #1512 <https://github.com/pantsbuild/pex/pull/1512>`_ + +* Always register global options. (#1511) + `PR #1511 <https://github.com/pantsbuild/pex/pull/1511>`_ + +* Fix RTD generation by pinning docutils low. (#1509) + `PR #1509 <https://github.com/pantsbuild/pex/pull/1509>`_ + 2.1.54 ------ diff --git a/pex/version.py b/pex/version.py index 44b59c3f8..419b591f0 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.54" +__version__ = "2.1.55"
pex-tool__pex-1725
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.80\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 66a74f5dd..d242dfe78 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,20 @@ Release Notes ============= +2.1.80 +------ + +This release brings another fix for pathologically slow cases of lock +creation as well as a new ``--sh-boot`` feature for creating PEXes that +boot via ``/bin/sh`` for more resilience across systems with differing +Python installations as well as offering lower boot latency. + +* Support booting via `/bin/sh` with `--sh-boot`. (#1721) + `PR #1721 <https://github.com/pantsbuild/pex/pull/1721>`_ + +* Fix more pathologic lock creation slowness. (#1723) + `PR #1723 <https://github.com/pantsbuild/pex/pull/1723>`_ + 2.1.79 ------ diff --git a/pex/version.py b/pex/version.py index 2c3681cd8..5bc5fe77c 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.79" +__version__ = "2.1.80"
pex-tool__pex-1442
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.47\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.48\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 831b563b8..52558ddeb 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,22 @@ Release Notes ============= +2.1.48 +------ + +This releases introduces the ``--layout`` flag for selecting amongst the +traditional zipapp layout as a single PEX zip file and two new directory +tree based formats that may be useful for more sophisticated deployment +sceanrios. + +The ``--unzip`` / ``PEX_UNZIP`` toggles for PEX runtime execution are +now the default and deprecated as explicit options as a result. You can +still select the venv runtime execution mode via the +``--venv`` / ``PEX_VENV`` toggles though. + +* Remove zipapp execution mode & introduce ``--layout``. (#1438) + `PR #1438 <https://github.com/pantsbuild/pex/pull/1438>`_ + 2.1.47 ------ diff --git a/pex/version.py b/pex/version.py index e403f941c..c923ea6ba 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.47" +__version__ = "2.1.48"
searxng__searxng-2862
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bilibili is a Chinese video sharing website.\n\n.. _Bilibili: https://www.bilibili.com\n\"\"\"\n\nimport random\nimport string\nfrom urllib.parse import urlencode\nfrom datetime import datetime, timedelta\n\n# Engine metadata\nabout = {\n \"website\": \"https://www.bilibili.com\",\n \"wikidata_id\": \"Q3077586\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = [\"videos\"]\n\n# Search URL\nbase_url = \"https://api.bilibili.com/x/web-interface/wbi/search/type\"\n\ncookie = {\n \"innersign\": \"0\",\n \"buvid3\": \"\".join(random.choice(string.hexdigits) for _ in range(16)) + \"infoc\",\n \"i-wanna-go-back\": \"-1\",\n \"b_ut\": \"7\",\n \"FEED_LIVE_VERSION\": \"V8\",\n \"header_theme_version\": \"undefined\",\n \"home_feed_column\": \"4\",\n}\n\n\ndef request(query, params):\n query_params = {\n \"__refresh__\": \"true\",\n \"page\": params[\"pageno\"],\n \"page_size\": results_per_page,\n \"single_column\": \"0\",\n \"keyword\": query,\n \"search_type\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n params[\"cookies\"] = cookie\n\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n minutes, seconds = map(int, duration.split(\":\"))\n total_seconds = minutes * 60 + seconds\n\n formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else \"\"\n\n return formatted_duration\n\n\ndef response(resp):\n search_res = resp.json()\n\n results = []\n\n for item in search_res.get(\"data\", {}).get(\"result\", []):\n title = item[\"title\"]\n url = item[\"arcurl\"]\n thumbnail = item[\"pic\"]\n description = item[\"description\"]\n author = item[\"author\"]\n video_id = item[\"aid\"]\n unix_date = item[\"pubdate\"]\n\n formatted_date = datetime.utcfromtimestamp(unix_date)\n formatted_duration = format_duration(item[\"duration\"])\n iframe_url = f\"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0\"\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": author,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/bilibili.py" } ]
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bilibili is a Chinese video sharing website.\n\n.. _Bilibili: https://www.bilibili.com\n\"\"\"\n\nimport random\nimport string\nfrom urllib.parse import urlencode\nfrom datetime import datetime, timedelta\n\n# Engine metadata\nabout = {\n \"website\": \"https://www.bilibili.com\",\n \"wikidata_id\": \"Q3077586\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = [\"videos\"]\n\n# Search URL\nbase_url = \"https://api.bilibili.com/x/web-interface/search/type\"\n\ncookie = {\n \"innersign\": \"0\",\n \"buvid3\": \"\".join(random.choice(string.hexdigits) for _ in range(16)) + \"infoc\",\n \"i-wanna-go-back\": \"-1\",\n \"b_ut\": \"7\",\n \"FEED_LIVE_VERSION\": \"V8\",\n \"header_theme_version\": \"undefined\",\n \"home_feed_column\": \"4\",\n}\n\n\ndef request(query, params):\n query_params = {\n \"__refresh__\": \"true\",\n \"page\": params[\"pageno\"],\n \"page_size\": results_per_page,\n \"single_column\": \"0\",\n \"keyword\": query,\n \"search_type\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n params[\"cookies\"] = cookie\n\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n minutes, seconds = map(int, duration.split(\":\"))\n total_seconds = minutes * 60 + seconds\n\n formatted_duration = str(timedelta(seconds=total_seconds))[2:] if 0 <= total_seconds < 3600 else \"\"\n\n return formatted_duration\n\n\ndef response(resp):\n search_res = resp.json()\n\n results = []\n\n for item in search_res.get(\"data\", {}).get(\"result\", []):\n title = item[\"title\"]\n url = item[\"arcurl\"]\n thumbnail = item[\"pic\"]\n description = item[\"description\"]\n author = item[\"author\"]\n video_id = item[\"aid\"]\n unix_date = item[\"pubdate\"]\n\n formatted_date = datetime.utcfromtimestamp(unix_date)\n formatted_duration = format_duration(item[\"duration\"])\n iframe_url = f\"https://player.bilibili.com/player.html?aid={video_id}&high_quality=1&autoplay=false&danmaku=0\"\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": author,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/bilibili.py" } ]
diff --git a/searx/engines/bilibili.py b/searx/engines/bilibili.py index 1eb8b9b8476..642b33e1aee 100644 --- a/searx/engines/bilibili.py +++ b/searx/engines/bilibili.py @@ -26,7 +26,7 @@ categories = ["videos"] # Search URL -base_url = "https://api.bilibili.com/x/web-interface/wbi/search/type" +base_url = "https://api.bilibili.com/x/web-interface/search/type" cookie = { "innersign": "0",
DDMAL__CantusDB-1077
[ { "content": "from django.contrib import admin\nfrom main_app.models import *\nfrom main_app.forms import (\n AdminCenturyForm,\n AdminChantForm,\n AdminFeastForm,\n AdminGenreForm,\n AdminNotationForm,\n AdminOfficeForm,\n AdminProvenanceForm,\n AdminRismSiglumForm,\n AdminSegmentForm,\n AdminSequenceForm,\n AdminSourceForm,\n)\n\n# these fields should not be editable by all classes\nEXCLUDE = (\n \"created_by\",\n \"last_updated_by\",\n \"json_info\",\n)\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminCenturyForm\n\n\nclass ChantAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n list_display = (\n \"incipit\",\n \"get_source_siglum\",\n \"genre\",\n )\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n\n readonly_fields = (\n \"date_created\",\n \"date_updated\",\n )\n\n list_filter = (\n \"genre\",\n \"office\",\n )\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n \"date\",\n \"volpiano_notes\",\n \"volpiano_intervals\",\n )\n form = AdminChantForm\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n\n\nclass FeastAdmin(BaseModelAdmin):\n search_fields = (\n \"name\",\n \"feast_code\",\n )\n list_display = (\n \"name\",\n \"month\",\n \"day\",\n \"feast_code\",\n )\n form = AdminFeastForm\n\n\nclass GenreAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminGenreForm\n\n\nclass NotationAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminNotationForm\n\n\nclass OfficeAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminOfficeForm\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminProvenanceForm\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminRismSiglumForm\n\n\nclass SegmentAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminSegmentForm\n\n\nclass SequenceAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n exclude = EXCLUDE + (\n \"c_sequence\",\n \"next_chant\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n )\n list_display = (\"incipit\", \"get_source_siglum\", \"genre\")\n list_filter = (\n \"genre\",\n \"office\",\n )\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n form = AdminSequenceForm\n\n\nclass SourceAdmin(BaseModelAdmin):\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n \"title\",\n \"id\",\n )\n readonly_fields = (\n \"number_of_chants\",\n \"number_of_melodies\",\n \"date_created\",\n \"date_updated\",\n )\n # from the Django docs:\n # Adding a ManyToManyField to this list will 
instead use a nifty unobtrusive JavaScript “filter” interface\n    # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n    filter_horizontal = (\n        \"century\",\n        \"notation\",\n        \"current_editors\",\n        \"inventoried_by\",\n        \"full_text_entered_by\",\n        \"melodies_entered_by\",\n        \"proofreaders\",\n        \"other_editors\",\n    )\n\n    list_display = (\n        \"title\",\n        \"siglum\",\n        \"id\",\n    )\n\n    list_filter = (\n        \"full_source\",\n        \"segment\",\n        \"source_status\",\n        \"published\",\n        \"century\",\n    )\n\n    ordering = (\"siglum\",)\n\n    form = AdminSourceForm\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py" } ]
[ { "content": "from django.contrib import admin\nfrom main_app.models import *\nfrom main_app.forms import (\n AdminCenturyForm,\n AdminChantForm,\n AdminFeastForm,\n AdminGenreForm,\n AdminNotationForm,\n AdminOfficeForm,\n AdminProvenanceForm,\n AdminRismSiglumForm,\n AdminSegmentForm,\n AdminSequenceForm,\n AdminSourceForm,\n)\n\n# these fields should not be editable by all classes\nEXCLUDE = (\n \"created_by\",\n \"last_updated_by\",\n \"json_info\",\n)\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminCenturyForm\n\n\nclass ChantAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n list_display = (\n \"incipit\",\n \"get_source_siglum\",\n \"genre\",\n )\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n\n readonly_fields = (\n \"date_created\",\n \"date_updated\",\n )\n\n list_filter = (\n \"genre\",\n \"office\",\n )\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n \"date\",\n \"volpiano_notes\",\n \"volpiano_intervals\",\n \"title\",\n )\n form = AdminChantForm\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n\n\nclass FeastAdmin(BaseModelAdmin):\n search_fields = (\n \"name\",\n \"feast_code\",\n )\n list_display = (\n \"name\",\n \"month\",\n \"day\",\n \"feast_code\",\n )\n form = AdminFeastForm\n\n\nclass GenreAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminGenreForm\n\n\nclass NotationAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminNotationForm\n\n\nclass OfficeAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminOfficeForm\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminProvenanceForm\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminRismSiglumForm\n\n\nclass SegmentAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminSegmentForm\n\n\nclass SequenceAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n exclude = EXCLUDE + (\n \"c_sequence\",\n \"next_chant\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n )\n list_display = (\"incipit\", \"get_source_siglum\", \"genre\")\n list_filter = (\n \"genre\",\n \"office\",\n )\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n form = AdminSequenceForm\n\n\nclass SourceAdmin(BaseModelAdmin):\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n \"title\",\n \"id\",\n )\n readonly_fields = (\n \"number_of_chants\",\n \"number_of_melodies\",\n \"date_created\",\n \"date_updated\",\n )\n # from the Django docs:\n # Adding a ManyToManyField to this 
list will instead use a nifty unobtrusive JavaScript “filter” interface\n    # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n    filter_horizontal = (\n        \"century\",\n        \"notation\",\n        \"current_editors\",\n        \"inventoried_by\",\n        \"full_text_entered_by\",\n        \"melodies_entered_by\",\n        \"proofreaders\",\n        \"other_editors\",\n    )\n\n    list_display = (\n        \"title\",\n        \"siglum\",\n        \"id\",\n    )\n\n    list_filter = (\n        \"full_source\",\n        \"segment\",\n        \"source_status\",\n        \"published\",\n        \"century\",\n    )\n\n    ordering = (\"siglum\",)\n\n    form = AdminSourceForm\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py" } ]
diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py index 1885bbffd..efd9e7569 100644 --- a/django/cantusdb_project/main_app/admin.py +++ b/django/cantusdb_project/main_app/admin.py @@ -78,6 +78,7 @@ def get_source_siglum(self, obj): "date", "volpiano_notes", "volpiano_intervals", + "title", ) form = AdminChantForm raw_id_fields = (
pex-tool__pex-1896
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.103\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.104\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 3e6214a89..729710479 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,42 @@ Release Notes ============= +2.1.104 +------- + +This release brings a long-awaited upgrade of the Pip Pex uses, but +behind a ``--pip-version 22.2.2`` flag you must opt in to. Pex will then +use that version of Pip if it can (your Pex operations target Python +``>=3.7``) and warn and fall back to the older vendored Pip (20.3.4) if +it can't. To turn the need to fallback to older Pip from a warning into +a hard error you can also specify ``--no-allow-pip-version-fallback``. + +The ``pex3 lock update`` command now gains the ability to update just +the index and find links repos the lock's artifacts originate from by +using a combination of ``--no-pypi``, ``--index`` & ``--find-links`` +along with ``--pin`` to ensure the project versions stay pinned as they +are in the lockfile and just the repos they are downloaded from is +altered. Consult the CLI ``--help`` for +``--fingerprint-mismatch {ignore,warn,error}`` to gain more control over +repo migration behavior. + +There are several bug fixes as well dealing with somewhat esoteric +corner cases involving changing a PEX ``--layout`` from one form to +another and building artifacts using certain interpreters on macOS 11.0 +(aka: 10.16). + +* Add support for Pip 22.2.2. (#1893) + `PR #1893 <https://github.com/pantsbuild/pex/pull/1893>`_ + +* Make lock update sensitive to artifacts. (#1887) + `PR #1887 <https://github.com/pantsbuild/pex/pull/1887>`_ + +* Ensure locally built wheel is consumable locally. (#1886) + `PR #1886 <https://github.com/pantsbuild/pex/pull/1886>`_ + +* Ensure ``--output`` always overwrites destination. (#1883) + `PR #1883 <https://github.com/pantsbuild/pex/pull/1883>`_ + 2.1.103 ------- diff --git a/pex/version.py b/pex/version.py index 86980240f..e59c01099 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.103" +__version__ = "2.1.104"
pex-tool__pex-1932
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.107\"\n", "path": "pex/version.py" } ]
[ { "content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.108\"\n", "path": "pex/version.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index be93a0e4d..f1a94fcc9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,15 @@ Release Notes ============= +2.1.108 +------- + +This release fixes a latent PEX boot performance bug triggered by +requirements with large extras sets. + +* Fix slow PEX boot time when there are many extras. (#1929) + `PR #1929 <https://github.com/pantsbuild/pex/pull/1929>`_ + 2.1.107 ------- diff --git a/pex/version.py b/pex/version.py index 648e9a986..c0dfd4790 100644 --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.107" +__version__ = "2.1.108"
wemake-services__wemake-python-styleguide-2619
[ { "content": "import ast\nfrom collections import defaultdict\nfrom typing import (\n Callable,\n ClassVar,\n DefaultDict,\n FrozenSet,\n List,\n Tuple,\n Union,\n)\n\nfrom typing_extensions import TypeAlias, final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.logic import source, walk\nfrom wemake_python_styleguide.logic.complexity import overuses\nfrom wemake_python_styleguide.logic.tree import annotations\nfrom wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base, decorators\n\n#: We use these types to store the number of nodes usage in different contexts.\n_Expressions: TypeAlias = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions: TypeAlias = DefaultDict[ast.AST, _Expressions]\n_StringConstants: TypeAlias = FrozenSet[Union[str, bytes]]\n\n\n@final\n@decorators.alias('visit_any_string', (\n 'visit_Str',\n 'visit_Bytes',\n))\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"\n Restricts repeated usage of the same string constant.\n\n NB: Some short strings are ignored, as their use is very common and\n forcing assignment would not make much sense (i.e. newlines, \"\",\n comma, dot).\n \"\"\"\n\n _ignored_string_constants: ClassVar[_StringConstants] = frozenset((\n ' ',\n '.',\n ',',\n '',\n '\\n',\n '\\r\\n',\n '\\t',\n '|',\n b' ',\n b'.',\n b',',\n b'',\n b'\\n',\n b'\\r\\n',\n b'\\t',\n ))\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[\n AnyTextPrimitive, int,\n ] = defaultdict(int)\n\n def visit_any_string(self, node: AnyText) -> None:\n \"\"\"Restricts to over-use string constants.\"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: AnyText) -> None:\n if annotations.is_annotation(node):\n return\n\n # Some strings are so common, that it makes no sense to check if\n # they are overused.\n if node.s in self._ignored_string_constants:\n return\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(\n text=source.render_string(string) or \"''\",\n baseline=self.options.max_string_usages,\n ),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.UnaryOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] 
= (\n overuses.is_decorator,\n overuses.is_self,\n annotations.is_annotation,\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n overuses.is_unary_minus,\n )\n\n _msg: ClassVar[str] = '{0}; used {1}'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"Visits all nodes in a module to find overused values.\"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n if any(ignore(node) for ignore in self._ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n baseline=self.options.max_module_expressions,\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n baseline=self.options.max_function_expressions,\n ),\n )\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py" } ]
[ { "content": "import ast\nfrom collections import defaultdict\nfrom typing import (\n Callable,\n ClassVar,\n DefaultDict,\n FrozenSet,\n List,\n Tuple,\n Union,\n)\n\nfrom typing_extensions import TypeAlias, final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.logic import source, walk\nfrom wemake_python_styleguide.logic.complexity import overuses\nfrom wemake_python_styleguide.logic.tree import annotations\nfrom wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base, decorators\n\n#: We use these types to store the number of nodes usage in different contexts.\n_Expressions: TypeAlias = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions: TypeAlias = DefaultDict[ast.AST, _Expressions]\n_StringConstants: TypeAlias = FrozenSet[Union[str, bytes]]\n\n\n@final\n@decorators.alias('visit_any_string', (\n 'visit_Str',\n 'visit_Bytes',\n))\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"\n Restricts repeated usage of the same string constant.\n\n NB: Some short strings are ignored, as their use is very common and\n forcing assignment would not make much sense (i.e. newlines, \"\",\n comma, dot).\n \"\"\"\n\n _ignored_string_constants: ClassVar[_StringConstants] = frozenset((\n ' ',\n '.',\n ',',\n '',\n '\\n',\n '\\r\\n',\n '\\t',\n '|',\n '\"',\n \"'\",\n b'\"',\n b\"'\",\n b' ',\n b'.',\n b',',\n b'',\n b'\\n',\n b'\\r\\n',\n b'\\t',\n ))\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[\n AnyTextPrimitive, int,\n ] = defaultdict(int)\n\n def visit_any_string(self, node: AnyText) -> None:\n \"\"\"Restricts to over-use string constants.\"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: AnyText) -> None:\n if annotations.is_annotation(node):\n return\n\n # Some strings are so common, that it makes no sense to check if\n # they are overused.\n if node.s in self._ignored_string_constants:\n return\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(\n text=source.render_string(string) or \"''\",\n baseline=self.options.max_string_usages,\n ),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.UnaryOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] 
= (\n overuses.is_decorator,\n overuses.is_self,\n annotations.is_annotation,\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n overuses.is_unary_minus,\n )\n\n _msg: ClassVar[str] = '{0}; used {1}'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"Visits all nodes in a module to find overused values.\"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n if any(ignore(node) for ignore in self._ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n baseline=self.options.max_module_expressions,\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n baseline=self.options.max_function_expressions,\n ),\n )\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py" } ]
diff --git a/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_string.py b/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_string.py index 7fb4dad0f..dfb6190c9 100644 --- a/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_string.py +++ b/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_string.py @@ -204,6 +204,8 @@ def test_string_type_annotations( '""', '","', '"."', + "'\"'", + '"\'"', ]) @pytest.mark.parametrize('prefix', [ 'b', diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py index 39d25855c..7f755edf5 100644 --- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py +++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py @@ -49,6 +49,10 @@ class StringOveruseVisitor(base.BaseNodeVisitor): '\r\n', '\t', '|', + '"', + "'", + b'"', + b"'", b' ', b'.', b',',
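The diff above only extends StringOveruseVisitor._ignored_string_constants with the bare double-quote and single-quote characters (and their bytes forms), so repeating those one-character literals no longer raises OverusedStringViolation. Below is a minimal standalone sketch of that counting behaviour, not the plugin's real API: it walks ast.Constant nodes instead of using the visit_Str/visit_Bytes hooks shown above, and the threshold is an assumed stand-in for the max_string_usages option.

# Standalone sketch (assumptions noted above) of counting repeated string
# literals while skipping an ignore set that now includes the quote characters.
import ast
from collections import Counter

IGNORED = frozenset({' ', '.', ',', '', '\n', '\r\n', '\t', '|', '"', "'"})
MAX_STRING_USAGES = 3  # assumed threshold; the real limit comes from plugin options

def overused_strings(source):
    counts = Counter(
        node.value
        for node in ast.walk(ast.parse(source))
        if isinstance(node, ast.Constant)
        and isinstance(node.value, str)
        and node.value not in IGNORED
    )
    return {s: n for s, n in counts.items() if n > MAX_STRING_USAGES}

sample = 'a = "\'"\nb = "\'"\nc = "\'"\nd = "\'"\n'   # the quote literal repeats 4 times
print(overused_strings(sample))   # {} -- "'" is ignored, matching the change in this PR
sample2 = 'a = "x"\nb = "x"\nc = "x"\nd = "x"\n'
print(overused_strings(sample2))  # {'x': 4} -- ordinary strings are still counted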
LibraryOfCongress__concordia-535
[ { "content": "# TODO: use correct copyright header\nimport os\n\nfrom django.contrib import messages\n\nimport raven\n\n# Build paths inside the project like this: os.path.join(SITE_ROOT_DIR, ...)\nCONCORDIA_APP_DIR = os.path.abspath(os.path.dirname(__file__))\nSITE_ROOT_DIR = os.path.dirname(CONCORDIA_APP_DIR)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"django-secret-key\"\n\nCONCORDIA_ENVIRONMENT = os.environ.get(\"CONCORDIA_ENVIRONMENT\", \"development\")\n\n# Optional SMTP authentication information for EMAIL_HOST.\nEMAIL_HOST_USER = \"\"\nEMAIL_HOST_PASSWORD = \"\"\nEMAIL_USE_TLS = False\nDEFAULT_FROM_EMAIL = \"crowd@loc.gov\"\n\nALLOWED_HOSTS = [\"*\"]\n\nDEBUG = False\nCSRF_COOKIE_SECURE = False\n\nAUTH_PASSWORD_VALIDATORS = []\nEMAIL_BACKEND = \"django.core.mail.backends.filebased.EmailBackend\"\n# EMAIL_FILE_PATH = os.path.join(SITE_ROOT_DIR, 'emails')\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\nLANGUAGE_CODE = \"en-us\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\nROOT_URLCONF = \"concordia.urls\"\nSTATIC_ROOT = \"static-files\"\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [\n os.path.join(CONCORDIA_APP_DIR, \"static\"),\n os.path.join(SITE_ROOT_DIR, \"static\"),\n]\nTEMPLATE_DEBUG = False\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nWSGI_APPLICATION = \"concordia.wsgi.application\"\n\nADMIN_SITE = {\"site_header\": \"Concordia Admin\", \"site_title\": \"Concordia\"}\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"concordia\",\n \"USER\": \"concordia\",\n \"PASSWORD\": os.getenv(\"POSTGRESQL_PW\"),\n \"HOST\": os.getenv(\"POSTGRESQL_HOST\", \"localhost\"),\n \"PORT\": \"5432\",\n \"CONN_MAX_AGE\": 15 * 60, # Keep database connections open for 15 minutes\n }\n}\n\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.humanize\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n \"raven.contrib.django.raven_compat\",\n \"maintenance_mode\",\n \"bootstrap4\",\n \"bittersweet\",\n \"concordia.apps.ConcordiaAppConfig\",\n \"exporter\",\n \"importer\",\n \"captcha\",\n \"django_prometheus_metrics\",\n \"robots\",\n]\n\nif DEBUG:\n INSTALLED_APPS += [\"django_extensions\"]\n INSTALLED_APPS += [\"kombu.transport\"]\n\n\nMIDDLEWARE = [\n \"django_prometheus_metrics.middleware.PrometheusBeforeMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n # WhiteNoise serves static files efficiently:\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"maintenance_mode.middleware.MaintenanceModeMiddleware\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(SITE_ROOT_DIR, \"templates\"),\n os.path.join(CONCORDIA_APP_DIR, \"templates\"),\n ],\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n 
\"django.template.context_processors.media\",\n # Concordia\n \"concordia.context_processors.system_configuration\",\n \"concordia.context_processors.site_navigation\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nMEMCACHED_ADDRESS = os.getenv(\"MEMCACHED_ADDRESS\", \"\")\nMEMCACHED_PORT = os.getenv(\"MEMCACHED_PORT\", \"\")\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.MemcachedCache\",\n \"LOCATION\": \"{}:{}\".format(MEMCACHED_ADDRESS, MEMCACHED_PORT),\n }\n}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": os.path.join(os.path.dirname(__file__), \"whoosh_index\"),\n }\n}\n\n# Celery settings\nCELERY_BROKER_URL = \"pyamqp://guest@rabbit\"\nCELERY_RESULT_BACKEND = \"rpc://\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_IMPORTS = (\"importer.tasks\",)\n\nCELERY_BROKER_HEARTBEAT = 0\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"confirm_publish\": True,\n \"max_retries\": 3,\n \"interval_start\": 0,\n \"interval_step\": 0.2,\n \"interval_max\": 0.5,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"long\": {\n \"format\": \"[{asctime} {levelname} {name}:{lineno}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n \"short\": {\n \"format\": \"[{levelname} {name}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"stream\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"long\",\n },\n \"null\": {\"level\": \"DEBUG\", \"class\": \"logging.NullHandler\"},\n \"file\": {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"long\",\n \"filename\": \"{}/logs/concordia.log\".format(SITE_ROOT_DIR),\n \"when\": \"H\",\n \"interval\": 3,\n \"backupCount\": 16,\n },\n \"celery\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"{}/logs/celery.log\".format(SITE_ROOT_DIR),\n \"formatter\": \"long\",\n \"maxBytes\": 1024 * 1024 * 100, # 100 mb\n },\n \"sentry\": {\n \"level\": \"WARNING\",\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"file\", \"stream\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"celery\": {\"handlers\": [\"celery\", \"stream\"], \"level\": \"DEBUG\"},\n \"sentry.errors\": {\"level\": \"INFO\", \"handlers\": [\"stream\"], \"propagate\": False},\n },\n}\n\n\n################################################################################\n# Django-specific settings above\n################################################################################\n\nACCOUNT_ACTIVATION_DAYS = 7\n\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT_DIR, \"media\")\n\nLOGIN_URL = \"login\"\n\nPASSWORD_VALIDATOR = (\n \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\"NAME\": PASSWORD_VALIDATOR},\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\"min_length\": 8},\n },\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n {\"NAME\": 
\"concordia.validators.complexity\"},\n]\n\nAUTHENTICATION_BACKENDS = [\n \"concordia.email_username_backend.EmailOrUsernameModelBackend\"\n]\n\nCAPTCHA_CHALLENGE_FUNCT = \"captcha.helpers.random_char_challenge\"\n#: Anonymous sessions require captcha validation every day by default:\nANONYMOUS_CAPTCHA_VALIDATION_INTERVAL = 86400\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\nWHITENOISE_ROOT = os.path.join(SITE_ROOT_DIR, \"static\")\n\nPASSWORD_RESET_TIMEOUT_DAYS = 1\nACCOUNT_ACTIVATION_DAYS = 1\nREGISTRATION_OPEN = True # set to false to temporarily disable registrations\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\nSENTRY_DSN = os.environ.get(\"SENTRY_DSN\", \"\")\nSENTRY_PUBLIC_DSN = os.environ.get(\"SENTRY_PUBLIC_DSN\", \"\")\n\nRAVEN_CONFIG = {\n \"dsn\": SENTRY_DSN,\n \"environment\": CONCORDIA_ENVIRONMENT,\n \"release\": raven.fetch_git_sha(SITE_ROOT_DIR),\n}\n\n# When the MAINTENANCE_MODE setting is true, this template will be used to\n# generate a 503 response:\nMAINTENANCE_MODE_TEMPLATE = \"maintenance-mode.html\"\n\n# Names of special django.auth Groups\nCOMMUNITY_MANAGER_GROUP_NAME = \"Community Managers\"\nNEWSLETTER_GROUP_NAME = \"Newsletter\"\n\n# Django sites framework setting\nSITE_ID = 1\nROBOTS_USE_SITEMAP = False\nROBOTS_USE_HOST = False\n\n# django-bootstrap4 customization:\nBOOTSTRAP4 = {\"required_css_class\": \"form-group-required\"}\n\n# Transcription-related settings\n\n#: Number of seconds an asset reservation is valid for\nTRANSCRIPTION_RESERVATION_SECONDS = 5 * 60\n\n#: Web cache policy settings\nDEFAULT_PAGE_TTL = 5 * 60\n", "path": "concordia/settings_template.py" } ]
[ { "content": "# TODO: use correct copyright header\nimport os\n\nfrom django.contrib import messages\n\nimport raven\n\n# Build paths inside the project like this: os.path.join(SITE_ROOT_DIR, ...)\nCONCORDIA_APP_DIR = os.path.abspath(os.path.dirname(__file__))\nSITE_ROOT_DIR = os.path.dirname(CONCORDIA_APP_DIR)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"django-secret-key\"\n\nCONCORDIA_ENVIRONMENT = os.environ.get(\"CONCORDIA_ENVIRONMENT\", \"development\")\n\n# Optional SMTP authentication information for EMAIL_HOST.\nEMAIL_HOST_USER = \"\"\nEMAIL_HOST_PASSWORD = \"\"\nEMAIL_USE_TLS = False\nDEFAULT_FROM_EMAIL = \"crowd@loc.gov\"\n\nALLOWED_HOSTS = [\"*\"]\n\nDEBUG = False\nCSRF_COOKIE_SECURE = False\n\nAUTH_PASSWORD_VALIDATORS = []\nEMAIL_BACKEND = \"django.core.mail.backends.filebased.EmailBackend\"\n# EMAIL_FILE_PATH = os.path.join(SITE_ROOT_DIR, 'emails')\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\nLANGUAGE_CODE = \"en-us\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\nROOT_URLCONF = \"concordia.urls\"\nSTATIC_ROOT = \"static-files\"\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [\n os.path.join(CONCORDIA_APP_DIR, \"static\"),\n os.path.join(SITE_ROOT_DIR, \"static\"),\n]\nTEMPLATE_DEBUG = False\nTIME_ZONE = \"America/New_York\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nWSGI_APPLICATION = \"concordia.wsgi.application\"\n\nADMIN_SITE = {\"site_header\": \"Concordia Admin\", \"site_title\": \"Concordia\"}\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"concordia\",\n \"USER\": \"concordia\",\n \"PASSWORD\": os.getenv(\"POSTGRESQL_PW\"),\n \"HOST\": os.getenv(\"POSTGRESQL_HOST\", \"localhost\"),\n \"PORT\": \"5432\",\n \"CONN_MAX_AGE\": 15 * 60, # Keep database connections open for 15 minutes\n }\n}\n\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.humanize\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n \"raven.contrib.django.raven_compat\",\n \"maintenance_mode\",\n \"bootstrap4\",\n \"bittersweet\",\n \"concordia.apps.ConcordiaAppConfig\",\n \"exporter\",\n \"importer\",\n \"captcha\",\n \"django_prometheus_metrics\",\n \"robots\",\n]\n\nif DEBUG:\n INSTALLED_APPS += [\"django_extensions\"]\n INSTALLED_APPS += [\"kombu.transport\"]\n\n\nMIDDLEWARE = [\n \"django_prometheus_metrics.middleware.PrometheusBeforeMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n # WhiteNoise serves static files efficiently:\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"maintenance_mode.middleware.MaintenanceModeMiddleware\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(SITE_ROOT_DIR, \"templates\"),\n os.path.join(CONCORDIA_APP_DIR, \"templates\"),\n ],\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n 
\"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n # Concordia\n \"concordia.context_processors.system_configuration\",\n \"concordia.context_processors.site_navigation\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nMEMCACHED_ADDRESS = os.getenv(\"MEMCACHED_ADDRESS\", \"\")\nMEMCACHED_PORT = os.getenv(\"MEMCACHED_PORT\", \"\")\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.MemcachedCache\",\n \"LOCATION\": \"{}:{}\".format(MEMCACHED_ADDRESS, MEMCACHED_PORT),\n }\n}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": os.path.join(os.path.dirname(__file__), \"whoosh_index\"),\n }\n}\n\n# Celery settings\nCELERY_BROKER_URL = \"pyamqp://guest@rabbit\"\nCELERY_RESULT_BACKEND = \"rpc://\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_IMPORTS = (\"importer.tasks\",)\n\nCELERY_BROKER_HEARTBEAT = 0\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"confirm_publish\": True,\n \"max_retries\": 3,\n \"interval_start\": 0,\n \"interval_step\": 0.2,\n \"interval_max\": 0.5,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"long\": {\n \"format\": \"[{asctime} {levelname} {name}:{lineno}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n \"short\": {\n \"format\": \"[{levelname} {name}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"stream\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"long\",\n },\n \"null\": {\"level\": \"DEBUG\", \"class\": \"logging.NullHandler\"},\n \"file\": {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"long\",\n \"filename\": \"{}/logs/concordia.log\".format(SITE_ROOT_DIR),\n \"when\": \"H\",\n \"interval\": 3,\n \"backupCount\": 16,\n },\n \"celery\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"{}/logs/celery.log\".format(SITE_ROOT_DIR),\n \"formatter\": \"long\",\n \"maxBytes\": 1024 * 1024 * 100, # 100 mb\n },\n \"sentry\": {\n \"level\": \"WARNING\",\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"file\", \"stream\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"celery\": {\"handlers\": [\"celery\", \"stream\"], \"level\": \"DEBUG\"},\n \"sentry.errors\": {\"level\": \"INFO\", \"handlers\": [\"stream\"], \"propagate\": False},\n },\n}\n\n\n################################################################################\n# Django-specific settings above\n################################################################################\n\nACCOUNT_ACTIVATION_DAYS = 7\n\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT_DIR, \"media\")\n\nLOGIN_URL = \"login\"\n\nPASSWORD_VALIDATOR = (\n \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\"NAME\": PASSWORD_VALIDATOR},\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\"min_length\": 8},\n },\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": 
\"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n {\"NAME\": \"concordia.validators.complexity\"},\n]\n\nAUTHENTICATION_BACKENDS = [\n \"concordia.email_username_backend.EmailOrUsernameModelBackend\"\n]\n\nCAPTCHA_CHALLENGE_FUNCT = \"captcha.helpers.random_char_challenge\"\n#: Anonymous sessions require captcha validation every day by default:\nANONYMOUS_CAPTCHA_VALIDATION_INTERVAL = 86400\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\nWHITENOISE_ROOT = os.path.join(SITE_ROOT_DIR, \"static\")\n\nPASSWORD_RESET_TIMEOUT_DAYS = 1\nACCOUNT_ACTIVATION_DAYS = 1\nREGISTRATION_OPEN = True # set to false to temporarily disable registrations\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\nSENTRY_DSN = os.environ.get(\"SENTRY_DSN\", \"\")\nSENTRY_PUBLIC_DSN = os.environ.get(\"SENTRY_PUBLIC_DSN\", \"\")\n\nRAVEN_CONFIG = {\n \"dsn\": SENTRY_DSN,\n \"environment\": CONCORDIA_ENVIRONMENT,\n \"release\": raven.fetch_git_sha(SITE_ROOT_DIR),\n}\n\n# When the MAINTENANCE_MODE setting is true, this template will be used to\n# generate a 503 response:\nMAINTENANCE_MODE_TEMPLATE = \"maintenance-mode.html\"\n\n# Names of special django.auth Groups\nCOMMUNITY_MANAGER_GROUP_NAME = \"Community Managers\"\nNEWSLETTER_GROUP_NAME = \"Newsletter\"\n\n# Django sites framework setting\nSITE_ID = 1\nROBOTS_USE_SITEMAP = False\nROBOTS_USE_HOST = False\n\n# django-bootstrap4 customization:\nBOOTSTRAP4 = {\"required_css_class\": \"form-group-required\"}\n\n# Transcription-related settings\n\n#: Number of seconds an asset reservation is valid for\nTRANSCRIPTION_RESERVATION_SECONDS = 5 * 60\n\n#: Web cache policy settings\nDEFAULT_PAGE_TTL = 5 * 60\n", "path": "concordia/settings_template.py" } ]
diff --git a/concordia/settings_template.py b/concordia/settings_template.py index 36e5a1d76..5535195d6 100755 --- a/concordia/settings_template.py +++ b/concordia/settings_template.py @@ -41,7 +41,7 @@ os.path.join(SITE_ROOT_DIR, "static"), ] TEMPLATE_DEBUG = False -TIME_ZONE = "UTC" +TIME_ZONE = "America/New_York" USE_I18N = True USE_L10N = True USE_TZ = True
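The concordia change is a one-line settings edit: TIME_ZONE moves from "UTC" to "America/New_York" while USE_TZ stays True, so Django keeps storing datetimes in UTC and only localizes them when rendering. A small stdlib-only sketch of that conversion follows, using plain zoneinfo rather than Django's timezone utilities and an arbitrary example timestamp.

# Illustrative sketch (stdlib only, not Django itself) of what the TIME_ZONE
# switch means when USE_TZ = True: stored values stay in UTC and are converted
# to the configured zone only for display.
from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+; the project at this point may still rely on pytz

stored = datetime(2018, 10, 1, 18, 30, tzinfo=timezone.utc)  # what the database holds

for tz_setting in ("UTC", "America/New_York"):  # before / after this PR
    local = stored.astimezone(ZoneInfo(tz_setting))
    print(tz_setting, local.isoformat())
# UTC               2018-10-01T18:30:00+00:00
# America/New_York  2018-10-01T14:30:00-04:00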
ivy-llc__ivy-12770
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef imag(input, name=None):\n return ivy.imag(input)\n\n\n@to_ivy_arrays_and_back\ndef accumulate_n(inputs, input_type=None, shape=None, dtype=None, name=None):\n return ivy.astype(ivy.sum(ivy.array(inputs)), ivy.int64)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef negative(x, name=None):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef argmax(input, axis, output_type=None, name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"uint16\", \"int16\", \"int32\", \"int64\"]:\n return ivy.astype(ivy.argmax(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmax(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef asinh(x, name=\"asinh\"):\n return ivy.asinh(x)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef confusion_matrix(\n labels, predictions, num_classes=None, weights=None, dtype=ivy.int32, name=None\n):\n labels = ivy.astype(\n ivy.squeeze(ivy.array(labels), axis=None), ivy.int64, copy=False\n )\n predictions = ivy.astype(\n ivy.squeeze(ivy.array(predictions), axis=None), ivy.int64, copy=False\n )\n # failsafe for (1,) array will be squeeze to 0-dim\n labels = ivy.expand_dims(labels, axis=-1) if labels.ndim == 0 else labels\n predictions = (\n ivy.expand_dims(predictions, axis=-1) if predictions.ndim == 0 else predictions\n )\n\n # Sanity check (potential optimization)\n ivy.utils.assertions.check_greater(\n labels, 0, allow_equal=True, message=\"labels contains negative values\"\n )\n ivy.utils.assertions.check_greater(\n predictions, 0, allow_equal=True, message=\"predictions contains negative values\"\n )\n\n if num_classes is None:\n num_classes = max(ivy.max(labels), ivy.max(predictions)) + 1\n else:\n num_classes_int64 = ivy.astype(ivy.array(num_classes), ivy.int64, copy=False)\n ivy.utils.assertions.check_less(\n labels, num_classes_int64, message=\"labels out of bound\"\n )\n ivy.utils.assertions.check_less(\n predictions, num_classes_int64, message=\"predictions out of bound\"\n )\n\n if weights is not None:\n weights = ivy.array(weights)\n ivy.utils.assertions.check_equal(\n ivy.shape(predictions),\n ivy.shape(weights),\n message=\"weights shape do not match predictions\",\n )\n weights = ivy.astype(weights, dtype, copy=False)\n\n shape = ivy.stack([num_classes, num_classes])\n indices = ivy.stack([labels, predictions], axis=1)\n values = ivy.ones_like(predictions, dtype=dtype) if weights is None else weights\n return ivy.scatter_nd(indices, values, shape=shape)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef count_nonzero(input, axis=None, keepdims=None, dtype=ivy.int64, name=None):\n x = ivy.array(input)\n if keepdims is None:\n keepdims = False\n zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)\n return ivy.astype(\n ivy.sum(\n ivy.astype(ivy.not_equal(x, zero), ivy.int64),\n axis=axis,\n 
keepdims=keepdims,\n ),\n dtype,\n copy=False,\n )\n\n\ndef cumprod(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumprod(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\ndef cumsum(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumsum(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef divide_no_nan(x, y, name=\"divide_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x / y,\n )\n\n\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erfcinv(x, name=\"erfcinv\"):\n return 1 / (1 - ivy.erf(x))\n\n\n@to_ivy_arrays_and_back\ndef is_inf(x, name=None):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef is_non_decreasing(x, name=\"is_non_decreasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array([x[0] <= x[1]])\n return ivy.all(ivy.less_equal(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef is_strictly_increasing(x, name=\"is_strictly_increasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array(x[0] < x[1])\n return ivy.all(ivy.less(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef log_sigmoid(x, name=None):\n return -ivy.softplus(-x)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, name=\"logical_not\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(x, y, name=\"LogicalAnd\"):\n return ivy.logical_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x, y, name=\"LogicalXor\"):\n return ivy.logical_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x, y, name=\"logical_or\"):\n return ivy.logical_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply_no_nan(x, y, name=\"multiply_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x * y,\n )\n\n\n@to_ivy_arrays_and_back\ndef polyval(coeffs, x, name=None):\n ivy.utils.assertions.check_isinstance(coeffs, list)\n x = ivy.array(x)\n if len(coeffs) < 1:\n return ivy.zeros_like(x, dtype=x.dtype)\n coeffs = [ivy.array(_) for _ in coeffs]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y, name=\"pow\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=\"reciprocal\"):\n return ivy.reciprocal(x)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal_no_nan(x, name=\"reciprocal_no_nan\"):\n return ivy.where(\n x == 0,\n ivy.array(0.0, dtype=x.dtype),\n ivy.ones_like(x, dtype=x.dtype) / x,\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=\"reduce_all\"):\n return ivy.all(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=\"reduce_any\"):\n return ivy.any(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_euclidean_norm(\n 
input_tensor, axis=None, keepdims=False, name=\"reduce_euclidean_norm\"\n):\n return ivy.vector_norm(\n input_tensor, axis=axis, keepdims=keepdims, ord=2\n ) # ord = '2' is the euclidean norm\n\n\n@to_ivy_arrays_and_back\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=\"reduce_logsumexp\"):\n # stable logsumexp trick\n max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=True)\n return (\n ivy.log(\n ivy.sum(\n ivy.exp(input_tensor - max_input_tensor),\n axis=axis,\n keepdims=keepdims,\n )\n )\n + max_input_tensor\n ).astype(input_tensor.dtype)\n\n\n@to_ivy_arrays_and_back\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=\"reduce_max\"):\n return ivy.max(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=\"reduce_mean\"):\n if ivy.exists(axis):\n axis = ivy.to_list(axis)\n return ivy.mean(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=\"reduce_min\"):\n return ivy.min(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=\"reduce_prod\"):\n return ivy.prod(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=\"reduce_std\"):\n return ivy.std(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=\"reduce_sum\"):\n return ivy.sum(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=\"reduce_variance\"):\n return ivy.var(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef scalar_mul(scalar, x, name=\"scalar_mul\"):\n scalar, x = check_tensorflow_casting(scalar, x)\n return ivy.multiply(x, scalar).astype(x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef squared_difference(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.square(ivy.subtract(x, y))\n\n\n@with_supported_dtypes(\n {\n \"2.9.0 and below\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_mean(\n data, segment_ids, num_segments, name=\"unsorted_segment_mean\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], count[j])\n return x\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_sqrt_n(\n data, segment_ids, num_segments, name=\"unsorted_segement_sqrt_n\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n 
x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], ivy.sqrt(count[j]))\n return x\n\n\n@to_ivy_arrays_and_back\ndef zero_fraction(value, name=\"zero_fraction\"):\n zero = ivy.zeros(tuple(list(value.shape)), dtype=ivy.float32)\n x = ivy.array(value, dtype=ivy.float32)\n count_zero = ivy.sum(ivy.equal(x, zero))\n count_nonzero = ivy.sum(ivy.not_equal(x, zero))\n return ivy.divide(count_zero, ivy.add(count_zero, count_nonzero))\n\n\n@to_ivy_arrays_and_back\ndef argmin(input, axis=None, output_type=\"int64\", name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"int32\", \"int64\"]:\n return ivy.astype(ivy.argmin(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmin(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef truediv(x, y, name=\"truediv\"):\n x, y = check_tensorflow_casting(x, y)\n x_dtype = ivy.dtype(x)\n\n if ivy.current_backend_str() == \"torch\":\n if x_dtype in [ivy.int8, ivy.int16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.int64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n else:\n if x_dtype in [ivy.int8, ivy.uint8, ivy.int16, ivy.uint16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.uint32, ivy.int64, ivy.uint64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef floordiv(x, y, name=None):\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sigmoid(x, name=None):\n return ivy.sigmoid(x)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2, name=None):\n return ivy.nextafter(x1, x2)\n\n\n@with_unsupported_dtypes(\n {\n \"1.2.0\": (\"float16\", \"complex64\", \"complex128\"),\n \"1.8.0 and below\": (\"float16\"),\n \"2.9.0 and below\": (\"int8\", \"int16\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"),\n },\n \"tensorflow\",\n)\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef log_softmax(logits, axis=None):\n return ivy.log_softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x, name=\"acos\"):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef acosh(x, name=\"acosh\"):\n return ivy.acosh(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef is_nan(x, name=None):\n return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": 
(\"bfloat16\", \"half\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef is_finite(x, name=None):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef atan2(y, x, name=None):\n return ivy.atan2(y, x)\n\n\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef add_n(inputs, name=None):\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n\n@to_ivy_arrays_and_back\ndef floormod(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x, y, name=\"LessEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef greater(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less(x, y, name=\"None\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef softmax(logits, axis=-1):\n return ivy.softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef softplus(features, name=None):\n return ivy.softplus(features)\n\n\n@to_ivy_arrays_and_back\ndef xlogy(x, y, name=None):\n return ivy.xlogy(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\ndef zeta(x, q, name=None):\n return ivy.zeta(x, q)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n", "path": "ivy/functional/frontends/tensorflow/math.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef imag(input, name=None):\n return ivy.imag(input)\n\n\n@to_ivy_arrays_and_back\ndef accumulate_n(inputs, input_type=None, shape=None, dtype=None, name=None):\n return ivy.astype(ivy.sum(ivy.array(inputs)), ivy.int64)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef expm1(x, name=None):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef negative(x, name=None):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef argmax(input, axis, output_type=None, name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"uint16\", \"int16\", \"int32\", \"int64\"]:\n return ivy.astype(ivy.argmax(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmax(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef asinh(x, name=\"asinh\"):\n return ivy.asinh(x)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef confusion_matrix(\n labels, predictions, num_classes=None, weights=None, dtype=ivy.int32, name=None\n):\n labels = ivy.astype(\n ivy.squeeze(ivy.array(labels), axis=None), ivy.int64, copy=False\n )\n predictions = ivy.astype(\n ivy.squeeze(ivy.array(predictions), axis=None), ivy.int64, copy=False\n )\n # failsafe for (1,) array will be squeeze to 0-dim\n labels = ivy.expand_dims(labels, axis=-1) if labels.ndim == 0 else labels\n predictions = (\n ivy.expand_dims(predictions, axis=-1) if predictions.ndim == 0 else predictions\n )\n\n # Sanity check (potential optimization)\n ivy.utils.assertions.check_greater(\n labels, 0, allow_equal=True, message=\"labels contains negative values\"\n )\n ivy.utils.assertions.check_greater(\n predictions, 0, allow_equal=True, message=\"predictions contains negative values\"\n )\n\n if num_classes is None:\n num_classes = max(ivy.max(labels), ivy.max(predictions)) + 1\n else:\n num_classes_int64 = ivy.astype(ivy.array(num_classes), ivy.int64, copy=False)\n ivy.utils.assertions.check_less(\n labels, num_classes_int64, message=\"labels out of bound\"\n )\n ivy.utils.assertions.check_less(\n predictions, num_classes_int64, message=\"predictions out of bound\"\n )\n\n if weights is not None:\n weights = ivy.array(weights)\n ivy.utils.assertions.check_equal(\n ivy.shape(predictions),\n ivy.shape(weights),\n message=\"weights shape do not match predictions\",\n )\n weights = ivy.astype(weights, dtype, copy=False)\n\n shape = ivy.stack([num_classes, num_classes])\n indices = ivy.stack([labels, predictions], axis=1)\n values = ivy.ones_like(predictions, dtype=dtype) if weights is None else weights\n return ivy.scatter_nd(indices, values, shape=shape)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef count_nonzero(input, axis=None, keepdims=None, dtype=ivy.int64, name=None):\n x = ivy.array(input)\n if keepdims is None:\n keepdims = False\n zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)\n return ivy.astype(\n 
ivy.sum(\n ivy.astype(ivy.not_equal(x, zero), ivy.int64),\n axis=axis,\n keepdims=keepdims,\n ),\n dtype,\n copy=False,\n )\n\n\ndef cumprod(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumprod(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\ndef cumsum(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumsum(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef divide_no_nan(x, y, name=\"divide_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x / y,\n )\n\n\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erfcinv(x, name=\"erfcinv\"):\n return 1 / (1 - ivy.erf(x))\n\n\n@to_ivy_arrays_and_back\ndef is_inf(x, name=None):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef is_non_decreasing(x, name=\"is_non_decreasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array([x[0] <= x[1]])\n return ivy.all(ivy.less_equal(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef is_strictly_increasing(x, name=\"is_strictly_increasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array(x[0] < x[1])\n return ivy.all(ivy.less(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef log_sigmoid(x, name=None):\n return -ivy.softplus(-x)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, name=\"logical_not\"):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(x, y, name=\"LogicalAnd\"):\n return ivy.logical_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x, y, name=\"LogicalXor\"):\n return ivy.logical_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x, y, name=\"logical_or\"):\n return ivy.logical_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply_no_nan(x, y, name=\"multiply_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x * y,\n )\n\n\n@to_ivy_arrays_and_back\ndef polyval(coeffs, x, name=None):\n ivy.utils.assertions.check_isinstance(coeffs, list)\n x = ivy.array(x)\n if len(coeffs) < 1:\n return ivy.zeros_like(x, dtype=x.dtype)\n coeffs = [ivy.array(_) for _ in coeffs]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y, name=\"pow\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=\"reciprocal\"):\n return ivy.reciprocal(x)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal_no_nan(x, name=\"reciprocal_no_nan\"):\n return ivy.where(\n x == 0,\n ivy.array(0.0, dtype=x.dtype),\n ivy.ones_like(x, dtype=x.dtype) / x,\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=\"reduce_all\"):\n return ivy.all(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=\"reduce_any\"):\n return ivy.any(input_tensor, axis=axis, 
keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_euclidean_norm(\n input_tensor, axis=None, keepdims=False, name=\"reduce_euclidean_norm\"\n):\n return ivy.vector_norm(\n input_tensor, axis=axis, keepdims=keepdims, ord=2\n ) # ord = '2' is the euclidean norm\n\n\n@to_ivy_arrays_and_back\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=\"reduce_logsumexp\"):\n # stable logsumexp trick\n max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=True)\n return (\n ivy.log(\n ivy.sum(\n ivy.exp(input_tensor - max_input_tensor),\n axis=axis,\n keepdims=keepdims,\n )\n )\n + max_input_tensor\n ).astype(input_tensor.dtype)\n\n\n@to_ivy_arrays_and_back\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=\"reduce_max\"):\n return ivy.max(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=\"reduce_mean\"):\n if ivy.exists(axis):\n axis = ivy.to_list(axis)\n return ivy.mean(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=\"reduce_min\"):\n return ivy.min(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=\"reduce_prod\"):\n return ivy.prod(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=\"reduce_std\"):\n return ivy.std(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=\"reduce_sum\"):\n return ivy.sum(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=\"reduce_variance\"):\n return ivy.var(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef scalar_mul(scalar, x, name=\"scalar_mul\"):\n scalar, x = check_tensorflow_casting(scalar, x)\n return ivy.multiply(x, scalar).astype(x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef squared_difference(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.square(ivy.subtract(x, y))\n\n\n@with_supported_dtypes(\n {\n \"2.9.0 and below\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_mean(\n data, segment_ids, num_segments, name=\"unsorted_segment_mean\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], count[j])\n return x\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_sqrt_n(\n data, segment_ids, num_segments, name=\"unsorted_segement_sqrt_n\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = 
ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], ivy.sqrt(count[j]))\n return x\n\n\n@to_ivy_arrays_and_back\ndef zero_fraction(value, name=\"zero_fraction\"):\n zero = ivy.zeros(tuple(list(value.shape)), dtype=ivy.float32)\n x = ivy.array(value, dtype=ivy.float32)\n count_zero = ivy.sum(ivy.equal(x, zero))\n count_nonzero = ivy.sum(ivy.not_equal(x, zero))\n return ivy.divide(count_zero, ivy.add(count_zero, count_nonzero))\n\n\n@to_ivy_arrays_and_back\ndef argmin(input, axis=None, output_type=\"int64\", name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"int32\", \"int64\"]:\n return ivy.astype(ivy.argmin(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmin(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef truediv(x, y, name=\"truediv\"):\n x, y = check_tensorflow_casting(x, y)\n x_dtype = ivy.dtype(x)\n\n if ivy.current_backend_str() == \"torch\":\n if x_dtype in [ivy.int8, ivy.int16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.int64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n else:\n if x_dtype in [ivy.int8, ivy.uint8, ivy.int16, ivy.uint16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.uint32, ivy.int64, ivy.uint64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef floordiv(x, y, name=None):\n return ivy.floor_divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sigmoid(x, name=None):\n return ivy.sigmoid(x)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2, name=None):\n return ivy.nextafter(x1, x2)\n\n\n@with_unsupported_dtypes(\n {\n \"1.2.0\": (\"float16\", \"complex64\", \"complex128\"),\n \"1.8.0 and below\": (\"float16\"),\n \"2.9.0 and below\": (\"int8\", \"int16\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"),\n },\n \"tensorflow\",\n)\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef log_softmax(logits, axis=None):\n return ivy.log_softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x, name=\"acos\"):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef acosh(x, name=\"acosh\"):\n return ivy.acosh(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef is_nan(x, name=None):\n 
return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": (\"bfloat16\", \"half\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef is_finite(x, name=None):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef atan2(y, x, name=None):\n return ivy.atan2(y, x)\n\n\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef add_n(inputs, name=None):\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n\n@to_ivy_arrays_and_back\ndef floormod(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x, y, name=\"LessEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef greater(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less(x, y, name=\"None\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef softmax(logits, axis=-1):\n return ivy.softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef softplus(features, name=None):\n return ivy.softplus(features)\n\n\n@to_ivy_arrays_and_back\ndef xlogy(x, y, name=None):\n return ivy.xlogy(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": (\"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\ndef zeta(x, q, name=None):\n return ivy.zeta(x, q)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater_equal(x, y)\n", "path": "ivy/functional/frontends/tensorflow/math.py" } ]
diff --git a/ivy/functional/frontends/tensorflow/math.py b/ivy/functional/frontends/tensorflow/math.py index 889e080a10d7d..491eb5cc2866e 100644 --- a/ivy/functional/frontends/tensorflow/math.py +++ b/ivy/functional/frontends/tensorflow/math.py @@ -34,6 +34,11 @@ def exp(x, name=None): return ivy.exp(x) +@to_ivy_arrays_and_back +def expm1(x, name=None): + return ivy.expm1(x) + + @to_ivy_arrays_and_back def sqrt(x, name=None): return ivy.sqrt(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 433016a4ba1ce..bca918aa2336e 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -176,6 +176,31 @@ def test_tensorflow_exp( ) +# expm1 +@handle_frontend_test( + fn_tree="tensorflow.math.expm1", + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), + test_with_out=st.just(False), +) +def test_tensorflow_expm1( + *, + dtype_and_x, + frontend, + test_flags, + fn_tree, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + ) + + # sqrt @handle_frontend_test( fn_tree="tensorflow.math.sqrt", @@ -2104,6 +2129,7 @@ def test_tensorflow_floormod( on_device=on_device, x=x[0], y=x[1], + )
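The ivy diff registers an expm1 wrapper in the TensorFlow frontend that simply forwards to ivy.expm1 through to_ivy_arrays_and_back, mirroring the existing exp wrapper. A dedicated expm1 is worth exposing because exp(x) - 1 loses precision to cancellation for small x; the sketch below shows that effect with NumPy as a stand-in for whichever backend ivy would dispatch to.

# Why a dedicated expm1 kernel exists: exp(x) - 1 cancels catastrophically for
# tiny x, while expm1 stays accurate. NumPy is used here only as an illustrative
# backend; the new frontend function forwards to ivy.expm1 in the same spirit.
import numpy as np

x = np.array([1e-12, 1e-8, 1.0])
naive = np.exp(x) - 1.0      # loses roughly half the significant digits for tiny x
accurate = np.expm1(x)       # dedicated kernel, accurate near zero

for xi, n, a in zip(x, naive, accurate):
    print(f"x={xi:.0e}  exp(x)-1={n:.16e}  expm1(x)={a:.16e}")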
pytorch__pytorch-2063
[ { "content": "import torch\nimport warnings\nfrom . import _tensor_str\nfrom ._utils import _type, _cuda, _range, _rebuild_tensor\nimport sys\n\n\nclass _TensorBase(object):\n #: bool: True if this is a CUDA tensor\n is_cuda = False\n is_sparse = False\n\n # NB: This implementation is CPU only; see THPTensor_(new) for the\n # CUDA case, which handles constructing the tensor on the same GPU\n # as this tensor.\n def new(self, *args, **kwargs):\n \"\"\"Constructs a new tensor of the same data type.\"\"\"\n return self.__class__(*args, **kwargs)\n\n def type_as(self, tensor):\n \"\"\"Returns this tensor cast to the type of the given tensor.\n\n This is a no-op if the tensor is already of the correct type. This is\n equivalent to::\n\n self.type(tensor.type())\n\n Params:\n tensor (Tensor): the tensor which has the desired type\n \"\"\"\n return self.type(tensor.type())\n\n def cpu(self):\n \"\"\"Returns a CPU copy of this tensor if it's not already on the CPU\"\"\"\n return self.type(getattr(torch, self.__class__.__name__))\n\n def double(self):\n \"\"\"Casts this tensor to double type\"\"\"\n return self.type(type(self).__module__ + '.DoubleTensor')\n\n def float(self):\n \"\"\"Casts this tensor to float type\"\"\"\n return self.type(type(self).__module__ + '.FloatTensor')\n\n def half(self):\n \"\"\"Casts this tensor to half-precision float type\"\"\"\n return self.type(type(self).__module__ + '.HalfTensor')\n\n def long(self):\n \"\"\"Casts this tensor to long type\"\"\"\n return self.type(type(self).__module__ + '.LongTensor')\n\n def int(self):\n \"\"\"Casts this tensor to int type\"\"\"\n return self.type(type(self).__module__ + '.IntTensor')\n\n def short(self):\n \"\"\"Casts this tensor to short type\"\"\"\n return self.type(type(self).__module__ + '.ShortTensor')\n\n def char(self):\n \"\"\"Casts this tensor to char type\"\"\"\n return self.type(type(self).__module__ + '.CharTensor')\n\n def byte(self):\n \"\"\"Casts this tensor to byte type\"\"\"\n return self.type(type(self).__module__ + '.ByteTensor')\n\n def is_pinned(self):\n \"\"\"Returns true if this tensor resides in pinned memory\"\"\"\n storage = self.storage()\n return storage.is_pinned() if storage else False\n\n def pin_memory(self):\n \"\"\"Copies the tensor to pinned memory, if it's not already pinned.\"\"\"\n if self.is_cuda:\n raise TypeError(\"cannot pin '{0}' only CPU memory can be pinned\"\n .format(self.type()))\n storage = self.storage()\n if storage is None:\n storage = (self.storage_type())()\n return type(self)().set_(storage.pin_memory()).view_as(self)\n\n def share_memory_(self):\n \"\"\"Moves the underlying storage to shared memory.\n\n This is a no-op if the underlying storage is already in shared memory\n and for CUDA tensors. 
Tensors in shared memory cannot be resized.\n \"\"\"\n self.storage().share_memory_()\n return self\n\n def is_shared(self):\n \"\"\"Checks if tensor is in shared memory.\n\n This is always ``True`` for CUDA tensors.\n \"\"\"\n return self.storage().is_shared()\n\n @property\n def shape(self):\n \"\"\"Alias for .size()\n\n Returns a torch.Size object, containing the dimensions of the tensor\n \"\"\"\n return self.size()\n\n def __deepcopy__(self, _memo):\n memo = _memo.setdefault('torch', {})\n if self._cdata in memo:\n return memo[self._cdata]\n new_storage = self.storage().__deepcopy__(_memo)\n new_tensor = self.new()\n new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())\n memo[self._cdata] = new_tensor\n return new_tensor\n\n def __reduce__(self):\n # NOTE: _rebuild_tensor does not call __setstate__\n args = self.__getstate__()\n return (_rebuild_tensor, args)\n\n def __getstate__(self):\n return (self.storage(),\n self.storage_offset(),\n tuple(self.size()),\n self.stride())\n\n def __setstate__(self, state):\n self.set_(*state)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n # All strings are unicode in Python 3, while we have to encode unicode\n # strings in Python2. If we can't, let python decide the best\n # characters to replace unicode characters with.\n if sys.version_info > (3,):\n return _tensor_str._str(self)\n else:\n if hasattr(sys.stdout, 'encoding'):\n return _tensor_str._str(self).encode(\n sys.stdout.encoding or 'UTF-8', 'replace')\n else:\n return _tensor_str._str(self).encode('UTF-8', 'replace')\n\n def __bool__(self):\n if self.numel() == 0:\n return False\n raise RuntimeError(\"bool value of non-empty \" + torch.typename(self) +\n \" objects is ambiguous\")\n\n __nonzero__ = __bool__\n\n def __iter__(self):\n if self.nelement() > 0:\n return iter(map(lambda i: self.select(0, i), _range(self.size(0))))\n else:\n return iter([])\n\n def split(self, split_size, dim=0):\n \"\"\"Splits this tensor into a tuple of tensors.\n\n See :func:`torch.split`.\n \"\"\"\n return torch.split(self, split_size, dim)\n\n def chunk(self, n_chunks, dim=0):\n \"\"\"Splits this tensor into a tuple of tensors.\n\n See :func:`torch.chunk`.\n \"\"\"\n return torch.chunk(self, n_chunks, dim)\n\n def matmul(self, other):\n \"\"\"Matrix product of two tensors.\n\n See :func:`torch.matmul`.\"\"\"\n return torch.matmul(self, other)\n\n def tolist(self):\n \"\"\"Returns a nested list represenation of this tensor.\"\"\"\n dim = self.dim()\n if dim == 1:\n return [v for v in self]\n elif dim > 0:\n return [subt.tolist() for subt in self]\n return []\n\n def view_as(self, tensor):\n \"\"\"Returns this tensor viewed as the size as the specified tensor.\n\n This is equivalent to::\n\n self.view(tensor.size())\n \"\"\"\n return self.view(tensor.size())\n\n def permute(self, *dims):\n \"\"\"Permute the dimensions of this tensor.\n\n Args:\n *dims (int...): The desired ordering of dimensions\n\n Example:\n >>> x = torch.randn(2, 3, 5)\n >>> x.size()\n torch.Size([2, 3, 5])\n >>> x.permute(2, 0, 1).size()\n torch.Size([5, 2, 3])\n \"\"\"\n perm = list(dims)\n tensor = self\n n_dims = tensor.dim()\n assert len(perm) == n_dims, 'Invalid permutation'\n for i, p in enumerate(perm):\n if p != i and p != -1:\n j = i\n while True:\n assert 0 <= perm[j] and perm[j] < n_dims, 'Invalid permutation'\n tensor = tensor.transpose(j, perm[j])\n perm[j], j = -1, perm[j]\n if perm[j] == i:\n break\n perm[j] = -1\n return tensor\n\n def expand_as(self, tensor):\n \"\"\"Expands 
this tensor to the size of the specified tensor.\n\n This is equivalent to::\n\n self.expand(tensor.size())\n \"\"\"\n return self.expand(tensor.size())\n\n def repeat(self, *sizes):\n \"\"\"Repeats this tensor along the specified dimensions.\n\n Unlike :meth:`expand`, this function copies the tensor's data.\n\n Args:\n *sizes (torch.Size or int...): The number of times to repeat this\n tensor along each dimension\n\n Example:\n >>> x = torch.Tensor([1, 2, 3])\n >>> x.repeat(4, 2)\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n [torch.FloatTensor of size 4x6]\n >>> x.repeat(4, 2, 1).size()\n torch.Size([4, 2, 3])\n \"\"\"\n # If args == (torch.Size,), then we need to unpack the tuple\n if len(sizes) == 1 and isinstance(sizes[0], torch.Size):\n sizes = sizes[0]\n repeats = list(sizes)\n result = self.new()\n src = self.contiguous()\n\n if len(repeats) < src.dim():\n raise ValueError('Number of dimensions of repeat dims can not be '\n 'smaller than number of dimensions of tensor')\n\n xtensor = src.new().set_(src)\n xsize = list(xtensor.size())\n for i in _range(len(repeats) - src.dim()):\n xsize = [1] + xsize\n\n size = torch.Size([a * b for a, b in zip(xsize, repeats)])\n xtensor.resize_(torch.Size(xsize))\n result.resize_(size)\n urtensor = result.new(result)\n for i in _range(xtensor.dim()):\n urtensor = urtensor.unfold(i, xtensor.size(i), xtensor.size(i))\n for i in _range(urtensor.dim() - xtensor.dim()):\n xsize = [1] + xsize\n xtensor.resize_(torch.Size(xsize))\n xxtensor = xtensor.expand_as(urtensor)\n urtensor.copy_(xxtensor)\n return result\n\n def masked_copy_(self, *args, **kwargs):\n warnings.warn(\"masked_copy_ is deprecated and renamed to masked_scatter_, and will be removed in v0.3\")\n return self.masked_scatter_(*args, **kwargs)\n\n # TODO: add tests for operators\n def __add__(self, other):\n return self.add(other)\n __radd__ = __add__\n\n def __iadd__(self, other):\n return self.add_(other)\n\n def __sub__(self, other):\n return self.sub(other)\n\n def __rsub__(self, other):\n return self.new().resize_as_(self).fill_(other).add_(-1, self)\n\n def __isub__(self, other):\n return self.sub_(other)\n\n def __mul__(self, other):\n return self.mul(other)\n __rmul__ = __mul__\n\n def __imul__(self, other):\n return self.mul_(other)\n\n def __matmul__(self, other):\n if not torch.is_tensor(other):\n return NotImplemented\n return self.matmul(other)\n\n def __pow__(self, other):\n return self.pow(other)\n\n def __ipow__(self, other):\n return self.pow_(other)\n\n def __div__(self, other):\n return self.div(other)\n __truediv__ = __div__\n\n def __rdiv__(self, other):\n return self.new().resize_as_(self).fill_(other).div_(self)\n __rtruediv__ = __rdiv__\n\n def __idiv__(self, other):\n return self.div_(other)\n\n def __mod__(self, other):\n return self.remainder(other)\n\n def __neg__(self):\n return self.neg()\n\n def __eq__(self, other):\n return self.eq(other)\n\n def __ne__(self, other):\n return self.ne(other)\n\n def __lt__(self, other):\n return self.lt(other)\n\n def __le__(self, other):\n return self.le(other)\n\n def __gt__(self, other):\n return self.gt(other)\n\n def __ge__(self, other):\n return self.ge(other)\n\n # TODO: add native add or and xor in the libs\n def __invert__(self):\n if type(self).__name__ != 'ByteTensor':\n raise RuntimeError('logical operations are supported on ByteTensors only')\n return (1 - self)\n\n def __hash__(self):\n return id(self)\n\n # provide user guidance when they inavertently call autograd properties on a Tensor\n 
@property\n def data(self):\n raise RuntimeError('cannot call .data on a torch.Tensor: did you intend to use autograd.Variable?')\n\n\n_TensorBase.type = _type\n_TensorBase.cuda = _cuda\n", "path": "torch/tensor.py" } ]
[ { "content": "import torch\nimport warnings\nfrom . import _tensor_str\nfrom ._utils import _type, _cuda, _range, _rebuild_tensor\nimport sys\n\n\nclass _TensorBase(object):\n #: bool: True if this is a CUDA tensor\n is_cuda = False\n is_sparse = False\n\n # NB: This implementation is CPU only; see THPTensor_(new) for the\n # CUDA case, which handles constructing the tensor on the same GPU\n # as this tensor.\n def new(self, *args, **kwargs):\n \"\"\"Constructs a new tensor of the same data type.\"\"\"\n return self.__class__(*args, **kwargs)\n\n def type_as(self, tensor):\n \"\"\"Returns this tensor cast to the type of the given tensor.\n\n This is a no-op if the tensor is already of the correct type. This is\n equivalent to::\n\n self.type(tensor.type())\n\n Params:\n tensor (Tensor): the tensor which has the desired type\n \"\"\"\n return self.type(tensor.type())\n\n def cpu(self):\n \"\"\"Returns a CPU copy of this tensor if it's not already on the CPU\"\"\"\n return self.type(getattr(torch, self.__class__.__name__))\n\n def double(self):\n \"\"\"Casts this tensor to double type\"\"\"\n return self.type(type(self).__module__ + '.DoubleTensor')\n\n def float(self):\n \"\"\"Casts this tensor to float type\"\"\"\n return self.type(type(self).__module__ + '.FloatTensor')\n\n def half(self):\n \"\"\"Casts this tensor to half-precision float type\"\"\"\n return self.type(type(self).__module__ + '.HalfTensor')\n\n def long(self):\n \"\"\"Casts this tensor to long type\"\"\"\n return self.type(type(self).__module__ + '.LongTensor')\n\n def int(self):\n \"\"\"Casts this tensor to int type\"\"\"\n return self.type(type(self).__module__ + '.IntTensor')\n\n def short(self):\n \"\"\"Casts this tensor to short type\"\"\"\n return self.type(type(self).__module__ + '.ShortTensor')\n\n def char(self):\n \"\"\"Casts this tensor to char type\"\"\"\n return self.type(type(self).__module__ + '.CharTensor')\n\n def byte(self):\n \"\"\"Casts this tensor to byte type\"\"\"\n return self.type(type(self).__module__ + '.ByteTensor')\n\n def is_pinned(self):\n \"\"\"Returns true if this tensor resides in pinned memory\"\"\"\n storage = self.storage()\n return storage.is_pinned() if storage else False\n\n def pin_memory(self):\n \"\"\"Copies the tensor to pinned memory, if it's not already pinned.\"\"\"\n if self.is_cuda:\n raise TypeError(\"cannot pin '{0}' only CPU memory can be pinned\"\n .format(self.type()))\n storage = self.storage()\n if storage is None:\n storage = (self.storage_type())()\n return type(self)().set_(storage.pin_memory()).view_as(self)\n\n def share_memory_(self):\n \"\"\"Moves the underlying storage to shared memory.\n\n This is a no-op if the underlying storage is already in shared memory\n and for CUDA tensors. 
Tensors in shared memory cannot be resized.\n \"\"\"\n self.storage().share_memory_()\n return self\n\n def is_shared(self):\n \"\"\"Checks if tensor is in shared memory.\n\n This is always ``True`` for CUDA tensors.\n \"\"\"\n return self.storage().is_shared()\n\n @property\n def shape(self):\n \"\"\"Alias for .size()\n\n Returns a torch.Size object, containing the dimensions of the tensor\n \"\"\"\n return self.size()\n\n def __deepcopy__(self, _memo):\n memo = _memo.setdefault('torch', {})\n if self._cdata in memo:\n return memo[self._cdata]\n new_storage = self.storage().__deepcopy__(_memo)\n new_tensor = self.new()\n new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())\n memo[self._cdata] = new_tensor\n return new_tensor\n\n def __reduce__(self):\n # NOTE: _rebuild_tensor does not call __setstate__\n args = self.__getstate__()\n return (_rebuild_tensor, args)\n\n def __getstate__(self):\n return (self.storage(),\n self.storage_offset(),\n tuple(self.size()),\n self.stride())\n\n def __setstate__(self, state):\n self.set_(*state)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n # All strings are unicode in Python 3, while we have to encode unicode\n # strings in Python2. If we can't, let python decide the best\n # characters to replace unicode characters with.\n if sys.version_info > (3,):\n return _tensor_str._str(self)\n else:\n if hasattr(sys.stdout, 'encoding'):\n return _tensor_str._str(self).encode(\n sys.stdout.encoding or 'UTF-8', 'replace')\n else:\n return _tensor_str._str(self).encode('UTF-8', 'replace')\n\n def __bool__(self):\n if self.numel() == 0:\n return False\n raise RuntimeError(\"bool value of non-empty \" + torch.typename(self) +\n \" objects is ambiguous\")\n\n __nonzero__ = __bool__\n\n def __iter__(self):\n if self.nelement() > 0:\n return iter(map(lambda i: self.select(0, i), _range(self.size(0))))\n else:\n return iter([])\n\n def split(self, split_size, dim=0):\n \"\"\"Splits this tensor into a tuple of tensors.\n\n See :func:`torch.split`.\n \"\"\"\n return torch.split(self, split_size, dim)\n\n def chunk(self, n_chunks, dim=0):\n \"\"\"Splits this tensor into a tuple of tensors.\n\n See :func:`torch.chunk`.\n \"\"\"\n return torch.chunk(self, n_chunks, dim)\n\n def matmul(self, other):\n \"\"\"Matrix product of two tensors.\n\n See :func:`torch.matmul`.\"\"\"\n return torch.matmul(self, other)\n\n def tolist(self):\n \"\"\"Returns a nested list represenation of this tensor.\"\"\"\n dim = self.dim()\n if dim == 1:\n return [v for v in self]\n elif dim > 0:\n return [subt.tolist() for subt in self]\n return []\n\n def view_as(self, tensor):\n \"\"\"Returns this tensor viewed as the size as the specified tensor.\n\n This is equivalent to::\n\n self.view(tensor.size())\n \"\"\"\n return self.view(tensor.size())\n\n def permute(self, *dims):\n \"\"\"Permute the dimensions of this tensor.\n\n Args:\n *dims (int...): The desired ordering of dimensions\n\n Example:\n >>> x = torch.randn(2, 3, 5)\n >>> x.size()\n torch.Size([2, 3, 5])\n >>> x.permute(2, 0, 1).size()\n torch.Size([5, 2, 3])\n \"\"\"\n perm = list(dims)\n tensor = self\n n_dims = tensor.dim()\n assert len(perm) == n_dims, 'Invalid permutation'\n for i, p in enumerate(perm):\n if p != i and p != -1:\n j = i\n while True:\n assert 0 <= perm[j] and perm[j] < n_dims, 'Invalid permutation'\n tensor = tensor.transpose(j, perm[j])\n perm[j], j = -1, perm[j]\n if perm[j] == i:\n break\n perm[j] = -1\n return tensor\n\n def expand_as(self, tensor):\n \"\"\"Expands 
this tensor to the size of the specified tensor.\n\n This is equivalent to::\n\n self.expand(tensor.size())\n \"\"\"\n return self.expand(tensor.size())\n\n def repeat(self, *sizes):\n \"\"\"Repeats this tensor along the specified dimensions.\n\n Unlike :meth:`expand`, this function copies the tensor's data.\n\n Args:\n *sizes (torch.Size or int...): The number of times to repeat this\n tensor along each dimension\n\n Example:\n >>> x = torch.Tensor([1, 2, 3])\n >>> x.repeat(4, 2)\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n 1 2 3 1 2 3\n [torch.FloatTensor of size 4x6]\n >>> x.repeat(4, 2, 1).size()\n torch.Size([4, 2, 3])\n \"\"\"\n # If args == (torch.Size,), then we need to unpack the tuple\n if len(sizes) == 1 and isinstance(sizes[0], torch.Size):\n sizes = sizes[0]\n repeats = list(sizes)\n result = self.new()\n src = self.contiguous()\n\n if len(repeats) < src.dim():\n raise ValueError('Number of dimensions of repeat dims can not be '\n 'smaller than number of dimensions of tensor')\n\n xtensor = src.new().set_(src)\n xsize = list(xtensor.size())\n for i in _range(len(repeats) - src.dim()):\n xsize = [1] + xsize\n\n size = torch.Size([a * b for a, b in zip(xsize, repeats)])\n xtensor.resize_(torch.Size(xsize))\n result.resize_(size)\n urtensor = result.new(result)\n for i in _range(xtensor.dim()):\n urtensor = urtensor.unfold(i, xtensor.size(i), xtensor.size(i))\n for i in _range(urtensor.dim() - xtensor.dim()):\n xsize = [1] + xsize\n xtensor.resize_(torch.Size(xsize))\n xxtensor = xtensor.expand_as(urtensor)\n urtensor.copy_(xxtensor)\n return result\n\n def masked_copy_(self, *args, **kwargs):\n warnings.warn(\"masked_copy_ is deprecated and renamed to masked_scatter_, and will be removed in v0.3\")\n return self.masked_scatter_(*args, **kwargs)\n\n # TODO: add tests for operators\n def __add__(self, other):\n return self.add(other)\n __radd__ = __add__\n\n def __iadd__(self, other):\n return self.add_(other)\n\n def __sub__(self, other):\n return self.sub(other)\n\n def __rsub__(self, other):\n return self.new().resize_as_(self).fill_(other).add_(-1, self)\n\n def __isub__(self, other):\n return self.sub_(other)\n\n def __mul__(self, other):\n return self.mul(other)\n __rmul__ = __mul__\n\n def __imul__(self, other):\n return self.mul_(other)\n\n def __matmul__(self, other):\n if not torch.is_tensor(other):\n return NotImplemented\n return self.matmul(other)\n\n def __pow__(self, other):\n return self.pow(other)\n\n def __ipow__(self, other):\n return self.pow_(other)\n\n def __div__(self, other):\n return self.div(other)\n __truediv__ = __div__\n\n def __rdiv__(self, other):\n return self.new().resize_as_(self).fill_(other).div_(self)\n __rtruediv__ = __rdiv__\n\n def __idiv__(self, other):\n return self.div_(other)\n __itruediv__ = __idiv__\n\n def __mod__(self, other):\n return self.remainder(other)\n\n def __neg__(self):\n return self.neg()\n\n def __eq__(self, other):\n return self.eq(other)\n\n def __ne__(self, other):\n return self.ne(other)\n\n def __lt__(self, other):\n return self.lt(other)\n\n def __le__(self, other):\n return self.le(other)\n\n def __gt__(self, other):\n return self.gt(other)\n\n def __ge__(self, other):\n return self.ge(other)\n\n # TODO: add native add or and xor in the libs\n def __invert__(self):\n if type(self).__name__ != 'ByteTensor':\n raise RuntimeError('logical operations are supported on ByteTensors only')\n return (1 - self)\n\n def __hash__(self):\n return id(self)\n\n # provide user guidance when they inavertently call autograd 
properties on a Tensor\n @property\n def data(self):\n raise RuntimeError('cannot call .data on a torch.Tensor: did you intend to use autograd.Variable?')\n\n\n_TensorBase.type = _type\n_TensorBase.cuda = _cuda\n", "path": "torch/tensor.py" } ]
diff --git a/test/test_torch.py b/test/test_torch.py index ff8de2d47a51ed..51df0f3ba009ec 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -4054,6 +4054,13 @@ def test_big_transpose(self): t2 = torch.from_numpy(t.numpy().transpose()) self.assertEqual(t1, t2) + def test_inplace_division(self): + t = torch.rand(5, 5) + id_before = id(t) + t /= 2 + id_after = id(t) + self.assertEqual(id_before, id_after) + # Functions to test negative dimension wrapping METHOD = 1 INPLACE_METHOD = 2 diff --git a/torch/tensor.py b/torch/tensor.py index f38b9182624923..43c5cad6081be8 100644 --- a/torch/tensor.py +++ b/torch/tensor.py @@ -333,6 +333,7 @@ def __rdiv__(self, other): def __idiv__(self, other): return self.div_(other) + __itruediv__ = __idiv__ def __mod__(self, other): return self.remainder(other)
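The patch above is small but the mechanism is easy to miss: on Python 3, `t /= 2` looks up `__itruediv__`, not `__idiv__`, and when it is absent the interpreter falls back to `__truediv__`, which allocates a new tensor and rebinds the name. Aliasing `__itruediv__ = __idiv__` keeps the division in place, which is exactly what `test_inplace_division` asserts via `id()`. A minimal pure-Python sketch of the same mechanism (illustrative only, not PyTorch code):

class Box(object):
    def __init__(self, value):
        self.value = value

    def __truediv__(self, other):
        return Box(self.value / other)   # out-of-place: a brand new object

    def div_(self, other):
        self.value /= other              # in-place mutation
        return self

    def __idiv__(self, other):           # Python 2 spelling
        return self.div_(other)

    __itruediv__ = __idiv__              # Python 3 spelling -- the fix

b = Box(10.0)
id_before = id(b)
b /= 2
assert id(b) == id_before and b.value == 5.0   # same object, mutated in place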
django-oscar__django-oscar-2404
[ { "content": "import zlib\nfrom decimal import Decimal as D\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.db import models\nfrom django.db.models import Sum\nfrom django.utils.encoding import python_2_unicode_compatible, smart_text\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager\nfrom oscar.apps.offer import results\nfrom oscar.core.compat import AUTH_USER_MODEL\nfrom oscar.core.loading import get_class\nfrom oscar.core.utils import get_default_currency\nfrom oscar.models.fields.slugfield import SlugField\nfrom oscar.templatetags.currency_filters import currency\n\nUnavailable = get_class('partner.availability', 'Unavailable')\n\n\n@python_2_unicode_compatible\nclass AbstractBasket(models.Model):\n \"\"\"\n Basket object\n \"\"\"\n # Baskets can be anonymously owned - hence this field is nullable. When a\n # anon user signs in, their two baskets are merged.\n owner = models.ForeignKey(\n AUTH_USER_MODEL,\n null=True,\n related_name='baskets',\n on_delete=models.CASCADE,\n verbose_name=_(\"Owner\"))\n\n # Basket statuses\n # - Frozen is for when a basket is in the process of being submitted\n # and we need to prevent any changes to it.\n OPEN, MERGED, SAVED, FROZEN, SUBMITTED = (\n \"Open\", \"Merged\", \"Saved\", \"Frozen\", \"Submitted\")\n STATUS_CHOICES = (\n (OPEN, _(\"Open - currently active\")),\n (MERGED, _(\"Merged - superceded by another basket\")),\n (SAVED, _(\"Saved - for items to be purchased later\")),\n (FROZEN, _(\"Frozen - the basket cannot be modified\")),\n (SUBMITTED, _(\"Submitted - has been ordered at the checkout\")),\n )\n status = models.CharField(\n _(\"Status\"), max_length=128, default=OPEN, choices=STATUS_CHOICES)\n\n # A basket can have many vouchers attached to it. However, it is common\n # for sites to only allow one voucher per basket - this will need to be\n # enforced in the project's codebase.\n vouchers = models.ManyToManyField(\n 'voucher.Voucher', verbose_name=_(\"Vouchers\"), blank=True)\n\n date_created = models.DateTimeField(_(\"Date created\"), auto_now_add=True)\n date_merged = models.DateTimeField(_(\"Date merged\"), null=True, blank=True)\n date_submitted = models.DateTimeField(_(\"Date submitted\"), null=True,\n blank=True)\n\n # Only if a basket is in one of these statuses can it be edited\n editable_statuses = (OPEN, SAVED)\n\n class Meta:\n abstract = True\n app_label = 'basket'\n verbose_name = _('Basket')\n verbose_name_plural = _('Baskets')\n\n objects = models.Manager()\n open = OpenBasketManager()\n saved = SavedBasketManager()\n\n def __init__(self, *args, **kwargs):\n super(AbstractBasket, self).__init__(*args, **kwargs)\n\n # We keep a cached copy of the basket lines as we refer to them often\n # within the same request cycle. 
Also, applying offers will append\n # discount data to the basket lines which isn't persisted to the DB and\n # so we want to avoid reloading them as this would drop the discount\n # information.\n self._lines = None\n self.offer_applications = results.OfferApplications()\n\n def __str__(self):\n return _(\n u\"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)\") \\\n % {'status': self.status,\n 'owner': self.owner,\n 'num_lines': self.num_lines}\n\n # ========\n # Strategy\n # ========\n\n @property\n def has_strategy(self):\n return hasattr(self, '_strategy')\n\n def _get_strategy(self):\n if not self.has_strategy:\n raise RuntimeError(\n \"No strategy class has been assigned to this basket. \"\n \"This is normally assigned to the incoming request in \"\n \"oscar.apps.basket.middleware.BasketMiddleware. \"\n \"Since it is missing, you must be doing something different. \"\n \"Ensure that a strategy instance is assigned to the basket!\"\n )\n return self._strategy\n\n def _set_strategy(self, strategy):\n self._strategy = strategy\n\n strategy = property(_get_strategy, _set_strategy)\n\n def all_lines(self):\n \"\"\"\n Return a cached set of basket lines.\n\n This is important for offers as they alter the line models and you\n don't want to reload them from the DB as that information would be\n lost.\n \"\"\"\n if self.id is None:\n return self.lines.none()\n if self._lines is None:\n self._lines = (\n self.lines\n .select_related('product', 'stockrecord')\n .prefetch_related(\n 'attributes', 'product__images')\n .order_by(self._meta.pk.name))\n return self._lines\n\n def is_quantity_allowed(self, qty):\n \"\"\"\n Test whether the passed quantity of items can be added to the basket\n \"\"\"\n # We enforce a max threshold to prevent a DOS attack via the offers\n # system.\n basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD\n if basket_threshold:\n total_basket_quantity = self.num_items\n max_allowed = basket_threshold - total_basket_quantity\n if qty > max_allowed:\n return False, _(\n \"Due to technical limitations we are not able \"\n \"to ship more than %(threshold)d items in one order.\") \\\n % {'threshold': basket_threshold}\n return True, None\n\n # ============\n # Manipulation\n # ============\n\n def flush(self):\n \"\"\"\n Remove all lines from basket.\n \"\"\"\n if self.status == self.FROZEN:\n raise PermissionDenied(\"A frozen basket cannot be flushed\")\n self.lines.all().delete()\n self._lines = None\n\n def add_product(self, product, quantity=1, options=None):\n \"\"\"\n Add a product to the basket\n\n 'stock_info' is the price and availability data returned from\n a partner strategy class.\n\n The 'options' list should contains dicts with keys 'option' and 'value'\n which link the relevant product.Option model and string value\n respectively.\n\n Returns (line, created).\n line: the matching basket line\n created: whether the line was created or updated\n\n \"\"\"\n if options is None:\n options = []\n if not self.id:\n self.save()\n\n # Ensure that all lines are the same currency\n price_currency = self.currency\n stock_info = self.strategy.fetch_for_product(product)\n if price_currency and stock_info.price.currency != price_currency:\n raise ValueError((\n \"Basket lines must all have the same currency. Proposed \"\n \"line has currency %s, while basket has currency %s\")\n % (stock_info.price.currency, price_currency))\n\n if stock_info.stockrecord is None:\n raise ValueError((\n \"Basket lines must all have stock records. 
Strategy hasn't \"\n \"found any stock record for product %s\") % product)\n\n # Line reference is used to distinguish between variations of the same\n # product (eg T-shirts with different personalisations)\n line_ref = self._create_line_reference(\n product, stock_info.stockrecord, options)\n\n # Determine price to store (if one exists). It is only stored for\n # audit and sometimes caching.\n defaults = {\n 'quantity': quantity,\n 'price_excl_tax': stock_info.price.excl_tax,\n 'price_currency': stock_info.price.currency,\n }\n if stock_info.price.is_tax_known:\n defaults['price_incl_tax'] = stock_info.price.incl_tax\n\n line, created = self.lines.get_or_create(\n line_reference=line_ref,\n product=product,\n stockrecord=stock_info.stockrecord,\n defaults=defaults)\n if created:\n for option_dict in options:\n line.attributes.create(option=option_dict['option'],\n value=option_dict['value'])\n else:\n line.quantity = max(0, line.quantity + quantity)\n line.save()\n self.reset_offer_applications()\n\n # Returning the line is useful when overriding this method.\n return line, created\n add_product.alters_data = True\n add = add_product\n\n def applied_offers(self):\n \"\"\"\n Return a dict of offers successfully applied to the basket.\n\n This is used to compare offers before and after a basket change to see\n if there is a difference.\n \"\"\"\n return self.offer_applications.offers\n\n def reset_offer_applications(self):\n \"\"\"\n Remove any discounts so they get recalculated\n \"\"\"\n self.offer_applications = results.OfferApplications()\n self._lines = None\n\n def merge_line(self, line, add_quantities=True):\n \"\"\"\n For transferring a line from another basket to this one.\n\n This is used with the \"Saved\" basket functionality.\n \"\"\"\n try:\n existing_line = self.lines.get(line_reference=line.line_reference)\n except ObjectDoesNotExist:\n # Line does not already exist - reassign its basket\n line.basket = self\n line.save()\n else:\n # Line already exists - assume the max quantity is correct and\n # delete the old\n if add_quantities:\n existing_line.quantity += line.quantity\n else:\n existing_line.quantity = max(existing_line.quantity,\n line.quantity)\n existing_line.save()\n line.delete()\n finally:\n self._lines = None\n merge_line.alters_data = True\n\n def merge(self, basket, add_quantities=True):\n \"\"\"\n Merges another basket with this one.\n\n :basket: The basket to merge into this one.\n :add_quantities: Whether to add line quantities when they are merged.\n \"\"\"\n # Use basket.lines.all instead of all_lines as this function is called\n # before a strategy has been assigned.\n for line_to_merge in basket.lines.all():\n self.merge_line(line_to_merge, add_quantities)\n basket.status = self.MERGED\n basket.date_merged = now()\n basket._lines = None\n basket.save()\n # Ensure all vouchers are moved to the new basket\n for voucher in basket.vouchers.all():\n basket.vouchers.remove(voucher)\n self.vouchers.add(voucher)\n merge.alters_data = True\n\n def freeze(self):\n \"\"\"\n Freezes the basket so it cannot be modified.\n \"\"\"\n self.status = self.FROZEN\n self.save()\n freeze.alters_data = True\n\n def thaw(self):\n \"\"\"\n Unfreezes a basket so it can be modified again\n \"\"\"\n self.status = self.OPEN\n self.save()\n thaw.alters_data = True\n\n def submit(self):\n \"\"\"\n Mark this basket as submitted\n \"\"\"\n self.status = self.SUBMITTED\n self.date_submitted = now()\n self.save()\n submit.alters_data = True\n\n # Kept for backwards compatibility\n 
set_as_submitted = submit\n\n def is_shipping_required(self):\n \"\"\"\n Test whether the basket contains physical products that require\n shipping.\n \"\"\"\n for line in self.all_lines():\n if line.product.is_shipping_required:\n return True\n return False\n\n # =======\n # Helpers\n # =======\n\n def _create_line_reference(self, product, stockrecord, options):\n \"\"\"\n Returns a reference string for a line based on the item\n and its options.\n \"\"\"\n base = '%s_%s' % (product.id, stockrecord.id)\n if not options:\n return base\n repr_options = [{'option': repr(option['option']),\n 'value': repr(option['value'])} for option in options]\n return \"%s_%s\" % (base, zlib.crc32(repr(repr_options).encode('utf8')))\n\n def _get_total(self, property):\n \"\"\"\n For executing a named method on each line of the basket\n and returning the total.\n \"\"\"\n total = D('0.00')\n for line in self.all_lines():\n try:\n total += getattr(line, property)\n except ObjectDoesNotExist:\n # Handle situation where the product may have been deleted\n pass\n except TypeError:\n # Handle Unavailable products with no known price\n info = self.strategy.fetch_for_product(line.product)\n if info.availability.is_available_to_buy:\n raise\n pass\n return total\n\n # ==========\n # Properties\n # ==========\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this basket is empty\n \"\"\"\n return self.id is None or self.num_lines == 0\n\n @property\n def is_tax_known(self):\n \"\"\"\n Test if tax values are known for this basket\n \"\"\"\n return all([line.is_tax_known for line in self.all_lines()])\n\n @property\n def total_excl_tax(self):\n \"\"\"\n Return total line price excluding tax\n \"\"\"\n return self._get_total('line_price_excl_tax_incl_discounts')\n\n @property\n def total_tax(self):\n \"\"\"Return total tax for a line\"\"\"\n return self._get_total('line_tax')\n\n @property\n def total_incl_tax(self):\n \"\"\"\n Return total price inclusive of tax and discounts\n \"\"\"\n return self._get_total('line_price_incl_tax_incl_discounts')\n\n @property\n def total_incl_tax_excl_discounts(self):\n \"\"\"\n Return total price inclusive of tax but exclusive discounts\n \"\"\"\n return self._get_total('line_price_incl_tax')\n\n @property\n def total_discount(self):\n return self._get_total('discount_value')\n\n @property\n def offer_discounts(self):\n \"\"\"\n Return basket discounts from non-voucher sources. 
Does not include\n shipping discounts.\n \"\"\"\n return self.offer_applications.offer_discounts\n\n @property\n def voucher_discounts(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.voucher_discounts\n\n @property\n def has_shipping_discounts(self):\n return len(self.shipping_discounts) > 0\n\n @property\n def shipping_discounts(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.shipping_discounts\n\n @property\n def post_order_actions(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.post_order_actions\n\n @property\n def grouped_voucher_discounts(self):\n \"\"\"\n Return discounts from vouchers but grouped so that a voucher which\n links to multiple offers is aggregated into one object.\n \"\"\"\n return self.offer_applications.grouped_voucher_discounts\n\n @property\n def total_excl_tax_excl_discounts(self):\n \"\"\"\n Return total price excluding tax and discounts\n \"\"\"\n return self._get_total('line_price_excl_tax')\n\n @property\n def num_lines(self):\n \"\"\"Return number of lines\"\"\"\n return self.all_lines().count()\n\n @property\n def num_items(self):\n \"\"\"Return number of items\"\"\"\n return sum(line.quantity for line in self.lines.all())\n\n @property\n def num_items_without_discount(self):\n num = 0\n for line in self.all_lines():\n num += line.quantity_without_discount\n return num\n\n @property\n def num_items_with_discount(self):\n num = 0\n for line in self.all_lines():\n num += line.quantity_with_discount\n return num\n\n @property\n def time_before_submit(self):\n if not self.date_submitted:\n return None\n return self.date_submitted - self.date_created\n\n @property\n def time_since_creation(self, test_datetime=None):\n if not test_datetime:\n test_datetime = now()\n return test_datetime - self.date_created\n\n @property\n def contains_a_voucher(self):\n if not self.id:\n return False\n return self.vouchers.exists()\n\n @property\n def is_submitted(self):\n return self.status == self.SUBMITTED\n\n @property\n def can_be_edited(self):\n \"\"\"\n Test if a basket can be edited\n \"\"\"\n return self.status in self.editable_statuses\n\n @property\n def currency(self):\n # Since all lines should have the same currency, return the currency of\n # the first one found.\n for line in self.all_lines():\n return line.price_currency\n\n # =============\n # Query methods\n # =============\n\n def contains_voucher(self, code):\n \"\"\"\n Test whether the basket contains a voucher with a given code\n \"\"\"\n if self.id is None:\n return False\n try:\n self.vouchers.get(code=code)\n except ObjectDoesNotExist:\n return False\n else:\n return True\n\n def product_quantity(self, product):\n \"\"\"\n Return the quantity of a product in the basket\n\n The basket can contain multiple lines with the same product, but\n different options and stockrecords. 
Those quantities are summed up.\n \"\"\"\n matching_lines = self.lines.filter(product=product)\n quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']\n return quantity or 0\n\n def line_quantity(self, product, stockrecord, options=None):\n \"\"\"\n Return the current quantity of a specific product and options\n \"\"\"\n ref = self._create_line_reference(product, stockrecord, options)\n try:\n return self.lines.get(line_reference=ref).quantity\n except ObjectDoesNotExist:\n return 0\n\n\n@python_2_unicode_compatible\nclass AbstractLine(models.Model):\n \"\"\"A line of a basket (product and a quantity)\n\n Common approaches on ordering basket lines:\n\n a) First added at top. That's the history-like approach; new items are\n added to the bottom of the list. Changing quantities doesn't impact\n position.\n Oscar does this by default. It just sorts by Line.pk, which is\n guaranteed to increment after each creation.\n\n b) Last modified at top. That means items move to the top when you add\n another one, and new items are added to the top as well. Amazon\n mostly does this, but doesn't change the position when you update\n the quantity in the basket view.\n To get this behaviour, add a date_updated field, change\n Meta.ordering and optionally do something similar on wishlist lines.\n Order lines should already be created in the order of the basket\n lines, and are sorted by their primary key, so no changes should be\n necessary there.\n\n \"\"\"\n basket = models.ForeignKey(\n 'basket.Basket',\n on_delete=models.CASCADE,\n related_name='lines',\n verbose_name=_(\"Basket\"))\n\n # This is to determine which products belong to the same line\n # We can't just use product.id as you can have customised products\n # which should be treated as separate lines. Set as a\n # SlugField as it is included in the path for certain views.\n line_reference = SlugField(\n _(\"Line Reference\"), max_length=128, db_index=True)\n\n product = models.ForeignKey(\n 'catalogue.Product',\n on_delete=models.CASCADE,\n related_name='basket_lines',\n verbose_name=_(\"Product\"))\n\n # We store the stockrecord that should be used to fulfil this line.\n stockrecord = models.ForeignKey(\n 'partner.StockRecord',\n on_delete=models.CASCADE,\n related_name='basket_lines')\n\n quantity = models.PositiveIntegerField(_('Quantity'), default=1)\n\n # We store the unit price incl tax of the product when it is first added to\n # the basket. This allows us to tell if a product has changed price since\n # a person first added it to their basket.\n price_currency = models.CharField(\n _(\"Currency\"), max_length=12, default=get_default_currency)\n price_excl_tax = models.DecimalField(\n _('Price excl. Tax'), decimal_places=2, max_digits=12,\n null=True)\n price_incl_tax = models.DecimalField(\n _('Price incl. 
Tax'), decimal_places=2, max_digits=12, null=True)\n\n # Track date of first addition\n date_created = models.DateTimeField(_(\"Date Created\"), auto_now_add=True)\n\n def __init__(self, *args, **kwargs):\n super(AbstractLine, self).__init__(*args, **kwargs)\n # Instance variables used to persist discount information\n self._discount_excl_tax = D('0.00')\n self._discount_incl_tax = D('0.00')\n self._affected_quantity = 0\n\n class Meta:\n abstract = True\n app_label = 'basket'\n # Enforce sorting by order of creation.\n ordering = ['date_created', 'pk']\n unique_together = (\"basket\", \"line_reference\")\n verbose_name = _('Basket line')\n verbose_name_plural = _('Basket lines')\n\n def __str__(self):\n return _(\n u\"Basket #%(basket_id)d, Product #%(product_id)d, quantity\"\n u\" %(quantity)d\") % {'basket_id': self.basket.pk,\n 'product_id': self.product.pk,\n 'quantity': self.quantity}\n\n def save(self, *args, **kwargs):\n if not self.basket.can_be_edited:\n raise PermissionDenied(\n _(\"You cannot modify a %s basket\") % (\n self.basket.status.lower(),))\n return super(AbstractLine, self).save(*args, **kwargs)\n\n # =============\n # Offer methods\n # =============\n\n def clear_discount(self):\n \"\"\"\n Remove any discounts from this line.\n \"\"\"\n self._discount_excl_tax = D('0.00')\n self._discount_incl_tax = D('0.00')\n self._affected_quantity = 0\n\n def discount(self, discount_value, affected_quantity, incl_tax=True):\n \"\"\"\n Apply a discount to this line\n \"\"\"\n if incl_tax:\n if self._discount_excl_tax > 0:\n raise RuntimeError(\n \"Attempting to discount the tax-inclusive price of a line \"\n \"when tax-exclusive discounts are already applied\")\n self._discount_incl_tax += discount_value\n else:\n if self._discount_incl_tax > 0:\n raise RuntimeError(\n \"Attempting to discount the tax-exclusive price of a line \"\n \"when tax-inclusive discounts are already applied\")\n self._discount_excl_tax += discount_value\n self._affected_quantity += int(affected_quantity)\n\n def consume(self, quantity):\n \"\"\"\n Mark all or part of the line as 'consumed'\n\n Consumed items are no longer available to be used in offers.\n \"\"\"\n if quantity > self.quantity - self._affected_quantity:\n inc = self.quantity - self._affected_quantity\n else:\n inc = quantity\n self._affected_quantity += int(inc)\n\n def get_price_breakdown(self):\n \"\"\"\n Return a breakdown of line prices after discounts have been applied.\n\n Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity)\n tuples.\n \"\"\"\n if not self.is_tax_known:\n raise RuntimeError(\"A price breakdown can only be determined \"\n \"when taxes are known\")\n prices = []\n if not self.discount_value:\n prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,\n self.quantity))\n else:\n # Need to split the discount among the affected quantity\n # of products.\n item_incl_tax_discount = (\n self.discount_value / int(self._affected_quantity))\n item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio\n item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01'))\n prices.append((self.unit_price_incl_tax - item_incl_tax_discount,\n self.unit_price_excl_tax - item_excl_tax_discount,\n self._affected_quantity))\n if self.quantity_without_discount:\n prices.append((self.unit_price_incl_tax,\n self.unit_price_excl_tax,\n self.quantity_without_discount))\n return prices\n\n # =======\n # Helpers\n # =======\n\n @property\n def _tax_ratio(self):\n if not self.unit_price_incl_tax:\n return 0\n 
return self.unit_price_excl_tax / self.unit_price_incl_tax\n\n # ==========\n # Properties\n # ==========\n\n @property\n def has_discount(self):\n return self.quantity > self.quantity_without_discount\n\n @property\n def quantity_with_discount(self):\n return self._affected_quantity\n\n @property\n def quantity_without_discount(self):\n return int(self.quantity - self._affected_quantity)\n\n @property\n def is_available_for_discount(self):\n return self.quantity_without_discount > 0\n\n @property\n def discount_value(self):\n # Only one of the incl- and excl- discounts should be non-zero\n return max(self._discount_incl_tax, self._discount_excl_tax)\n\n @property\n def purchase_info(self):\n \"\"\"\n Return the stock/price info\n \"\"\"\n if not hasattr(self, '_info'):\n # Cache the PurchaseInfo instance.\n self._info = self.basket.strategy.fetch_for_line(\n self, self.stockrecord)\n return self._info\n\n @property\n def is_tax_known(self):\n return self.purchase_info.price.is_tax_known\n\n @property\n def unit_effective_price(self):\n \"\"\"\n The price to use for offer calculations\n \"\"\"\n return self.purchase_info.price.effective_price\n\n @property\n def unit_price_excl_tax(self):\n return self.purchase_info.price.excl_tax\n\n @property\n def unit_price_incl_tax(self):\n return self.purchase_info.price.incl_tax\n\n @property\n def unit_tax(self):\n return self.purchase_info.price.tax\n\n @property\n def line_price_excl_tax(self):\n if self.unit_price_excl_tax is not None:\n return self.quantity * self.unit_price_excl_tax\n\n @property\n def line_price_excl_tax_incl_discounts(self):\n if self._discount_excl_tax and self.line_price_excl_tax is not None:\n return self.line_price_excl_tax - self._discount_excl_tax\n if self._discount_incl_tax and self.line_price_incl_tax is not None:\n # This is a tricky situation. We know the discount as calculated\n # against tax inclusive prices but we need to guess how much of the\n # discount applies to tax-exclusive prices. We do this by\n # assuming a linear tax and scaling down the original discount.\n return self.line_price_excl_tax \\\n - self._tax_ratio * self._discount_incl_tax\n return self.line_price_excl_tax\n\n @property\n def line_price_incl_tax_incl_discounts(self):\n # We use whichever discount value is set. 
If the discount value was\n # calculated against the tax-exclusive prices, then the line price\n # including tax\n if self.line_price_incl_tax is not None:\n return self.line_price_incl_tax - self.discount_value\n\n @property\n def line_tax(self):\n if self.unit_tax:\n return self.quantity * self.unit_tax\n\n @property\n def line_price_incl_tax(self):\n if self.unit_price_incl_tax is not None:\n return self.quantity * self.unit_price_incl_tax\n\n @property\n def description(self):\n d = smart_text(self.product)\n ops = []\n for attribute in self.attributes.all():\n ops.append(\"%s = '%s'\" % (attribute.option.name, attribute.value))\n if ops:\n d = \"%s (%s)\" % (d, \", \".join(ops))\n return d\n\n def get_warning(self):\n \"\"\"\n Return a warning message about this basket line if one is applicable\n\n This could be things like the price has changed\n \"\"\"\n if isinstance(self.purchase_info.availability, Unavailable):\n msg = u\"'%(product)s' is no longer available\"\n return _(msg) % {'product': self.product.get_title()}\n\n if not self.price_incl_tax:\n return\n if not self.purchase_info.price.is_tax_known:\n return\n\n # Compare current price to price when added to basket\n current_price_incl_tax = self.purchase_info.price.incl_tax\n if current_price_incl_tax != self.price_incl_tax:\n product_prices = {\n 'product': self.product.get_title(),\n 'old_price': currency(self.price_incl_tax),\n 'new_price': currency(current_price_incl_tax)\n }\n if current_price_incl_tax > self.price_incl_tax:\n warning = _(\"The price of '%(product)s' has increased from\"\n \" %(old_price)s to %(new_price)s since you added\"\n \" it to your basket\")\n return warning % product_prices\n else:\n warning = _(\"The price of '%(product)s' has decreased from\"\n \" %(old_price)s to %(new_price)s since you added\"\n \" it to your basket\")\n return warning % product_prices\n\n\nclass AbstractLineAttribute(models.Model):\n \"\"\"\n An attribute of a basket line\n \"\"\"\n line = models.ForeignKey(\n 'basket.Line',\n on_delete=models.CASCADE,\n related_name='attributes',\n verbose_name=_(\"Line\"))\n option = models.ForeignKey(\n 'catalogue.Option',\n on_delete=models.CASCADE,\n verbose_name=_(\"Option\"))\n value = models.CharField(_(\"Value\"), max_length=255)\n\n class Meta:\n abstract = True\n app_label = 'basket'\n verbose_name = _('Line attribute')\n verbose_name_plural = _('Line attributes')\n", "path": "src/oscar/apps/basket/abstract_models.py" } ]
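One detail of the basket module above worth pulling out: `_create_line_reference` keys basket lines on the product id, the stockrecord id and a CRC32 of the options' repr, so two lines for the same product with different personalisations never collapse into one. A standalone sketch of that scheme (illustrative only, not Oscar code):

import zlib

def line_reference(product_id, stockrecord_id, options=None):
    base = '%s_%s' % (product_id, stockrecord_id)
    if not options:
        return base
    repr_options = [{'option': repr(o['option']), 'value': repr(o['value'])}
                    for o in options]
    return '%s_%s' % (base, zlib.crc32(repr(repr_options).encode('utf8')))

plain = line_reference(1, 7)
engraved = line_reference(1, 7, [{'option': 'engraving', 'value': 'ABC'}])
assert plain != engraved   # the customised item gets its own basket line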
[ { "content": "import zlib\nfrom decimal import Decimal as D\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.db import models\nfrom django.db.models import Sum\nfrom django.utils.encoding import python_2_unicode_compatible, smart_text\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager\nfrom oscar.apps.offer import results\nfrom oscar.core.compat import AUTH_USER_MODEL\nfrom oscar.core.loading import get_class\nfrom oscar.core.utils import get_default_currency\nfrom oscar.models.fields.slugfield import SlugField\nfrom oscar.templatetags.currency_filters import currency\n\nUnavailable = get_class('partner.availability', 'Unavailable')\n\n\n@python_2_unicode_compatible\nclass AbstractBasket(models.Model):\n \"\"\"\n Basket object\n \"\"\"\n # Baskets can be anonymously owned - hence this field is nullable. When a\n # anon user signs in, their two baskets are merged.\n owner = models.ForeignKey(\n AUTH_USER_MODEL,\n null=True,\n related_name='baskets',\n on_delete=models.CASCADE,\n verbose_name=_(\"Owner\"))\n\n # Basket statuses\n # - Frozen is for when a basket is in the process of being submitted\n # and we need to prevent any changes to it.\n OPEN, MERGED, SAVED, FROZEN, SUBMITTED = (\n \"Open\", \"Merged\", \"Saved\", \"Frozen\", \"Submitted\")\n STATUS_CHOICES = (\n (OPEN, _(\"Open - currently active\")),\n (MERGED, _(\"Merged - superceded by another basket\")),\n (SAVED, _(\"Saved - for items to be purchased later\")),\n (FROZEN, _(\"Frozen - the basket cannot be modified\")),\n (SUBMITTED, _(\"Submitted - has been ordered at the checkout\")),\n )\n status = models.CharField(\n _(\"Status\"), max_length=128, default=OPEN, choices=STATUS_CHOICES)\n\n # A basket can have many vouchers attached to it. However, it is common\n # for sites to only allow one voucher per basket - this will need to be\n # enforced in the project's codebase.\n vouchers = models.ManyToManyField(\n 'voucher.Voucher', verbose_name=_(\"Vouchers\"), blank=True)\n\n date_created = models.DateTimeField(_(\"Date created\"), auto_now_add=True)\n date_merged = models.DateTimeField(_(\"Date merged\"), null=True, blank=True)\n date_submitted = models.DateTimeField(_(\"Date submitted\"), null=True,\n blank=True)\n\n # Only if a basket is in one of these statuses can it be edited\n editable_statuses = (OPEN, SAVED)\n\n class Meta:\n abstract = True\n app_label = 'basket'\n verbose_name = _('Basket')\n verbose_name_plural = _('Baskets')\n\n objects = models.Manager()\n open = OpenBasketManager()\n saved = SavedBasketManager()\n\n def __init__(self, *args, **kwargs):\n super(AbstractBasket, self).__init__(*args, **kwargs)\n\n # We keep a cached copy of the basket lines as we refer to them often\n # within the same request cycle. 
Also, applying offers will append\n # discount data to the basket lines which isn't persisted to the DB and\n # so we want to avoid reloading them as this would drop the discount\n # information.\n self._lines = None\n self.offer_applications = results.OfferApplications()\n\n def __str__(self):\n return _(\n u\"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)\") \\\n % {'status': self.status,\n 'owner': self.owner,\n 'num_lines': self.num_lines}\n\n # ========\n # Strategy\n # ========\n\n @property\n def has_strategy(self):\n return hasattr(self, '_strategy')\n\n def _get_strategy(self):\n if not self.has_strategy:\n raise RuntimeError(\n \"No strategy class has been assigned to this basket. \"\n \"This is normally assigned to the incoming request in \"\n \"oscar.apps.basket.middleware.BasketMiddleware. \"\n \"Since it is missing, you must be doing something different. \"\n \"Ensure that a strategy instance is assigned to the basket!\"\n )\n return self._strategy\n\n def _set_strategy(self, strategy):\n self._strategy = strategy\n\n strategy = property(_get_strategy, _set_strategy)\n\n def all_lines(self):\n \"\"\"\n Return a cached set of basket lines.\n\n This is important for offers as they alter the line models and you\n don't want to reload them from the DB as that information would be\n lost.\n \"\"\"\n if self.id is None:\n return self.lines.none()\n if self._lines is None:\n self._lines = (\n self.lines\n .select_related('product', 'stockrecord')\n .prefetch_related(\n 'attributes', 'product__images')\n .order_by(self._meta.pk.name))\n return self._lines\n\n def is_quantity_allowed(self, qty):\n \"\"\"\n Test whether the passed quantity of items can be added to the basket\n \"\"\"\n # We enforce a max threshold to prevent a DOS attack via the offers\n # system.\n basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD\n if basket_threshold:\n total_basket_quantity = self.num_items\n max_allowed = basket_threshold - total_basket_quantity\n if qty > max_allowed:\n return False, _(\n \"Due to technical limitations we are not able \"\n \"to ship more than %(threshold)d items in one order.\") \\\n % {'threshold': basket_threshold}\n return True, None\n\n # ============\n # Manipulation\n # ============\n\n def flush(self):\n \"\"\"\n Remove all lines from basket.\n \"\"\"\n if self.status == self.FROZEN:\n raise PermissionDenied(\"A frozen basket cannot be flushed\")\n self.lines.all().delete()\n self._lines = None\n\n def add_product(self, product, quantity=1, options=None):\n \"\"\"\n Add a product to the basket\n\n 'stock_info' is the price and availability data returned from\n a partner strategy class.\n\n The 'options' list should contains dicts with keys 'option' and 'value'\n which link the relevant product.Option model and string value\n respectively.\n\n Returns (line, created).\n line: the matching basket line\n created: whether the line was created or updated\n\n \"\"\"\n if options is None:\n options = []\n if not self.id:\n self.save()\n\n # Ensure that all lines are the same currency\n price_currency = self.currency\n stock_info = self.strategy.fetch_for_product(product)\n if price_currency and stock_info.price.currency != price_currency:\n raise ValueError((\n \"Basket lines must all have the same currency. Proposed \"\n \"line has currency %s, while basket has currency %s\")\n % (stock_info.price.currency, price_currency))\n\n if stock_info.stockrecord is None:\n raise ValueError((\n \"Basket lines must all have stock records. 
Strategy hasn't \"\n \"found any stock record for product %s\") % product)\n\n # Line reference is used to distinguish between variations of the same\n # product (eg T-shirts with different personalisations)\n line_ref = self._create_line_reference(\n product, stock_info.stockrecord, options)\n\n # Determine price to store (if one exists). It is only stored for\n # audit and sometimes caching.\n defaults = {\n 'quantity': quantity,\n 'price_excl_tax': stock_info.price.excl_tax,\n 'price_currency': stock_info.price.currency,\n }\n if stock_info.price.is_tax_known:\n defaults['price_incl_tax'] = stock_info.price.incl_tax\n\n line, created = self.lines.get_or_create(\n line_reference=line_ref,\n product=product,\n stockrecord=stock_info.stockrecord,\n defaults=defaults)\n if created:\n for option_dict in options:\n line.attributes.create(option=option_dict['option'],\n value=option_dict['value'])\n else:\n line.quantity = max(0, line.quantity + quantity)\n line.save()\n self.reset_offer_applications()\n\n # Returning the line is useful when overriding this method.\n return line, created\n add_product.alters_data = True\n add = add_product\n\n def applied_offers(self):\n \"\"\"\n Return a dict of offers successfully applied to the basket.\n\n This is used to compare offers before and after a basket change to see\n if there is a difference.\n \"\"\"\n return self.offer_applications.offers\n\n def reset_offer_applications(self):\n \"\"\"\n Remove any discounts so they get recalculated\n \"\"\"\n self.offer_applications = results.OfferApplications()\n self._lines = None\n\n def merge_line(self, line, add_quantities=True):\n \"\"\"\n For transferring a line from another basket to this one.\n\n This is used with the \"Saved\" basket functionality.\n \"\"\"\n try:\n existing_line = self.lines.get(line_reference=line.line_reference)\n except ObjectDoesNotExist:\n # Line does not already exist - reassign its basket\n line.basket = self\n line.save()\n else:\n # Line already exists - assume the max quantity is correct and\n # delete the old\n if add_quantities:\n existing_line.quantity += line.quantity\n else:\n existing_line.quantity = max(existing_line.quantity,\n line.quantity)\n existing_line.save()\n line.delete()\n finally:\n self._lines = None\n merge_line.alters_data = True\n\n def merge(self, basket, add_quantities=True):\n \"\"\"\n Merges another basket with this one.\n\n :basket: The basket to merge into this one.\n :add_quantities: Whether to add line quantities when they are merged.\n \"\"\"\n # Use basket.lines.all instead of all_lines as this function is called\n # before a strategy has been assigned.\n for line_to_merge in basket.lines.all():\n self.merge_line(line_to_merge, add_quantities)\n basket.status = self.MERGED\n basket.date_merged = now()\n basket._lines = None\n basket.save()\n # Ensure all vouchers are moved to the new basket\n for voucher in basket.vouchers.all():\n basket.vouchers.remove(voucher)\n self.vouchers.add(voucher)\n merge.alters_data = True\n\n def freeze(self):\n \"\"\"\n Freezes the basket so it cannot be modified.\n \"\"\"\n self.status = self.FROZEN\n self.save()\n freeze.alters_data = True\n\n def thaw(self):\n \"\"\"\n Unfreezes a basket so it can be modified again\n \"\"\"\n self.status = self.OPEN\n self.save()\n thaw.alters_data = True\n\n def submit(self):\n \"\"\"\n Mark this basket as submitted\n \"\"\"\n self.status = self.SUBMITTED\n self.date_submitted = now()\n self.save()\n submit.alters_data = True\n\n # Kept for backwards compatibility\n 
set_as_submitted = submit\n\n def is_shipping_required(self):\n \"\"\"\n Test whether the basket contains physical products that require\n shipping.\n \"\"\"\n for line in self.all_lines():\n if line.product.is_shipping_required:\n return True\n return False\n\n # =======\n # Helpers\n # =======\n\n def _create_line_reference(self, product, stockrecord, options):\n \"\"\"\n Returns a reference string for a line based on the item\n and its options.\n \"\"\"\n base = '%s_%s' % (product.id, stockrecord.id)\n if not options:\n return base\n repr_options = [{'option': repr(option['option']),\n 'value': repr(option['value'])} for option in options]\n return \"%s_%s\" % (base, zlib.crc32(repr(repr_options).encode('utf8')))\n\n def _get_total(self, property):\n \"\"\"\n For executing a named method on each line of the basket\n and returning the total.\n \"\"\"\n total = D('0.00')\n for line in self.all_lines():\n try:\n total += getattr(line, property)\n except ObjectDoesNotExist:\n # Handle situation where the product may have been deleted\n pass\n except TypeError:\n # Handle Unavailable products with no known price\n info = self.strategy.fetch_for_product(line.product)\n if info.availability.is_available_to_buy:\n raise\n pass\n return total\n\n # ==========\n # Properties\n # ==========\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this basket is empty\n \"\"\"\n return self.id is None or self.num_lines == 0\n\n @property\n def is_tax_known(self):\n \"\"\"\n Test if tax values are known for this basket\n \"\"\"\n return all([line.is_tax_known for line in self.all_lines()])\n\n @property\n def total_excl_tax(self):\n \"\"\"\n Return total line price excluding tax\n \"\"\"\n return self._get_total('line_price_excl_tax_incl_discounts')\n\n @property\n def total_tax(self):\n \"\"\"Return total tax for a line\"\"\"\n return self._get_total('line_tax')\n\n @property\n def total_incl_tax(self):\n \"\"\"\n Return total price inclusive of tax and discounts\n \"\"\"\n return self._get_total('line_price_incl_tax_incl_discounts')\n\n @property\n def total_incl_tax_excl_discounts(self):\n \"\"\"\n Return total price inclusive of tax but exclusive discounts\n \"\"\"\n return self._get_total('line_price_incl_tax')\n\n @property\n def total_discount(self):\n return self._get_total('discount_value')\n\n @property\n def offer_discounts(self):\n \"\"\"\n Return basket discounts from non-voucher sources. 
Does not include\n shipping discounts.\n \"\"\"\n return self.offer_applications.offer_discounts\n\n @property\n def voucher_discounts(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.voucher_discounts\n\n @property\n def has_shipping_discounts(self):\n return len(self.shipping_discounts) > 0\n\n @property\n def shipping_discounts(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.shipping_discounts\n\n @property\n def post_order_actions(self):\n \"\"\"\n Return discounts from vouchers\n \"\"\"\n return self.offer_applications.post_order_actions\n\n @property\n def grouped_voucher_discounts(self):\n \"\"\"\n Return discounts from vouchers but grouped so that a voucher which\n links to multiple offers is aggregated into one object.\n \"\"\"\n return self.offer_applications.grouped_voucher_discounts\n\n @property\n def total_excl_tax_excl_discounts(self):\n \"\"\"\n Return total price excluding tax and discounts\n \"\"\"\n return self._get_total('line_price_excl_tax')\n\n @property\n def num_lines(self):\n \"\"\"Return number of lines\"\"\"\n return self.all_lines().count()\n\n @property\n def num_items(self):\n \"\"\"Return number of items\"\"\"\n return sum(line.quantity for line in self.lines.all())\n\n @property\n def num_items_without_discount(self):\n num = 0\n for line in self.all_lines():\n num += line.quantity_without_discount\n return num\n\n @property\n def num_items_with_discount(self):\n num = 0\n for line in self.all_lines():\n num += line.quantity_with_discount\n return num\n\n @property\n def time_before_submit(self):\n if not self.date_submitted:\n return None\n return self.date_submitted - self.date_created\n\n @property\n def time_since_creation(self, test_datetime=None):\n if not test_datetime:\n test_datetime = now()\n return test_datetime - self.date_created\n\n @property\n def contains_a_voucher(self):\n if not self.id:\n return False\n return self.vouchers.exists()\n\n @property\n def is_submitted(self):\n return self.status == self.SUBMITTED\n\n @property\n def can_be_edited(self):\n \"\"\"\n Test if a basket can be edited\n \"\"\"\n return self.status in self.editable_statuses\n\n @property\n def currency(self):\n # Since all lines should have the same currency, return the currency of\n # the first one found.\n for line in self.all_lines():\n return line.price_currency\n\n # =============\n # Query methods\n # =============\n\n def contains_voucher(self, code):\n \"\"\"\n Test whether the basket contains a voucher with a given code\n \"\"\"\n if self.id is None:\n return False\n try:\n self.vouchers.get(code=code)\n except ObjectDoesNotExist:\n return False\n else:\n return True\n\n def product_quantity(self, product):\n \"\"\"\n Return the quantity of a product in the basket\n\n The basket can contain multiple lines with the same product, but\n different options and stockrecords. 
Those quantities are summed up.\n \"\"\"\n matching_lines = self.lines.filter(product=product)\n quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']\n return quantity or 0\n\n def line_quantity(self, product, stockrecord, options=None):\n \"\"\"\n Return the current quantity of a specific product and options\n \"\"\"\n ref = self._create_line_reference(product, stockrecord, options)\n try:\n return self.lines.get(line_reference=ref).quantity\n except ObjectDoesNotExist:\n return 0\n\n\n@python_2_unicode_compatible\nclass AbstractLine(models.Model):\n \"\"\"A line of a basket (product and a quantity)\n\n Common approaches on ordering basket lines:\n\n a) First added at top. That's the history-like approach; new items are\n added to the bottom of the list. Changing quantities doesn't impact\n position.\n Oscar does this by default. It just sorts by Line.pk, which is\n guaranteed to increment after each creation.\n\n b) Last modified at top. That means items move to the top when you add\n another one, and new items are added to the top as well. Amazon\n mostly does this, but doesn't change the position when you update\n the quantity in the basket view.\n To get this behaviour, add a date_updated field, change\n Meta.ordering and optionally do something similar on wishlist lines.\n Order lines should already be created in the order of the basket\n lines, and are sorted by their primary key, so no changes should be\n necessary there.\n\n \"\"\"\n basket = models.ForeignKey(\n 'basket.Basket',\n on_delete=models.CASCADE,\n related_name='lines',\n verbose_name=_(\"Basket\"))\n\n # This is to determine which products belong to the same line\n # We can't just use product.id as you can have customised products\n # which should be treated as separate lines. Set as a\n # SlugField as it is included in the path for certain views.\n line_reference = SlugField(\n _(\"Line Reference\"), max_length=128, db_index=True)\n\n product = models.ForeignKey(\n 'catalogue.Product',\n on_delete=models.CASCADE,\n related_name='basket_lines',\n verbose_name=_(\"Product\"))\n\n # We store the stockrecord that should be used to fulfil this line.\n stockrecord = models.ForeignKey(\n 'partner.StockRecord',\n on_delete=models.CASCADE,\n related_name='basket_lines')\n\n quantity = models.PositiveIntegerField(_('Quantity'), default=1)\n\n # We store the unit price incl tax of the product when it is first added to\n # the basket. This allows us to tell if a product has changed price since\n # a person first added it to their basket.\n price_currency = models.CharField(\n _(\"Currency\"), max_length=12, default=get_default_currency)\n price_excl_tax = models.DecimalField(\n _('Price excl. Tax'), decimal_places=2, max_digits=12,\n null=True)\n price_incl_tax = models.DecimalField(\n _('Price incl. 
Tax'), decimal_places=2, max_digits=12, null=True)\n\n # Track date of first addition\n date_created = models.DateTimeField(_(\"Date Created\"), auto_now_add=True)\n\n def __init__(self, *args, **kwargs):\n super(AbstractLine, self).__init__(*args, **kwargs)\n # Instance variables used to persist discount information\n self._discount_excl_tax = D('0.00')\n self._discount_incl_tax = D('0.00')\n self._affected_quantity = 0\n\n class Meta:\n abstract = True\n app_label = 'basket'\n # Enforce sorting by order of creation.\n ordering = ['date_created', 'pk']\n unique_together = (\"basket\", \"line_reference\")\n verbose_name = _('Basket line')\n verbose_name_plural = _('Basket lines')\n\n def __str__(self):\n return _(\n u\"Basket #%(basket_id)d, Product #%(product_id)d, quantity\"\n u\" %(quantity)d\") % {'basket_id': self.basket.pk,\n 'product_id': self.product.pk,\n 'quantity': self.quantity}\n\n def save(self, *args, **kwargs):\n if not self.basket.can_be_edited:\n raise PermissionDenied(\n _(\"You cannot modify a %s basket\") % (\n self.basket.status.lower(),))\n return super(AbstractLine, self).save(*args, **kwargs)\n\n # =============\n # Offer methods\n # =============\n\n def clear_discount(self):\n \"\"\"\n Remove any discounts from this line.\n \"\"\"\n self._discount_excl_tax = D('0.00')\n self._discount_incl_tax = D('0.00')\n self._affected_quantity = 0\n\n def discount(self, discount_value, affected_quantity, incl_tax=True):\n \"\"\"\n Apply a discount to this line\n \"\"\"\n if incl_tax:\n if self._discount_excl_tax > 0:\n raise RuntimeError(\n \"Attempting to discount the tax-inclusive price of a line \"\n \"when tax-exclusive discounts are already applied\")\n self._discount_incl_tax += discount_value\n else:\n if self._discount_incl_tax > 0:\n raise RuntimeError(\n \"Attempting to discount the tax-exclusive price of a line \"\n \"when tax-inclusive discounts are already applied\")\n self._discount_excl_tax += discount_value\n self._affected_quantity += int(affected_quantity)\n\n def consume(self, quantity):\n \"\"\"\n Mark all or part of the line as 'consumed'\n\n Consumed items are no longer available to be used in offers.\n \"\"\"\n if quantity > self.quantity - self._affected_quantity:\n inc = self.quantity - self._affected_quantity\n else:\n inc = quantity\n self._affected_quantity += int(inc)\n\n def get_price_breakdown(self):\n \"\"\"\n Return a breakdown of line prices after discounts have been applied.\n\n Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity)\n tuples.\n \"\"\"\n if not self.is_tax_known:\n raise RuntimeError(\"A price breakdown can only be determined \"\n \"when taxes are known\")\n prices = []\n if not self.discount_value:\n prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,\n self.quantity))\n else:\n # Need to split the discount among the affected quantity\n # of products.\n item_incl_tax_discount = (\n self.discount_value / int(self._affected_quantity))\n item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio\n item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01'))\n prices.append((self.unit_price_incl_tax - item_incl_tax_discount,\n self.unit_price_excl_tax - item_excl_tax_discount,\n self._affected_quantity))\n if self.quantity_without_discount:\n prices.append((self.unit_price_incl_tax,\n self.unit_price_excl_tax,\n self.quantity_without_discount))\n return prices\n\n # =======\n # Helpers\n # =======\n\n @property\n def _tax_ratio(self):\n if not self.unit_price_incl_tax:\n return 0\n 
return self.unit_price_excl_tax / self.unit_price_incl_tax\n\n # ==========\n # Properties\n # ==========\n\n @property\n def has_discount(self):\n return self.quantity > self.quantity_without_discount\n\n @property\n def quantity_with_discount(self):\n return self._affected_quantity\n\n @property\n def quantity_without_discount(self):\n return int(self.quantity - self._affected_quantity)\n\n @property\n def is_available_for_discount(self):\n return self.quantity_without_discount > 0\n\n @property\n def discount_value(self):\n # Only one of the incl- and excl- discounts should be non-zero\n return max(self._discount_incl_tax, self._discount_excl_tax)\n\n @property\n def purchase_info(self):\n \"\"\"\n Return the stock/price info\n \"\"\"\n if not hasattr(self, '_info'):\n # Cache the PurchaseInfo instance.\n self._info = self.basket.strategy.fetch_for_line(\n self, self.stockrecord)\n return self._info\n\n @property\n def is_tax_known(self):\n return self.purchase_info.price.is_tax_known\n\n @property\n def unit_effective_price(self):\n \"\"\"\n The price to use for offer calculations\n \"\"\"\n return self.purchase_info.price.effective_price\n\n @property\n def unit_price_excl_tax(self):\n return self.purchase_info.price.excl_tax\n\n @property\n def unit_price_incl_tax(self):\n return self.purchase_info.price.incl_tax\n\n @property\n def unit_tax(self):\n return self.purchase_info.price.tax\n\n @property\n def line_price_excl_tax(self):\n if self.unit_price_excl_tax is not None:\n return self.quantity * self.unit_price_excl_tax\n\n @property\n def line_price_excl_tax_incl_discounts(self):\n if self._discount_excl_tax and self.line_price_excl_tax is not None:\n return self.line_price_excl_tax - self._discount_excl_tax\n if self._discount_incl_tax and self.line_price_incl_tax is not None:\n # This is a tricky situation. We know the discount as calculated\n # against tax inclusive prices but we need to guess how much of the\n # discount applies to tax-exclusive prices. We do this by\n # assuming a linear tax and scaling down the original discount.\n return self.line_price_excl_tax \\\n - self._tax_ratio * self._discount_incl_tax\n return self.line_price_excl_tax\n\n @property\n def line_price_incl_tax_incl_discounts(self):\n # We use whichever discount value is set. 
If the discount value was\n # calculated against the tax-exclusive prices, then the line price\n # including tax\n if self.line_price_incl_tax is not None:\n return self.line_price_incl_tax - self.discount_value\n\n @property\n def line_tax(self):\n if self.is_tax_known:\n return self.quantity * self.unit_tax\n\n @property\n def line_price_incl_tax(self):\n if self.unit_price_incl_tax is not None:\n return self.quantity * self.unit_price_incl_tax\n\n @property\n def description(self):\n d = smart_text(self.product)\n ops = []\n for attribute in self.attributes.all():\n ops.append(\"%s = '%s'\" % (attribute.option.name, attribute.value))\n if ops:\n d = \"%s (%s)\" % (d, \", \".join(ops))\n return d\n\n def get_warning(self):\n \"\"\"\n Return a warning message about this basket line if one is applicable\n\n This could be things like the price has changed\n \"\"\"\n if isinstance(self.purchase_info.availability, Unavailable):\n msg = u\"'%(product)s' is no longer available\"\n return _(msg) % {'product': self.product.get_title()}\n\n if not self.price_incl_tax:\n return\n if not self.purchase_info.price.is_tax_known:\n return\n\n # Compare current price to price when added to basket\n current_price_incl_tax = self.purchase_info.price.incl_tax\n if current_price_incl_tax != self.price_incl_tax:\n product_prices = {\n 'product': self.product.get_title(),\n 'old_price': currency(self.price_incl_tax),\n 'new_price': currency(current_price_incl_tax)\n }\n if current_price_incl_tax > self.price_incl_tax:\n warning = _(\"The price of '%(product)s' has increased from\"\n \" %(old_price)s to %(new_price)s since you added\"\n \" it to your basket\")\n return warning % product_prices\n else:\n warning = _(\"The price of '%(product)s' has decreased from\"\n \" %(old_price)s to %(new_price)s since you added\"\n \" it to your basket\")\n return warning % product_prices\n\n\nclass AbstractLineAttribute(models.Model):\n \"\"\"\n An attribute of a basket line\n \"\"\"\n line = models.ForeignKey(\n 'basket.Line',\n on_delete=models.CASCADE,\n related_name='attributes',\n verbose_name=_(\"Line\"))\n option = models.ForeignKey(\n 'catalogue.Option',\n on_delete=models.CASCADE,\n verbose_name=_(\"Option\"))\n value = models.CharField(_(\"Value\"), max_length=255)\n\n class Meta:\n abstract = True\n app_label = 'basket'\n verbose_name = _('Line attribute')\n verbose_name_plural = _('Line attributes')\n", "path": "src/oscar/apps/basket/abstract_models.py" } ]
diff --git a/src/oscar/apps/basket/abstract_models.py b/src/oscar/apps/basket/abstract_models.py index b0d5f4293bd..c0c2068c5e3 100644 --- a/src/oscar/apps/basket/abstract_models.py +++ b/src/oscar/apps/basket/abstract_models.py @@ -828,7 +828,7 @@ def line_price_incl_tax_incl_discounts(self): @property def line_tax(self): - if self.unit_tax: + if self.is_tax_known: return self.quantity * self.unit_tax @property diff --git a/tests/integration/basket/test_models.py b/tests/integration/basket/test_models.py index 26dc8eb90dc..6f2ec27111f 100644 --- a/tests/integration/basket/test_models.py +++ b/tests/integration/basket/test_models.py @@ -78,6 +78,33 @@ def test_basket_lines_queryset_is_ordered(self): queryset = basket.all_lines() self.assertTrue(queryset.ordered) + def test_line_tax_for_zero_tax_strategies(self): + basket = Basket() + basket.strategy = strategy.Default() + product = factories.create_product() + # Tax for the default strategy will be 0 + factories.create_stockrecord( + product, price_excl_tax=D('75.00'), num_in_stock=10) + basket.add(product, 1) + + self.assertEqual(basket.lines.first().line_tax, D('0')) + + def test_line_tax_for_unknown_tax_strategies(self): + + class UnknownTaxStrategy(strategy.Default): + """ A test strategy where the tax is not known """ + + def pricing_policy(self, product, stockrecord): + return prices.FixedPrice('GBP', stockrecord.price_excl_tax, tax=None) + + basket = Basket() + basket.strategy = UnknownTaxStrategy() + product = factories.create_product() + factories.create_stockrecord(product, num_in_stock=10) + basket.add(product, 1) + + self.assertEqual(basket.lines.first().line_tax, None) + class TestAddingAProductToABasket(TestCase): diff --git a/tests/integration/order/test_models.py b/tests/integration/order/test_models.py index 88246ca4151..46528bf3f1e 100644 --- a/tests/integration/order/test_models.py +++ b/tests/integration/order/test_models.py @@ -345,22 +345,14 @@ def test_contains_voucher_details_after_voucher_is_deleted(self): class OrderTests(TestCase): - def get_date_tuple(self, date=None): - """ - Returns a tuple like (year, month, day, hour, minute) for - datetime comparisons. - We probably don't want to assert datetime objects have the same - number of miliseconds etc. just in case the object in the test - differs by some insignificant amount. - """ - if date is None: - date = timezone.now() - return date.timetuple()[:-4] - def test_sets_date_placed_to_now_by_default(self): + @mock.patch('oscar.apps.order.abstract_models.now') + def test_sets_date_placed_to_now_by_default(self, mock_now): + tzinfo = timezone.get_current_timezone() + mock_now.return_value = datetime(2017, 6, 23, 16, 14, tzinfo=tzinfo) order = create_order(number='100003') - self.assertTupleEqual(self.get_date_tuple(order.date_placed), - self.get_date_tuple()) + self.assertEqual(order.date_placed, + datetime(2017, 6, 23, 16, 14, tzinfo=tzinfo)) def test_allows_date_placed_to_be_changed_and_set_explicitly(self): order = create_order(number='100003') @@ -368,8 +360,8 @@ def test_allows_date_placed_to_be_changed_and_set_explicitly(self): order.date_placed = datetime(2012, 8, 11, 16, 14, tzinfo=tzinfo) order.save() - self.assertTupleEqual(self.get_date_tuple(order.date_placed), - (2012, 8, 11, 16, 14)) + self.assertEqual(order.date_placed, + datetime(2012, 8, 11, 16, 14, tzinfo=tzinfo)) def test_shipping_status(self): order = OrderFactory()
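Note on the django-oscar record above: the patch changes AbstractLine.line_tax to gate on is_tax_known instead of the truthiness of unit_tax, so a known zero tax returns a zero total instead of None, while an unknown tax still returns None (matching the two tests added in the diff). A minimal, self-contained sketch of that behavioural difference follows; LineSketch and its attribute names are illustrative stand-ins, not oscar's actual API.

from decimal import Decimal as D

class LineSketch:
    # Illustrative stand-in for a basket line; not oscar's real class.
    def __init__(self, quantity, unit_tax, is_tax_known):
        self.quantity = quantity
        self.unit_tax = unit_tax
        self.is_tax_known = is_tax_known

    @property
    def line_tax_old(self):
        # Pre-patch logic: a zero tax is falsy, so this wrongly returns None.
        if self.unit_tax:
            return self.quantity * self.unit_tax

    @property
    def line_tax_new(self):
        # Post-patch logic: return a total whenever the tax is known at all.
        if self.is_tax_known:
            return self.quantity * self.unit_tax

zero_tax_line = LineSketch(2, D('0.00'), True)
print(zero_tax_line.line_tax_old)   # None (the bug)
print(zero_tax_line.line_tax_new)   # 0.00

unknown_tax_line = LineSketch(2, None, False)
print(unknown_tax_line.line_tax_new)  # None, as the added test expects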
qutip__qutip-2305
[ { "content": "\"\"\"\nThis function provides functions for parallel execution of loops and function\nmappings, using the builtin Python module multiprocessing.\n\"\"\"\n__all__ = ['parfor', 'parallel_map', 'serial_map']\n\nfrom scipy import array\nimport multiprocessing\nfrom functools import partial\nimport os\nimport sys\nimport signal\nimport qutip.settings as qset\nfrom qutip.ui.progressbar import BaseProgressBar, TextProgressBar\n\n\nif sys.platform == 'darwin':\n Pool = multiprocessing.get_context('fork').Pool\nelse:\n Pool = multiprocessing.Pool\n\n\ndef _task_wrapper(args):\n try:\n return args[0](*args[1])\n except KeyboardInterrupt:\n os.kill(args[2], signal.SIGINT)\n sys.exit(1)\n\n\ndef _task_wrapper_with_args(args, user_args):\n try:\n return args[0](*args[1], **user_args)\n except KeyboardInterrupt:\n os.kill(args[2], signal.SIGINT)\n sys.exit(1)\n\n\ndef parfor(func, *args, **kwargs):\n \"\"\"Executes a multi-variable function in parallel on the local machine.\n\n Parallel execution of a for-loop over function `func` for multiple input\n arguments and keyword arguments.\n\n .. note::\n\n From QuTiP 3.1, we recommend to use :func:`qutip.parallel.parallel_map`\n instead of this function.\n\n Parameters\n ----------\n func : function_type\n A function to run in parallel on the local machine. The function 'func'\n accepts a series of arguments that are passed to the function as\n variables. In general, the function can have multiple input variables,\n and these arguments must be passed in the same order as they are\n defined in the function definition. In addition, the user can pass\n multiple keyword arguments to the function.\n\n The following keyword argument is reserved:\n\n num_cpus : int\n Number of CPU's to use. Default uses maximum number of CPU's.\n Performance degrades if num_cpus is larger than the physical CPU\n count of your machine.\n\n Returns\n -------\n result : list\n A ``list`` with length equal to number of input parameters\n containing the output from `func`.\n\n \"\"\"\n os.environ['QUTIP_IN_PARALLEL'] = 'TRUE'\n kw = _default_kwargs()\n if 'num_cpus' in kwargs.keys():\n kw['num_cpus'] = kwargs['num_cpus']\n del kwargs['num_cpus']\n if len(kwargs) != 0:\n task_func = partial(_task_wrapper_with_args, user_args=kwargs)\n else:\n task_func = _task_wrapper\n\n if kw['num_cpus'] > qset.num_cpus:\n print(\"Requested number of CPUs (%s) \" % kw['num_cpus'] +\n \"is larger than physical number (%s).\" % qset.num_cpus)\n print(\"Reduce 'num_cpus' for greater performance.\")\n\n pool = Pool(processes=kw['num_cpus'])\n args = [list(arg) for arg in args]\n var = [[args[j][i] for j in range(len(args))]\n for i in range(len(list(args[0])))]\n try:\n map_args = ((func, v, os.getpid()) for v in var)\n par_return = list(pool.map(task_func, map_args))\n\n pool.terminate()\n pool.join()\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n if isinstance(par_return[0], tuple):\n par_return = [elem for elem in par_return]\n num_elems = len(par_return[0])\n dt = [type(ii) for ii in par_return[0]]\n return [array([elem[ii] for elem in par_return], dtype=dt[ii])\n for ii in range(num_elems)]\n else:\n return par_return\n\n except KeyboardInterrupt:\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n pool.terminate()\n\n\ndef serial_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs):\n \"\"\"\n Serial mapping function with the same call signature as parallel_map, for\n easy switching between serial and parallel execution. 
This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n This function work as a drop-in replacement of\n :func:`qutip.parallel.parallel_map`.\n\n Parameters\n ----------\n task : a Python function\n The function that is to be called for each value in ``task_vec``.\n values : array / list\n The list or array of values for which the ``task`` function is to be\n evaluated.\n task_args : list / dictionary\n The optional additional argument to the ``task`` function.\n task_kwargs : list / dictionary\n The optional additional keyword argument to the ``task`` function.\n progress_bar : ProgressBar\n Progress bar class instance for showing progress.\n\n Returns\n --------\n result : list\n The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for each\n value in ``values``.\n\n \"\"\"\n try:\n progress_bar = kwargs['progress_bar']\n if progress_bar is True:\n progress_bar = TextProgressBar()\n except:\n progress_bar = BaseProgressBar()\n\n progress_bar.start(len(values))\n results = []\n for n, value in enumerate(values):\n progress_bar.update(n)\n result = task(value, *task_args, **task_kwargs)\n results.append(result)\n progress_bar.finished()\n\n return results\n\n\ndef parallel_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs):\n \"\"\"\n Parallel execution of a mapping of `values` to the function `task`. This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n Parameters\n ----------\n task : a Python function\n The function that is to be called for each value in ``task_vec``.\n values : array / list\n The list or array of values for which the ``task`` function is to be\n evaluated.\n task_args : list / dictionary\n The optional additional argument to the ``task`` function.\n task_kwargs : list / dictionary\n The optional additional keyword argument to the ``task`` function.\n progress_bar : ProgressBar\n Progress bar class instance for showing progress.\n\n Returns\n --------\n result : list\n The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for\n each value in ``values``.\n\n \"\"\"\n os.environ['QUTIP_IN_PARALLEL'] = 'TRUE'\n kw = _default_kwargs()\n if 'num_cpus' in kwargs:\n kw['num_cpus'] = kwargs['num_cpus']\n\n try:\n progress_bar = kwargs['progress_bar']\n if progress_bar is True:\n progress_bar = TextProgressBar()\n except:\n progress_bar = BaseProgressBar()\n\n progress_bar.start(len(values))\n nfinished = [0]\n\n def _update_progress_bar(x):\n nfinished[0] += 1\n progress_bar.update(nfinished[0])\n\n try:\n pool = Pool(processes=kw['num_cpus'])\n\n async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,\n _update_progress_bar)\n for value in values]\n\n while not all([ar.ready() for ar in async_res]):\n for ar in async_res:\n ar.wait(timeout=0.1)\n\n pool.terminate()\n pool.join()\n\n except KeyboardInterrupt as e:\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n pool.terminate()\n pool.join()\n raise e\n\n progress_bar.finished()\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n return [ar.get() for ar in async_res]\n\n\ndef _default_kwargs():\n settings = {'num_cpus': qset.num_cpus}\n return settings\n", "path": "qutip/parallel.py" } ]
[ { "content": "\"\"\"\nThis function provides functions for parallel execution of loops and function\nmappings, using the builtin Python module multiprocessing.\n\"\"\"\n__all__ = ['parfor', 'parallel_map', 'serial_map']\n\nfrom numpy import array\nimport multiprocessing\nfrom functools import partial\nimport os\nimport sys\nimport signal\nimport qutip.settings as qset\nfrom qutip.ui.progressbar import BaseProgressBar, TextProgressBar\n\n\nif sys.platform == 'darwin':\n Pool = multiprocessing.get_context('fork').Pool\nelse:\n Pool = multiprocessing.Pool\n\n\ndef _task_wrapper(args):\n try:\n return args[0](*args[1])\n except KeyboardInterrupt:\n os.kill(args[2], signal.SIGINT)\n sys.exit(1)\n\n\ndef _task_wrapper_with_args(args, user_args):\n try:\n return args[0](*args[1], **user_args)\n except KeyboardInterrupt:\n os.kill(args[2], signal.SIGINT)\n sys.exit(1)\n\n\ndef parfor(func, *args, **kwargs):\n \"\"\"Executes a multi-variable function in parallel on the local machine.\n\n Parallel execution of a for-loop over function `func` for multiple input\n arguments and keyword arguments.\n\n .. note::\n\n From QuTiP 3.1, we recommend to use :func:`qutip.parallel.parallel_map`\n instead of this function.\n\n Parameters\n ----------\n func : function_type\n A function to run in parallel on the local machine. The function 'func'\n accepts a series of arguments that are passed to the function as\n variables. In general, the function can have multiple input variables,\n and these arguments must be passed in the same order as they are\n defined in the function definition. In addition, the user can pass\n multiple keyword arguments to the function.\n\n The following keyword argument is reserved:\n\n num_cpus : int\n Number of CPU's to use. Default uses maximum number of CPU's.\n Performance degrades if num_cpus is larger than the physical CPU\n count of your machine.\n\n Returns\n -------\n result : list\n A ``list`` with length equal to number of input parameters\n containing the output from `func`.\n\n \"\"\"\n os.environ['QUTIP_IN_PARALLEL'] = 'TRUE'\n kw = _default_kwargs()\n if 'num_cpus' in kwargs.keys():\n kw['num_cpus'] = kwargs['num_cpus']\n del kwargs['num_cpus']\n if len(kwargs) != 0:\n task_func = partial(_task_wrapper_with_args, user_args=kwargs)\n else:\n task_func = _task_wrapper\n\n if kw['num_cpus'] > qset.num_cpus:\n print(\"Requested number of CPUs (%s) \" % kw['num_cpus'] +\n \"is larger than physical number (%s).\" % qset.num_cpus)\n print(\"Reduce 'num_cpus' for greater performance.\")\n\n pool = Pool(processes=kw['num_cpus'])\n args = [list(arg) for arg in args]\n var = [[args[j][i] for j in range(len(args))]\n for i in range(len(list(args[0])))]\n try:\n map_args = ((func, v, os.getpid()) for v in var)\n par_return = list(pool.map(task_func, map_args))\n\n pool.terminate()\n pool.join()\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n if isinstance(par_return[0], tuple):\n par_return = [elem for elem in par_return]\n num_elems = len(par_return[0])\n dt = [type(ii) for ii in par_return[0]]\n return [array([elem[ii] for elem in par_return], dtype=dt[ii])\n for ii in range(num_elems)]\n else:\n return par_return\n\n except KeyboardInterrupt:\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n pool.terminate()\n\n\ndef serial_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs):\n \"\"\"\n Serial mapping function with the same call signature as parallel_map, for\n easy switching between serial and parallel execution. 
This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n This function work as a drop-in replacement of\n :func:`qutip.parallel.parallel_map`.\n\n Parameters\n ----------\n task : a Python function\n The function that is to be called for each value in ``task_vec``.\n values : array / list\n The list or array of values for which the ``task`` function is to be\n evaluated.\n task_args : list / dictionary\n The optional additional argument to the ``task`` function.\n task_kwargs : list / dictionary\n The optional additional keyword argument to the ``task`` function.\n progress_bar : ProgressBar\n Progress bar class instance for showing progress.\n\n Returns\n --------\n result : list\n The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for each\n value in ``values``.\n\n \"\"\"\n try:\n progress_bar = kwargs['progress_bar']\n if progress_bar is True:\n progress_bar = TextProgressBar()\n except:\n progress_bar = BaseProgressBar()\n\n progress_bar.start(len(values))\n results = []\n for n, value in enumerate(values):\n progress_bar.update(n)\n result = task(value, *task_args, **task_kwargs)\n results.append(result)\n progress_bar.finished()\n\n return results\n\n\ndef parallel_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs):\n \"\"\"\n Parallel execution of a mapping of `values` to the function `task`. This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n Parameters\n ----------\n task : a Python function\n The function that is to be called for each value in ``task_vec``.\n values : array / list\n The list or array of values for which the ``task`` function is to be\n evaluated.\n task_args : list / dictionary\n The optional additional argument to the ``task`` function.\n task_kwargs : list / dictionary\n The optional additional keyword argument to the ``task`` function.\n progress_bar : ProgressBar\n Progress bar class instance for showing progress.\n\n Returns\n --------\n result : list\n The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for\n each value in ``values``.\n\n \"\"\"\n os.environ['QUTIP_IN_PARALLEL'] = 'TRUE'\n kw = _default_kwargs()\n if 'num_cpus' in kwargs:\n kw['num_cpus'] = kwargs['num_cpus']\n\n try:\n progress_bar = kwargs['progress_bar']\n if progress_bar is True:\n progress_bar = TextProgressBar()\n except:\n progress_bar = BaseProgressBar()\n\n progress_bar.start(len(values))\n nfinished = [0]\n\n def _update_progress_bar(x):\n nfinished[0] += 1\n progress_bar.update(nfinished[0])\n\n try:\n pool = Pool(processes=kw['num_cpus'])\n\n async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,\n _update_progress_bar)\n for value in values]\n\n while not all([ar.ready() for ar in async_res]):\n for ar in async_res:\n ar.wait(timeout=0.1)\n\n pool.terminate()\n pool.join()\n\n except KeyboardInterrupt as e:\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n pool.terminate()\n pool.join()\n raise e\n\n progress_bar.finished()\n os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'\n return [ar.get() for ar in async_res]\n\n\ndef _default_kwargs():\n settings = {'num_cpus': qset.num_cpus}\n return settings\n", "path": "qutip/parallel.py" } ]
diff --git a/doc/changes/2305.bugfix b/doc/changes/2305.bugfix new file mode 100644 index 0000000000..7d37258c30 --- /dev/null +++ b/doc/changes/2305.bugfix @@ -0,0 +1 @@ +Remove use of scipy.<numpy-func> in parallel.py, incompatible with scipy==1.12 diff --git a/qutip/parallel.py b/qutip/parallel.py index e2d1086c92..81ff6e5932 100644 --- a/qutip/parallel.py +++ b/qutip/parallel.py @@ -4,7 +4,7 @@ """ __all__ = ['parfor', 'parallel_map', 'serial_map'] -from scipy import array +from numpy import array import multiprocessing from functools import partial import os
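Note on the qutip record above: per the changelog entry in the diff, SciPy 1.12 no longer re-exports NumPy functions, so `from scipy import array` breaks; the one-line fix imports `array` from NumPy, which is all parfor's column-splitting of tuple results needs. A rough sketch of that column-splitting step, using made-up per-task result tuples as the assumed input:

from numpy import array

# Hypothetical results returned by the mapped tasks, one tuple per task.
par_return = [(1, 2.0), (3, 4.0), (5, 6.0)]

num_elems = len(par_return[0])
dt = [type(value) for value in par_return[0]]
columns = [array([elem[i] for elem in par_return], dtype=dt[i])
           for i in range(num_elems)]
print(columns)  # [array([1, 3, 5]), array([2., 4., 6.])]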
kornia__kornia-2817
[ { "content": "from typing import Dict, Iterator, List, Optional, Tuple, Union, cast\n\nimport torch\nfrom torch.distributions import Categorical\n\nfrom kornia.augmentation.auto.base import SUBPLOLICY_CONFIG, PolicyAugmentBase\nfrom kornia.augmentation.auto.operations import OperationBase\nfrom kornia.augmentation.auto.operations.policy import PolicySequential\nfrom kornia.augmentation.container.params import ParamItem\nfrom kornia.core import Module, Tensor\n\nfrom . import ops\n\ndefault_policy: List[SUBPLOLICY_CONFIG] = [\n [(\"auto_contrast\", 0, 1)],\n [(\"equalize\", 0, 1)],\n [(\"invert\", 0, 1)],\n [(\"rotate\", -30.0, 30.0)],\n [(\"posterize\", 0.0, 4)],\n [(\"solarize\", 0.0, 1.0)],\n [(\"solarize_add\", 0.0, 0.43)],\n [(\"color\", 0.1, 1.9)],\n [(\"contrast\", 0.1, 1.9)],\n [(\"brightness\", 0.1, 1.9)],\n [(\"sharpness\", 0.1, 1.9)],\n [(\"shear_x\", -0.3, 0.3)],\n [(\"shear_y\", -0.3, 0.3)],\n # (CutoutAbs, 0, 40),\n [(\"translate_x\", -0.1, 0.1)],\n [(\"translate_x\", -0.1, 0.1)],\n]\n\n\nclass RandAugment(PolicyAugmentBase):\n \"\"\"Apply RandAugment :cite:`cubuk2020randaugment` augmentation strategies.\n\n Args:\n n: the number of augmentations to apply sequentially.\n m: magnitude for all the augmentations, ranged from [0, 30].\n policy: candidate transformations. If None, a default candidate list will be used.\n transformation_matrix_mode: computation mode for the chained transformation matrix, via `.transform_matrix`\n attribute.\n If `silent`, transformation matrix will be computed silently and the non-rigid\n modules will be ignored as identity transformations.\n If `rigid`, transformation matrix will be computed silently and the non-rigid\n modules will trigger errors.\n If `skip`, transformation matrix will be totally ignored.\n\n Examples:\n >>> import kornia.augmentation as K\n >>> in_tensor = torch.rand(5, 3, 30, 30)\n >>> aug = K.AugmentationSequential(RandAugment(n=2, m=10))\n >>> aug(in_tensor).shape\n torch.Size([5, 3, 30, 30])\n \"\"\"\n\n def __init__(\n self,\n n: int,\n m: int,\n policy: Optional[List[SUBPLOLICY_CONFIG]] = None,\n transformation_matrix_mode: str = \"silent\",\n ) -> None:\n if m <= 0 or m >= 30:\n raise ValueError(f\"Expect `m` in [0, 30]. Got {m}.\")\n\n if policy is None:\n _policy = default_policy\n else:\n _policy = policy\n\n super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)\n selection_weights = torch.tensor([1.0 / len(self)] * len(self))\n self.rand_selector = Categorical(selection_weights)\n self.n = n\n self.m = m\n\n def compose_subpolicy_sequential(self, subpolicy: SUBPLOLICY_CONFIG) -> PolicySequential:\n if len(subpolicy) != 1:\n raise RuntimeError(f\"Each policy must have only one operation for RandAugment. 
Got {len(subpolicy)}.\")\n name, low, high = subpolicy[0]\n return PolicySequential(*[getattr(ops, name)(low, high)])\n\n def get_forward_sequence(self, params: Optional[List[ParamItem]] = None) -> Iterator[Tuple[str, Module]]:\n if params is None:\n idx = self.rand_selector.sample((self.n,))\n return self.get_children_by_indices(idx)\n\n return self.get_children_by_params(params)\n\n def forward_parameters(self, batch_shape: torch.Size) -> List[ParamItem]:\n named_modules: Iterator[Tuple[str, Module]] = self.get_forward_sequence()\n\n params: List[ParamItem] = []\n mod_param: Union[Dict[str, Tensor], List[ParamItem]]\n m = torch.tensor([self.m / 30] * batch_shape[0])\n\n for name, module in named_modules:\n # The Input PolicySequential only got one child.\n op = cast(PolicySequential, module)[0]\n op = cast(OperationBase, op)\n mag = None\n if op.magnitude_range is not None:\n minval, maxval = op.magnitude_range\n mag = m * float(maxval - minval) + minval\n mod_param = op.forward_parameters(batch_shape, mag=mag)\n # Compose it\n param = ParamItem(name, [ParamItem(next(iter(module.named_children()))[0], mod_param)])\n params.append(param)\n\n return params\n", "path": "kornia/augmentation/auto/rand_augment/rand_augment.py" } ]
[ { "content": "from typing import Dict, Iterator, List, Optional, Tuple, Union, cast\n\nimport torch\nfrom torch.distributions import Categorical\n\nfrom kornia.augmentation.auto.base import SUBPLOLICY_CONFIG, PolicyAugmentBase\nfrom kornia.augmentation.auto.operations import OperationBase\nfrom kornia.augmentation.auto.operations.policy import PolicySequential\nfrom kornia.augmentation.container.params import ParamItem\nfrom kornia.core import Module, Tensor\n\nfrom . import ops\n\ndefault_policy: List[SUBPLOLICY_CONFIG] = [\n [(\"auto_contrast\", 0, 1)],\n [(\"equalize\", 0, 1)],\n [(\"invert\", 0, 1)],\n [(\"rotate\", -30.0, 30.0)],\n [(\"posterize\", 0.0, 4)],\n [(\"solarize\", 0.0, 1.0)],\n [(\"solarize_add\", 0.0, 0.43)],\n [(\"color\", 0.1, 1.9)],\n [(\"contrast\", 0.1, 1.9)],\n [(\"brightness\", 0.1, 1.9)],\n [(\"sharpness\", 0.1, 1.9)],\n [(\"shear_x\", -0.3, 0.3)],\n [(\"shear_y\", -0.3, 0.3)],\n # (CutoutAbs, 0, 40),\n [(\"translate_x\", -0.1, 0.1)],\n [(\"translate_y\", -0.1, 0.1)],\n]\n\n\nclass RandAugment(PolicyAugmentBase):\n \"\"\"Apply RandAugment :cite:`cubuk2020randaugment` augmentation strategies.\n\n Args:\n n: the number of augmentations to apply sequentially.\n m: magnitude for all the augmentations, ranged from [0, 30].\n policy: candidate transformations. If None, a default candidate list will be used.\n transformation_matrix_mode: computation mode for the chained transformation matrix, via `.transform_matrix`\n attribute.\n If `silent`, transformation matrix will be computed silently and the non-rigid\n modules will be ignored as identity transformations.\n If `rigid`, transformation matrix will be computed silently and the non-rigid\n modules will trigger errors.\n If `skip`, transformation matrix will be totally ignored.\n\n Examples:\n >>> import kornia.augmentation as K\n >>> in_tensor = torch.rand(5, 3, 30, 30)\n >>> aug = K.AugmentationSequential(RandAugment(n=2, m=10))\n >>> aug(in_tensor).shape\n torch.Size([5, 3, 30, 30])\n \"\"\"\n\n def __init__(\n self,\n n: int,\n m: int,\n policy: Optional[List[SUBPLOLICY_CONFIG]] = None,\n transformation_matrix_mode: str = \"silent\",\n ) -> None:\n if m <= 0 or m >= 30:\n raise ValueError(f\"Expect `m` in [0, 30]. Got {m}.\")\n\n if policy is None:\n _policy = default_policy\n else:\n _policy = policy\n\n super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)\n selection_weights = torch.tensor([1.0 / len(self)] * len(self))\n self.rand_selector = Categorical(selection_weights)\n self.n = n\n self.m = m\n\n def compose_subpolicy_sequential(self, subpolicy: SUBPLOLICY_CONFIG) -> PolicySequential:\n if len(subpolicy) != 1:\n raise RuntimeError(f\"Each policy must have only one operation for RandAugment. 
Got {len(subpolicy)}.\")\n name, low, high = subpolicy[0]\n return PolicySequential(*[getattr(ops, name)(low, high)])\n\n def get_forward_sequence(self, params: Optional[List[ParamItem]] = None) -> Iterator[Tuple[str, Module]]:\n if params is None:\n idx = self.rand_selector.sample((self.n,))\n return self.get_children_by_indices(idx)\n\n return self.get_children_by_params(params)\n\n def forward_parameters(self, batch_shape: torch.Size) -> List[ParamItem]:\n named_modules: Iterator[Tuple[str, Module]] = self.get_forward_sequence()\n\n params: List[ParamItem] = []\n mod_param: Union[Dict[str, Tensor], List[ParamItem]]\n m = torch.tensor([self.m / 30] * batch_shape[0])\n\n for name, module in named_modules:\n # The Input PolicySequential only got one child.\n op = cast(PolicySequential, module)[0]\n op = cast(OperationBase, op)\n mag = None\n if op.magnitude_range is not None:\n minval, maxval = op.magnitude_range\n mag = m * float(maxval - minval) + minval\n mod_param = op.forward_parameters(batch_shape, mag=mag)\n # Compose it\n param = ParamItem(name, [ParamItem(next(iter(module.named_children()))[0], mod_param)])\n params.append(param)\n\n return params\n", "path": "kornia/augmentation/auto/rand_augment/rand_augment.py" } ]
diff --git a/kornia/augmentation/auto/rand_augment/rand_augment.py b/kornia/augmentation/auto/rand_augment/rand_augment.py index da742c5949..88c86098ed 100644 --- a/kornia/augmentation/auto/rand_augment/rand_augment.py +++ b/kornia/augmentation/auto/rand_augment/rand_augment.py @@ -27,7 +27,7 @@ [("shear_y", -0.3, 0.3)], # (CutoutAbs, 0, 40), [("translate_x", -0.1, 0.1)], - [("translate_x", -0.1, 0.1)], + [("translate_y", -0.1, 0.1)], ]
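Note on the kornia record above: the default RandAugment policy listed ("translate_x", -0.1, 0.1) twice, so translate_y was never a candidate operation; the patch turns the duplicate entry into ("translate_y", -0.1, 0.1). A small sketch of what the duplicate cost, using truncated two-entry policies purely for illustration:

policy_before = [[("translate_x", -0.1, 0.1)], [("translate_x", -0.1, 0.1)]]
policy_after = [[("translate_x", -0.1, 0.1)], [("translate_y", -0.1, 0.1)]]

names_before = {subpolicy[0][0] for subpolicy in policy_before}
names_after = {subpolicy[0][0] for subpolicy in policy_after}
print(names_before)  # {'translate_x'} -- translate_y never sampled
print(names_after)   # {'translate_x', 'translate_y'}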
vyperlang__vyper-1275
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\ntest_deps = [\n 'pytest',\n 'pytest-cov',\n 'py-evm==0.2.0a34',\n 'eth-tester==0.1.0b33',\n 'web3==4.8.2',\n]\n\n\nextras = {\n 'test': test_deps\n}\n\n\nsetup(\n name='vyper',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='0.1.0-beta.8',\n description='Vyper Programming Language for Ethereum',\n long_description_markdown_filename='README.md',\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/vyper',\n license=\"MIT\",\n keywords='ethereum',\n include_package_data=True,\n packages=find_packages(exclude=('tests', 'docs')),\n python_requires='>=3.6',\n py_modules=['vyper'],\n install_requires=[\n 'pycryptodome>=3.5.1,<4',\n ],\n setup_requires=[\n 'pytest-runner',\n 'setuptools-markdown'\n ],\n tests_require=test_deps,\n extras_require=extras,\n scripts=[\n 'bin/vyper',\n 'bin/vyper-serve',\n 'bin/vyper-lll'\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ]\n)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\ntest_deps = [\n 'pytest>=3.6',\n 'pytest-cov==2.4.0',\n 'pytest-xdist==1.18.1',\n 'py-evm==0.2.0a39',\n 'eth-tester==0.1.0b37',\n 'web3==5.0.0a6'\n]\n\n\nextras = {\n 'test': test_deps\n}\n\n\nsetup(\n name='vyper',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='0.1.0-beta.8',\n description='Vyper Programming Language for Ethereum',\n long_description_markdown_filename='README.md',\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/vyper',\n license=\"MIT\",\n keywords='ethereum',\n include_package_data=True,\n packages=find_packages(exclude=('tests', 'docs')),\n python_requires='>=3.6',\n py_modules=['vyper'],\n install_requires=[\n 'pycryptodome>=3.5.1,<4',\n ],\n setup_requires=[\n 'pytest-runner',\n 'setuptools-markdown'\n ],\n tests_require=test_deps,\n extras_require=extras,\n scripts=[\n 'bin/vyper',\n 'bin/vyper-serve',\n 'bin/vyper-lll'\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ]\n)\n", "path": "setup.py" } ]
diff --git a/setup.cfg b/setup.cfg index 3c90a124ba..68987778a0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [tool:pytest] -addopts = --cov-report term --cov-report html --cov=vyper +addopts = -n auto --cov-report term --cov-report html --cov=vyper python_files = test_*.py testpaths = tests diff --git a/setup.py b/setup.py index f36fd5a9a8..b262b41d3c 100644 --- a/setup.py +++ b/setup.py @@ -4,11 +4,12 @@ test_deps = [ - 'pytest', - 'pytest-cov', - 'py-evm==0.2.0a34', - 'eth-tester==0.1.0b33', - 'web3==4.8.2', + 'pytest>=3.6', + 'pytest-cov==2.4.0', + 'pytest-xdist==1.18.1', + 'py-evm==0.2.0a39', + 'eth-tester==0.1.0b37', + 'web3==5.0.0a6' ] diff --git a/tests/conftest.py b/tests/conftest.py index eb88f01466..392941886b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,5 @@ -import eth_tester import logging import pytest -import web3 from functools import wraps @@ -18,9 +16,12 @@ from web3 import ( Web3, ) +from web3._utils.toolz import ( + compose, +) from web3.contract import ( - ConciseContract, - ConciseMethod + Contract, + mk_collision_prop, ) from vyper.parser.parser_utils import ( LLLnode @@ -32,9 +33,13 @@ ) -class VyperMethod(ConciseMethod): +class VyperMethod: ALLOWED_MODIFIERS = {'call', 'estimateGas', 'transact', 'buildTransaction'} + def __init__(self, function, normalizers=None): + self._function = function + self._function._return_data_normalizers = normalizers + def __call__(self, *args, **kwargs): return self.__prepared_function(*args, **kwargs) @@ -54,89 +59,82 @@ def __prepared_function(self, *args, **kwargs): return getattr(self._function(*args), modifier)(modifier_dict) -class VyperContract(ConciseContract): +class VyperContract: - def __init__(self, classic_contract, method_class=VyperMethod): - super().__init__(classic_contract, method_class) + """ + An alternative Contract Factory which invokes all methods as `call()`, + unless you add a keyword argument. The keyword argument assigns the prep method. 
+ This call -############ -# PATCHING # -############ + > contract.withdraw(amount, transact={'from': eth.accounts[1], 'gas': 100000, ...}) -setattr(eth_tester.backends.pyevm.main, 'GENESIS_GAS_LIMIT', 10**9) -setattr(eth_tester.backends.pyevm.main, 'GENESIS_DIFFICULTY', 1) + is equivalent to this call in the classic contract: + > contract.functions.withdraw(amount).transact({'from': eth.accounts[1], 'gas': 100000, ...}) + """ + def __init__(self, classic_contract, method_class=VyperMethod): -def set_evm_verbose_logging(): - logger = logging.getLogger('evm') - logger.setLevel('TRACE') + classic_contract._return_data_normalizers += CONCISE_NORMALIZERS + self._classic_contract = classic_contract + self.address = self._classic_contract.address + protected_fn_names = [fn for fn in dir(self) if not fn.endswith('__')] -# Useful options to comment out whilst working: -set_evm_verbose_logging() -# from vdb import vdb -# vdb.set_evm_opcode_debugger() + for fn_name in self._classic_contract.functions: + # Override namespace collisions + if fn_name in protected_fn_names: + _concise_method = mk_collision_prop(fn_name) -@pytest.fixture(autouse=True) -def patch_log_filter_remove(monkeypatch): + else: + _classic_method = getattr( + self._classic_contract.functions, + fn_name) - def Filter_remove(self, *values): + _concise_method = method_class( + _classic_method, + self._classic_contract._return_data_normalizers + ) - def get_key(v): - return v.get('transaction_hash'), v.get('log_index'), v.get('transaction_index') + setattr(self, fn_name, _concise_method) - values_to_remove = set([ - get_key(value) - for value in values - ]) + @classmethod + def factory(cls, *args, **kwargs): + return compose(cls, Contract.factory(*args, **kwargs)) - queued_values = self.get_changes() - self.values = [ - value - for value - in self.get_all() - if get_key(value) not in values_to_remove - ] - for value in queued_values: - if get_key(value) in values_to_remove: - continue - self.queue.put_nowait(value) - monkeypatch.setattr(eth_tester.utils.filters.Filter, 'remove', Filter_remove) +def _none_addr(datatype, data): + if datatype == 'address' and int(data, base=16) == 0: + return (datatype, None) + else: + return (datatype, data) -@pytest.fixture(autouse=True) -def patch_is_encodeable_for_fixed(monkeypatch): - original_is_encodable = web3.utils.abi.is_encodable +CONCISE_NORMALIZERS = ( + _none_addr, +) - def utils_abi_is_encodable(_type, value): - from eth_utils import is_integer - from eth_abi.abi import process_type - try: - base, sub, arrlist = _type - except ValueError: - base, sub, arrlist = process_type(_type) +############ +# PATCHING # +############ - if not arrlist: - if base == 'fixed' and not arrlist: - return True - elif base == 'int': - if not is_integer(value): - return False - exp = int(sub) - if value < -1 * 2**(exp - 1) or value > 2**(exp - 1) + 1: - return False - return True +# setattr(eth_tester.backends.pyevm.main, 'GENESIS_GAS_LIMIT', 10**9) +# setattr(eth_tester.backends.pyevm.main, 'GENESIS_DIFFICULTY', 1) + + +def set_evm_verbose_logging(): + logger = logging.getLogger('evm') + logger.setLevel('TRACE') - # default behaviour - return original_is_encodable(_type, value) - monkeypatch.setattr(web3.utils.abi, 'is_encodable', utils_abi_is_encodable) +# Useful options to comment out whilst working: +# set_evm_verbose_logging() +# from vdb import vdb +# vdb.set_evm_opcode_debugger() -@pytest.fixture(scope="module") +@pytest.fixture def tester(): t = EthereumTester() return t @@ -146,7 +144,7 @@ def 
zero_gas_price_strategy(web3, transaction_params=None): return 0 # zero gas price makes testing simpler. -@pytest.fixture(scope="module") +@pytest.fixture def w3(tester): w3 = Web3(EthereumTesterProvider(tester)) w3.eth.setGasPriceStrategy(zero_gas_price_strategy) @@ -155,7 +153,7 @@ def w3(tester): @pytest.fixture def keccak(): - return Web3.sha3 + return Web3.keccak @pytest.fixture diff --git a/tests/examples/auctions/test_blind_auction.py b/tests/examples/auctions/test_blind_auction.py index a8e40fcdfb..7e67daf2d6 100644 --- a/tests/examples/auctions/test_blind_auction.py +++ b/tests/examples/auctions/test_blind_auction.py @@ -39,7 +39,7 @@ def test_late_bid(w3, auction_contract, assert_tx_failed): # Try to bid after bidding has ended assert_tx_failed(lambda: auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (200).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -54,7 +54,7 @@ def test_too_many_bids(w3, auction_contract, assert_tx_failed): # First 128 bids should be able to be placed successfully for i in range(MAX_BIDS): auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (i).to_bytes(32, byteorder='big'), (1).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -64,7 +64,7 @@ def test_too_many_bids(w3, auction_contract, assert_tx_failed): # 129th bid should fail assert_tx_failed(lambda: auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (128).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -78,7 +78,7 @@ def test_early_reval(w3, auction_contract, assert_tx_failed): # k1 places 1 real bid auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (100).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -116,7 +116,7 @@ def test_late_reveal(w3, auction_contract, assert_tx_failed): # k1 places 1 real bid auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (100).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -184,7 +184,7 @@ def test_blind_auction(w3, auction_contract): # k1 places 1 real bid auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (100).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (8675309).to_bytes(32, byteorder='big') @@ -194,7 +194,7 @@ def test_blind_auction(w3, auction_contract): # k2 places 1 real bid (highest) and 2 fake auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (150).to_bytes(32, byteorder='big'), (1).to_bytes(32, byteorder='big'), (1234567).to_bytes(32, byteorder='big') @@ -202,7 +202,7 @@ def test_blind_auction(w3, auction_contract): transact={'value': 150, 'from': k2} ) auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (200).to_bytes(32, byteorder='big'), (0).to_bytes(32, byteorder='big'), (1234567).to_bytes(32, byteorder='big') @@ -210,7 +210,7 @@ def test_blind_auction(w3, auction_contract): transact={'value': 250, 'from': k2} ) auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (300).to_bytes(32, byteorder='big'), (1).to_bytes(32, byteorder='big'), (1234567).to_bytes(32, byteorder='big') @@ -220,7 +220,7 @@ def test_blind_auction(w3, auction_contract): # k3 places 2 fake bids auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (175).to_bytes(32, byteorder='big'), (1).to_bytes(32, byteorder='big'), (9876543).to_bytes(32, byteorder='big') 
@@ -228,7 +228,7 @@ def test_blind_auction(w3, auction_contract): transact={'value': 175, 'from': k3} ) auction_contract.bid( - w3.sha3(b''.join([ + w3.keccak(b''.join([ (275).to_bytes(32, byteorder='big') + (1).to_bytes(32, byteorder='big') + (9876543).to_bytes(32, byteorder='big') diff --git a/tests/examples/market_maker/test_on_chain_market_maker.py b/tests/examples/market_maker/test_on_chain_market_maker.py index b91bf33544..6971cf2a45 100644 --- a/tests/examples/market_maker/test_on_chain_market_maker.py +++ b/tests/examples/market_maker/test_on_chain_market_maker.py @@ -31,27 +31,27 @@ def test_initial_statet(market_maker): def test_initiate(w3, market_maker, erc20, assert_tx_failed): a0 = w3.eth.accounts[0] - erc20.approve(market_maker.address, 2 * 10**18, transact={}) - market_maker.initiate(erc20.address, 1 * 10**18, transact={'value': 2 * 10**18}) - assert market_maker.totalEthQty() == 2 * 10**18 - assert market_maker.totalTokenQty() == 1 * 10**18 + erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={}) + market_maker.initiate(erc20.address, w3.toWei(1, "ether"), transact={'value': w3.toWei(2, "ether")}) + assert market_maker.totalEthQty() == w3.toWei(2, "ether") + assert market_maker.totalTokenQty() == w3.toWei(1, "ether") assert market_maker.invariant() == 2 * 10**36 assert market_maker.owner() == a0 assert erc20.name() == TOKEN_NAME assert erc20.decimals() == TOKEN_DECIMALS # Initiate cannot be called twice - assert_tx_failed(lambda: market_maker.initiate(erc20.address, 1 * 10**18, transact={'value': 2 * 10**18})) + assert_tx_failed(lambda: market_maker.initiate(erc20.address, w3.toWei(1, "ether"), transact={'value': w3.toWei(2, "ether")})) def test_eth_to_tokens(w3, market_maker, erc20): a1 = w3.eth.accounts[1] - erc20.approve(market_maker.address, 2 * 10**18, transact={}) - market_maker.initiate(erc20.address, 1 * 10**18, transact={'value': 2 * 10**18}) - assert erc20.balanceOf(market_maker.address) == 1000000000000000000 + erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={}) + market_maker.initiate(erc20.address, w3.toWei(1, "ether"), transact={'value': w3.toWei(2, "ether")}) + assert erc20.balanceOf(market_maker.address) == w3.toWei(1, "ether") assert erc20.balanceOf(a1) == 0 - assert market_maker.totalTokenQty() == 1000000000000000000 - assert market_maker.totalEthQty() == 2000000000000000000 + assert market_maker.totalTokenQty() == w3.toWei(1, "ether") + assert market_maker.totalEthQty() == w3.toWei(2, "ether") market_maker.ethToTokens(transact={'value': 100, 'from': a1}) assert erc20.balanceOf(market_maker.address) == 999999999999999950 @@ -62,30 +62,34 @@ def test_eth_to_tokens(w3, market_maker, erc20): def test_tokens_to_eth(w3, tester, market_maker, erc20): a1 = w3.eth.accounts[1] - erc20.transfer(a1, 2 * 10**18, transact={}) - erc20.approve(market_maker.address, 2 * 10**18, transact={'from': a1}) - market_maker.initiate(erc20.address, 1 * 10**18, transact={'value': 2 * 10**18, 'from': a1}) - assert w3.eth.getBalance(market_maker.address) == 2000000000000000000 - assert w3.eth.getBalance(a1) == 999997999999999999999900 - assert market_maker.totalTokenQty() == 1000000000000000000 + a1_balance_before = w3.eth.getBalance(a1) - erc20.approve(market_maker.address, 1 * 10**18, transact={'from': a1}) - market_maker.tokensToEth(1 * 10**18, transact={'from': a1}) - assert w3.eth.getBalance(market_maker.address) == 1000000000000000000 - assert w3.eth.getBalance(a1) == 999998999999999999999900 - assert market_maker.totalTokenQty() == 
2000000000000000000 - assert market_maker.totalEthQty() == 1000000000000000000 + erc20.transfer(a1, w3.toWei(2, "ether"), transact={}) + erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={'from': a1}) + market_maker.initiate(erc20.address, w3.toWei(1, "ether"), transact={'value': w3.toWei(2, "ether"), 'from': a1}) + assert w3.eth.getBalance(market_maker.address) == w3.toWei(2, "ether") + assert w3.eth.getBalance(a1) == a1_balance_before - w3.toWei(2, "ether") # sent 2 eth, with initiate. + assert market_maker.totalTokenQty() == w3.toWei(1, "ether") + + erc20.approve(market_maker.address, w3.toWei(1, "ether"), transact={'from': a1}) + market_maker.tokensToEth(w3.toWei(1, "ether"), transact={'from': a1}) + assert w3.eth.getBalance(market_maker.address) == w3.toWei(1, "ether") # 1 eth less in market. + assert w3.eth.getBalance(a1) == a1_balance_before - w3.toWei(1, "ether") # got 1 eth back, for trade. + assert market_maker.totalTokenQty() == w3.toWei(2, "ether") # Tokens increased by 1 + assert market_maker.totalEthQty() == w3.toWei(1, "ether") def test_owner_withdraw(w3, tester, market_maker, erc20, assert_tx_failed): a0, a1 = w3.eth.accounts[:2] - erc20.approve(market_maker.address, 2 * 10**18, transact={}) - market_maker.initiate(erc20.address, 1 * 10**18, transact={'value': 2 * 10**18}) - assert w3.eth.getBalance(a0) == 999994000000000000000000 - assert erc20.balanceOf(a0) == 20999999000000000000000000 - - # Only owner can call ownerWithdraw - assert_tx_failed(lambda: market_maker.ownerWithdraw(transact={'from': a1})) + a0_balance_before = w3.eth.getBalance(a0) + # Approve 2 eth transfers. + erc20.approve(market_maker.address, w3.toWei(2, "ether"), transact={}) + # Initiate market with 2 eth value. + market_maker.initiate(erc20.address, w3.toWei(1, "ether"), transact={'value': w3.toWei(2, "ether")}) + assert w3.eth.getBalance(a0) == a0_balance_before - w3.toWei(2, "ether") # 2 eth was sent to market_maker contract. + assert erc20.balanceOf(a0) == TOKEN_TOTAL_SUPPLY - w3.toWei(1, "ether") # a0's balance is locked up in market_maker contract. + + assert_tx_failed(lambda: market_maker.ownerWithdraw(transact={'from': a1})) # Only owner can call ownerWithdraw market_maker.ownerWithdraw(transact={}) - assert w3.eth.getBalance(a0) == 999996000000000000000000 - assert erc20.balanceOf(a0) == 21000000000000000000000000 + assert w3.eth.getBalance(a0) == a0_balance_before # Eth balance restored. + assert erc20.balanceOf(a0) == TOKEN_TOTAL_SUPPLY # Tokens returned to a0. 
diff --git a/tests/examples/safe_remote_purchase/test_safe_remote_purchase.py b/tests/examples/safe_remote_purchase/test_safe_remote_purchase.py index fbcb92f7b4..8e029507e0 100644 --- a/tests/examples/safe_remote_purchase/test_safe_remote_purchase.py +++ b/tests/examples/safe_remote_purchase/test_safe_remote_purchase.py @@ -9,11 +9,6 @@ import pytest -# Inital balance of accounts -INIT_BAL_a0 = 1000000000000000000000000 -INIT_BAL_a1 = 1000000000000000000000000 - - @pytest.fixture def contract_code(get_contract): with open("examples/safe_remote_purchase/safe_remote_purchase.vy") as f: @@ -22,20 +17,19 @@ def contract_code(get_contract): @pytest.fixture -def check_balance(w3, tester): - def check_balance(): +def get_balance(w3, tester): + def get_balance(): a0, a1 = w3.eth.accounts[:2] # balance of a1 = seller, a2 = buyer return w3.eth.getBalance(a0), w3.eth.getBalance(a1) - return check_balance + return get_balance -def test_initial_state(w3, assert_tx_failed, get_contract, check_balance, contract_code): - assert check_balance() == (INIT_BAL_a0, INIT_BAL_a1) +def test_initial_state(w3, assert_tx_failed, get_contract, get_balance, contract_code): # Inital deposit has to be divisible by two assert_tx_failed(lambda: get_contract(contract_code, value=13)) # Seller puts item up for sale - a0_pre_bal, a1_pre_bal = check_balance() + a0_pre_bal, a1_pre_bal = get_balance() c = get_contract(contract_code, value_in_eth=2) # Check that the seller is set correctly assert c.seller() == w3.eth.accounts[0] @@ -44,26 +38,29 @@ def test_initial_state(w3, assert_tx_failed, get_contract, check_balance, contra # Check if unlocked() works correctly after initialization assert c.unlocked() is True # Check that sellers (and buyers) balance is correct - assert check_balance() == ((INIT_BAL_a0 - w3.toWei(2, 'ether')), INIT_BAL_a1) + assert get_balance() == ((a1_pre_bal - w3.toWei(2, 'ether')), a1_pre_bal) -def test_abort(w3, assert_tx_failed, check_balance, get_contract, contract_code): +def test_abort(w3, assert_tx_failed, get_balance, get_contract, contract_code): a0, a1, a2 = w3.eth.accounts[:3] - c = get_contract(contract_code, value=2) + + a0_pre_bal, a1_pre_bal = get_balance() + c = get_contract(contract_code, value=w3.toWei(2, 'ether')) + assert c.value() == w3.toWei(1, 'ether') # Only sender can trigger refund assert_tx_failed(lambda: c.abort(transact={'from': a2})) # Refund works correctly c.abort(transact={'from': a0, 'gasPrice': 0}) - assert check_balance() == (INIT_BAL_a0 - w3.toWei(2, 'ether'), INIT_BAL_a1) + assert get_balance() == (a0_pre_bal, a1_pre_bal) # Purchase in process, no refund possible c = get_contract(contract_code, value=2) c.purchase(transact={'value': 2, 'from': a1, 'gasPrice': 0}) assert_tx_failed(lambda: c.abort(transact={'from': a0})) -def test_purchase(w3, get_contract, assert_tx_failed, check_balance, contract_code): +def test_purchase(w3, get_contract, assert_tx_failed, get_balance, contract_code): a0, a1, a2, a3 = w3.eth.accounts[:4] - init_bal_a0, init_bal_a1 = check_balance() + init_bal_a0, init_bal_a1 = get_balance() c = get_contract(contract_code, value=2) # Purchase for too low/high price assert_tx_failed(lambda: c.purchase(transact={'value': 1, 'from': a1})) @@ -75,14 +72,14 @@ def test_purchase(w3, get_contract, assert_tx_failed, check_balance, contract_co # Check if contract is locked correctly assert c.unlocked() is False # Check balances, both deposits should have been deducted - assert check_balance() == (init_bal_a0 - 2, init_bal_a1 - 2) + assert get_balance() == 
(init_bal_a0 - 2, init_bal_a1 - 2) # Allow nobody else to purchase assert_tx_failed(lambda: c.purchase(transact={'value': 2, 'from': a3})) -def test_received(w3, get_contract, assert_tx_failed, check_balance, contract_code): +def test_received(w3, get_contract, assert_tx_failed, get_balance, contract_code): a0, a1 = w3.eth.accounts[:2] - init_bal_a0, init_bal_a1 = check_balance() + init_bal_a0, init_bal_a1 = get_balance() c = get_contract(contract_code, value=2) # Can only be called after purchase assert_tx_failed(lambda: c.received(transact={'from': a1, 'gasPrice': 0})) @@ -93,10 +90,10 @@ def test_received(w3, get_contract, assert_tx_failed, check_balance, contract_co # Check if buyer can call receive c.received(transact={'from': a1, 'gasPrice': 0}) # Final check if everything worked. 1 value has been transferred - assert check_balance() == (init_bal_a0 + 1, init_bal_a1 - 1) + assert get_balance() == (init_bal_a0 + 1, init_bal_a1 - 1) -def test_received_reentrancy(w3, get_contract, assert_tx_failed, check_balance, contract_code): +def test_received_reentrancy(w3, get_contract, assert_tx_failed, get_balance, contract_code): buyer_contract_code = """ contract PurchaseContract: diff --git a/tests/examples/wallet/test_wallet.py b/tests/examples/wallet/test_wallet.py index bf09527423..fe3b46df2e 100644 --- a/tests/examples/wallet/test_wallet.py +++ b/tests/examples/wallet/test_wallet.py @@ -89,8 +89,8 @@ def test_javascript_signatures(w3, get_contract): ) for x in map(lambda z: w3.toBytes(hexstr=z[2:]), raw_sigs) ] - h = w3.sha3((0).to_bytes(32, "big") + b'\x00' * 12 + w3.toBytes(hexstr=recipient[2:]) + (25).to_bytes(32, "big") + b'') - h2 = w3.sha3(b"\x19Ethereum Signed Message:\n32" + h) + h = w3.keccak((0).to_bytes(32, "big") + b'\x00' * 12 + w3.toBytes(hexstr=recipient[2:]) + (25).to_bytes(32, "big") + b'') + h2 = w3.keccak(b"\x19Ethereum Signed Message:\n32" + h) # Check to make sure the signatures are valid assert is_same_address(Account.recoverHash(h2, sigs[0]), accounts[0]) diff --git a/tests/parser/features/test_assert.py b/tests/parser/features/test_assert.py index b8e593aeec..3a04ee5aa6 100644 --- a/tests/parser/features/test_assert.py +++ b/tests/parser/features/test_assert.py @@ -48,15 +48,15 @@ def test2(a: int128, b: int128) -> int128: with pytest.raises(TransactionFailed) as e_info: c.test(0) - assert e_info.value.args[0] == b'larger than one please' + assert e_info.value.args[0] == 'larger than one please' # a = 0, b = 1 with pytest.raises(TransactionFailed) as e_info: c.test2(0, 1) - assert e_info.value.args[0] == b'a is not large enough' + assert e_info.value.args[0] == 'a is not large enough' # a = 1, b = 0 with pytest.raises(TransactionFailed) as e_info: c.test2(2, 2) - assert e_info.value.args[0] == b'b may only be 1' + assert e_info.value.args[0] == 'b may only be 1' # return correct value assert c.test2(5, 1) == 17 diff --git a/tests/parser/functions/test_default_parameters.py b/tests/parser/functions/test_default_parameters.py index 35abe193fe..4ad6bf4eb8 100644 --- a/tests/parser/functions/test_default_parameters.py +++ b/tests/parser/functions/test_default_parameters.py @@ -110,15 +110,10 @@ def bar(a: int128, b: int128 = -1) -> (int128, int128): assert c.bar(-123) == [-123, -1] assert c.bar(100, 100) == [100, 100] - # bypass abi encoding checking: - def utils_abi_is_encodable(_type, value): - return True - def validate_value(cls, value): pass monkeypatch.setattr('eth_abi.encoding.NumberEncoder.validate_value', validate_value) - 
monkeypatch.setattr('web3.utils.abi.is_encodable', utils_abi_is_encodable) assert c.bar(200, 2**127 - 1) == [200, 2**127 - 1] assert_tx_failed(lambda: c.bar(200, 2**127))
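For orientation, a minimal, hedged sketch of the two web3.py helpers the updated tests above lean on; the method names follow the diff's camelCase API, while newer web3 releases spell them to_wei/keccak.

# Illustrative sketch only -- run against a web3.py version matching the diff (camelCase API).
from web3 import Web3

one_ether = Web3.toWei(1, "ether")     # 10**18 wei; replaces the hard-coded balance constants
digest = Web3.keccak(b"some payload")  # keccak() supersedes the deprecated w3.sha3()

assert one_ether == 10**18
assert len(digest) == 32               # keccak-256 digests are 32 bytes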
Parsl__parsl-597
[ { "content": "\nclass GlobusScheme(object):\n \"\"\"Specification for accessing data on a remote executor via Globus.\n\n Parameters\n ----------\n endpoint_uuid : str\n Universally unique identifier of the Globus endpoint at which the data can be accessed.\n This can be found in the `Manage Endpoints <https://www.globus.org/app/endpoints>`_ page.\n endpoint_path : str, optional\n FIXME\n local_path : str, optional\n FIXME\n \"\"\"\n def __init__(self, endpoint_uuid, endpoint_path=None, local_path=None):\n self.endpoint_uuid = endpoint_uuid\n self.endpoint_path = endpoint_path\n self.local_path = local_path\n", "path": "parsl/data_provider/scheme.py" } ]
[ { "content": "from parsl.utils import RepresentationMixin\n\n\nclass GlobusScheme(RepresentationMixin):\n \"\"\"Specification for accessing data on a remote executor via Globus.\n\n Parameters\n ----------\n endpoint_uuid : str\n Universally unique identifier of the Globus endpoint at which the data can be accessed.\n This can be found in the `Manage Endpoints <https://www.globus.org/app/endpoints>`_ page.\n endpoint_path : str, optional\n FIXME\n local_path : str, optional\n FIXME\n \"\"\"\n def __init__(self, endpoint_uuid, endpoint_path=None, local_path=None):\n self.endpoint_uuid = endpoint_uuid\n self.endpoint_path = endpoint_path\n self.local_path = local_path\n", "path": "parsl/data_provider/scheme.py" } ]
diff --git a/parsl/data_provider/scheme.py b/parsl/data_provider/scheme.py
index 23711e9fc2..6cf4ec8e53 100644
--- a/parsl/data_provider/scheme.py
+++ b/parsl/data_provider/scheme.py
@@ -1,5 +1,7 @@
+from parsl.utils import RepresentationMixin
 
-class GlobusScheme(object):
+
+
+class GlobusScheme(RepresentationMixin):
     """Specification for accessing data on a remote executor via Globus.
 
     Parameters
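The change above only swaps the base class, so a brief note on intent: parsl.utils.RepresentationMixin derives a readable, constructor-style repr from the __init__ arguments. A minimal usage sketch follows; the printed form is an assumption about the mixin's output.

# Illustrative sketch only -- requires parsl to be installed.
from parsl.data_provider.scheme import GlobusScheme

scheme = GlobusScheme(
    endpoint_uuid="00000000-0000-0000-0000-000000000000",  # placeholder UUID
    endpoint_path="/~/data",
    local_path="/tmp/globus",
)

# With RepresentationMixin, repr() is expected to echo the constructor arguments,
# e.g. GlobusScheme(endpoint_uuid='0000...', endpoint_path='/~/data', local_path='/tmp/globus'),
# instead of the default <GlobusScheme object at 0x...>.
print(repr(scheme))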
PaddlePaddle__models-1201
[ { "content": "class TrainTaskConfig(object):\n # only support GPU currently\n use_gpu = True\n # the epoch number to train.\n pass_num = 30\n # the number of sequences contained in a mini-batch.\n # deprecated, set batch_size in args.\n batch_size = 32\n # the hyper parameters for Adam optimizer.\n # This static learning_rate will be multiplied to the LearningRateScheduler\n # derived learning rate the to get the final learning rate.\n learning_rate = 1\n beta1 = 0.9\n beta2 = 0.98\n eps = 1e-9\n # the parameters for learning rate scheduling.\n warmup_steps = 4000\n # the weight used to mix up the ground-truth distribution and the fixed\n # uniform distribution in label smoothing when training.\n # Set this as zero if label smoothing is not wanted.\n label_smooth_eps = 0.1\n # the directory for saving trained models.\n model_dir = \"trained_models\"\n # the directory for saving checkpoints.\n ckpt_dir = \"trained_ckpts\"\n # the directory for loading checkpoint.\n # If provided, continue training from the checkpoint.\n ckpt_path = None\n # the parameter to initialize the learning rate scheduler.\n # It should be provided if use checkpoints, since the checkpoint doesn't\n # include the training step counter currently.\n start_step = 0\n\n\nclass InferTaskConfig(object):\n use_gpu = True\n # the number of examples in one run for sequence generation.\n batch_size = 10\n # the parameters for beam search.\n beam_size = 5\n max_out_len = 256\n # the number of decoded sentences to output.\n n_best = 1\n # the flags indicating whether to output the special tokens.\n output_bos = False\n output_eos = False\n output_unk = True\n # the directory for loading the trained model.\n model_path = \"trained_models/pass_1.infer.model\"\n\n\nclass ModelHyperParams(object):\n # These following five vocabularies related configurations will be set\n # automatically according to the passed vocabulary path and special tokens.\n # size of source word dictionary.\n src_vocab_size = 10000\n # size of target word dictionay\n trg_vocab_size = 10000\n # index for <bos> token\n bos_idx = 0\n # index for <eos> token\n eos_idx = 1\n # index for <unk> token\n unk_idx = 2\n # max length of sequences deciding the size of position encoding table.\n # Start from 1 and count start and end tokens in.\n max_length = 256\n # the dimension for word embeddings, which is also the last dimension of\n # the input and output of multi-head attention, position-wise feed-forward\n # networks, encoder and decoder.\n d_model = 512\n # size of the hidden layer in position-wise feed-forward networks.\n d_inner_hid = 2048\n # the dimension that keys are projected to for dot-product attention.\n d_key = 64\n # the dimension that values are projected to for dot-product attention.\n d_value = 64\n # number of head used in multi-head attention.\n n_head = 8\n # number of sub-layers to be stacked in the encoder and decoder.\n n_layer = 6\n # dropout rate used by all dropout layers.\n dropout = 0.1\n # random seed used in dropout for CE.\n dropout_seed = None\n # the flag indicating whether to share embedding and softmax weights.\n # vocabularies in source and target should be same for weight sharing.\n weight_sharing = True\n\n\ndef merge_cfg_from_list(cfg_list, g_cfgs):\n \"\"\"\n Set the above global configurations using the cfg_list. 
\n \"\"\"\n assert len(cfg_list) % 2 == 0\n for key, value in zip(cfg_list[0::2], cfg_list[1::2]):\n for g_cfg in g_cfgs:\n if hasattr(g_cfg, key):\n try:\n value = eval(value)\n except Exception: # for file path\n pass\n setattr(g_cfg, key, value)\n break\n\n\n# The placeholder for batch_size in compile time. Must be -1 currently to be\n# consistent with some ops' infer-shape output in compile time, such as the\n# sequence_expand op used in beamsearch decoder.\nbatch_size = -1\n# The placeholder for squence length in compile time.\nseq_len = ModelHyperParams.max_length\n# Here list the data shapes and data types of all inputs.\n# The shapes here act as placeholder and are set to pass the infer-shape in\n# compile time.\ninput_descs = {\n # The actual data shape of src_word is:\n # [batch_size, max_src_len_in_batch, 1]\n \"src_word\": [(batch_size, seq_len, 1), \"int64\", 2],\n # The actual data shape of src_pos is:\n # [batch_size, max_src_len_in_batch, 1]\n \"src_pos\": [(batch_size, seq_len, 1), \"int64\"],\n # This input is used to remove attention weights on paddings in the\n # encoder.\n # The actual data shape of src_slf_attn_bias is:\n # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch]\n \"src_slf_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # The actual data shape of trg_word is:\n # [batch_size, max_trg_len_in_batch, 1]\n \"trg_word\": [(batch_size, seq_len, 1), \"int64\",\n 2], # lod_level is only used in fast decoder.\n # The actual data shape of trg_pos is:\n # [batch_size, max_trg_len_in_batch, 1]\n \"trg_pos\": [(batch_size, seq_len, 1), \"int64\"],\n # This input is used to remove attention weights on paddings and\n # subsequent words in the decoder.\n # The actual data shape of trg_slf_attn_bias is:\n # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch]\n \"trg_slf_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # This input is used to remove attention weights on paddings of the source\n # input in the encoder-decoder attention.\n # The actual data shape of trg_src_attn_bias is:\n # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch]\n \"trg_src_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # This input is used in independent decoder program for inference.\n # The actual data shape of enc_output is:\n # [batch_size, max_src_len_in_batch, d_model]\n \"enc_output\": [(batch_size, seq_len, ModelHyperParams.d_model), \"float32\"],\n # The actual data shape of label_word is:\n # [batch_size * max_trg_len_in_batch, 1]\n \"lbl_word\": [(batch_size * seq_len, 1), \"int64\"],\n # This input is used to mask out the loss of paddding tokens.\n # The actual data shape of label_weight is:\n # [batch_size * max_trg_len_in_batch, 1]\n \"lbl_weight\": [(batch_size * seq_len, 1), \"float32\"],\n # This input is used in beam-search decoder.\n \"init_score\": [(batch_size, 1), \"float32\"],\n}\n\n# Names of word embedding table which might be reused for weight sharing.\nword_emb_param_names = (\n \"src_word_emb_table\",\n \"trg_word_emb_table\", )\n# Names of position encoding table which will be initialized externally.\npos_enc_param_names = (\n \"src_pos_enc_table\",\n \"trg_pos_enc_table\", )\n# separated inputs for different usages.\nencoder_data_input_fields = (\n \"src_word\",\n \"src_pos\",\n \"src_slf_attn_bias\", )\ndecoder_data_input_fields = (\n \"trg_word\",\n \"trg_pos\",\n \"trg_slf_attn_bias\",\n 
\"trg_src_attn_bias\",\n \"enc_output\", )\nlabel_data_input_fields = (\n \"lbl_word\",\n \"lbl_weight\", )\n# In fast decoder, trg_pos (only containing the current time step) is generated\n# by ops and trg_slf_attn_bias is not needed.\nfast_decoder_data_input_fields = (\n \"trg_word\",\n \"init_score\",\n \"trg_src_attn_bias\", )\n", "path": "fluid/neural_machine_translation/transformer/config.py" } ]
[ { "content": "class TrainTaskConfig(object):\n # support both CPU and GPU now.\n use_gpu = True\n # the epoch number to train.\n pass_num = 30\n # the number of sequences contained in a mini-batch.\n # deprecated, set batch_size in args.\n batch_size = 32\n # the hyper parameters for Adam optimizer.\n # This static learning_rate will be multiplied to the LearningRateScheduler\n # derived learning rate the to get the final learning rate.\n learning_rate = 1\n beta1 = 0.9\n beta2 = 0.98\n eps = 1e-9\n # the parameters for learning rate scheduling.\n warmup_steps = 4000\n # the weight used to mix up the ground-truth distribution and the fixed\n # uniform distribution in label smoothing when training.\n # Set this as zero if label smoothing is not wanted.\n label_smooth_eps = 0.1\n # the directory for saving trained models.\n model_dir = \"trained_models\"\n # the directory for saving checkpoints.\n ckpt_dir = \"trained_ckpts\"\n # the directory for loading checkpoint.\n # If provided, continue training from the checkpoint.\n ckpt_path = None\n # the parameter to initialize the learning rate scheduler.\n # It should be provided if use checkpoints, since the checkpoint doesn't\n # include the training step counter currently.\n start_step = 0\n\n\nclass InferTaskConfig(object):\n use_gpu = True\n # the number of examples in one run for sequence generation.\n batch_size = 10\n # the parameters for beam search.\n beam_size = 5\n max_out_len = 256\n # the number of decoded sentences to output.\n n_best = 1\n # the flags indicating whether to output the special tokens.\n output_bos = False\n output_eos = False\n output_unk = True\n # the directory for loading the trained model.\n model_path = \"trained_models/pass_1.infer.model\"\n\n\nclass ModelHyperParams(object):\n # These following five vocabularies related configurations will be set\n # automatically according to the passed vocabulary path and special tokens.\n # size of source word dictionary.\n src_vocab_size = 10000\n # size of target word dictionay\n trg_vocab_size = 10000\n # index for <bos> token\n bos_idx = 0\n # index for <eos> token\n eos_idx = 1\n # index for <unk> token\n unk_idx = 2\n # max length of sequences deciding the size of position encoding table.\n # Start from 1 and count start and end tokens in.\n max_length = 256\n # the dimension for word embeddings, which is also the last dimension of\n # the input and output of multi-head attention, position-wise feed-forward\n # networks, encoder and decoder.\n d_model = 512\n # size of the hidden layer in position-wise feed-forward networks.\n d_inner_hid = 2048\n # the dimension that keys are projected to for dot-product attention.\n d_key = 64\n # the dimension that values are projected to for dot-product attention.\n d_value = 64\n # number of head used in multi-head attention.\n n_head = 8\n # number of sub-layers to be stacked in the encoder and decoder.\n n_layer = 6\n # dropout rate used by all dropout layers.\n dropout = 0.1\n # random seed used in dropout for CE.\n dropout_seed = None\n # the flag indicating whether to share embedding and softmax weights.\n # vocabularies in source and target should be same for weight sharing.\n weight_sharing = True\n\n\ndef merge_cfg_from_list(cfg_list, g_cfgs):\n \"\"\"\n Set the above global configurations using the cfg_list. 
\n \"\"\"\n assert len(cfg_list) % 2 == 0\n for key, value in zip(cfg_list[0::2], cfg_list[1::2]):\n for g_cfg in g_cfgs:\n if hasattr(g_cfg, key):\n try:\n value = eval(value)\n except Exception: # for file path\n pass\n setattr(g_cfg, key, value)\n break\n\n\n# The placeholder for batch_size in compile time. Must be -1 currently to be\n# consistent with some ops' infer-shape output in compile time, such as the\n# sequence_expand op used in beamsearch decoder.\nbatch_size = -1\n# The placeholder for squence length in compile time.\nseq_len = ModelHyperParams.max_length\n# Here list the data shapes and data types of all inputs.\n# The shapes here act as placeholder and are set to pass the infer-shape in\n# compile time.\ninput_descs = {\n # The actual data shape of src_word is:\n # [batch_size, max_src_len_in_batch, 1]\n \"src_word\": [(batch_size, seq_len, 1), \"int64\", 2],\n # The actual data shape of src_pos is:\n # [batch_size, max_src_len_in_batch, 1]\n \"src_pos\": [(batch_size, seq_len, 1), \"int64\"],\n # This input is used to remove attention weights on paddings in the\n # encoder.\n # The actual data shape of src_slf_attn_bias is:\n # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch]\n \"src_slf_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # The actual data shape of trg_word is:\n # [batch_size, max_trg_len_in_batch, 1]\n \"trg_word\": [(batch_size, seq_len, 1), \"int64\",\n 2], # lod_level is only used in fast decoder.\n # The actual data shape of trg_pos is:\n # [batch_size, max_trg_len_in_batch, 1]\n \"trg_pos\": [(batch_size, seq_len, 1), \"int64\"],\n # This input is used to remove attention weights on paddings and\n # subsequent words in the decoder.\n # The actual data shape of trg_slf_attn_bias is:\n # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch]\n \"trg_slf_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # This input is used to remove attention weights on paddings of the source\n # input in the encoder-decoder attention.\n # The actual data shape of trg_src_attn_bias is:\n # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch]\n \"trg_src_attn_bias\": [(batch_size, ModelHyperParams.n_head, seq_len,\n seq_len), \"float32\"],\n # This input is used in independent decoder program for inference.\n # The actual data shape of enc_output is:\n # [batch_size, max_src_len_in_batch, d_model]\n \"enc_output\": [(batch_size, seq_len, ModelHyperParams.d_model), \"float32\"],\n # The actual data shape of label_word is:\n # [batch_size * max_trg_len_in_batch, 1]\n \"lbl_word\": [(batch_size * seq_len, 1), \"int64\"],\n # This input is used to mask out the loss of paddding tokens.\n # The actual data shape of label_weight is:\n # [batch_size * max_trg_len_in_batch, 1]\n \"lbl_weight\": [(batch_size * seq_len, 1), \"float32\"],\n # This input is used in beam-search decoder.\n \"init_score\": [(batch_size, 1), \"float32\"],\n}\n\n# Names of word embedding table which might be reused for weight sharing.\nword_emb_param_names = (\n \"src_word_emb_table\",\n \"trg_word_emb_table\", )\n# Names of position encoding table which will be initialized externally.\npos_enc_param_names = (\n \"src_pos_enc_table\",\n \"trg_pos_enc_table\", )\n# separated inputs for different usages.\nencoder_data_input_fields = (\n \"src_word\",\n \"src_pos\",\n \"src_slf_attn_bias\", )\ndecoder_data_input_fields = (\n \"trg_word\",\n \"trg_pos\",\n \"trg_slf_attn_bias\",\n 
\"trg_src_attn_bias\",\n \"enc_output\", )\nlabel_data_input_fields = (\n \"lbl_word\",\n \"lbl_weight\", )\n# In fast decoder, trg_pos (only containing the current time step) is generated\n# by ops and trg_slf_attn_bias is not needed.\nfast_decoder_data_input_fields = (\n \"trg_word\",\n \"init_score\",\n \"trg_src_attn_bias\", )\n", "path": "fluid/neural_machine_translation/transformer/config.py" } ]
diff --git a/fluid/neural_machine_translation/transformer/config.py b/fluid/neural_machine_translation/transformer/config.py
index 0ed38d9c04..964b9243eb 100644
--- a/fluid/neural_machine_translation/transformer/config.py
+++ b/fluid/neural_machine_translation/transformer/config.py
@@ -1,5 +1,5 @@
 class TrainTaskConfig(object):
-    # only support GPU currently
+    # support both CPU and GPU now.
     use_gpu = True
     # the epoch number to train.
     pass_num = 30
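The diff above only rewords the use_gpu comment; for context, that flag is a class attribute normally overridden through the merge_cfg_from_list helper defined in the same config.py. A hedged sketch (the import path assumes the transformer directory is on sys.path):

# Illustrative sketch only -- config refers to
# fluid/neural_machine_translation/transformer/config.py shown above.
from config import TrainTaskConfig, merge_cfg_from_list

# Flip the flag the updated comment documents, e.g. to train on CPU.
# merge_cfg_from_list eval()s the string value, so "False" becomes the boolean False.
merge_cfg_from_list(["use_gpu", "False"], [TrainTaskConfig])

assert TrainTaskConfig.use_gpu is False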
mozmeao__snippets-service-892
[ { "content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin import filters\nfrom snippets.base.admin import actions\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass 
AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n 'status',\n filters.ChannelFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'target__name',\n )\n autocomplete_fields = (\n 'campaign',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n 'migrated_from_linked',\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n actions.publish_snippets_action,\n )\n\n fieldsets = (\n ('ID', {\n 'fields': ('id', 'name', 'status', 'creator', 'preview_url', 'migrated_from_linked')\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n )\n }\n js = (\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n text = f'''\n <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrl\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n\n def migrated_from_linked(self, obj):\n return mark_safe(\n f'<a href={obj.migrated_from.get_admin_url(full=False)}>{obj.migrated_from.name}</a>')\n migrated_from_linked.short_description = 'Migrated From'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n post_data.pop('migrated_from', None)\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n 
readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py" } ]
[ { "content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin import filters\nfrom snippets.base.admin import actions\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass 
AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n filters.ModifiedFilter,\n 'status',\n filters.ChannelFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n 'id',\n 'campaign__name',\n 'targets__name',\n )\n autocomplete_fields = (\n 'campaign',\n )\n preserve_filters = True\n readonly_fields = (\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n 'migrated_from_linked',\n )\n filter_horizontal = (\n 'targets',\n 'locales',\n )\n save_on_top = True\n save_as = True\n view_on_site = False\n actions = (\n actions.duplicate_snippets_action,\n actions.publish_snippets_action,\n )\n\n fieldsets = (\n ('ID', {\n 'fields': ('id', 'name', 'status', 'creator', 'preview_url', 'migrated_from_linked')\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': (\n 'campaign',\n 'targets',\n ('publish_start', 'publish_end'),\n 'locales',\n 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified'), 'for_qa'),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/IDFieldHighlight.css',\n )\n }\n js = (\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n text = f'''\n <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrl\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n\n def migrated_from_linked(self, obj):\n return mark_safe(\n f'<a href={obj.migrated_from.get_admin_url(full=False)}>{obj.migrated_from.name}</a>')\n migrated_from_linked.short_description = 'Migrated From'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n post_data.pop('migrated_from', None)\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n if not request.user.is_superuser:\n return self.readonly_fields + ('for_qa',)\n return self.readonly_fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request)\n if request.user.is_superuser:\n return queryset\n return queryset.filter(for_qa=False)\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n 
readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n filter_horizontal = (\n 'client_match_rules',\n )\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py" } ]
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py
index fe29ce387..de8d6296a 100644
--- a/snippets/base/admin/adminmodels.py
+++ b/snippets/base/admin/adminmodels.py
@@ -132,7 +132,7 @@ class ASRSnippetAdmin(admin.ModelAdmin):
         'name',
         'id',
         'campaign__name',
-        'target__name',
+        'targets__name',
     )
     autocomplete_fields = (
         'campaign',
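For context on the small fix above: Django admin expands every search_fields entry into an ORM lookup, so each entry has to name a real field or relation. The ASRSnippetAdmin shown earlier exposes the many-to-many field as 'targets' (see its filter_horizontal), which is why 'target__name' could not be resolved. A rough, hedged sketch of the equivalent query (model import path assumed):

# Illustrative sketch only -- approximates what the admin search builds.
from django.db.models import Q
from snippets.base.models import ASRSnippet  # import path assumed from the admin module above


def search_snippets(term):
    # 'targets__name' traverses the many-to-many 'targets' relation;
    # the old 'target__name' lookup fails because no field named 'target' exists.
    return ASRSnippet.objects.filter(
        Q(name__icontains=term)
        | Q(campaign__name__icontains=term)
        | Q(targets__name__icontains=term)
    ).distinct()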
e-valuation__EvaP-410
[ { "content": "from django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models\nfrom django.db.models import Count\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template import Context, Template, TemplateSyntaxError, TemplateEncodingError\nfrom django_fsm.db.fields import FSMField, transition\n\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\n\nimport datetime\nimport random\n\n# for converting state into student_state\nSTUDENT_STATES_NAMES = {\n 'new': 'upcoming',\n 'prepared': 'upcoming',\n 'lecturerApproved': 'upcoming',\n 'approved': 'upcoming',\n 'inEvaluation': 'inEvaluation',\n 'evaluated': 'evaluationFinished',\n 'reviewed': 'evaluationFinished',\n 'published': 'published'\n}\n\n\nclass Semester(models.Model):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (english)\"))\n\n name = Translate\n\n created_at = models.DateField(verbose_name=_(u\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(u\"semester\")\n verbose_name_plural = _(u\"semesters\")\n\n def __unicode__(self):\n return self.name\n\n @property\n def can_fsr_delete(self):\n for course in self.course_set.all():\n if not course.can_fsr_delete:\n return False\n return True\n\n @classmethod\n def get_all_with_published_courses(cls):\n return cls.objects.filter(course__state=\"published\").distinct()\n\n\nclass Questionnaire(models.Model):\n \"\"\"A named collection of questions.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(u\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(u\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(u\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(u\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(u\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(u\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n index = models.IntegerField(verbose_name=_(u\"ordering index\"))\n\n is_for_contributors = models.BooleanField(verbose_name=_(u\"is for contributors\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(u\"obsolete\"), default=False)\n\n class Meta:\n ordering = ('obsolete', 'index', 'name_de')\n verbose_name = _(u\"questionnaire\")\n verbose_name_plural = _(u\"questionnaires\")\n\n def __unicode__(self):\n return self.name\n\n @property\n def can_fsr_delete(self):\n return not self.contributions.exists()\n\n\nclass Course(models.Model):\n \"\"\"Models a single course, e.g. 
the Math 101 course of 2002.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, verbose_name=_(u\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(u\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n kind = models.CharField(max_length=1024, verbose_name=_(u\"type\"))\n\n # bachelor, master, d-school course\n degree = models.CharField(max_length=1024, verbose_name=_(u\"degree\"))\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"participants\"), blank=True)\n participant_count = models.IntegerField(verbose_name=_(u\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"voters\"), blank=True, related_name='+')\n voter_count = models.IntegerField(verbose_name=_(u\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_date = models.DateField(null=True, verbose_name=_(u\"first date to vote\"))\n vote_end_date = models.DateField(null=True, verbose_name=_(u\"last date to vote\"))\n\n # who last modified this course, shell be noted\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"+\", null=True, blank=True)\n\n class Meta:\n ordering = ('semester', 'degree', 'name_de')\n unique_together = (\n ('semester', 'degree', 'name_de'),\n ('semester', 'degree', 'name_en'),\n )\n verbose_name = _(u\"course\")\n verbose_name_plural = _(u\"courses\")\n\n def __unicode__(self):\n return self.name\n\n def clean(self):\n if self.vote_start_date and self.vote_end_date:\n if self.vote_start_date >= self.vote_end_date:\n raise ValidationError(_(u\"The vote start date must be before the vote end date.\"))\n\n def save(self, *args, **kw):\n super(Course, self).save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n\n def is_fully_checked(self):\n \"\"\"Shortcut for finding out whether all text answers to this course have been checked\"\"\"\n return not self.open_textanswer_set.exists()\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"inEvaluation\"\n and datetime.date.today() <= self.vote_end_date\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_fsr_edit(self):\n return self.state in ['new', 'prepared', 'lecturerApproved', 'approved', 'inEvaluation']\n\n def can_fsr_delete(self):\n return self.can_fsr_edit() and not self.voters.exists()\n\n def can_fsr_review(self):\n return self.state in ['inEvaluation', 'evaluated'] and not self.is_fully_checked()\n\n def can_fsr_approve(self):\n return self.state in ['new', 'prepared', 'lecturerApproved']\n\n def can_publish_grades(self):\n return self.num_voters >= settings.MIN_ANSWER_COUNT and float(self.num_voters) / self.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n @transition(field=state, source=['new', 'lecturerApproved'], target='prepared')\n def ready_for_contributors(self, send_mail=True):\n if send_mail:\n EmailTemplate.get_review_template().send_to_users_in_courses([self], ['editors'])\n\n 
@transition(field=state, source='prepared', target='lecturerApproved')\n def contributor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'lecturerApproved'], target='approved')\n def fsr_approve(self):\n pass\n\n @transition(field=state, source='prepared', target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='inEvaluation')\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source='inEvaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[is_fully_checked])\n def review_finished(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n pass\n\n @transition(field=state, source='published', target='reviewed')\n def revoke(self):\n pass\n\n @property\n def student_state(self):\n return STUDENT_STATES_NAMES[self.state]\n\n @property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @property\n def num_participants(self):\n if self.participant_count:\n return self.participant_count\n return self.participants.count()\n\n @property\n def num_voters(self):\n if self.voter_count:\n return self.voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @property\n def responsible_contributor(self):\n return self.contributions.get(responsible=True).contributor\n\n @property\n def responsible_contributors_name(self):\n return self.responsible_contributor.userprofile.full_name\n\n @property\n def responsible_contributors_username(self):\n return self.responsible_contributor.username\n\n def has_enough_questionnaires(self):\n return self.general_contribution and all(self.contributions.aggregate(Count('questionnaires')).values())\n\n def is_user_editor_or_delegate(self, user):\n if self.contributions.filter(can_edit=True, contributor=user).exists():\n return True\n else:\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if self.contributions.filter(can_edit=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_responsible_or_delegate(self, user):\n if self.contributions.filter(responsible=True, contributor=user).exists():\n return True\n else:\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if self.contributions.filter(responsible=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_contributor(self, user):\n return self.contributions.filter(contributor=user).exists()\n\n def is_user_editor(self, user):\n return self.contributions.filter(contributor=user, can_edit=True).exists()\n\n def warnings(self):\n result = []\n if self.state == 'new' and not self.has_enough_questionnaires():\n result.append(_(u\"Not enough questionnaires assigned\"))\n if self.state in ['inEvaluation', 'evaluated', 'reviewed'] and not self.can_publish_grades():\n result.append(_(u\"Not enough participants to publish results\"))\n return result\n\n @property\n def textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all())\n\n @property\n def 
open_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all(), checked=False)\n\n @property\n def checked_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all(), checked=True)\n\n @property\n def likertanswer_set(self):\n \"\"\"Pseudo relationship to all Likert answers for this course\"\"\"\n return LikertAnswer.objects.filter(contribution__in=self.contributions.all())\n\n @property\n def gradeanswer_set(self):\n \"\"\"Pseudo relationship to all grade answers for this course\"\"\"\n return GradeAnswer.objects.filter(contribution__in=self.contributions.all())\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n course = models.ForeignKey(Course, verbose_name=_(u\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(u\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(u\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(u\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(u\"can edit\"), default=False)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n\n def clean(self):\n # responsible contributors can always edit\n if self.responsible:\n self.can_edit = True\n\n\nclass Question(models.Model):\n \"\"\"A question including a type.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n QUESTION_KINDS = (\n (u\"T\", _(u\"Text Question\")),\n (u\"L\", _(u\"Likert Question\")),\n (u\"G\", _(u\"Grade Question\")),\n )\n\n questionnaire = models.ForeignKey(Questionnaire)\n text_de = models.TextField(verbose_name=_(u\"question text (german)\"))\n text_en = models.TextField(verbose_name=_(u\"question text (english)\"))\n kind = models.CharField(max_length=1, choices=QUESTION_KINDS,\n verbose_name=_(u\"kind of question\"))\n\n text = Translate\n\n class Meta:\n order_with_respect_to = 'questionnaire'\n verbose_name = _(u\"question\")\n verbose_name_plural = _(u\"questions\")\n\n @property\n def answer_class(self):\n if self.kind == u\"T\":\n return TextAnswer\n elif self.kind == u\"L\":\n return LikertAnswer\n elif self.kind == u\"G\":\n return GradeAnswer\n else:\n raise Exception(\"Unknown answer kind: %r\" % self.kind)\n\n def is_likert_question(self):\n return self.answer_class == LikertAnswer\n\n def is_text_question(self):\n return self.answer_class == TextAnswer\n\n def is_grade_question(self):\n return self.answer_class == GradeAnswer\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. 
Concrete subclasses are `LikertAnswer`,\n `TextAnswer` and `GradeAnswer`.\"\"\"\n\n question = models.ForeignKey(Question)\n contribution = models.ForeignKey(Contribution)\n\n class Meta:\n abstract = True\n verbose_name = _(u\"answer\")\n verbose_name_plural = _(u\"answers\")\n\n\nclass LikertAnswer(Answer):\n \"\"\"A Likert-scale answer to a question with `1` being *strongly agree* and `5`\n being *strongly disagree*.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(u\"answer\"))\n\n class Meta:\n verbose_name = _(u\"Likert answer\")\n verbose_name_plural = _(u\"Likert answers\")\n\n\nclass GradeAnswer(Answer):\n \"\"\"A grade answer to a question with `1` being best and `5` being worst.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(u\"answer\"))\n\n class Meta:\n verbose_name = _(u\"grade answer\")\n verbose_name_plural = _(u\"grade answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n elements_per_page = 5\n\n reviewed_answer = models.TextField(verbose_name=_(u\"reviewed answer\"), blank=True, null=True)\n original_answer = models.TextField(verbose_name=_(u\"original answer\"), blank=True)\n\n checked = models.BooleanField(verbose_name=_(u\"answer checked\"), default=False)\n hidden = models.BooleanField(verbose_name=_(u\"hide answer\"), default=False)\n\n class Meta:\n verbose_name = _(u\"text answer\")\n verbose_name_plural = _(u\"text answers\")\n\n def _answer_get(self):\n return self.reviewed_answer or self.original_answer\n\n def _answer_set(self, value):\n self.original_answer = value\n self.reviewed_answer = None\n\n answer = property(_answer_get, _answer_set)\n\n\nclass FaqSection(models.Model):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n order = models.IntegerField(verbose_name=_(\"section order\"))\n\n title_de = models.TextField(verbose_name=_(u\"section title (german)\"))\n title_en = models.TextField(verbose_name=_(u\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(u\"section\")\n verbose_name_plural = _(u\"sections\")\n\n\nclass FaqQuestion(models.Model):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n section = models.ForeignKey(FaqSection, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"))\n\n question_de = models.TextField(verbose_name=_(\"question (german)\"))\n question_en = models.TextField(verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(u\"question\")\n verbose_name_plural = _(u\"questions\")\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(settings.AUTH_USER_MODEL)\n\n # extending first_name and last_name from the user\n title = models.CharField(verbose_name=_(u\"Title\"), max_length=1024, blank=True, null=True)\n\n # picture of the user\n picture = models.ImageField(verbose_name=_(u\"Picture\"), upload_to=\"pictures\", blank=True, null=True)\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be 
sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"CC Users\"), related_name=\"cc_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31-1\n\n login_key = models.IntegerField(verbose_name=_(u\"Login Key\"), blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(u\"Login Key Validity\"), null=True)\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n def __unicode__(self):\n return unicode(self.user)\n\n @property\n def full_name(self):\n if self.user.last_name:\n name = self.user.last_name\n if self.user.first_name:\n name = self.user.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.user.username\n\n @property\n def can_fsr_delete(self):\n return not self.is_contributor\n\n @property\n def enrolled_in_courses(self):\n return self.user.course_set.exists()\n\n @property\n def is_contributor(self):\n return self.user.contributions.exists()\n\n @property\n def is_editor(self):\n return self.user.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.user.contributions.all())\n\n @property\n def is_delegate(self):\n return self.delegates.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @classmethod\n def email_needs_login_key(cls, email):\n return not any([email.endswith(\"@\" + domain) for domain in settings.INSTITUTION_EMAIL_DOMAINS])\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.user.email)\n\n @classmethod\n def get_for_user(cls, user):\n obj, _ = cls.objects.get_or_create(user=user)\n return obj\n\n def generate_login_key(self):\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = datetime.date.today() + datetime.timedelta(settings.LOGIN_KEY_VALIDITY)\n\n @staticmethod\n @receiver(post_save, sender=settings.AUTH_USER_MODEL)\n def create_user_profile(sender, instance, created, raw, **kwargs):\n \"\"\"Creates a UserProfile object whenever a User is created.\"\"\"\n if created and not raw:\n UserProfile.objects.create(user=instance)\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except (TemplateSyntaxError, TemplateEncodingError) as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(u\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n @classmethod\n def get_review_template(cls):\n return cls.objects.get(name=\"Lecturer Review Notice\")\n\n @classmethod\n def get_reminder_template(cls):\n return cls.objects.get(name=\"Student Reminder\")\n\n @classmethod\n def get_publish_template(cls):\n return cls.objects.get(name=\"Publishing Notice\")\n\n @classmethod\n def get_login_key_template(cls):\n 
return cls.objects.get(name=\"Login Key Created\")\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups):\n recipients = []\n\n if \"responsible\" in recipient_groups:\n recipients += [course.responsible_contributor]\n\n if \"contributors\" in recipient_groups:\n recipients += [c.contributor for c in course.contributions.exclude(contributor=None)]\n elif \"editors\" in recipient_groups:\n recipients += [c.contributor for c in course.contributions.exclude(contributor=None).filter(can_edit=True)]\n\n if \"all_participants\" in recipient_groups:\n recipients += course.participants.all()\n elif \"due_participants\" in recipient_groups:\n recipients += course.due_participants\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n def send_to_users_in_courses(self, courses, recipient_groups):\n user_course_map = {}\n for course in courses:\n responsible = UserProfile.get_for_user(course.responsible_contributor)\n for user in self.recipient_list_for_course(course, recipient_groups):\n if user.email and user not in responsible.cc_users.all() and user not in responsible.delegates.all():\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.iteritems():\n cc_users = []\n if (\"responsible\" in recipient_groups or \"editors\" in recipient_groups) and any(course.is_user_editor(user) for course in courses):\n cc_users += UserProfile.get_for_user(user).delegates.all()\n cc_users += UserProfile.get_for_user(user).cc_users.all()\n cc_addresses = [p.email for p in cc_users if p.email]\n\n mail = EmailMessage(\n subject = self.render_string(self.subject, {'user': user, 'courses': courses}),\n body = self.render_string(self.body, {'user': user, 'courses': courses}),\n to = [user.email],\n cc = cc_addresses,\n bcc = [a[1] for a in settings.MANAGERS],\n headers = {'Reply-To': settings.REPLY_TO_EMAIL})\n mail.send(False)\n\n def send_to_user(self, user):\n if not user.email:\n return\n\n mail = EmailMessage(\n subject = self.render_string(self.subject, {'user': user}),\n body = self.render_string(self.body, {'user': user}),\n to = [user.email],\n bcc = [a[1] for a in settings.MANAGERS],\n headers = {'Reply-To': settings.REPLY_TO_EMAIL})\n mail.send(False)\n\n", "path": "evap/evaluation/models.py" } ]
[ { "content": "from django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.db import models\nfrom django.db.models import Count\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template import Context, Template, TemplateSyntaxError, TemplateEncodingError\nfrom django_fsm.db.fields import FSMField, transition\n\n# see evaluation.meta for the use of Translate in this file\nfrom evap.evaluation.meta import LocalizeModelBase, Translate\n\nimport datetime\nimport random\n\n# for converting state into student_state\nSTUDENT_STATES_NAMES = {\n 'new': 'upcoming',\n 'prepared': 'upcoming',\n 'lecturerApproved': 'upcoming',\n 'approved': 'upcoming',\n 'inEvaluation': 'inEvaluation',\n 'evaluated': 'evaluationFinished',\n 'reviewed': 'evaluationFinished',\n 'published': 'published'\n}\n\n\nclass Semester(models.Model):\n \"\"\"Represents a semester, e.g. the winter term of 2011/2012.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (english)\"))\n\n name = Translate\n\n created_at = models.DateField(verbose_name=_(u\"created at\"), auto_now_add=True)\n\n class Meta:\n ordering = ('-created_at', 'name_de')\n verbose_name = _(u\"semester\")\n verbose_name_plural = _(u\"semesters\")\n\n def __unicode__(self):\n return self.name\n\n @property\n def can_fsr_delete(self):\n for course in self.course_set.all():\n if not course.can_fsr_delete:\n return False\n return True\n\n @classmethod\n def get_all_with_published_courses(cls):\n return cls.objects.filter(course__state=\"published\").distinct()\n\n\nclass Questionnaire(models.Model):\n \"\"\"A named collection of questions.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n name_de = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, unique=True, verbose_name=_(u\"name (english)\"))\n name = Translate\n\n description_de = models.TextField(verbose_name=_(u\"description (german)\"), blank=True, null=True)\n description_en = models.TextField(verbose_name=_(u\"description (english)\"), blank=True, null=True)\n description = Translate\n\n public_name_de = models.CharField(max_length=1024, verbose_name=_(u\"display name (german)\"))\n public_name_en = models.CharField(max_length=1024, verbose_name=_(u\"display name (english)\"))\n public_name = Translate\n\n teaser_de = models.TextField(verbose_name=_(u\"teaser (german)\"), blank=True, null=True)\n teaser_en = models.TextField(verbose_name=_(u\"teaser (english)\"), blank=True, null=True)\n teaser = Translate\n\n index = models.IntegerField(verbose_name=_(u\"ordering index\"))\n\n is_for_contributors = models.BooleanField(verbose_name=_(u\"is for contributors\"), default=False)\n obsolete = models.BooleanField(verbose_name=_(u\"obsolete\"), default=False)\n\n class Meta:\n ordering = ('obsolete', 'index', 'name_de')\n verbose_name = _(u\"questionnaire\")\n verbose_name_plural = _(u\"questionnaires\")\n\n def __unicode__(self):\n return self.name\n\n @property\n def can_fsr_delete(self):\n return not self.contributions.exists()\n\n\nclass Course(models.Model):\n \"\"\"Models a single course, e.g. 
the Math 101 course of 2002.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n state = FSMField(default='new', protected=True)\n\n semester = models.ForeignKey(Semester, verbose_name=_(u\"semester\"))\n\n name_de = models.CharField(max_length=1024, verbose_name=_(u\"name (german)\"))\n name_en = models.CharField(max_length=1024, verbose_name=_(u\"name (english)\"))\n name = Translate\n\n # type of course: lecture, seminar, project\n kind = models.CharField(max_length=1024, verbose_name=_(u\"type\"))\n\n # bachelor, master, d-school course\n degree = models.CharField(max_length=1024, verbose_name=_(u\"degree\"))\n\n # students that are allowed to vote\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"participants\"), blank=True)\n participant_count = models.IntegerField(verbose_name=_(u\"participant count\"), blank=True, null=True, default=None)\n\n # students that already voted\n voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"voters\"), blank=True, related_name='+')\n voter_count = models.IntegerField(verbose_name=_(u\"voter count\"), blank=True, null=True, default=None)\n\n # when the evaluation takes place\n vote_start_date = models.DateField(null=True, verbose_name=_(u\"first date to vote\"))\n vote_end_date = models.DateField(null=True, verbose_name=_(u\"last date to vote\"))\n\n # who last modified this course, shell be noted\n last_modified_time = models.DateTimeField(auto_now=True)\n last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"+\", null=True, blank=True)\n\n class Meta:\n ordering = ('semester', 'degree', 'name_de')\n unique_together = (\n ('semester', 'degree', 'name_de'),\n ('semester', 'degree', 'name_en'),\n )\n verbose_name = _(u\"course\")\n verbose_name_plural = _(u\"courses\")\n\n def __unicode__(self):\n return self.name\n\n def clean(self):\n if self.vote_start_date and self.vote_end_date:\n if self.vote_start_date >= self.vote_end_date:\n raise ValidationError(_(u\"The vote start date must be before the vote end date.\"))\n\n def save(self, *args, **kw):\n super(Course, self).save(*args, **kw)\n\n # make sure there is a general contribution\n if not self.general_contribution:\n self.contributions.create(contributor=None)\n\n def is_fully_checked(self):\n \"\"\"Shortcut for finding out whether all text answers to this course have been checked\"\"\"\n return not self.open_textanswer_set.exists()\n\n def can_user_vote(self, user):\n \"\"\"Returns whether the user is allowed to vote on this course.\"\"\"\n return (self.state == \"inEvaluation\"\n and datetime.date.today() <= self.vote_end_date\n and user in self.participants.all()\n and user not in self.voters.all())\n\n def can_fsr_edit(self):\n return self.state in ['new', 'prepared', 'lecturerApproved', 'approved', 'inEvaluation']\n\n def can_fsr_delete(self):\n return self.can_fsr_edit() and not self.voters.exists()\n\n def can_fsr_review(self):\n return self.state in ['inEvaluation', 'evaluated'] and not self.is_fully_checked()\n\n def can_fsr_approve(self):\n return self.state in ['new', 'prepared', 'lecturerApproved']\n\n def can_publish_grades(self):\n return self.num_voters >= settings.MIN_ANSWER_COUNT and float(self.num_voters) / self.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n @transition(field=state, source=['new', 'lecturerApproved'], target='prepared')\n def ready_for_contributors(self, send_mail=True):\n if send_mail:\n EmailTemplate.get_review_template().send_to_users_in_courses([self], ['editors'])\n\n 
@transition(field=state, source='prepared', target='lecturerApproved')\n def contributor_approve(self):\n pass\n\n @transition(field=state, source=['new', 'prepared', 'lecturerApproved'], target='approved')\n def fsr_approve(self):\n pass\n\n @transition(field=state, source='prepared', target='new')\n def revert_to_new(self):\n pass\n\n @transition(field=state, source='approved', target='inEvaluation')\n def evaluation_begin(self):\n pass\n\n @transition(field=state, source='inEvaluation', target='evaluated')\n def evaluation_end(self):\n pass\n\n @transition(field=state, source='evaluated', target='reviewed', conditions=[is_fully_checked])\n def review_finished(self):\n pass\n\n @transition(field=state, source='reviewed', target='published')\n def publish(self):\n pass\n\n @transition(field=state, source='published', target='reviewed')\n def revoke(self):\n pass\n\n @property\n def student_state(self):\n return STUDENT_STATES_NAMES[self.state]\n\n @property\n def general_contribution(self):\n try:\n return self.contributions.get(contributor=None)\n except Contribution.DoesNotExist:\n return None\n\n @property\n def num_participants(self):\n if self.participant_count:\n return self.participant_count\n return self.participants.count()\n\n @property\n def num_voters(self):\n if self.voter_count:\n return self.voter_count\n return self.voters.count()\n\n @property\n def due_participants(self):\n return self.participants.exclude(pk__in=self.voters.all())\n\n @property\n def responsible_contributor(self):\n return self.contributions.get(responsible=True).contributor\n\n @property\n def responsible_contributors_name(self):\n return self.responsible_contributor.userprofile.full_name\n\n @property\n def responsible_contributors_username(self):\n return self.responsible_contributor.username\n\n def has_enough_questionnaires(self):\n return self.general_contribution and all(self.contributions.aggregate(Count('questionnaires')).values())\n\n def is_user_editor_or_delegate(self, user):\n if self.contributions.filter(can_edit=True, contributor=user).exists():\n return True\n else:\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if self.contributions.filter(can_edit=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_responsible_or_delegate(self, user):\n if self.contributions.filter(responsible=True, contributor=user).exists():\n return True\n else:\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if self.contributions.filter(responsible=True, contributor__in=represented_users).exists():\n return True\n\n return False\n\n def is_user_contributor(self, user):\n return self.contributions.filter(contributor=user).exists()\n\n def is_user_editor(self, user):\n return self.contributions.filter(contributor=user, can_edit=True).exists()\n\n def warnings(self):\n result = []\n if self.state == 'new' and not self.has_enough_questionnaires():\n result.append(_(u\"Not enough questionnaires assigned\"))\n if self.state in ['inEvaluation', 'evaluated', 'reviewed'] and not self.can_publish_grades():\n result.append(_(u\"Not enough participants to publish results\"))\n return result\n\n @property\n def textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all())\n\n @property\n def 
open_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all(), checked=False)\n\n @property\n def checked_textanswer_set(self):\n \"\"\"Pseudo relationship to all text answers for this course\"\"\"\n return TextAnswer.objects.filter(contribution__in=self.contributions.all(), checked=True)\n\n @property\n def likertanswer_set(self):\n \"\"\"Pseudo relationship to all Likert answers for this course\"\"\"\n return LikertAnswer.objects.filter(contribution__in=self.contributions.all())\n\n @property\n def gradeanswer_set(self):\n \"\"\"Pseudo relationship to all grade answers for this course\"\"\"\n return GradeAnswer.objects.filter(contribution__in=self.contributions.all())\n\n\nclass Contribution(models.Model):\n \"\"\"A contributor who is assigned to a course and his questionnaires.\"\"\"\n\n course = models.ForeignKey(Course, verbose_name=_(u\"course\"), related_name='contributions')\n contributor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(u\"contributor\"), blank=True, null=True, related_name='contributions')\n questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_(u\"questionnaires\"), blank=True, related_name=\"contributions\")\n responsible = models.BooleanField(verbose_name=_(u\"responsible\"), default=False)\n can_edit = models.BooleanField(verbose_name=_(u\"can edit\"), default=False)\n\n class Meta:\n unique_together = (\n ('course', 'contributor'),\n )\n\n def clean(self):\n # responsible contributors can always edit\n if self.responsible:\n self.can_edit = True\n\n\nclass Question(models.Model):\n \"\"\"A question including a type.\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n QUESTION_KINDS = (\n (u\"T\", _(u\"Text Question\")),\n (u\"L\", _(u\"Likert Question\")),\n (u\"G\", _(u\"Grade Question\")),\n )\n\n questionnaire = models.ForeignKey(Questionnaire)\n text_de = models.TextField(verbose_name=_(u\"question text (german)\"))\n text_en = models.TextField(verbose_name=_(u\"question text (english)\"))\n kind = models.CharField(max_length=1, choices=QUESTION_KINDS,\n verbose_name=_(u\"kind of question\"))\n\n text = Translate\n\n class Meta:\n order_with_respect_to = 'questionnaire'\n verbose_name = _(u\"question\")\n verbose_name_plural = _(u\"questions\")\n\n @property\n def answer_class(self):\n if self.kind == u\"T\":\n return TextAnswer\n elif self.kind == u\"L\":\n return LikertAnswer\n elif self.kind == u\"G\":\n return GradeAnswer\n else:\n raise Exception(\"Unknown answer kind: %r\" % self.kind)\n\n def is_likert_question(self):\n return self.answer_class == LikertAnswer\n\n def is_text_question(self):\n return self.answer_class == TextAnswer\n\n def is_grade_question(self):\n return self.answer_class == GradeAnswer\n\n\nclass Answer(models.Model):\n \"\"\"An abstract answer to a question. For anonymity purposes, the answering\n user ist not stored in the object. 
Concrete subclasses are `LikertAnswer`,\n `TextAnswer` and `GradeAnswer`.\"\"\"\n\n question = models.ForeignKey(Question)\n contribution = models.ForeignKey(Contribution)\n\n class Meta:\n abstract = True\n verbose_name = _(u\"answer\")\n verbose_name_plural = _(u\"answers\")\n\n\nclass LikertAnswer(Answer):\n \"\"\"A Likert-scale answer to a question with `1` being *strongly agree* and `5`\n being *strongly disagree*.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(u\"answer\"))\n\n class Meta:\n verbose_name = _(u\"Likert answer\")\n verbose_name_plural = _(u\"Likert answers\")\n\n\nclass GradeAnswer(Answer):\n \"\"\"A grade answer to a question with `1` being best and `5` being worst.\"\"\"\n\n answer = models.IntegerField(verbose_name=_(u\"answer\"))\n\n class Meta:\n verbose_name = _(u\"grade answer\")\n verbose_name_plural = _(u\"grade answers\")\n\n\nclass TextAnswer(Answer):\n \"\"\"A free-form text answer to a question (usually a comment about a course\n or a contributor).\"\"\"\n\n elements_per_page = 5\n\n reviewed_answer = models.TextField(verbose_name=_(u\"reviewed answer\"), blank=True, null=True)\n original_answer = models.TextField(verbose_name=_(u\"original answer\"), blank=True)\n\n checked = models.BooleanField(verbose_name=_(u\"answer checked\"), default=False)\n hidden = models.BooleanField(verbose_name=_(u\"hide answer\"), default=False)\n\n class Meta:\n verbose_name = _(u\"text answer\")\n verbose_name_plural = _(u\"text answers\")\n\n def _answer_get(self):\n return self.reviewed_answer or self.original_answer\n\n def _answer_set(self, value):\n self.original_answer = value\n self.reviewed_answer = None\n\n answer = property(_answer_get, _answer_set)\n\n\nclass FaqSection(models.Model):\n \"\"\"Section in the frequently asked questions\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n order = models.IntegerField(verbose_name=_(\"section order\"))\n\n title_de = models.TextField(verbose_name=_(u\"section title (german)\"))\n title_en = models.TextField(verbose_name=_(u\"section title (english)\"))\n title = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(u\"section\")\n verbose_name_plural = _(u\"sections\")\n\n\nclass FaqQuestion(models.Model):\n \"\"\"Question and answer in the frequently asked questions\"\"\"\n\n __metaclass__ = LocalizeModelBase\n\n section = models.ForeignKey(FaqSection, related_name=\"questions\")\n\n order = models.IntegerField(verbose_name=_(\"question order\"))\n\n question_de = models.TextField(verbose_name=_(\"question (german)\"))\n question_en = models.TextField(verbose_name=_(\"question (english)\"))\n question = Translate\n\n answer_de = models.TextField(verbose_name=_(\"answer (german)\"))\n answer_en = models.TextField(verbose_name=_(\"answer (german)\"))\n answer = Translate\n\n class Meta:\n ordering = ['order', ]\n verbose_name = _(u\"question\")\n verbose_name_plural = _(u\"questions\")\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(settings.AUTH_USER_MODEL)\n\n # extending first_name and last_name from the user\n title = models.CharField(verbose_name=_(u\"Title\"), max_length=1024, blank=True, null=True)\n\n # picture of the user\n picture = models.ImageField(verbose_name=_(u\"Picture\"), upload_to=\"pictures\", blank=True, null=True)\n\n # delegates of the user, which can also manage their courses\n delegates = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"Delegates\"), related_name=\"represented_users\", blank=True)\n\n # users to which all emails should be 
sent in cc without giving them delegate rights\n cc_users = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_(u\"CC Users\"), related_name=\"cc_users\", blank=True)\n\n # key for url based login of this user\n MAX_LOGIN_KEY = 2**31-1\n\n login_key = models.IntegerField(verbose_name=_(u\"Login Key\"), blank=True, null=True)\n login_key_valid_until = models.DateField(verbose_name=_(u\"Login Key Validity\"), null=True)\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n def __unicode__(self):\n return unicode(self.user)\n\n @property\n def full_name(self):\n if self.user.last_name:\n name = self.user.last_name\n if self.user.first_name:\n name = self.user.first_name + \" \" + name\n if self.title:\n name = self.title + \" \" + name\n return name\n else:\n return self.user.username\n\n @property\n def can_fsr_delete(self):\n return not self.is_contributor\n\n @property\n def enrolled_in_courses(self):\n return self.user.course_set.exists()\n\n @property\n def is_contributor(self):\n return self.user.contributions.exists()\n\n @property\n def is_editor(self):\n return self.user.contributions.filter(can_edit=True).exists()\n\n @property\n def is_responsible(self):\n # in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it\n return any(contribution.responsible for contribution in self.user.contributions.all())\n\n @property\n def is_delegate(self):\n return self.user.represented_users.exists()\n\n @property\n def is_editor_or_delegate(self):\n return self.is_editor or self.is_delegate\n\n @classmethod\n def email_needs_login_key(cls, email):\n return not any([email.endswith(\"@\" + domain) for domain in settings.INSTITUTION_EMAIL_DOMAINS])\n\n @property\n def needs_login_key(self):\n return UserProfile.email_needs_login_key(self.user.email)\n\n @classmethod\n def get_for_user(cls, user):\n obj, _ = cls.objects.get_or_create(user=user)\n return obj\n\n def generate_login_key(self):\n while True:\n key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)\n if not UserProfile.objects.filter(login_key=key).exists():\n # key not yet used\n self.login_key = key\n break\n\n self.refresh_login_key()\n\n def refresh_login_key(self):\n self.login_key_valid_until = datetime.date.today() + datetime.timedelta(settings.LOGIN_KEY_VALIDITY)\n\n @staticmethod\n @receiver(post_save, sender=settings.AUTH_USER_MODEL)\n def create_user_profile(sender, instance, created, raw, **kwargs):\n \"\"\"Creates a UserProfile object whenever a User is created.\"\"\"\n if created and not raw:\n UserProfile.objects.create(user=instance)\n\n\ndef validate_template(value):\n \"\"\"Field validator which ensures that the value can be compiled into a\n Django Template.\"\"\"\n try:\n Template(value)\n except (TemplateSyntaxError, TemplateEncodingError) as e:\n raise ValidationError(str(e))\n\n\nclass EmailTemplate(models.Model):\n name = models.CharField(max_length=1024, unique=True, verbose_name=_(\"Name\"))\n\n subject = models.CharField(max_length=1024, verbose_name=_(u\"Subject\"), validators=[validate_template])\n body = models.TextField(verbose_name=_(\"Body\"), validators=[validate_template])\n\n @classmethod\n def get_review_template(cls):\n return cls.objects.get(name=\"Lecturer Review Notice\")\n\n @classmethod\n def get_reminder_template(cls):\n return cls.objects.get(name=\"Student Reminder\")\n\n @classmethod\n def get_publish_template(cls):\n return cls.objects.get(name=\"Publishing Notice\")\n\n @classmethod\n def 
get_login_key_template(cls):\n return cls.objects.get(name=\"Login Key Created\")\n\n @classmethod\n def recipient_list_for_course(cls, course, recipient_groups):\n recipients = []\n\n if \"responsible\" in recipient_groups:\n recipients += [course.responsible_contributor]\n\n if \"contributors\" in recipient_groups:\n recipients += [c.contributor for c in course.contributions.exclude(contributor=None)]\n elif \"editors\" in recipient_groups:\n recipients += [c.contributor for c in course.contributions.exclude(contributor=None).filter(can_edit=True)]\n\n if \"all_participants\" in recipient_groups:\n recipients += course.participants.all()\n elif \"due_participants\" in recipient_groups:\n recipients += course.due_participants\n\n return recipients\n\n @classmethod\n def render_string(cls, text, dictionary):\n return Template(text).render(Context(dictionary, autoescape=False))\n\n def send_to_users_in_courses(self, courses, recipient_groups):\n user_course_map = {}\n for course in courses:\n responsible = UserProfile.get_for_user(course.responsible_contributor)\n for user in self.recipient_list_for_course(course, recipient_groups):\n if user.email and user not in responsible.cc_users.all() and user not in responsible.delegates.all():\n user_course_map.setdefault(user, []).append(course)\n\n for user, courses in user_course_map.iteritems():\n cc_users = []\n if (\"responsible\" in recipient_groups or \"editors\" in recipient_groups) and any(course.is_user_editor(user) for course in courses):\n cc_users += UserProfile.get_for_user(user).delegates.all()\n cc_users += UserProfile.get_for_user(user).cc_users.all()\n cc_addresses = [p.email for p in cc_users if p.email]\n\n mail = EmailMessage(\n subject = self.render_string(self.subject, {'user': user, 'courses': courses}),\n body = self.render_string(self.body, {'user': user, 'courses': courses}),\n to = [user.email],\n cc = cc_addresses,\n bcc = [a[1] for a in settings.MANAGERS],\n headers = {'Reply-To': settings.REPLY_TO_EMAIL})\n mail.send(False)\n\n def send_to_user(self, user):\n if not user.email:\n return\n\n mail = EmailMessage(\n subject = self.render_string(self.subject, {'user': user}),\n body = self.render_string(self.body, {'user': user}),\n to = [user.email],\n bcc = [a[1] for a in settings.MANAGERS],\n headers = {'Reply-To': settings.REPLY_TO_EMAIL})\n mail.send(False)\n\n", "path": "evap/evaluation/models.py" } ]
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index fd8f82be5e..0fb1a729b2 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -554,7 +554,7 @@ def is_responsible(self): @property def is_delegate(self): - return self.delegates.exists() + return self.user.represented_users.exists() @property def is_editor_or_delegate(self):
googleapis__google-api-python-client-273
[ { "content": "\"\"\"Channel notifications support.\n\nClasses and functions to support channel subscriptions and notifications\non those channels.\n\nNotes:\n - This code is based on experimental APIs and is subject to change.\n - Notification does not do deduplication of notification ids, that's up to\n the receiver.\n - Storing the Channel between calls is up to the caller.\n\n\nExample setting up a channel:\n\n # Create a new channel that gets notifications via webhook.\n channel = new_webhook_channel(\"https://example.com/my_web_hook\")\n\n # Store the channel, keyed by 'channel.id'. Store it before calling the\n # watch method because notifications may start arriving before the watch\n # method returns.\n ...\n\n resp = service.objects().watchAll(\n bucket=\"some_bucket_id\", body=channel.body()).execute()\n channel.update(resp)\n\n # Store the channel, keyed by 'channel.id'. Store it after being updated\n # since the resource_id value will now be correct, and that's needed to\n # stop a subscription.\n ...\n\n\nAn example Webhook implementation using webapp2. Note that webapp2 puts\nheaders in a case insensitive dictionary, as headers aren't guaranteed to\nalways be upper case.\n\n id = self.request.headers[X_GOOG_CHANNEL_ID]\n\n # Retrieve the channel by id.\n channel = ...\n\n # Parse notification from the headers, including validating the id.\n n = notification_from_headers(channel, self.request.headers)\n\n # Do app specific stuff with the notification here.\n if n.resource_state == 'sync':\n # Code to handle sync state.\n elif n.resource_state == 'exists':\n # Code to handle the exists state.\n elif n.resource_state == 'not_exists':\n # Code to handle the not exists state.\n\n\nExample of unsubscribing.\n\n service.channels().stop(channel.body())\n\"\"\"\nfrom __future__ import absolute_import\n\nimport datetime\nimport uuid\n\nfrom googleapiclient import errors\nfrom oauth2client import util\nimport six\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client import util\nexcept ImportError:\n from oauth2client import _helpers as util\n\n\n# The unix time epoch starts at midnight 1970.\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n# Map the names of the parameters in the JSON channel description to\n# the parameter names we use in the Channel class.\nCHANNEL_PARAMS = {\n 'address': 'address',\n 'id': 'id',\n 'expiration': 'expiration',\n 'params': 'params',\n 'resourceId': 'resource_id',\n 'resourceUri': 'resource_uri',\n 'type': 'type',\n 'token': 'token',\n }\n\nX_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'\nX_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'\nX_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'\nX_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'\nX_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'\n\n\ndef _upper_header_keys(headers):\n new_headers = {}\n for k, v in six.iteritems(headers):\n new_headers[k.upper()] = v\n return new_headers\n\n\nclass Notification(object):\n \"\"\"A Notification from a Channel.\n\n Notifications are not usually constructed directly, but are returned\n from functions like notification_from_headers().\n\n Attributes:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored.\n uri: str, The address of the resource being monitored.\n resource_id: str, The unique identifier of the version of the resource at\n this event.\n \"\"\"\n @util.positional(5)\n def __init__(self, message_number, state, resource_uri, resource_id):\n 
\"\"\"Notification constructor.\n\n Args:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored. Can be one\n of \"exists\", \"not_exists\", or \"sync\".\n resource_uri: str, The address of the resource being monitored.\n resource_id: str, The identifier of the watched resource.\n \"\"\"\n self.message_number = message_number\n self.state = state\n self.resource_uri = resource_uri\n self.resource_id = resource_id\n\n\nclass Channel(object):\n \"\"\"A Channel for notifications.\n\n Usually not constructed directly, instead it is returned from helper\n functions like new_webhook_channel().\n\n Attributes:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n\n @util.positional(5)\n def __init__(self, type, id, token, address, expiration=None,\n params=None, resource_id=\"\", resource_uri=\"\"):\n \"\"\"Create a new Channel.\n\n In user code, this Channel constructor will not typically be called\n manually since there are functions for creating channels for each specific\n type with a more customized set of arguments to pass.\n\n Args:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. 
Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n self.type = type\n self.id = id\n self.token = token\n self.address = address\n self.expiration = expiration\n self.params = params\n self.resource_id = resource_id\n self.resource_uri = resource_uri\n\n def body(self):\n \"\"\"Build a body from the Channel.\n\n Constructs a dictionary that's appropriate for passing into watch()\n methods as the value of body argument.\n\n Returns:\n A dictionary representation of the channel.\n \"\"\"\n result = {\n 'id': self.id,\n 'token': self.token,\n 'type': self.type,\n 'address': self.address\n }\n if self.params:\n result['params'] = self.params\n if self.resource_id:\n result['resourceId'] = self.resource_id\n if self.resource_uri:\n result['resourceUri'] = self.resource_uri\n if self.expiration:\n result['expiration'] = self.expiration\n\n return result\n\n def update(self, resp):\n \"\"\"Update a channel with information from the response of watch().\n\n When a request is sent to watch() a resource, the response returned\n from the watch() request is a dictionary with updated channel information,\n such as the resource_id, which is needed when stopping a subscription.\n\n Args:\n resp: dict, The response from a watch() method.\n \"\"\"\n for json_name, param_name in six.iteritems(CHANNEL_PARAMS):\n value = resp.get(json_name)\n if value is not None:\n setattr(self, param_name, value)\n\n\ndef notification_from_headers(channel, headers):\n \"\"\"Parse a notification from the webhook request headers, validate\n the notification, and return a Notification object.\n\n Args:\n channel: Channel, The channel that the notification is associated with.\n headers: dict, A dictionary like object that contains the request headers\n from the webhook HTTP request.\n\n Returns:\n A Notification object.\n\n Raises:\n errors.InvalidNotificationError if the notification is invalid.\n ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.\n \"\"\"\n headers = _upper_header_keys(headers)\n channel_id = headers[X_GOOG_CHANNEL_ID]\n if channel.id != channel_id:\n raise errors.InvalidNotificationError(\n 'Channel id mismatch: %s != %s' % (channel.id, channel_id))\n else:\n message_number = int(headers[X_GOOG_MESSAGE_NUMBER])\n state = headers[X_GOOG_RESOURCE_STATE]\n resource_uri = headers[X_GOOG_RESOURCE_URI]\n resource_id = headers[X_GOOG_RESOURCE_ID]\n return Notification(message_number, state, resource_uri, resource_id)\n\n\n@util.positional(2)\ndef new_webhook_channel(url, token=None, expiration=None, params=None):\n \"\"\"Create a new webhook Channel.\n\n Args:\n url: str, URL to post notifications to.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each notification delivered\n over this channel.\n expiration: datetime.datetime, A time in the future when the channel\n should expire. Can also be None if the subscription should use the\n default expiration. Note that different services may have different\n limits on how long a subscription lasts. Check the response from the\n watch() method to see the value the service has set for an expiration\n time.\n params: dict, Extra parameters to pass on channel creation. 
Currently\n not used for webhook channels.\n \"\"\"\n expiration_ms = 0\n if expiration:\n delta = expiration - EPOCH\n expiration_ms = delta.microseconds/1000 + (\n delta.seconds + delta.days*24*3600)*1000\n if expiration_ms < 0:\n expiration_ms = 0\n\n return Channel('web_hook', str(uuid.uuid4()),\n token, url, expiration=expiration_ms,\n params=params)\n\n", "path": "googleapiclient/channel.py" } ]
[ { "content": "\"\"\"Channel notifications support.\n\nClasses and functions to support channel subscriptions and notifications\non those channels.\n\nNotes:\n - This code is based on experimental APIs and is subject to change.\n - Notification does not do deduplication of notification ids, that's up to\n the receiver.\n - Storing the Channel between calls is up to the caller.\n\n\nExample setting up a channel:\n\n # Create a new channel that gets notifications via webhook.\n channel = new_webhook_channel(\"https://example.com/my_web_hook\")\n\n # Store the channel, keyed by 'channel.id'. Store it before calling the\n # watch method because notifications may start arriving before the watch\n # method returns.\n ...\n\n resp = service.objects().watchAll(\n bucket=\"some_bucket_id\", body=channel.body()).execute()\n channel.update(resp)\n\n # Store the channel, keyed by 'channel.id'. Store it after being updated\n # since the resource_id value will now be correct, and that's needed to\n # stop a subscription.\n ...\n\n\nAn example Webhook implementation using webapp2. Note that webapp2 puts\nheaders in a case insensitive dictionary, as headers aren't guaranteed to\nalways be upper case.\n\n id = self.request.headers[X_GOOG_CHANNEL_ID]\n\n # Retrieve the channel by id.\n channel = ...\n\n # Parse notification from the headers, including validating the id.\n n = notification_from_headers(channel, self.request.headers)\n\n # Do app specific stuff with the notification here.\n if n.resource_state == 'sync':\n # Code to handle sync state.\n elif n.resource_state == 'exists':\n # Code to handle the exists state.\n elif n.resource_state == 'not_exists':\n # Code to handle the not exists state.\n\n\nExample of unsubscribing.\n\n service.channels().stop(channel.body())\n\"\"\"\nfrom __future__ import absolute_import\n\nimport datetime\nimport uuid\n\nfrom googleapiclient import errors\nimport six\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client import util\nexcept ImportError:\n from oauth2client import _helpers as util\n\n\n# The unix time epoch starts at midnight 1970.\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n# Map the names of the parameters in the JSON channel description to\n# the parameter names we use in the Channel class.\nCHANNEL_PARAMS = {\n 'address': 'address',\n 'id': 'id',\n 'expiration': 'expiration',\n 'params': 'params',\n 'resourceId': 'resource_id',\n 'resourceUri': 'resource_uri',\n 'type': 'type',\n 'token': 'token',\n }\n\nX_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'\nX_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'\nX_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'\nX_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'\nX_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'\n\n\ndef _upper_header_keys(headers):\n new_headers = {}\n for k, v in six.iteritems(headers):\n new_headers[k.upper()] = v\n return new_headers\n\n\nclass Notification(object):\n \"\"\"A Notification from a Channel.\n\n Notifications are not usually constructed directly, but are returned\n from functions like notification_from_headers().\n\n Attributes:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored.\n uri: str, The address of the resource being monitored.\n resource_id: str, The unique identifier of the version of the resource at\n this event.\n \"\"\"\n @util.positional(5)\n def __init__(self, message_number, state, resource_uri, resource_id):\n \"\"\"Notification constructor.\n\n 
Args:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored. Can be one\n of \"exists\", \"not_exists\", or \"sync\".\n resource_uri: str, The address of the resource being monitored.\n resource_id: str, The identifier of the watched resource.\n \"\"\"\n self.message_number = message_number\n self.state = state\n self.resource_uri = resource_uri\n self.resource_id = resource_id\n\n\nclass Channel(object):\n \"\"\"A Channel for notifications.\n\n Usually not constructed directly, instead it is returned from helper\n functions like new_webhook_channel().\n\n Attributes:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n\n @util.positional(5)\n def __init__(self, type, id, token, address, expiration=None,\n params=None, resource_id=\"\", resource_uri=\"\"):\n \"\"\"Create a new Channel.\n\n In user code, this Channel constructor will not typically be called\n manually since there are functions for creating channels for each specific\n type with a more customized set of arguments to pass.\n\n Args:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. 
Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n self.type = type\n self.id = id\n self.token = token\n self.address = address\n self.expiration = expiration\n self.params = params\n self.resource_id = resource_id\n self.resource_uri = resource_uri\n\n def body(self):\n \"\"\"Build a body from the Channel.\n\n Constructs a dictionary that's appropriate for passing into watch()\n methods as the value of body argument.\n\n Returns:\n A dictionary representation of the channel.\n \"\"\"\n result = {\n 'id': self.id,\n 'token': self.token,\n 'type': self.type,\n 'address': self.address\n }\n if self.params:\n result['params'] = self.params\n if self.resource_id:\n result['resourceId'] = self.resource_id\n if self.resource_uri:\n result['resourceUri'] = self.resource_uri\n if self.expiration:\n result['expiration'] = self.expiration\n\n return result\n\n def update(self, resp):\n \"\"\"Update a channel with information from the response of watch().\n\n When a request is sent to watch() a resource, the response returned\n from the watch() request is a dictionary with updated channel information,\n such as the resource_id, which is needed when stopping a subscription.\n\n Args:\n resp: dict, The response from a watch() method.\n \"\"\"\n for json_name, param_name in six.iteritems(CHANNEL_PARAMS):\n value = resp.get(json_name)\n if value is not None:\n setattr(self, param_name, value)\n\n\ndef notification_from_headers(channel, headers):\n \"\"\"Parse a notification from the webhook request headers, validate\n the notification, and return a Notification object.\n\n Args:\n channel: Channel, The channel that the notification is associated with.\n headers: dict, A dictionary like object that contains the request headers\n from the webhook HTTP request.\n\n Returns:\n A Notification object.\n\n Raises:\n errors.InvalidNotificationError if the notification is invalid.\n ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.\n \"\"\"\n headers = _upper_header_keys(headers)\n channel_id = headers[X_GOOG_CHANNEL_ID]\n if channel.id != channel_id:\n raise errors.InvalidNotificationError(\n 'Channel id mismatch: %s != %s' % (channel.id, channel_id))\n else:\n message_number = int(headers[X_GOOG_MESSAGE_NUMBER])\n state = headers[X_GOOG_RESOURCE_STATE]\n resource_uri = headers[X_GOOG_RESOURCE_URI]\n resource_id = headers[X_GOOG_RESOURCE_ID]\n return Notification(message_number, state, resource_uri, resource_id)\n\n\n@util.positional(2)\ndef new_webhook_channel(url, token=None, expiration=None, params=None):\n \"\"\"Create a new webhook Channel.\n\n Args:\n url: str, URL to post notifications to.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each notification delivered\n over this channel.\n expiration: datetime.datetime, A time in the future when the channel\n should expire. Can also be None if the subscription should use the\n default expiration. Note that different services may have different\n limits on how long a subscription lasts. Check the response from the\n watch() method to see the value the service has set for an expiration\n time.\n params: dict, Extra parameters to pass on channel creation. 
Currently\n not used for webhook channels.\n \"\"\"\n expiration_ms = 0\n if expiration:\n delta = expiration - EPOCH\n expiration_ms = delta.microseconds/1000 + (\n delta.seconds + delta.days*24*3600)*1000\n if expiration_ms < 0:\n expiration_ms = 0\n\n return Channel('web_hook', str(uuid.uuid4()),\n token, url, expiration=expiration_ms,\n params=params)\n\n", "path": "googleapiclient/channel.py" } ]
diff --git a/googleapiclient/channel.py b/googleapiclient/channel.py index 7d0feb9482b..a38b4ffbcf7 100644 --- a/googleapiclient/channel.py +++ b/googleapiclient/channel.py @@ -61,7 +61,6 @@ import uuid from googleapiclient import errors -from oauth2client import util import six # Oauth2client < 3 has the positional helper in 'util', >= 3 has it
secdev__scapy-1330
[ { "content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <phil@secdev.org>\n## This program is published under a GPLv2 license\n\n\"\"\"\nFields that hold random numbers.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport random, time, math\nfrom scapy.base_classes import Net\nfrom scapy.compat import *\nfrom scapy.utils import corrupt_bits, corrupt_bytes\nfrom scapy.modules.six.moves import range\n\n####################\n## Random numbers ##\n####################\n\n\nclass RandomEnumeration:\n \"\"\"iterate through a sequence in random order.\n When all the values have been drawn, if forever=1, the drawing is done again.\n If renewkeys=0, the draw will be in the same order, guaranteeing that the same\n number will be drawn in not less than the number of integers of the sequence\"\"\"\n\n def __init__(self, inf, sup, seed=None, forever=1, renewkeys=0):\n self.forever = forever\n self.renewkeys = renewkeys\n self.inf = inf\n self.rnd = random.Random(seed)\n self.sbox_size = 256\n\n self.top = sup-inf+1\n\n n=0\n while (1<<n) < self.top:\n n += 1\n self.n =n\n\n self.fs = min(3, (n+1)//2)\n self.fsmask = 2**self.fs-1\n self.rounds = max(self.n, 3)\n self.turns = 0\n self.i = 0\n\n def __iter__(self):\n return self\n\n def next(self):\n while True:\n if self.turns == 0 or (self.i == 0 and self.renewkeys):\n self.cnt_key = self.rnd.randint(0, 2**self.n-1)\n self.sbox = [self.rnd.randint(0, self.fsmask)\n for _ in range(self.sbox_size)]\n self.turns += 1\n while self.i < 2**self.n:\n ct = self.i^self.cnt_key\n self.i += 1\n for _ in range(self.rounds): # Unbalanced Feistel Network\n lsb = ct & self.fsmask\n ct >>= self.fs\n lsb ^= self.sbox[ct%self.sbox_size]\n ct |= lsb << (self.n-self.fs)\n\n if ct < self.top:\n return self.inf+ct\n self.i = 0\n if not self.forever:\n raise StopIteration\n __next__ = next\n\n\nclass VolatileValue(object):\n def __repr__(self):\n return \"<%s>\" % self.__class__.__name__\n\n def __eq__(self, other):\n x = self._fix()\n y = other._fix() if isinstance(other, VolatileValue) else other\n if not isinstance(x, type(y)):\n return False\n return x == y\n\n def __getattr__(self, attr):\n if attr in [\"__setstate__\", \"__getstate__\"]:\n raise AttributeError(attr)\n return getattr(self._fix(), attr)\n\n def __str__(self):\n return str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n def __len__(self):\n return len(self._fix())\n\n def _fix(self):\n return None\n\n\nclass RandField(VolatileValue):\n pass\n\n\nclass RandNum(RandField):\n \"\"\"Instances evaluate to random integers in selected range\"\"\"\n min = 0\n max = 0\n\n def __init__(self, min, max):\n self.min = min\n self.max = max\n\n def _fix(self):\n return random.randrange(self.min, self.max+1)\n\n def __int__(self):\n return int(self._fix())\n\n def __index__(self):\n return int(self)\n\n def __nonzero__(self):\n return bool(self.value)\n __bool__ = __nonzero__\n\n def __add__(self, other):\n return self._fix() + other\n\n def __radd__(self, other):\n return other + self._fix()\n\n def __sub__(self, other):\n return self._fix() - other\n\n def __rsub__(self, other):\n return other - self._fix()\n\n def __mul__(self, other):\n return self._fix() * other\n\n def __rmul__(self, other):\n return other * self._fix()\n\n def __floordiv__(self, other):\n return self._fix() / other\n __div__ = __floordiv__\n\n def __lt__(self, other):\n return self._fix() < other\n\n def __le__(self, other):\n return 
self._fix() <= other\n\n def __eq__(self, other):\n return self._fix() == other\n\n def __ne__(self, other):\n return self._fix() != other\n\n def __ge__(self, other):\n return self._fix() >= other\n\n def __gt__(self, other):\n return self._fix() > other\n\n def __lshift__(self, other):\n return self._fix() << other\n\n def __rshift__(self, other):\n return self._fix() >> other\n\n def __and__(self, other):\n return self._fix() & other\n\n def __rand__(self, other):\n return other & self._fix()\n\n def __or__(self, other):\n return self._fix() | other\n\n def __ror__(self, other):\n return other | self._fix()\n\n\nclass RandNumGamma(RandNum):\n def __init__(self, alpha, beta):\n self.alpha = alpha\n self.beta = beta\n\n def _fix(self):\n return int(round(random.gammavariate(self.alpha, self.beta)))\n\n\nclass RandNumGauss(RandNum):\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def _fix(self):\n return int(round(random.gauss(self.mu, self.sigma)))\n\n\nclass RandNumExpo(RandNum):\n def __init__(self, lambd, base=0):\n self.lambd = lambd\n self.base = base\n\n def _fix(self):\n return self.base+int(round(random.expovariate(self.lambd)))\n\n\nclass RandEnum(RandNum):\n \"\"\"Instances evaluate to integer sampling without replacement from the given interval\"\"\"\n\n def __init__(self, min, max, seed=None):\n self.seq = RandomEnumeration(min, max, seed)\n\n def _fix(self):\n return next(self.seq)\n\n\nclass RandByte(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**8-1)\n\n\nclass RandSByte(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandShort(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**16-1)\n\n\nclass RandSShort(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandInt(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**32-1)\n\n\nclass RandSInt(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandLong(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**64-1)\n\n\nclass RandSLong(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandEnumByte(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**8-1)\n\n\nclass RandEnumSByte(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandEnumShort(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**16-1)\n\n\nclass RandEnumSShort(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandEnumInt(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**32-1)\n\n\nclass RandEnumSInt(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandEnumLong(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**64-1)\n\n\nclass RandEnumSLong(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandEnumKeys(RandEnum):\n \"\"\"Picks a random value from dict keys list. 
\"\"\"\n\n def __init__(self, enum, seed=None):\n self.enum = list(enum)\n self.seq = RandomEnumeration(0, len(self.enum) - 1, seed)\n\n def _fix(self):\n return self.enum[next(self.seq)]\n\n\nclass RandChoice(RandField):\n def __init__(self, *args):\n if not args:\n raise TypeError(\"RandChoice needs at least one choice\")\n self._choice = args\n\n def _fix(self):\n return random.choice(self._choice)\n\n\nclass RandString(RandField):\n def __init__(self, size=None, chars=b\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\"):\n if size is None:\n size = RandNumExpo(0.01)\n self.size = size\n self.chars = chars\n\n def _fix(self):\n s = b\"\"\n for _ in range(self.size):\n s += chb(random.choice(self.chars))\n return s\n\n def __str__(self):\n return plain_str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n def __mul__(self, n):\n return self._fix()*n\n\n\nclass RandBin(RandString):\n def __init__(self, size=None):\n super(RandBin, self).__init__(size=size, chars=b\"\".join(chb(c) for c in range(256)))\n\n\nclass RandTermString(RandBin):\n def __init__(self, size, term):\n self.term = raw(term)\n super(RandTermString, self).__init__(size=size)\n\n def _fix(self):\n return RandBin._fix(self)+self.term\n\n\nclass RandIP(RandString):\n def __init__(self, iptemplate=\"0.0.0.0/0\"):\n self.ip = Net(iptemplate)\n\n def _fix(self):\n return self.ip.choice()\n\n\nclass RandMAC(RandString):\n def __init__(self, template=\"*\"):\n template += \":*:*:*:*:*\"\n template = template.split(\":\")\n self.mac = ()\n for i in range(6):\n if template[i] == \"*\":\n v = RandByte()\n elif \"-\" in template[i]:\n x, y = template[i].split(\"-\")\n v = RandNum(int(x, 16), int(y, 16))\n else:\n v = int(template[i], 16)\n self.mac += (v,)\n\n def _fix(self):\n return \"%02x:%02x:%02x:%02x:%02x:%02x\" % self.mac\n\n\nclass RandIP6(RandString):\n def __init__(self, ip6template=\"**\"):\n self.tmpl = ip6template\n self.sp = self.tmpl.split(\":\")\n for i, v in enumerate(self.sp):\n if not v or v == \"**\":\n continue\n if \"-\" in v:\n a, b = v.split(\"-\")\n elif v == \"*\":\n a=b=\"\"\n else:\n a=b=v\n\n if not a:\n a = \"0\"\n if not b:\n b = \"ffff\"\n if a==b:\n self.sp[i] = int(a, 16)\n else:\n self.sp[i] = RandNum(int(a, 16), int(b, 16))\n self.variable = \"\" in self.sp\n self.multi = self.sp.count(\"**\")\n\n def _fix(self):\n done = 0\n nbm = self.multi\n ip = []\n for i, n in enumerate(self.sp):\n if n == \"**\":\n nbm -= 1\n remain = 8-(len(self.sp)-i-1)-len(ip)+nbm\n if \"\" in self.sp:\n remain += 1\n if nbm or self.variable:\n remain = random.randint(0, remain)\n for j in range(remain):\n ip.append(\"%04x\" % random.randint(0, 65535))\n elif isinstance(n, RandNum):\n ip.append(\"%04x\" % n)\n elif n == 0:\n ip.append(\"0\")\n elif not n:\n ip.append(\"\")\n else:\n ip.append(\"%04x\" % n)\n if len(ip) == 9:\n ip.remove(\"\")\n if ip[-1] == \"\":\n ip[-1] = \"0\"\n return \":\".join(ip)\n\n\nclass RandOID(RandString):\n def __init__(self, fmt=None, depth=RandNumExpo(0.1), idnum=RandNumExpo(0.01)):\n self.ori_fmt = fmt\n if fmt is not None:\n fmt = fmt.split(\".\")\n for i in range(len(fmt)):\n if \"-\" in fmt[i]:\n fmt[i] = tuple(map(int, fmt[i].split(\"-\")))\n self.fmt = fmt\n self.depth = depth\n self.idnum = idnum\n\n def __repr__(self):\n if self.ori_fmt is None:\n return \"<%s>\" % self.__class__.__name__\n else:\n return \"<%s [%s]>\" % (self.__class__.__name__, self.ori_fmt)\n\n def _fix(self):\n if self.fmt is None:\n return \".\".join(str(self.idnum) for _ in 
range(1 + self.depth))\n else:\n oid = []\n for i in self.fmt:\n if i == \"*\":\n oid.append(str(self.idnum))\n elif i == \"**\":\n oid += [str(self.idnum) for i in range(1 + self.depth)]\n elif isinstance(i, tuple):\n oid.append(str(random.randrange(*i)))\n else:\n oid.append(i)\n return \".\".join(oid)\n\n\nclass RandRegExp(RandField):\n def __init__(self, regexp, lambda_=0.3,):\n self._regexp = regexp\n self._lambda = lambda_\n\n @staticmethod\n def choice_expand(s): #XXX does not support special sets like (ex ':alnum:')\n m = \"\"\n invert = s and s[0] == \"^\"\n while True:\n p = s.find(\"-\")\n if p < 0:\n break\n if p == 0 or p == len(s)-1:\n m = \"-\"\n if p:\n s = s[:-1]\n else:\n s = s[1:]\n else:\n c1 = s[p-1]\n c2 = s[p+1]\n rng = \"\".join(map(chr, range(ord(c1), ord(c2)+1)))\n s = s[:p-1]+rng+s[p+1:]\n res = m+s\n if invert:\n res = \"\".join(chr(x) for x in range(256) if chr(x) not in res)\n return res\n\n @staticmethod\n def stack_fix(lst, index):\n r = \"\"\n mul = 1\n for e in lst:\n if isinstance(e, list):\n if mul != 1:\n mul = mul-1\n r += RandRegExp.stack_fix(e[1:]*mul, index)\n # only the last iteration should be kept for back reference\n f = RandRegExp.stack_fix(e[1:], index)\n for i, idx in enumerate(index):\n if e is idx:\n index[i] = f\n r += f\n mul = 1\n elif isinstance(e, tuple):\n kind, val = e\n if kind == \"cite\":\n r += index[val-1]\n elif kind == \"repeat\":\n mul = val\n\n elif kind == \"choice\":\n if mul == 1:\n c = random.choice(val)\n r += RandRegExp.stack_fix(c[1:], index)\n else:\n r += RandRegExp.stack_fix([e]*mul, index)\n mul = 1\n else:\n if mul != 1:\n r += RandRegExp.stack_fix([e]*mul, index)\n mul = 1\n else:\n r += str(e)\n return r\n\n def _fix(self):\n stack = [None]\n index = []\n current = stack\n i = 0\n ln = len(self._regexp)\n interp = True\n while i < ln:\n c = self._regexp[i]\n i+=1\n\n if c == '(':\n current = [current]\n current[0].append(current)\n elif c == '|':\n p = current[0]\n ch = p[-1]\n if not isinstance(ch, tuple):\n ch = (\"choice\", [current])\n p[-1] = ch\n else:\n ch[1].append(current)\n current = [p]\n elif c == ')':\n ch = current[0][-1]\n if isinstance(ch, tuple):\n ch[1].append(current)\n index.append(current)\n current = current[0]\n elif c == '[' or c == '{':\n current = [current]\n current[0].append(current)\n interp = False\n elif c == ']':\n current = current[0]\n choice = RandRegExp.choice_expand(\"\".join(current.pop()[1:]))\n current.append(RandChoice(*list(choice)))\n interp = True\n elif c == '}':\n current = current[0]\n num = \"\".join(current.pop()[1:])\n e = current.pop()\n if \",\" not in num:\n n = int(num)\n current.append([current]+[e]*n)\n else:\n num_min, num_max = num.split(\",\")\n if not num_min:\n num_min = \"0\"\n if num_max:\n n = RandNum(int(num_min), int(num_max))\n else:\n n = RandNumExpo(self._lambda, base=int(num_min))\n current.append((\"repeat\", n))\n current.append(e)\n interp = True\n elif c == '\\\\':\n c = self._regexp[i]\n if c == \"s\":\n c = RandChoice(\" \", \"\\t\")\n elif c in \"0123456789\":\n c = (\"cite\", ord(c)-0x30)\n current.append(c)\n i += 1\n elif not interp:\n current.append(c)\n elif c == '+':\n e = current.pop()\n current.append([current]+[e]*(int(random.expovariate(self._lambda))+1))\n elif c == '*':\n e = current.pop()\n current.append([current]+[e]*int(random.expovariate(self._lambda)))\n elif c == '?':\n if random.randint(0, 1):\n current.pop()\n elif c == '.':\n current.append(RandChoice(*[chr(x) for x in range(256)]))\n elif c == '$' or c == '^':\n 
pass\n else:\n current.append(c)\n\n return RandRegExp.stack_fix(stack[1:], index)\n\n def __repr__(self):\n return \"<%s [%r]>\" % (self.__class__.__name__, self._regexp)\n\n\nclass RandSingularity(RandChoice):\n pass\n\n\nclass RandSingNum(RandSingularity):\n @staticmethod\n def make_power_of_two(end):\n sign = 1\n if end == 0:\n end = 1\n if end < 0:\n end = -end\n sign = -1\n end_n = int(math.log(end)/math.log(2))+1\n return {sign*2**i for i in range(end_n)}\n\n def __init__(self, mn, mx):\n sing = {0, mn, mx, int((mn+mx)/2)}\n sing |= self.make_power_of_two(mn)\n sing |= self.make_power_of_two(mx)\n for i in sing.copy():\n sing.add(i+1)\n sing.add(i-1)\n for i in sing.copy():\n if not mn <= i <= mx:\n sing.remove(i)\n self._choice = list(sing)\n self._choice.sort()\n\n\nclass RandSingByte(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**8-1)\n\n\nclass RandSingSByte(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandSingShort(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**16-1)\n\n\nclass RandSingSShort(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandSingInt(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**32-1)\n\n\nclass RandSingSInt(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandSingLong(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**64-1)\n\n\nclass RandSingSLong(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandSingString(RandSingularity):\n def __init__(self):\n self._choice = [\"\",\n \"%x\",\n \"%%\",\n \"%s\",\n \"%i\",\n \"%n\",\n \"%x%x%x%x%x%x%x%x%x\",\n \"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\",\n \"%\",\n \"%%%\",\n \"A\"*4096,\n b\"\\x00\"*4096,\n b\"\\xff\"*4096,\n b\"\\x7f\"*4096,\n b\"\\x80\"*4096,\n \" \"*4096,\n \"\\\\\"*4096,\n \"(\"*4096,\n \"../\"*1024,\n \"/\"*1024,\n \"${HOME}\"*512,\n \" or 1=1 --\",\n \"' or 1=1 --\",\n '\" or 1=1 --',\n \" or 1=1; #\",\n \"' or 1=1; #\",\n '\" or 1=1; #',\n \";reboot;\",\n \"$(reboot)\",\n \"`reboot`\",\n \"index.php%00\",\n b\"\\x00\",\n \"%00\",\n \"\\\\\",\n \"../../../../../../../../../../../../../../../../../etc/passwd\",\n \"%2e%2e%2f\" * 20 + \"etc/passwd\",\n \"%252e%252e%252f\" * 20 + \"boot.ini\",\n \"..%c0%af\" * 20 + \"etc/passwd\",\n \"..%c0%af\" * 20 + \"boot.ini\",\n \"//etc/passwd\",\n r\"..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\boot.ini\",\n \"AUX:\",\n \"CLOCK$\",\n \"COM:\",\n \"CON:\",\n \"LPT:\",\n \"LST:\",\n \"NUL:\",\n \"CON:\",\n r\"C:\\CON\\CON\",\n r\"C:\\boot.ini\",\n r\"\\\\myserver\\share\",\n \"foo.exe:\",\n \"foo.exe\\\\\", ]\n\n def __str__(self):\n return str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n\nclass RandPool(RandField):\n def __init__(self, *args):\n \"\"\"Each parameter is a volatile object or a couple (volatile object, weight)\"\"\"\n pool = []\n for p in args:\n w = 1\n if isinstance(p, tuple):\n p, w = p\n pool += [p]*w\n self._pool = pool\n\n def _fix(self):\n r = random.choice(self._pool)\n return r._fix()\n\n# Automatic timestamp\n\n\nclass AutoTime(VolatileValue):\n def __init__(self, base=None):\n if base == None:\n self.diff = 0\n else:\n self.diff = time.time()-base\n\n def _fix(self):\n return time.time()-self.diff\n\n\nclass IntAutoTime(AutoTime):\n def _fix(self):\n return int(time.time()-self.diff)\n\n\nclass ZuluTime(AutoTime):\n def 
__init__(self, diff=0):\n self.diff = diff\n\n def _fix(self):\n return time.strftime(\"%y%m%d%H%M%SZ\",\n time.gmtime(time.time() + self.diff))\n\n\nclass GeneralizedTime(AutoTime):\n def __init__(self, diff=0):\n self.diff = diff\n\n def _fix(self):\n return time.strftime(\"%Y%m%d%H%M%SZ\",\n time.gmtime(time.time() + self.diff))\n\n\nclass DelayedEval(VolatileValue):\n \"\"\" Example of usage: DelayedEval(\"time.time()\") \"\"\"\n\n def __init__(self, expr):\n self.expr = expr\n\n def _fix(self):\n return eval(self.expr)\n\n\nclass IncrementalValue(VolatileValue):\n def __init__(self, start=0, step=1, restart=-1):\n self.start = self.val = start\n self.step = step\n self.restart = restart\n\n def _fix(self):\n v = self.val\n if self.val == self.restart:\n self.val = self.start\n else:\n self.val += self.step\n return v\n\n\nclass CorruptedBytes(VolatileValue):\n def __init__(self, s, p=0.01, n=None):\n self.s = s\n self.p = p\n self.n = n\n\n def _fix(self):\n return corrupt_bytes(self.s, self.p, self.n)\n\n\nclass CorruptedBits(CorruptedBytes):\n def _fix(self):\n return corrupt_bits(self.s, self.p, self.n)\n\n", "path": "scapy/volatile.py" } ]
[ { "content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <phil@secdev.org>\n## This program is published under a GPLv2 license\n\n\"\"\"\nFields that hold random numbers.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport random, time, math\nfrom scapy.base_classes import Net\nfrom scapy.compat import *\nfrom scapy.utils import corrupt_bits, corrupt_bytes\nfrom scapy.modules.six.moves import range\n\n####################\n## Random numbers ##\n####################\n\n\nclass RandomEnumeration:\n \"\"\"iterate through a sequence in random order.\n When all the values have been drawn, if forever=1, the drawing is done again.\n If renewkeys=0, the draw will be in the same order, guaranteeing that the same\n number will be drawn in not less than the number of integers of the sequence\"\"\"\n\n def __init__(self, inf, sup, seed=None, forever=1, renewkeys=0):\n self.forever = forever\n self.renewkeys = renewkeys\n self.inf = inf\n self.rnd = random.Random(seed)\n self.sbox_size = 256\n\n self.top = sup-inf+1\n\n n=0\n while (1<<n) < self.top:\n n += 1\n self.n =n\n\n self.fs = min(3, (n+1)//2)\n self.fsmask = 2**self.fs-1\n self.rounds = max(self.n, 3)\n self.turns = 0\n self.i = 0\n\n def __iter__(self):\n return self\n\n def next(self):\n while True:\n if self.turns == 0 or (self.i == 0 and self.renewkeys):\n self.cnt_key = self.rnd.randint(0, 2**self.n-1)\n self.sbox = [self.rnd.randint(0, self.fsmask)\n for _ in range(self.sbox_size)]\n self.turns += 1\n while self.i < 2**self.n:\n ct = self.i^self.cnt_key\n self.i += 1\n for _ in range(self.rounds): # Unbalanced Feistel Network\n lsb = ct & self.fsmask\n ct >>= self.fs\n lsb ^= self.sbox[ct%self.sbox_size]\n ct |= lsb << (self.n-self.fs)\n\n if ct < self.top:\n return self.inf+ct\n self.i = 0\n if not self.forever:\n raise StopIteration\n __next__ = next\n\n\nclass VolatileValue(object):\n def __repr__(self):\n return \"<%s>\" % self.__class__.__name__\n\n def __eq__(self, other):\n x = self._fix()\n y = other._fix() if isinstance(other, VolatileValue) else other\n if not isinstance(x, type(y)):\n return False\n return x == y\n\n def __getattr__(self, attr):\n if attr in [\"__setstate__\", \"__getstate__\"]:\n raise AttributeError(attr)\n return getattr(self._fix(), attr)\n\n def __str__(self):\n return str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n def __len__(self):\n return len(self._fix())\n\n def _fix(self):\n return None\n\n\nclass RandField(VolatileValue):\n pass\n\n\nclass RandNum(RandField):\n \"\"\"Instances evaluate to random integers in selected range\"\"\"\n min = 0\n max = 0\n\n def __init__(self, min, max):\n self.min = min\n self.max = max\n\n def _fix(self):\n return random.randrange(self.min, self.max+1)\n\n def __int__(self):\n return int(self._fix())\n\n def __index__(self):\n return int(self)\n\n def __nonzero__(self):\n return bool(self._fix())\n __bool__ = __nonzero__\n\n def __add__(self, other):\n return self._fix() + other\n\n def __radd__(self, other):\n return other + self._fix()\n\n def __sub__(self, other):\n return self._fix() - other\n\n def __rsub__(self, other):\n return other - self._fix()\n\n def __mul__(self, other):\n return self._fix() * other\n\n def __rmul__(self, other):\n return other * self._fix()\n\n def __floordiv__(self, other):\n return self._fix() / other\n __div__ = __floordiv__\n\n def __lt__(self, other):\n return self._fix() < other\n\n def __le__(self, other):\n 
return self._fix() <= other\n\n def __eq__(self, other):\n return self._fix() == other\n\n def __ne__(self, other):\n return self._fix() != other\n\n def __ge__(self, other):\n return self._fix() >= other\n\n def __gt__(self, other):\n return self._fix() > other\n\n def __lshift__(self, other):\n return self._fix() << other\n\n def __rshift__(self, other):\n return self._fix() >> other\n\n def __and__(self, other):\n return self._fix() & other\n\n def __rand__(self, other):\n return other & self._fix()\n\n def __or__(self, other):\n return self._fix() | other\n\n def __ror__(self, other):\n return other | self._fix()\n\n\nclass RandNumGamma(RandNum):\n def __init__(self, alpha, beta):\n self.alpha = alpha\n self.beta = beta\n\n def _fix(self):\n return int(round(random.gammavariate(self.alpha, self.beta)))\n\n\nclass RandNumGauss(RandNum):\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def _fix(self):\n return int(round(random.gauss(self.mu, self.sigma)))\n\n\nclass RandNumExpo(RandNum):\n def __init__(self, lambd, base=0):\n self.lambd = lambd\n self.base = base\n\n def _fix(self):\n return self.base+int(round(random.expovariate(self.lambd)))\n\n\nclass RandEnum(RandNum):\n \"\"\"Instances evaluate to integer sampling without replacement from the given interval\"\"\"\n\n def __init__(self, min, max, seed=None):\n self.seq = RandomEnumeration(min, max, seed)\n\n def _fix(self):\n return next(self.seq)\n\n\nclass RandByte(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**8-1)\n\n\nclass RandSByte(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandShort(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**16-1)\n\n\nclass RandSShort(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandInt(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**32-1)\n\n\nclass RandSInt(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandLong(RandNum):\n def __init__(self):\n RandNum.__init__(self, 0, 2**64-1)\n\n\nclass RandSLong(RandNum):\n def __init__(self):\n RandNum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandEnumByte(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**8-1)\n\n\nclass RandEnumSByte(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandEnumShort(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**16-1)\n\n\nclass RandEnumSShort(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandEnumInt(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**32-1)\n\n\nclass RandEnumSInt(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandEnumLong(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, 0, 2**64-1)\n\n\nclass RandEnumSLong(RandEnum):\n def __init__(self):\n RandEnum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandEnumKeys(RandEnum):\n \"\"\"Picks a random value from dict keys list. 
\"\"\"\n\n def __init__(self, enum, seed=None):\n self.enum = list(enum)\n self.seq = RandomEnumeration(0, len(self.enum) - 1, seed)\n\n def _fix(self):\n return self.enum[next(self.seq)]\n\n\nclass RandChoice(RandField):\n def __init__(self, *args):\n if not args:\n raise TypeError(\"RandChoice needs at least one choice\")\n self._choice = args\n\n def _fix(self):\n return random.choice(self._choice)\n\n\nclass RandString(RandField):\n def __init__(self, size=None, chars=b\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\"):\n if size is None:\n size = RandNumExpo(0.01)\n self.size = size\n self.chars = chars\n\n def _fix(self):\n s = b\"\"\n for _ in range(self.size):\n s += chb(random.choice(self.chars))\n return s\n\n def __str__(self):\n return plain_str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n def __mul__(self, n):\n return self._fix()*n\n\n\nclass RandBin(RandString):\n def __init__(self, size=None):\n super(RandBin, self).__init__(size=size, chars=b\"\".join(chb(c) for c in range(256)))\n\n\nclass RandTermString(RandBin):\n def __init__(self, size, term):\n self.term = raw(term)\n super(RandTermString, self).__init__(size=size)\n\n def _fix(self):\n return RandBin._fix(self)+self.term\n\n\nclass RandIP(RandString):\n def __init__(self, iptemplate=\"0.0.0.0/0\"):\n self.ip = Net(iptemplate)\n\n def _fix(self):\n return self.ip.choice()\n\n\nclass RandMAC(RandString):\n def __init__(self, template=\"*\"):\n template += \":*:*:*:*:*\"\n template = template.split(\":\")\n self.mac = ()\n for i in range(6):\n if template[i] == \"*\":\n v = RandByte()\n elif \"-\" in template[i]:\n x, y = template[i].split(\"-\")\n v = RandNum(int(x, 16), int(y, 16))\n else:\n v = int(template[i], 16)\n self.mac += (v,)\n\n def _fix(self):\n return \"%02x:%02x:%02x:%02x:%02x:%02x\" % self.mac\n\n\nclass RandIP6(RandString):\n def __init__(self, ip6template=\"**\"):\n self.tmpl = ip6template\n self.sp = self.tmpl.split(\":\")\n for i, v in enumerate(self.sp):\n if not v or v == \"**\":\n continue\n if \"-\" in v:\n a, b = v.split(\"-\")\n elif v == \"*\":\n a=b=\"\"\n else:\n a=b=v\n\n if not a:\n a = \"0\"\n if not b:\n b = \"ffff\"\n if a==b:\n self.sp[i] = int(a, 16)\n else:\n self.sp[i] = RandNum(int(a, 16), int(b, 16))\n self.variable = \"\" in self.sp\n self.multi = self.sp.count(\"**\")\n\n def _fix(self):\n done = 0\n nbm = self.multi\n ip = []\n for i, n in enumerate(self.sp):\n if n == \"**\":\n nbm -= 1\n remain = 8-(len(self.sp)-i-1)-len(ip)+nbm\n if \"\" in self.sp:\n remain += 1\n if nbm or self.variable:\n remain = random.randint(0, remain)\n for j in range(remain):\n ip.append(\"%04x\" % random.randint(0, 65535))\n elif isinstance(n, RandNum):\n ip.append(\"%04x\" % n)\n elif n == 0:\n ip.append(\"0\")\n elif not n:\n ip.append(\"\")\n else:\n ip.append(\"%04x\" % n)\n if len(ip) == 9:\n ip.remove(\"\")\n if ip[-1] == \"\":\n ip[-1] = \"0\"\n return \":\".join(ip)\n\n\nclass RandOID(RandString):\n def __init__(self, fmt=None, depth=RandNumExpo(0.1), idnum=RandNumExpo(0.01)):\n self.ori_fmt = fmt\n if fmt is not None:\n fmt = fmt.split(\".\")\n for i in range(len(fmt)):\n if \"-\" in fmt[i]:\n fmt[i] = tuple(map(int, fmt[i].split(\"-\")))\n self.fmt = fmt\n self.depth = depth\n self.idnum = idnum\n\n def __repr__(self):\n if self.ori_fmt is None:\n return \"<%s>\" % self.__class__.__name__\n else:\n return \"<%s [%s]>\" % (self.__class__.__name__, self.ori_fmt)\n\n def _fix(self):\n if self.fmt is None:\n return \".\".join(str(self.idnum) for _ in 
range(1 + self.depth))\n else:\n oid = []\n for i in self.fmt:\n if i == \"*\":\n oid.append(str(self.idnum))\n elif i == \"**\":\n oid += [str(self.idnum) for i in range(1 + self.depth)]\n elif isinstance(i, tuple):\n oid.append(str(random.randrange(*i)))\n else:\n oid.append(i)\n return \".\".join(oid)\n\n\nclass RandRegExp(RandField):\n def __init__(self, regexp, lambda_=0.3,):\n self._regexp = regexp\n self._lambda = lambda_\n\n @staticmethod\n def choice_expand(s): #XXX does not support special sets like (ex ':alnum:')\n m = \"\"\n invert = s and s[0] == \"^\"\n while True:\n p = s.find(\"-\")\n if p < 0:\n break\n if p == 0 or p == len(s)-1:\n m = \"-\"\n if p:\n s = s[:-1]\n else:\n s = s[1:]\n else:\n c1 = s[p-1]\n c2 = s[p+1]\n rng = \"\".join(map(chr, range(ord(c1), ord(c2)+1)))\n s = s[:p-1]+rng+s[p+1:]\n res = m+s\n if invert:\n res = \"\".join(chr(x) for x in range(256) if chr(x) not in res)\n return res\n\n @staticmethod\n def stack_fix(lst, index):\n r = \"\"\n mul = 1\n for e in lst:\n if isinstance(e, list):\n if mul != 1:\n mul = mul-1\n r += RandRegExp.stack_fix(e[1:]*mul, index)\n # only the last iteration should be kept for back reference\n f = RandRegExp.stack_fix(e[1:], index)\n for i, idx in enumerate(index):\n if e is idx:\n index[i] = f\n r += f\n mul = 1\n elif isinstance(e, tuple):\n kind, val = e\n if kind == \"cite\":\n r += index[val-1]\n elif kind == \"repeat\":\n mul = val\n\n elif kind == \"choice\":\n if mul == 1:\n c = random.choice(val)\n r += RandRegExp.stack_fix(c[1:], index)\n else:\n r += RandRegExp.stack_fix([e]*mul, index)\n mul = 1\n else:\n if mul != 1:\n r += RandRegExp.stack_fix([e]*mul, index)\n mul = 1\n else:\n r += str(e)\n return r\n\n def _fix(self):\n stack = [None]\n index = []\n current = stack\n i = 0\n ln = len(self._regexp)\n interp = True\n while i < ln:\n c = self._regexp[i]\n i+=1\n\n if c == '(':\n current = [current]\n current[0].append(current)\n elif c == '|':\n p = current[0]\n ch = p[-1]\n if not isinstance(ch, tuple):\n ch = (\"choice\", [current])\n p[-1] = ch\n else:\n ch[1].append(current)\n current = [p]\n elif c == ')':\n ch = current[0][-1]\n if isinstance(ch, tuple):\n ch[1].append(current)\n index.append(current)\n current = current[0]\n elif c == '[' or c == '{':\n current = [current]\n current[0].append(current)\n interp = False\n elif c == ']':\n current = current[0]\n choice = RandRegExp.choice_expand(\"\".join(current.pop()[1:]))\n current.append(RandChoice(*list(choice)))\n interp = True\n elif c == '}':\n current = current[0]\n num = \"\".join(current.pop()[1:])\n e = current.pop()\n if \",\" not in num:\n n = int(num)\n current.append([current]+[e]*n)\n else:\n num_min, num_max = num.split(\",\")\n if not num_min:\n num_min = \"0\"\n if num_max:\n n = RandNum(int(num_min), int(num_max))\n else:\n n = RandNumExpo(self._lambda, base=int(num_min))\n current.append((\"repeat\", n))\n current.append(e)\n interp = True\n elif c == '\\\\':\n c = self._regexp[i]\n if c == \"s\":\n c = RandChoice(\" \", \"\\t\")\n elif c in \"0123456789\":\n c = (\"cite\", ord(c)-0x30)\n current.append(c)\n i += 1\n elif not interp:\n current.append(c)\n elif c == '+':\n e = current.pop()\n current.append([current]+[e]*(int(random.expovariate(self._lambda))+1))\n elif c == '*':\n e = current.pop()\n current.append([current]+[e]*int(random.expovariate(self._lambda)))\n elif c == '?':\n if random.randint(0, 1):\n current.pop()\n elif c == '.':\n current.append(RandChoice(*[chr(x) for x in range(256)]))\n elif c == '$' or c == '^':\n 
pass\n else:\n current.append(c)\n\n return RandRegExp.stack_fix(stack[1:], index)\n\n def __repr__(self):\n return \"<%s [%r]>\" % (self.__class__.__name__, self._regexp)\n\n\nclass RandSingularity(RandChoice):\n pass\n\n\nclass RandSingNum(RandSingularity):\n @staticmethod\n def make_power_of_two(end):\n sign = 1\n if end == 0:\n end = 1\n if end < 0:\n end = -end\n sign = -1\n end_n = int(math.log(end)/math.log(2))+1\n return {sign*2**i for i in range(end_n)}\n\n def __init__(self, mn, mx):\n sing = {0, mn, mx, int((mn+mx)/2)}\n sing |= self.make_power_of_two(mn)\n sing |= self.make_power_of_two(mx)\n for i in sing.copy():\n sing.add(i+1)\n sing.add(i-1)\n for i in sing.copy():\n if not mn <= i <= mx:\n sing.remove(i)\n self._choice = list(sing)\n self._choice.sort()\n\n\nclass RandSingByte(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**8-1)\n\n\nclass RandSingSByte(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**7, 2**7-1)\n\n\nclass RandSingShort(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**16-1)\n\n\nclass RandSingSShort(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**15, 2**15-1)\n\n\nclass RandSingInt(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**32-1)\n\n\nclass RandSingSInt(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**31, 2**31-1)\n\n\nclass RandSingLong(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, 0, 2**64-1)\n\n\nclass RandSingSLong(RandSingNum):\n def __init__(self):\n RandSingNum.__init__(self, -2**63, 2**63-1)\n\n\nclass RandSingString(RandSingularity):\n def __init__(self):\n self._choice = [\"\",\n \"%x\",\n \"%%\",\n \"%s\",\n \"%i\",\n \"%n\",\n \"%x%x%x%x%x%x%x%x%x\",\n \"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\",\n \"%\",\n \"%%%\",\n \"A\"*4096,\n b\"\\x00\"*4096,\n b\"\\xff\"*4096,\n b\"\\x7f\"*4096,\n b\"\\x80\"*4096,\n \" \"*4096,\n \"\\\\\"*4096,\n \"(\"*4096,\n \"../\"*1024,\n \"/\"*1024,\n \"${HOME}\"*512,\n \" or 1=1 --\",\n \"' or 1=1 --\",\n '\" or 1=1 --',\n \" or 1=1; #\",\n \"' or 1=1; #\",\n '\" or 1=1; #',\n \";reboot;\",\n \"$(reboot)\",\n \"`reboot`\",\n \"index.php%00\",\n b\"\\x00\",\n \"%00\",\n \"\\\\\",\n \"../../../../../../../../../../../../../../../../../etc/passwd\",\n \"%2e%2e%2f\" * 20 + \"etc/passwd\",\n \"%252e%252e%252f\" * 20 + \"boot.ini\",\n \"..%c0%af\" * 20 + \"etc/passwd\",\n \"..%c0%af\" * 20 + \"boot.ini\",\n \"//etc/passwd\",\n r\"..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\boot.ini\",\n \"AUX:\",\n \"CLOCK$\",\n \"COM:\",\n \"CON:\",\n \"LPT:\",\n \"LST:\",\n \"NUL:\",\n \"CON:\",\n r\"C:\\CON\\CON\",\n r\"C:\\boot.ini\",\n r\"\\\\myserver\\share\",\n \"foo.exe:\",\n \"foo.exe\\\\\", ]\n\n def __str__(self):\n return str(self._fix())\n\n def __bytes__(self):\n return raw(self._fix())\n\n\nclass RandPool(RandField):\n def __init__(self, *args):\n \"\"\"Each parameter is a volatile object or a couple (volatile object, weight)\"\"\"\n pool = []\n for p in args:\n w = 1\n if isinstance(p, tuple):\n p, w = p\n pool += [p]*w\n self._pool = pool\n\n def _fix(self):\n r = random.choice(self._pool)\n return r._fix()\n\n# Automatic timestamp\n\n\nclass AutoTime(VolatileValue):\n def __init__(self, base=None):\n if base == None:\n self.diff = 0\n else:\n self.diff = time.time()-base\n\n def _fix(self):\n return time.time()-self.diff\n\n\nclass IntAutoTime(AutoTime):\n def _fix(self):\n return int(time.time()-self.diff)\n\n\nclass ZuluTime(AutoTime):\n def 
__init__(self, diff=0):\n self.diff = diff\n\n def _fix(self):\n return time.strftime(\"%y%m%d%H%M%SZ\",\n time.gmtime(time.time() + self.diff))\n\n\nclass GeneralizedTime(AutoTime):\n def __init__(self, diff=0):\n self.diff = diff\n\n def _fix(self):\n return time.strftime(\"%Y%m%d%H%M%SZ\",\n time.gmtime(time.time() + self.diff))\n\n\nclass DelayedEval(VolatileValue):\n \"\"\" Example of usage: DelayedEval(\"time.time()\") \"\"\"\n\n def __init__(self, expr):\n self.expr = expr\n\n def _fix(self):\n return eval(self.expr)\n\n\nclass IncrementalValue(VolatileValue):\n def __init__(self, start=0, step=1, restart=-1):\n self.start = self.val = start\n self.step = step\n self.restart = restart\n\n def _fix(self):\n v = self.val\n if self.val == self.restart:\n self.val = self.start\n else:\n self.val += self.step\n return v\n\n\nclass CorruptedBytes(VolatileValue):\n def __init__(self, s, p=0.01, n=None):\n self.s = s\n self.p = p\n self.n = n\n\n def _fix(self):\n return corrupt_bytes(self.s, self.p, self.n)\n\n\nclass CorruptedBits(CorruptedBytes):\n def _fix(self):\n return corrupt_bits(self.s, self.p, self.n)\n\n", "path": "scapy/volatile.py" } ]
diff --git a/scapy/volatile.py b/scapy/volatile.py index 2fb6bd77809..8d12b25b77d 100644 --- a/scapy/volatile.py +++ b/scapy/volatile.py @@ -124,7 +124,7 @@ def __index__(self): return int(self) def __nonzero__(self): - return bool(self.value) + return bool(self._fix()) __bool__ = __nonzero__ def __add__(self, other): diff --git a/test/regression.uts b/test/regression.uts index 020f072c266..bcfbc41113b 100644 --- a/test/regression.uts +++ b/test/regression.uts @@ -9087,6 +9087,9 @@ random.seed(0x2807) rts = RandTermString(4, "scapy") assert(sane(raw(rts)) in ["...Zscapy", "$#..scapy"]) += RandInt (test __bool__) +a = "True" if RandNum(False, True) else "False" +assert a in ["True", "False"] ############ ############
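The scapy pr_diff above swaps `bool(self.value)` for `bool(self._fix())` in `RandNum.__nonzero__`: the class defines only `min` and `max`, so truth-testing has to draw a concrete sample via `_fix()`. A minimal sketch of the fixed behaviour, using a stripped-down stand-in class (`MiniRandNum` is illustrative only, not scapy's API):

import random


class MiniRandNum:
    """Stripped-down stand-in for scapy's RandNum: evaluates lazily."""

    def __init__(self, min, max):
        self.min = min
        self.max = max

    def _fix(self):
        # Draw a concrete value each time the object is evaluated.
        return random.randrange(self.min, self.max + 1)

    def __bool__(self):
        # The fix: the truth value comes from a drawn sample, not from a
        # `value` attribute that the class never defines.
        return bool(self._fix())


# Mirrors the regression test added in the diff: RandNum(False, True)
# draws 0 or 1, so truth-testing never raises.
a = "True" if MiniRandNum(False, True) else "False"
assert a in ["True", "False"]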
apache__airflow-26806
[ { "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Mask sensitive information from logs\"\"\"\nfrom __future__ import annotations\n\nimport collections\nimport logging\nimport re\nimport sys\nfrom typing import Any, Dict, Iterable, List, TextIO, Tuple, TypeVar, Union\n\nfrom airflow import settings\nfrom airflow.compat.functools import cache, cached_property\n\nRedactable = TypeVar(\"Redactable\", str, Dict[Any, Any], Tuple[Any, ...], List[Any])\nRedacted = Union[Redactable, str]\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n 'access_token',\n 'api_key',\n 'apikey',\n 'authorization',\n 'passphrase',\n 'passwd',\n 'password',\n 'private_key',\n 'secret',\n 'token',\n 'keyfile_dict',\n 'service_account',\n }\n)\n\"\"\"Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive\"\"\"\n\nSECRETS_TO_SKIP_MASKING_FOR_TESTS = {'airflow'}\n\n\n@cache\ndef get_sensitive_variables_fields():\n \"\"\"Get comma-separated sensitive Variable Fields from airflow.cfg.\"\"\"\n from airflow.configuration import conf\n\n sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()\n sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')\n if sensitive_variable_fields:\n sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})\n return sensitive_fields\n\n\ndef should_hide_value_for_key(name):\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n\n if isinstance(name, str) and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n\n\ndef mask_secret(secret: str | dict | Iterable, name: str | None = None) -> None:\n \"\"\"\n Mask a secret from appearing in the task logs.\n\n If ``name`` is provided, then it will only be masked if the name matches\n one of the configured \"sensitive\" names.\n\n If ``secret`` is a dict or a iterable (excluding str) then it will be\n recursively walked and keys with sensitive names will be hidden.\n \"\"\"\n # Filtering all log messages is not a free process, so we only do it when\n # running tasks\n if not secret:\n return\n\n _secrets_masker().add_mask(secret, name)\n\n\ndef redact(value: Redactable, name: str | None = None) -> Redacted:\n \"\"\"Redact any secrets found in ``value``.\"\"\"\n return _secrets_masker().redact(value, name)\n\n\n@cache\ndef _secrets_masker() -> SecretsMasker:\n for flt in logging.getLogger('airflow.task').filters:\n if isinstance(flt, SecretsMasker):\n return flt\n raise RuntimeError(\n \"Logging Configuration Error! No SecretsMasker found! 
If you have custom logging, please make \"\n \"sure you configure it taking airflow configuration as a base as explained at \"\n \"https://airflow.apache.org/docs/apache-airflow/stable/logging-monitoring/logging-tasks.html\"\n \"#advanced-configuration\"\n )\n\n\nclass SecretsMasker(logging.Filter):\n \"\"\"Redact secrets from logs\"\"\"\n\n replacer: re.Pattern | None = None\n patterns: set[str]\n\n ALREADY_FILTERED_FLAG = \"__SecretsMasker_filtered\"\n MAX_RECURSION_DEPTH = 5\n\n def __init__(self):\n super().__init__()\n self.patterns = set()\n\n @cached_property\n def _record_attrs_to_ignore(self) -> Iterable[str]:\n # Doing log.info(..., extra={'foo': 2}) sets extra properties on\n # record, i.e. record.foo. And we need to filter those too. Fun\n #\n # Create a record, and look at what attributes are on it, and ignore\n # all the default ones!\n\n record = logging.getLogRecordFactory()(\n # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,\n \"x\",\n logging.INFO,\n __file__,\n 1,\n \"\",\n tuple(),\n exc_info=None,\n func=\"funcname\",\n )\n return frozenset(record.__dict__).difference({'msg', 'args'})\n\n def _redact_exception_with_context(self, exception):\n # Exception class may not be modifiable (e.g. declared by an\n # extension module such as JDBC).\n try:\n exception.args = (self.redact(v) for v in exception.args)\n except AttributeError:\n pass\n if exception.__context__:\n self._redact_exception_with_context(exception.__context__)\n if exception.__cause__ and exception.__cause__ is not exception.__context__:\n self._redact_exception_with_context(exception.__cause__)\n\n def filter(self, record) -> bool:\n if settings.MASK_SECRETS_IN_LOGS is not True:\n return True\n\n if self.ALREADY_FILTERED_FLAG in record.__dict__:\n # Filters are attached to multiple handlers and logs, keep a\n # \"private\" flag that stops us needing to process it more than once\n return True\n\n if self.replacer:\n for k, v in record.__dict__.items():\n if k in self._record_attrs_to_ignore:\n continue\n record.__dict__[k] = self.redact(v)\n if record.exc_info and record.exc_info[1] is not None:\n exc = record.exc_info[1]\n self._redact_exception_with_context(exc)\n record.__dict__[self.ALREADY_FILTERED_FLAG] = True\n\n return True\n\n def _redact_all(self, item: Redactable, depth: int) -> Redacted:\n if depth > self.MAX_RECURSION_DEPTH or isinstance(item, str):\n return '***'\n if isinstance(item, dict):\n return {dict_key: self._redact_all(subval, depth + 1) for dict_key, subval in item.items()}\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact_all(subval, depth + 1) for subval in item)\n elif isinstance(item, list):\n return list(self._redact_all(subval, depth + 1) for subval in item)\n else:\n return item\n\n def _redact(self, item: Redactable, name: str | None, depth: int) -> Redacted:\n # Avoid spending too much effort on redacting on deeply nested\n # structures. 
This also avoid infinite recursion if a structure has\n # reference to self.\n if depth > self.MAX_RECURSION_DEPTH:\n return item\n try:\n if name and should_hide_value_for_key(name):\n return self._redact_all(item, depth)\n if isinstance(item, dict):\n return {\n dict_key: self._redact(subval, name=dict_key, depth=(depth + 1))\n for dict_key, subval in item.items()\n }\n elif isinstance(item, str):\n if self.replacer:\n # We can't replace specific values, but the key-based redacting\n # can still happen, so we can't short-circuit, we need to walk\n # the structure.\n return self.replacer.sub('***', item)\n return item\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact(subval, name=None, depth=(depth + 1)) for subval in item)\n elif isinstance(item, list):\n return [self._redact(subval, name=None, depth=(depth + 1)) for subval in item]\n else:\n return item\n # I think this should never happen, but it does not hurt to leave it just in case\n # Well. It happened (see https://github.com/apache/airflow/issues/19816#issuecomment-983311373)\n # but it caused infinite recursion, so we need to cast it to str first.\n except Exception as e:\n log.warning(\n \"Unable to redact %s, please report this via <https://github.com/apache/airflow/issues>. \"\n \"Error was: %s: %s\",\n repr(item),\n type(e).__name__,\n str(e),\n )\n return item\n\n def redact(self, item: Redactable, name: str | None = None) -> Redacted:\n \"\"\"Redact an any secrets found in ``item``, if it is a string.\n\n If ``name`` is given, and it's a \"sensitive\" name (see\n :func:`should_hide_value_for_key`) then all string values in the item\n is redacted.\n \"\"\"\n return self._redact(item, name, depth=0)\n\n def add_mask(self, secret: str | dict | Iterable, name: str | None = None):\n \"\"\"Add a new secret to be masked to this filter instance.\"\"\"\n from airflow.configuration import conf\n\n test_mode: bool = conf.getboolean('core', 'unit_test_mode')\n if isinstance(secret, dict):\n for k, v in secret.items():\n self.add_mask(v, k)\n elif isinstance(secret, str):\n if not secret or (test_mode and secret in SECRETS_TO_SKIP_MASKING_FOR_TESTS):\n return\n pattern = re.escape(secret)\n if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):\n self.patterns.add(pattern)\n self.replacer = re.compile('|'.join(self.patterns))\n elif isinstance(secret, collections.abc.Iterable):\n for v in secret:\n self.add_mask(v, name)\n\n\nclass RedactedIO(TextIO):\n \"\"\"IO class that redacts values going into stdout.\n\n Expected usage::\n\n with contextlib.redirect_stdout(RedactedIO()):\n ... # Writes to stdout will be redacted.\n \"\"\"\n\n def __init__(self):\n self.target = sys.stdout\n\n def write(self, s: str) -> int:\n s = redact(s)\n return self.target.write(s)\n\n def flush(self) -> None:\n return self.target.flush()\n", "path": "airflow/utils/log/secrets_masker.py" } ]
[ { "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Mask sensitive information from logs\"\"\"\nfrom __future__ import annotations\n\nimport collections\nimport logging\nimport re\nimport sys\nfrom typing import Any, Dict, Iterable, List, TextIO, Tuple, TypeVar, Union\n\nfrom airflow import settings\nfrom airflow.compat.functools import cache, cached_property\n\nRedactable = TypeVar(\"Redactable\", str, Dict[Any, Any], Tuple[Any, ...], List[Any])\nRedacted = Union[Redactable, str]\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n 'access_token',\n 'api_key',\n 'apikey',\n 'authorization',\n 'passphrase',\n 'passwd',\n 'password',\n 'private_key',\n 'secret',\n 'token',\n 'keyfile_dict',\n 'service_account',\n }\n)\n\"\"\"Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive\"\"\"\n\nSECRETS_TO_SKIP_MASKING_FOR_TESTS = {'airflow'}\n\n\n@cache\ndef get_sensitive_variables_fields():\n \"\"\"Get comma-separated sensitive Variable Fields from airflow.cfg.\"\"\"\n from airflow.configuration import conf\n\n sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()\n sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')\n if sensitive_variable_fields:\n sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})\n return sensitive_fields\n\n\ndef should_hide_value_for_key(name):\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n\n if isinstance(name, str) and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n\n\ndef mask_secret(secret: str | dict | Iterable, name: str | None = None) -> None:\n \"\"\"\n Mask a secret from appearing in the task logs.\n\n If ``name`` is provided, then it will only be masked if the name matches\n one of the configured \"sensitive\" names.\n\n If ``secret`` is a dict or a iterable (excluding str) then it will be\n recursively walked and keys with sensitive names will be hidden.\n \"\"\"\n # Filtering all log messages is not a free process, so we only do it when\n # running tasks\n if not secret:\n return\n\n _secrets_masker().add_mask(secret, name)\n\n\ndef redact(value: Redactable, name: str | None = None) -> Redacted:\n \"\"\"Redact any secrets found in ``value``.\"\"\"\n return _secrets_masker().redact(value, name)\n\n\n@cache\ndef _secrets_masker() -> SecretsMasker:\n for flt in logging.getLogger('airflow.task').filters:\n if isinstance(flt, SecretsMasker):\n return flt\n raise RuntimeError(\n \"Logging Configuration Error! No SecretsMasker found! 
If you have custom logging, please make \"\n \"sure you configure it taking airflow configuration as a base as explained at \"\n \"https://airflow.apache.org/docs/apache-airflow/stable/logging-monitoring/logging-tasks.html\"\n \"#advanced-configuration\"\n )\n\n\nclass SecretsMasker(logging.Filter):\n \"\"\"Redact secrets from logs\"\"\"\n\n replacer: re.Pattern | None = None\n patterns: set[str]\n\n ALREADY_FILTERED_FLAG = \"__SecretsMasker_filtered\"\n MAX_RECURSION_DEPTH = 5\n\n def __init__(self):\n super().__init__()\n self.patterns = set()\n\n @cached_property\n def _record_attrs_to_ignore(self) -> Iterable[str]:\n # Doing log.info(..., extra={'foo': 2}) sets extra properties on\n # record, i.e. record.foo. And we need to filter those too. Fun\n #\n # Create a record, and look at what attributes are on it, and ignore\n # all the default ones!\n\n record = logging.getLogRecordFactory()(\n # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,\n \"x\",\n logging.INFO,\n __file__,\n 1,\n \"\",\n tuple(),\n exc_info=None,\n func=\"funcname\",\n )\n return frozenset(record.__dict__).difference({'msg', 'args'})\n\n def _redact_exception_with_context(self, exception):\n # Exception class may not be modifiable (e.g. declared by an\n # extension module such as JDBC).\n try:\n exception.args = (self.redact(v) for v in exception.args)\n except AttributeError:\n pass\n if exception.__context__:\n self._redact_exception_with_context(exception.__context__)\n if exception.__cause__ and exception.__cause__ is not exception.__context__:\n self._redact_exception_with_context(exception.__cause__)\n\n def filter(self, record) -> bool:\n if settings.MASK_SECRETS_IN_LOGS is not True:\n return True\n\n if self.ALREADY_FILTERED_FLAG in record.__dict__:\n # Filters are attached to multiple handlers and logs, keep a\n # \"private\" flag that stops us needing to process it more than once\n return True\n\n if self.replacer:\n for k, v in record.__dict__.items():\n if k in self._record_attrs_to_ignore:\n continue\n record.__dict__[k] = self.redact(v)\n if record.exc_info and record.exc_info[1] is not None:\n exc = record.exc_info[1]\n self._redact_exception_with_context(exc)\n record.__dict__[self.ALREADY_FILTERED_FLAG] = True\n\n return True\n\n def _redact_all(self, item: Redactable, depth: int) -> Redacted:\n if depth > self.MAX_RECURSION_DEPTH or isinstance(item, str):\n return '***'\n if isinstance(item, dict):\n return {dict_key: self._redact_all(subval, depth + 1) for dict_key, subval in item.items()}\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact_all(subval, depth + 1) for subval in item)\n elif isinstance(item, list):\n return list(self._redact_all(subval, depth + 1) for subval in item)\n else:\n return item\n\n def _redact(self, item: Redactable, name: str | None, depth: int) -> Redacted:\n # Avoid spending too much effort on redacting on deeply nested\n # structures. 
This also avoid infinite recursion if a structure has\n # reference to self.\n if depth > self.MAX_RECURSION_DEPTH:\n return item\n try:\n if name and should_hide_value_for_key(name):\n return self._redact_all(item, depth)\n if isinstance(item, dict):\n return {\n dict_key: self._redact(subval, name=dict_key, depth=(depth + 1))\n for dict_key, subval in item.items()\n }\n elif isinstance(item, str):\n if self.replacer:\n # We can't replace specific values, but the key-based redacting\n # can still happen, so we can't short-circuit, we need to walk\n # the structure.\n return self.replacer.sub('***', item)\n return item\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact(subval, name=None, depth=(depth + 1)) for subval in item)\n elif isinstance(item, list):\n return [self._redact(subval, name=None, depth=(depth + 1)) for subval in item]\n else:\n return item\n # I think this should never happen, but it does not hurt to leave it just in case\n # Well. It happened (see https://github.com/apache/airflow/issues/19816#issuecomment-983311373)\n # but it caused infinite recursion, so we need to cast it to str first.\n except Exception as e:\n log.warning(\n \"Unable to redact %s, please report this via <https://github.com/apache/airflow/issues>. \"\n \"Error was: %s: %s\",\n repr(item),\n type(e).__name__,\n str(e),\n )\n return item\n\n def redact(self, item: Redactable, name: str | None = None) -> Redacted:\n \"\"\"Redact an any secrets found in ``item``, if it is a string.\n\n If ``name`` is given, and it's a \"sensitive\" name (see\n :func:`should_hide_value_for_key`) then all string values in the item\n is redacted.\n \"\"\"\n return self._redact(item, name, depth=0)\n\n def add_mask(self, secret: str | dict | Iterable, name: str | None = None):\n \"\"\"Add a new secret to be masked to this filter instance.\"\"\"\n from airflow.configuration import conf\n\n test_mode: bool = conf.getboolean('core', 'unit_test_mode')\n if isinstance(secret, dict):\n for k, v in secret.items():\n self.add_mask(v, k)\n elif isinstance(secret, str):\n if not secret or (test_mode and secret in SECRETS_TO_SKIP_MASKING_FOR_TESTS):\n return\n pattern = re.escape(secret)\n if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):\n self.patterns.add(pattern)\n self.replacer = re.compile('|'.join(self.patterns))\n elif isinstance(secret, collections.abc.Iterable):\n for v in secret:\n self.add_mask(v, name)\n\n\nclass RedactedIO(TextIO):\n \"\"\"IO class that redacts values going into stdout.\n\n Expected usage::\n\n with contextlib.redirect_stdout(RedactedIO()):\n ... # Writes to stdout will be redacted.\n \"\"\"\n\n def __init__(self):\n self.target = sys.stdout\n self.fileno = sys.stdout.fileno\n\n def write(self, s: str) -> int:\n s = redact(s)\n return self.target.write(s)\n\n def flush(self) -> None:\n return self.target.flush()\n", "path": "airflow/utils/log/secrets_masker.py" } ]
diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py index 4200056abc5b2..17234bf4082ea 100644 --- a/airflow/utils/log/secrets_masker.py +++ b/airflow/utils/log/secrets_masker.py @@ -271,6 +271,7 @@ class RedactedIO(TextIO): def __init__(self): self.target = sys.stdout + self.fileno = sys.stdout.fileno def write(self, s: str) -> int: s = redact(s) diff --git a/tests/utils/log/test_secrets_masker.py b/tests/utils/log/test_secrets_masker.py index c07be27bb7ae0..6de30eb574787 100644 --- a/tests/utils/log/test_secrets_masker.py +++ b/tests/utils/log/test_secrets_masker.py @@ -18,9 +18,11 @@ import contextlib import inspect +import io import logging import logging.config import os +import sys import textwrap import pytest @@ -363,3 +365,13 @@ def test_write(self, capsys): RedactedIO().write(p) stdout = capsys.readouterr().out assert stdout == "***" + + def test_input_builtin(self, monkeypatch): + """ + Test that when redirect is inplace the `input()` builtin works. + + This is used by debuggers! + """ + monkeypatch.setattr(sys, 'stdin', io.StringIO("a\n")) + with contextlib.redirect_stdout(RedactedIO()): + assert input() == "a"
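The airflow pr_diff above makes `RedactedIO` expose the real stream's `fileno`, so code that asks the redirected stdout for a file descriptor keeps working; the added regression test exercises the `input()` builtin under `contextlib.redirect_stdout`, as used by debuggers. A minimal sketch of the same pattern with a generic stdout proxy (`ForwardingStdout` is a hypothetical stand-in, not airflow's class):

import contextlib
import io
import sys


class ForwardingStdout:
    """Hypothetical stdout proxy (not airflow's RedactedIO): transforms
    writes but still exposes the real stream's descriptor."""

    def __init__(self):
        self.target = sys.stdout
        # Forward fileno to the real stream so callers that need a file
        # descriptor (such as the input() builtin) are not broken.
        self.fileno = sys.stdout.fileno

    def write(self, s: str) -> int:
        # Stand-in transformation; the real class redacts secrets here.
        return self.target.write(s.replace("secret", "***"))

    def flush(self) -> None:
        self.target.flush()


# Mirrors the added test: with stdin stubbed, input() still works while
# stdout is redirected through the proxy.
old_stdin = sys.stdin
sys.stdin = io.StringIO("a\n")
try:
    with contextlib.redirect_stdout(ForwardingStdout()):
        assert input() == "a"
finally:
    sys.stdin = old_stdin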
secdev__scapy-2046
[ { "content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# Vincent Mauge <vmauge.nospam@nospam.gmail.com>\n# This program is published under a GPLv2 license\n\n\"\"\"\nRADIUS (Remote Authentication Dial In User Service)\n\"\"\"\n\nimport struct\nimport hashlib\nimport hmac\nfrom scapy.compat import orb, raw\nfrom scapy.packet import Packet, Padding, bind_layers\nfrom scapy.fields import ByteField, ByteEnumField, IntField, StrLenField,\\\n XStrLenField, XStrFixedLenField, FieldLenField, PacketField,\\\n PacketListField, IPField, MultiEnumField\nfrom scapy.layers.inet import UDP\nfrom scapy.layers.eap import EAP\nfrom scapy.utils import issubtype\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception\n\n\n# https://www.iana.org/assignments/radius-types/radius-types.xhtml\n_radius_attribute_types = {\n 1: \"User-Name\",\n 2: \"User-Password\",\n 3: \"CHAP-Password\",\n 4: \"NAS-IP-Address\",\n 5: \"NAS-Port\",\n 6: \"Service-Type\",\n 7: \"Framed-Protocol\",\n 8: \"Framed-IP-Address\",\n 9: \"Framed-IP-Netmask\",\n 10: \"Framed-Routing\",\n 11: \"Filter-Id\",\n 12: \"Framed-MTU\",\n 13: \"Framed-Compression\",\n 14: \"Login-IP-Host\",\n 15: \"Login-Service\",\n 16: \"Login-TCP-Port\",\n 17: \"Unassigned\",\n 18: \"Reply-Message\",\n 19: \"Callback-Number\",\n 20: \"Callback-Id\",\n 21: \"Unassigned\",\n 22: \"Framed-Route\",\n 23: \"Framed-IPX-Network\",\n 24: \"State\",\n 25: \"Class\",\n 26: \"Vendor-Specific\",\n 27: \"Session-Timeout\",\n 28: \"Idle-Timeout\",\n 29: \"Termination-Action\",\n 30: \"Called-Station-Id\",\n 31: \"Calling-Station-Id\",\n 32: \"NAS-Identifier\",\n 33: \"Proxy-State\",\n 34: \"Login-LAT-Service\",\n 35: \"Login-LAT-Node\",\n 36: \"Login-LAT-Group\",\n 37: \"Framed-AppleTalk-Link\",\n 38: \"Framed-AppleTalk-Network\",\n 39: \"Framed-AppleTalk-Zone\",\n 40: \"Acct-Status-Type\",\n 41: \"Acct-Delay-Time\",\n 42: \"Acct-Input-Octets\",\n 43: \"Acct-Output-Octets\",\n 44: \"Acct-Session-Id\",\n 45: \"Acct-Authentic\",\n 46: \"Acct-Session-Time\",\n 47: \"Acct-Input-Packets\",\n 48: \"Acct-Output-Packets\",\n 49: \"Acct-Terminate-Cause\",\n 50: \"Acct-Multi-Session-Id\",\n 51: \"Acct-Link-Count\",\n 52: \"Acct-Input-Gigawords\",\n 53: \"Acct-Output-Gigawords\",\n 54: \"Unassigned\",\n 55: \"Event-Timestamp\",\n 56: \"Egress-VLANID\",\n 57: \"Ingress-Filters\",\n 58: \"Egress-VLAN-Name\",\n 59: \"User-Priority-Table\",\n 60: \"CHAP-Challenge\",\n 61: \"NAS-Port-Type\",\n 62: \"Port-Limit\",\n 63: \"Login-LAT-Port\",\n 64: \"Tunnel-Type\",\n 65: \"Tunnel-Medium-Type\",\n 66: \"Tunnel-Client-Endpoint\",\n 67: \"Tunnel-Server-Endpoint\",\n 68: \"Acct-Tunnel-Connection\",\n 69: \"Tunnel-Password\",\n 70: \"ARAP-Password\",\n 71: \"ARAP-Features\",\n 72: \"ARAP-Zone-Access\",\n 73: \"ARAP-Security\",\n 74: \"ARAP-Security-Data\",\n 75: \"Password-Retry\",\n 76: \"Prompt\",\n 77: \"Connect-Info\",\n 78: \"Configuration-Token\",\n 79: \"EAP-Message\",\n 80: \"Message-Authenticator\",\n 81: \"Tunnel-Private-Group-ID\",\n 82: \"Tunnel-Assignment-ID\",\n 83: \"Tunnel-Preference\",\n 84: \"ARAP-Challenge-Response\",\n 85: \"Acct-Interim-Interval\",\n 86: \"Acct-Tunnel-Packets-Lost\",\n 87: \"NAS-Port-Id\",\n 88: \"Framed-Pool\",\n 89: \"CUI\",\n 90: \"Tunnel-Client-Auth-ID\",\n 91: \"Tunnel-Server-Auth-ID\",\n 92: \"NAS-Filter-Rule\",\n 93: \"Unassigned\",\n 94: \"Originating-Line-Info\",\n 95: \"NAS-IPv6-Address\",\n 96: \"Framed-Interface-Id\",\n 97: 
\"Framed-IPv6-Prefix\",\n 98: \"Login-IPv6-Host\",\n 99: \"Framed-IPv6-Route\",\n 100: \"Framed-IPv6-Pool\",\n 101: \"Error-Cause\",\n 102: \"EAP-Key-Name\",\n 103: \"Digest-Response\",\n 104: \"Digest-Realm\",\n 105: \"Digest-Nonce\",\n 106: \"Digest-Response-Auth\",\n 107: \"Digest-Nextnonce\",\n 108: \"Digest-Method\",\n 109: \"Digest-URI\",\n 110: \"Digest-Qop\",\n 111: \"Digest-Algorithm\",\n 112: \"Digest-Entity-Body-Hash\",\n 113: \"Digest-CNonce\",\n 114: \"Digest-Nonce-Count\",\n 115: \"Digest-Username\",\n 116: \"Digest-Opaque\",\n 117: \"Digest-Auth-Param\",\n 118: \"Digest-AKA-Auts\",\n 119: \"Digest-Domain\",\n 120: \"Digest-Stale\",\n 121: \"Digest-HA1\",\n 122: \"SIP-AOR\",\n 123: \"Delegated-IPv6-Prefix\",\n 124: \"MIP6-Feature-Vector\",\n 125: \"MIP6-Home-Link-Prefix\",\n 126: \"Operator-Name\",\n 127: \"Location-Information\",\n 128: \"Location-Data\",\n 129: \"Basic-Location-Policy-Rules\",\n 130: \"Extended-Location-Policy-Rules\",\n 131: \"Location-Capable\",\n 132: \"Requested-Location-Info\",\n 133: \"Framed-Management-Protocol\",\n 134: \"Management-Transport-Protection\",\n 135: \"Management-Policy-Id\",\n 136: \"Management-Privilege-Level\",\n 137: \"PKM-SS-Cert\",\n 138: \"PKM-CA-Cert\",\n 139: \"PKM-Config-Settings\",\n 140: \"PKM-Cryptosuite-List\",\n 141: \"PKM-SAID\",\n 142: \"PKM-SA-Descriptor\",\n 143: \"PKM-Auth-Key\",\n 144: \"DS-Lite-Tunnel-Name\",\n 145: \"Mobile-Node-Identifier\",\n 146: \"Service-Selection\",\n 147: \"PMIP6-Home-LMA-IPv6-Address\",\n 148: \"PMIP6-Visited-LMA-IPv6-Address\",\n 149: \"PMIP6-Home-LMA-IPv4-Address\",\n 150: \"PMIP6-Visited-LMA-IPv4-Address\",\n 151: \"PMIP6-Home-HN-Prefix\",\n 152: \"PMIP6-Visited-HN-Prefix\",\n 153: \"PMIP6-Home-Interface-ID\",\n 154: \"PMIP6-Visited-Interface-ID\",\n 155: \"PMIP6-Home-IPv4-HoA\",\n 156: \"PMIP6-Visited-IPv4-HoA\",\n 157: \"PMIP6-Home-DHCP4-Server-Address\",\n 158: \"PMIP6-Visited-DHCP4-Server-Address\",\n 159: \"PMIP6-Home-DHCP6-Server-Address\",\n 160: \"PMIP6-Visited-DHCP6-Server-Address\",\n 161: \"PMIP6-Home-IPv4-Gateway\",\n 162: \"PMIP6-Visited-IPv4-Gateway\",\n 163: \"EAP-Lower-Layer\",\n 164: \"GSS-Acceptor-Service-Name\",\n 165: \"GSS-Acceptor-Host-Name\",\n 166: \"GSS-Acceptor-Service-Specifics\",\n 167: \"GSS-Acceptor-Realm-Name\",\n 168: \"Framed-IPv6-Address\",\n 169: \"DNS-Server-IPv6-Address\",\n 170: \"Route-IPv6-Information\",\n 171: \"Delegated-IPv6-Prefix-Pool\",\n 172: \"Stateful-IPv6-Address-Pool\",\n 173: \"IPv6-6rd-Configuration\",\n 174: \"Allowed-Called-Station-Id\",\n 175: \"EAP-Peer-Id\",\n 176: \"EAP-Server-Id\",\n 177: \"Mobility-Domain-Id\",\n 178: \"Preauth-Timeout\",\n 179: \"Network-Id-Name\",\n 180: \"EAPoL-Announcement\",\n 181: \"WLAN-HESSID\",\n 182: \"WLAN-Venue-Info\",\n 183: \"WLAN-Venue-Language\",\n 184: \"WLAN-Venue-Name\",\n 185: \"WLAN-Reason-Code\",\n 186: \"WLAN-Pairwise-Cipher\",\n 187: \"WLAN-Group-Cipher\",\n 188: \"WLAN-AKM-Suite\",\n 189: \"WLAN-Group-Mgmt-Cipher\",\n 190: \"WLAN-RF-Band\",\n 191: \"Unassigned\",\n}\n\n\nclass RadiusAttribute(Packet):\n \"\"\"\n Implements a RADIUS attribute (RFC 2865). 
Every specific RADIUS attribute\n class should inherit from this one.\n \"\"\"\n\n name = \"Radius Attribute\"\n fields_desc = [\n ByteEnumField(\"type\", 1, _radius_attribute_types),\n FieldLenField(\"len\", None, \"value\", \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 2),\n StrLenField(\"value\", \"\", length_from=lambda pkt: pkt.len - 2)\n ]\n\n registered_attributes = {}\n\n @classmethod\n def register_variant(cls):\n \"\"\"\n Registers the RADIUS attributes defined in this module.\n \"\"\"\n\n if hasattr(cls, \"val\"):\n cls.registered_attributes[cls.val] = cls\n else:\n cls.registered_attributes[cls.type.default] = cls\n\n @classmethod\n def dispatch_hook(cls, _pkt=None, *args, **kargs):\n \"\"\"\n Returns the right RadiusAttribute class for the given data.\n \"\"\"\n\n if _pkt:\n attr_type = orb(_pkt[0])\n return cls.registered_attributes.get(attr_type, cls)\n return cls\n\n def haslayer(self, cls):\n if cls == \"RadiusAttribute\":\n if isinstance(self, RadiusAttribute):\n return True\n elif issubtype(cls, RadiusAttribute):\n if isinstance(self, cls):\n return True\n return super(RadiusAttribute, self).haslayer(cls)\n\n def getlayer(self, cls, nb=1, _track=None, _subclass=True, **flt):\n return super(RadiusAttribute, self).getlayer(cls, nb=nb, _track=_track,\n _subclass=True, **flt)\n\n def post_build(self, p, pay):\n length = self.len\n if length is None:\n length = len(p)\n p = p[:1] + struct.pack(\"!B\", length) + p[2:]\n return p\n\n def guess_payload_class(self, _):\n return Padding\n\n\nclass _SpecificRadiusAttr(RadiusAttribute):\n \"\"\"\n Class from which every \"specific\" RADIUS attribute defined in this module\n inherits.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n def __init__(self, _pkt=\"\", post_transform=None, _internal=0, _underlayer=None, **fields): # noqa: E501\n super(_SpecificRadiusAttr, self).__init__(\n _pkt,\n post_transform,\n _internal,\n _underlayer\n )\n self.fields[\"type\"] = self.val\n name_parts = self.__class__.__name__.split('RadiusAttr_')\n if len(name_parts) < 2:\n raise Scapy_Exception(\n \"Invalid class name: {}\".format(self.__class__.__name__)\n )\n self.name = name_parts[1].replace('_', '-')\n\n\n#\n# RADIUS attributes which values are 4 bytes integers\n#\n\nclass _RadiusAttrIntValue(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is 4 bytes long integer.\n \"\"\"\n\n fields_desc = [\n ByteEnumField(\"type\", 5, _radius_attribute_types),\n ByteField(\"len\", 6),\n IntField(\"value\", 0)\n ]\n\n\nclass RadiusAttr_NAS_Port(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 5\n\n\nclass RadiusAttr_Framed_MTU(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 12\n\n\nclass RadiusAttr_Login_TCP_Port(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 16\n\n\nclass RadiusAttr_Session_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 27\n\n\nclass RadiusAttr_Idle_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 28\n\n\nclass RadiusAttr_Framed_AppleTalk_Link(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 37\n\n\nclass RadiusAttr_Framed_AppleTalk_Network(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 38\n\n\nclass RadiusAttr_Acct_Delay_Time(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 41\n\n\nclass RadiusAttr_Acct_Input_Octets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 42\n\n\nclass RadiusAttr_Acct_Output_Octets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 43\n\n\nclass RadiusAttr_Acct_Session_Time(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 
46\n\n\nclass RadiusAttr_Acct_Input_Packets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 47\n\n\nclass RadiusAttr_Acct_Output_Packets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 48\n\n\nclass RadiusAttr_Acct_Link_Count(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 51\n\n\nclass RadiusAttr_Acct_Input_Gigawords(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 52\n\n\nclass RadiusAttr_Acct_Output_Gigawords(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 53\n\n\nclass RadiusAttr_Egress_VLANID(_RadiusAttrIntValue):\n \"\"\"RFC 4675\"\"\"\n val = 56\n\n\nclass RadiusAttr_Port_Limit(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 62\n\n\nclass RadiusAttr_ARAP_Security(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 73\n\n\nclass RadiusAttr_Password_Retry(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 75\n\n\nclass RadiusAttr_Tunnel_Preference(_RadiusAttrIntValue):\n \"\"\"RFC 2868\"\"\"\n val = 83\n\n\nclass RadiusAttr_Acct_Interim_Interval(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 85\n\n\nclass RadiusAttr_Acct_Tunnel_Packets_Lost(_RadiusAttrIntValue):\n \"\"\"RFC 2867\"\"\"\n val = 86\n\n\nclass RadiusAttr_Management_Privilege_Level(_RadiusAttrIntValue):\n \"\"\"RFC 5607\"\"\"\n val = 136\n\n\nclass RadiusAttr_Mobility_Domain_Id(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 177\n\n\nclass RadiusAttr_Preauth_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 178\n\n\nclass RadiusAttr_WLAN_Venue_Info(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 182\n\n\nclass RadiusAttr_WLAN_Reason_Code(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 185\n\n\nclass RadiusAttr_WLAN_Pairwise_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 186\n\n\nclass RadiusAttr_WLAN_Group_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 187\n\n\nclass RadiusAttr_WLAN_AKM_Suite(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 188\n\n\nclass RadiusAttr_WLAN_Group_Mgmt_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 189\n\n\nclass RadiusAttr_WLAN_RF_Band(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 190\n\n\n#\n# RADIUS attributes which values are string (displayed as hex)\n#\n\nclass _RadiusAttrHexStringVal(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is a string that will be\n as a hex string.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n def __init__(self, _pkt=\"\", post_transform=None, _internal=0, _underlayer=None, **fields): # noqa: E501\n super(_RadiusAttrHexStringVal, self).__init__(\n _pkt,\n post_transform,\n _internal,\n _underlayer\n )\n self.fields[\"type\"] = self.val\n name_parts = self.__class__.__name__.split('RadiusAttr_')\n if len(name_parts) < 2:\n raise Scapy_Exception(\n \"Invalid class name: {}\".format(self.__class__.__name__)\n )\n self.name = name_parts[1].replace('_', '-')\n\n fields_desc = [\n ByteEnumField(\"type\", 24, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda p, x: len(p.value) + 2\n ),\n XStrLenField(\"value\", \"\", length_from=lambda p: p.len - 2 if p.len else 0) # noqa: E501\n ]\n\n\nclass RadiusAttr_State(_RadiusAttrHexStringVal):\n \"\"\"RFC 2865\"\"\"\n val = 24\n\n\ndef prepare_packed_data(radius_packet, packed_req_authenticator):\n \"\"\"\n Pack RADIUS data prior computing the authentication MAC\n \"\"\"\n\n packed_hdr = struct.pack(\"!B\", radius_packet.code)\n packed_hdr += struct.pack(\"!B\", radius_packet.id)\n packed_hdr += struct.pack(\"!H\", radius_packet.len)\n\n 
packed_attrs = b''\n for attr in radius_packet.attributes:\n packed_attrs += raw(attr)\n\n return packed_hdr + packed_req_authenticator + packed_attrs\n\n\nclass RadiusAttr_Message_Authenticator(_RadiusAttrHexStringVal):\n \"\"\"RFC 2869\"\"\"\n val = 80\n\n fields_desc = [\n ByteEnumField(\"type\", 24, _radius_attribute_types),\n FieldLenField(\n \"len\",\n 18,\n \"value\",\n \"B\",\n ),\n XStrFixedLenField(\"value\", \"\\x00\" * 16, length=16)\n ]\n\n @staticmethod\n def compute_message_authenticator(radius_packet, packed_req_authenticator,\n shared_secret):\n \"\"\"\n Computes the \"Message-Authenticator\" of a given RADIUS packet.\n \"\"\"\n\n data = prepare_packed_data(radius_packet, packed_req_authenticator)\n radius_hmac = hmac.new(shared_secret, data, hashlib.md5)\n\n return radius_hmac.digest()\n\n#\n# RADIUS attributes which values are IPv4 prefixes\n#\n\n\nclass _RadiusAttrIPv4AddrVal(RadiusAttribute):\n \"\"\"\n Implements a RADIUS attribute which value field is an IPv4 address.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n fields_desc = [\n ByteEnumField(\"type\", 4, _radius_attribute_types),\n ByteField(\"len\", 6),\n IPField(\"value\", \"0.0.0.0\")\n ]\n\n\nclass RadiusAttr_NAS_IP_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 4\n\n\nclass RadiusAttr_Framed_IP_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 8\n\n\nclass RadiusAttr_Framed_IP_Netmask(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 9\n\n\nclass RadiusAttr_Login_IP_Host(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 14\n\n\nclass RadiusAttr_Framed_IPX_Network(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 23\n\n\nclass RadiusAttr_PMIP6_Home_LMA_IPv4_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 149\n\n\nclass RadiusAttr_PMIP6_Visited_LMA_IPv4_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 150\n\n\nclass RadiusAttr_PMIP6_Home_DHCP4_Server_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 157\n\n\nclass RadiusAttr_PMIP6_Visited_DHCP4_Server_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 158\n\n\nclass RadiusAttr_PMIP6_Home_IPv4_Gateway(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 161\n\n\nclass RadiusAttr_PMIP6_Visited_IPv4_Gateway(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 162\n\n\n# See IANA registry \"RADIUS Types\"\n_radius_attrs_values = {\n # Service-Type\n 6:\n {\n 1: \"Login\",\n 2: \"Framed\",\n 3: \"Callback Login\",\n 4: \"Callback Framed\",\n 5: \"Outbound\",\n 6: \"Administrative\",\n 7: \"NAS Prompt\",\n 8: \"Authenticate Only\",\n 9: \"Callback NAS Prompt\",\n 10: \"Call Check\",\n 11: \"Callback Administrative\",\n 12: \"Voice\",\n 13: \"Fax\",\n 14: \"Modem Relay\",\n 15: \"IAPP-Register\",\n 16: \"IAPP-AP-Check\",\n 17: \"Authorize Only\",\n 18: \"Framed-Management\",\n 19: \"Additional-Authorization\"\n },\n\n # Framed-Protocol\n 7:\n {\n 1: \"PPP\",\n 2: \"SLIP\",\n 3: \"AppleTalk Remote Access Protocol (ARAP)\",\n 4: \"Gandalf proprietary SingleLink/MultiLink protocol\",\n 5: \"Xylogics proprietary IPX/SLIP\",\n 6: \"X.75 Synchronous\",\n 7: \"GPRS PDP Context\"\n },\n\n # Framed-Routing\n 10:\n {\n 0: \"None\",\n 1: \"Send routing packets\",\n 2: \"Listen for routing packets\",\n 3: \"Send and Listen\"\n },\n\n # Framed-Compression\n 13:\n {\n 0: \"None\",\n 1: \"VJ TCP/IP header compression\",\n 2: \"IPX header compression\",\n 3: \"Stac-LZS compression\"\n },\n\n # Login-Service\n 15:\n {\n 0: \"Telnet\",\n 1: \"Rlogin\",\n 2: \"TCP Clear\",\n 3: 
\"PortMaster (proprietary)\",\n 4: \"LAT\",\n 5: \"X25-PAD\",\n 6: \"X25-T3POS\",\n 7: \"Unassigned\",\n 8: \"TCP Clear Quiet (suppresses any NAS-generated connect string)\"\n },\n\n # Termination-Action\n 29:\n {\n 0: \"Default\",\n 1: \"RADIUS-Request\"\n },\n\n # Acct-Status-Type\n 40:\n {\n 1: \"Start\",\n 2: \"Stop\",\n 3: \"Interim-Update\",\n 4: \"Unassigned\",\n 5: \"Unassigned\",\n 6: \"Unassigned\",\n 7: \"Accounting-On\",\n 8: \"Accounting-Off\",\n 9: \"Tunnel-Start\",\n 10: \"Tunnel-Stop\",\n 11: \"Tunnel-Reject\",\n 12: \"Tunnel-Link-Start\",\n 13: \"Tunnel-Link-Stop\",\n 14: \"Tunnel-Link-Reject\",\n 15: \"Failed\"\n },\n\n # Acct-Authentic\n 45:\n {\n 1: \"RADIUS\",\n 2: \"Local\",\n 3: \"Remote\",\n 4: \"Diameter\"\n },\n\n # Acct-Terminate-Cause\n 49:\n {\n 1: \"User Request\",\n 2: \"Lost Carrier\",\n 3: \"Lost Service\",\n 4: \"Idle Timeout\",\n 5: \"Session Timeout\",\n 6: \"Admin Reset\",\n 7: \"Admin Reboot\",\n 8: \"Port Error\",\n 9: \"NAS Error\",\n 10: \"NAS Request\",\n 11: \"NAS Reboot\",\n 12: \"Port Unneeded\",\n 13: \"Port Preempted\",\n 14: \"Port Suspended\",\n 15: \"Service Unavailable\",\n 16: \"Callback\",\n 17: \"User Error\",\n 18: \"Host Request\",\n 19: \"Supplicant Restart\",\n 20: \"Reauthentication Failure\",\n 21: \"Port Reinitialized\",\n 22: \"Port Administratively Disabled\",\n 23: \"Lost Power\",\n },\n\n # NAS-Port-Type\n 61:\n {\n 0: \"Async\",\n 1: \"Sync\",\n 2: \"ISDN Sync\",\n 3: \"ISDN Async V.120\",\n 4: \"ISDN Async V.110\",\n 5: \"Virtual\",\n 6: \"PIAFS\",\n 7: \"HDLC Clear Channel\",\n 8: \"X.25\",\n 9: \"X.75\",\n 10: \"G.3 Fax\",\n 11: \"SDSL - Symmetric DSL\",\n 12: \"ADSL-CAP - Asymmetric DSL, Carrierless Amplitude Phase Modulation\", # noqa: E501\n 13: \"ADSL-DMT - Asymmetric DSL, Discrete Multi-Tone\",\n 14: \"IDSL - ISDN Digital Subscriber Line\",\n 15: \"Ethernet\",\n 16: \"xDSL - Digital Subscriber Line of unknown type\",\n 17: \"Cable\",\n 18: \"Wireles - Other\",\n 19: \"Wireless - IEEE 802.11\",\n 20: \"Token-Ring\",\n 21: \"FDDI\",\n 22: \"Wireless - CDMA2000\",\n 23: \"Wireless - UMTS\",\n 24: \"Wireless - 1X-EV\",\n 25: \"IAPP\",\n 26: \"FTTP - Fiber to the Premises\",\n 27: \"Wireless - IEEE 802.16\",\n 28: \"Wireless - IEEE 802.20\",\n 29: \"Wireless - IEEE 802.22\",\n 30: \"PPPoA - PPP over ATM\",\n 31: \"PPPoEoA - PPP over Ethernet over ATM\",\n 32: \"PPPoEoE - PPP over Ethernet over Ethernet\",\n 33: \"PPPoEoVLAN - PPP over Ethernet over VLAN\",\n 34: \"PPPoEoQinQ - PPP over Ethernet over IEEE 802.1QinQ\",\n 35: \"xPON - Passive Optical Network\",\n 36: \"Wireless - XGP\",\n 37: \"WiMAX Pre-Release 8 IWK Function\",\n 38: \"WIMAX-WIFI-IWK: WiMAX WIFI Interworking\",\n 39: \"WIMAX-SFF: Signaling Forwarding Function for LTE/3GPP2\",\n 40: \"WIMAX-HA-LMA: WiMAX HA and or LMA function\",\n 41: \"WIMAX-DHCP: WIMAX DHCP service\",\n 42: \"WIMAX-LBS: WiMAX location based service\",\n 43: \"WIMAX-WVS: WiMAX voice service\"\n },\n\n # Tunnel-Type\n 64:\n {\n 1: \"Point-to-Point Tunneling Protocol (PPTP)\",\n 2: \"Layer Two Forwarding (L2F)\",\n 3: \"Layer Two Tunneling Protocol (L2TP)\",\n 4: \"Ascend Tunnel Management Protocol (ATMP)\",\n 5: \"Virtual Tunneling Protocol (VTP)\",\n 6: \"IP Authentication Header in the Tunnel-mode (AH)\",\n 7: \"IP-in-IP Encapsulation (IP-IP)\",\n 8: \"Minimal IP-in-IP Encapsulation (MIN-IP-IP)\",\n 9: \"IP Encapsulating Security Payload in the Tunnel-mode (ESP)\",\n 10: \"Generic Route Encapsulation (GRE)\",\n 11: \"Bay Dial Virtual Services (DVS)\",\n 12: \"IP-in-IP Tunneling\",\n 13: 
\"Virtual LANs (VLAN)\"\n },\n\n # Tunnel-Medium-Type\n 65:\n {\n 1: \"IPv4 (IP version 4)\",\n 2: \"IPv6 (IP version 6)\",\n 3: \"NSAP\",\n 4: \"HDLC (8-bit multidrop)\",\n 5: \"BBN 1822\",\n 6: \"802\",\n 7: \"E.163 (POTS)\",\n 8: \"E.164 (SMDS, Frame Relay, ATM)\",\n 9: \"F.69 (Telex)\",\n 10: \"X.121 (X.25, Frame Relay)\",\n 11: \"IPX\",\n 12: \"Appletalk\",\n 13: \"Decnet IV\",\n 14: \"Banyan Vine\",\n 15: \"E.164 with NSAP format subaddress\"\n },\n\n # ARAP-Zone-Access\n 72:\n {\n 1: \"Only allow access to default zone\",\n 2: \"Use zone filter inclusively\",\n 3: \"Not used\",\n 4: \"Use zone filter exclusively\"\n },\n\n # Prompt\n 76:\n {\n 0: \"No Echo\",\n 1: \"Echo\"\n },\n\n # Error-Cause Attribute\n 101:\n {\n 201: \"Residual Session Context Removed\",\n 202: \"Invalid EAP Packet (Ignored)\",\n 401: \"Unsupported Attribute\",\n 402: \"Missing Attribute\",\n 403: \"NAS Identification Mismatch\",\n 404: \"Invalid Request\",\n 405: \"Unsupported Service\",\n 406: \"Unsupported Extension\",\n 407: \"Invalid Attribute Value\",\n 501: \"Administratively Prohibited\",\n 502: \"Request Not Routable (Proxy)\",\n 503: \"Session Context Not Found\",\n 504: \"Session Context Not Removable\",\n 505: \"Other Proxy Processing Error\",\n 506: \"Resources Unavailable\",\n 507: \"Request Initiated\",\n 508: \"Multiple Session Selection Unsupported\",\n 509: \"Location-Info-Required\",\n 601: \"Response Too Big\"\n },\n\n # Operator Namespace Identifier - Attribute 126\n 126:\n {\n 0x30: \"TADIG\",\n 0x31: \"REALM\",\n 0x32: \"E212\",\n 0x33: \"ICC\",\n 0xFF: \"Reserved\"\n },\n\n # Basic-Location-Policy-Rules\n 129:\n {\n 0: \"Retransmission allowed\",\n },\n\n # Location-Capable\n 131:\n {\n 1: \"CIVIC_LOCATION\",\n 2: \"GEO_LOCATION\",\n 4: \"USERS_LOCATION\",\n 8: \"NAS_LOCATION\"\n },\n\n # Framed-Management-Protocol\n 133:\n {\n 1: \"SNMP\",\n 2: \"Web-based\",\n 3: \"NETCONF\",\n 4: \"FTP\",\n 5: \"TFTP\",\n 6: \"SFTP\",\n 7: \"RCP\",\n 8: \"SCP\"\n },\n\n # Management-Transport-Protection\n 134:\n {\n 1: \"No-Protection\",\n 2: \"Integrity-Protection\",\n 3: \"Integrity-Confidentiality-Protection\",\n },\n}\n\n\nclass _RadiusAttrIntEnumVal(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is 4 bytes long integer.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n fields_desc = [\n ByteEnumField(\"type\", 6, _radius_attribute_types),\n ByteField(\"len\", 6),\n MultiEnumField(\n \"value\",\n 0,\n _radius_attrs_values,\n depends_on=lambda p: p.type,\n fmt=\"I\"\n )\n ]\n\n\nclass RadiusAttr_Service_Type(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 6\n\n\nclass RadiusAttr_Framed_Protocol(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 7\n\n\nclass RadiusAttr_NAS_Port_Type(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 61\n\n\nclass _EAPPacketField(PacketField):\n\n \"\"\"\n Handles EAP-Message attribute value (the actual EAP packet).\n \"\"\"\n\n def m2i(self, pkt, m):\n ret = None\n eap_packet_len = struct.unpack(\"!H\", m[2:4])[0]\n if eap_packet_len < 254:\n # If the EAP packet has not been fragmented, build a Scapy EAP\n # packet from the data.\n ret = EAP(m)\n else:\n ret = conf.raw_layer(m)\n return ret\n\n\nclass RadiusAttr_EAP_Message(RadiusAttribute):\n \"\"\"\n Implements the \"EAP-Message\" attribute (RFC 3579).\n \"\"\"\n\n name = \"EAP-Message\"\n fields_desc = [\n ByteEnumField(\"type\", 79, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 2\n ),\n 
_EAPPacketField(\"value\", \"\", EAP)\n ]\n\n\nclass RadiusAttr_Vendor_Specific(RadiusAttribute):\n \"\"\"\n Implements the \"Vendor-Specific\" attribute, as described in RFC 2865.\n \"\"\"\n\n name = \"Vendor-Specific\"\n fields_desc = [\n ByteEnumField(\"type\", 26, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 8\n ),\n IntField(\"vendor_id\", 0),\n ByteField(\"vendor_type\", 0),\n FieldLenField(\n \"vendor_len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda p, x: len(p.value) + 2\n ),\n StrLenField(\"value\", \"\", length_from=lambda p: p.vendor_len - 2)\n ]\n\n\n# See IANA RADIUS Packet Type Codes registry\n_packet_codes = {\n 1: \"Access-Request\",\n 2: \"Access-Accept\",\n 3: \"Access-Reject\",\n 4: \"Accounting-Request\",\n 5: \"Accounting-Response\",\n 6: \"Accounting-Status (now Interim Accounting)\",\n 7: \"Password-Request\",\n 8: \"Password-Ack\",\n 9: \"Password-Reject\",\n 10: \"Accounting-Message\",\n 11: \"Access-Challenge\",\n 12: \"Status-Server (experimental)\",\n 13: \"Status-Client (experimental)\",\n 21: \"Resource-Free-Request\",\n 22: \"Resource-Free-Response\",\n 23: \"Resource-Query-Request\",\n 24: \"Resource-Query-Response\",\n 25: \"Alternate-Resource-Reclaim-Request\",\n 26: \"NAS-Reboot-Request\",\n 27: \"NAS-Reboot-Response\",\n 28: \"Reserved\",\n 29: \"Next-Passcode\",\n 30: \"New-Pin\",\n 31: \"Terminate-Session\",\n 32: \"Password-Expired\",\n 33: \"Event-Request\",\n 34: \"Event-Response\",\n 40: \"Disconnect-Request\",\n 41: \"Disconnect-ACK\",\n 42: \"Disconnect-NAK\",\n 43: \"CoA-Request\",\n 44: \"CoA-ACK\",\n 45: \"CoA-NAK\",\n 50: \"IP-Address-Allocate\",\n 51: \"IP-Address-Release\",\n 52: \"Protocol-Error\",\n 250: \"Experimental Use\",\n 251: \"Experimental Use\",\n 252: \"Experimental Use\",\n 253: \"Experimental Use\",\n 254: \"Reserved\",\n 255: \"Reserved\"\n}\n\n\nclass Radius(Packet):\n \"\"\"\n Implements a RADIUS packet (RFC 2865).\n \"\"\"\n\n name = \"RADIUS\"\n fields_desc = [\n ByteEnumField(\"code\", 1, _packet_codes),\n ByteField(\"id\", 0),\n FieldLenField(\n \"len\",\n None,\n \"attributes\",\n \"H\",\n adjust=lambda pkt, x: len(pkt.attributes) + 20\n ),\n XStrFixedLenField(\"authenticator\", \"\", 16),\n PacketListField(\n \"attributes\",\n [],\n RadiusAttribute,\n length_from=lambda pkt: pkt.len - 20\n )\n ]\n\n def compute_authenticator(self, packed_request_auth, shared_secret):\n \"\"\"\n Computes the authenticator field (RFC 2865 - Section 3)\n \"\"\"\n\n data = prepare_packed_data(self, packed_request_auth)\n radius_mac = hashlib.md5(data + shared_secret)\n return radius_mac.digest()\n\n def post_build(self, p, pay):\n p += pay\n length = self.len\n if length is None:\n length = len(p)\n p = p[:2] + struct.pack(\"!H\", length) + p[4:]\n return p\n\n\nbind_layers(UDP, Radius, sport=1812)\nbind_layers(UDP, Radius, dport=1812)\nbind_layers(UDP, Radius, sport=1813)\nbind_layers(UDP, Radius, dport=1813)\n", "path": "scapy/layers/radius.py" } ]
[ { "content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# Vincent Mauge <vmauge.nospam@nospam.gmail.com>\n# This program is published under a GPLv2 license\n\n\"\"\"\nRADIUS (Remote Authentication Dial In User Service)\n\"\"\"\n\nimport struct\nimport hashlib\nimport hmac\nfrom scapy.compat import orb, raw\nfrom scapy.packet import Packet, Padding, bind_layers\nfrom scapy.fields import ByteField, ByteEnumField, IntField, StrLenField,\\\n XStrLenField, XStrFixedLenField, FieldLenField, PacketField,\\\n PacketListField, IPField, MultiEnumField\nfrom scapy.layers.inet import UDP\nfrom scapy.layers.eap import EAP\nfrom scapy.utils import issubtype\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception\n\n\n# https://www.iana.org/assignments/radius-types/radius-types.xhtml\n_radius_attribute_types = {\n 1: \"User-Name\",\n 2: \"User-Password\",\n 3: \"CHAP-Password\",\n 4: \"NAS-IP-Address\",\n 5: \"NAS-Port\",\n 6: \"Service-Type\",\n 7: \"Framed-Protocol\",\n 8: \"Framed-IP-Address\",\n 9: \"Framed-IP-Netmask\",\n 10: \"Framed-Routing\",\n 11: \"Filter-Id\",\n 12: \"Framed-MTU\",\n 13: \"Framed-Compression\",\n 14: \"Login-IP-Host\",\n 15: \"Login-Service\",\n 16: \"Login-TCP-Port\",\n 17: \"Unassigned\",\n 18: \"Reply-Message\",\n 19: \"Callback-Number\",\n 20: \"Callback-Id\",\n 21: \"Unassigned\",\n 22: \"Framed-Route\",\n 23: \"Framed-IPX-Network\",\n 24: \"State\",\n 25: \"Class\",\n 26: \"Vendor-Specific\",\n 27: \"Session-Timeout\",\n 28: \"Idle-Timeout\",\n 29: \"Termination-Action\",\n 30: \"Called-Station-Id\",\n 31: \"Calling-Station-Id\",\n 32: \"NAS-Identifier\",\n 33: \"Proxy-State\",\n 34: \"Login-LAT-Service\",\n 35: \"Login-LAT-Node\",\n 36: \"Login-LAT-Group\",\n 37: \"Framed-AppleTalk-Link\",\n 38: \"Framed-AppleTalk-Network\",\n 39: \"Framed-AppleTalk-Zone\",\n 40: \"Acct-Status-Type\",\n 41: \"Acct-Delay-Time\",\n 42: \"Acct-Input-Octets\",\n 43: \"Acct-Output-Octets\",\n 44: \"Acct-Session-Id\",\n 45: \"Acct-Authentic\",\n 46: \"Acct-Session-Time\",\n 47: \"Acct-Input-Packets\",\n 48: \"Acct-Output-Packets\",\n 49: \"Acct-Terminate-Cause\",\n 50: \"Acct-Multi-Session-Id\",\n 51: \"Acct-Link-Count\",\n 52: \"Acct-Input-Gigawords\",\n 53: \"Acct-Output-Gigawords\",\n 54: \"Unassigned\",\n 55: \"Event-Timestamp\",\n 56: \"Egress-VLANID\",\n 57: \"Ingress-Filters\",\n 58: \"Egress-VLAN-Name\",\n 59: \"User-Priority-Table\",\n 60: \"CHAP-Challenge\",\n 61: \"NAS-Port-Type\",\n 62: \"Port-Limit\",\n 63: \"Login-LAT-Port\",\n 64: \"Tunnel-Type\",\n 65: \"Tunnel-Medium-Type\",\n 66: \"Tunnel-Client-Endpoint\",\n 67: \"Tunnel-Server-Endpoint\",\n 68: \"Acct-Tunnel-Connection\",\n 69: \"Tunnel-Password\",\n 70: \"ARAP-Password\",\n 71: \"ARAP-Features\",\n 72: \"ARAP-Zone-Access\",\n 73: \"ARAP-Security\",\n 74: \"ARAP-Security-Data\",\n 75: \"Password-Retry\",\n 76: \"Prompt\",\n 77: \"Connect-Info\",\n 78: \"Configuration-Token\",\n 79: \"EAP-Message\",\n 80: \"Message-Authenticator\",\n 81: \"Tunnel-Private-Group-ID\",\n 82: \"Tunnel-Assignment-ID\",\n 83: \"Tunnel-Preference\",\n 84: \"ARAP-Challenge-Response\",\n 85: \"Acct-Interim-Interval\",\n 86: \"Acct-Tunnel-Packets-Lost\",\n 87: \"NAS-Port-Id\",\n 88: \"Framed-Pool\",\n 89: \"CUI\",\n 90: \"Tunnel-Client-Auth-ID\",\n 91: \"Tunnel-Server-Auth-ID\",\n 92: \"NAS-Filter-Rule\",\n 93: \"Unassigned\",\n 94: \"Originating-Line-Info\",\n 95: \"NAS-IPv6-Address\",\n 96: \"Framed-Interface-Id\",\n 97: 
\"Framed-IPv6-Prefix\",\n 98: \"Login-IPv6-Host\",\n 99: \"Framed-IPv6-Route\",\n 100: \"Framed-IPv6-Pool\",\n 101: \"Error-Cause\",\n 102: \"EAP-Key-Name\",\n 103: \"Digest-Response\",\n 104: \"Digest-Realm\",\n 105: \"Digest-Nonce\",\n 106: \"Digest-Response-Auth\",\n 107: \"Digest-Nextnonce\",\n 108: \"Digest-Method\",\n 109: \"Digest-URI\",\n 110: \"Digest-Qop\",\n 111: \"Digest-Algorithm\",\n 112: \"Digest-Entity-Body-Hash\",\n 113: \"Digest-CNonce\",\n 114: \"Digest-Nonce-Count\",\n 115: \"Digest-Username\",\n 116: \"Digest-Opaque\",\n 117: \"Digest-Auth-Param\",\n 118: \"Digest-AKA-Auts\",\n 119: \"Digest-Domain\",\n 120: \"Digest-Stale\",\n 121: \"Digest-HA1\",\n 122: \"SIP-AOR\",\n 123: \"Delegated-IPv6-Prefix\",\n 124: \"MIP6-Feature-Vector\",\n 125: \"MIP6-Home-Link-Prefix\",\n 126: \"Operator-Name\",\n 127: \"Location-Information\",\n 128: \"Location-Data\",\n 129: \"Basic-Location-Policy-Rules\",\n 130: \"Extended-Location-Policy-Rules\",\n 131: \"Location-Capable\",\n 132: \"Requested-Location-Info\",\n 133: \"Framed-Management-Protocol\",\n 134: \"Management-Transport-Protection\",\n 135: \"Management-Policy-Id\",\n 136: \"Management-Privilege-Level\",\n 137: \"PKM-SS-Cert\",\n 138: \"PKM-CA-Cert\",\n 139: \"PKM-Config-Settings\",\n 140: \"PKM-Cryptosuite-List\",\n 141: \"PKM-SAID\",\n 142: \"PKM-SA-Descriptor\",\n 143: \"PKM-Auth-Key\",\n 144: \"DS-Lite-Tunnel-Name\",\n 145: \"Mobile-Node-Identifier\",\n 146: \"Service-Selection\",\n 147: \"PMIP6-Home-LMA-IPv6-Address\",\n 148: \"PMIP6-Visited-LMA-IPv6-Address\",\n 149: \"PMIP6-Home-LMA-IPv4-Address\",\n 150: \"PMIP6-Visited-LMA-IPv4-Address\",\n 151: \"PMIP6-Home-HN-Prefix\",\n 152: \"PMIP6-Visited-HN-Prefix\",\n 153: \"PMIP6-Home-Interface-ID\",\n 154: \"PMIP6-Visited-Interface-ID\",\n 155: \"PMIP6-Home-IPv4-HoA\",\n 156: \"PMIP6-Visited-IPv4-HoA\",\n 157: \"PMIP6-Home-DHCP4-Server-Address\",\n 158: \"PMIP6-Visited-DHCP4-Server-Address\",\n 159: \"PMIP6-Home-DHCP6-Server-Address\",\n 160: \"PMIP6-Visited-DHCP6-Server-Address\",\n 161: \"PMIP6-Home-IPv4-Gateway\",\n 162: \"PMIP6-Visited-IPv4-Gateway\",\n 163: \"EAP-Lower-Layer\",\n 164: \"GSS-Acceptor-Service-Name\",\n 165: \"GSS-Acceptor-Host-Name\",\n 166: \"GSS-Acceptor-Service-Specifics\",\n 167: \"GSS-Acceptor-Realm-Name\",\n 168: \"Framed-IPv6-Address\",\n 169: \"DNS-Server-IPv6-Address\",\n 170: \"Route-IPv6-Information\",\n 171: \"Delegated-IPv6-Prefix-Pool\",\n 172: \"Stateful-IPv6-Address-Pool\",\n 173: \"IPv6-6rd-Configuration\",\n 174: \"Allowed-Called-Station-Id\",\n 175: \"EAP-Peer-Id\",\n 176: \"EAP-Server-Id\",\n 177: \"Mobility-Domain-Id\",\n 178: \"Preauth-Timeout\",\n 179: \"Network-Id-Name\",\n 180: \"EAPoL-Announcement\",\n 181: \"WLAN-HESSID\",\n 182: \"WLAN-Venue-Info\",\n 183: \"WLAN-Venue-Language\",\n 184: \"WLAN-Venue-Name\",\n 185: \"WLAN-Reason-Code\",\n 186: \"WLAN-Pairwise-Cipher\",\n 187: \"WLAN-Group-Cipher\",\n 188: \"WLAN-AKM-Suite\",\n 189: \"WLAN-Group-Mgmt-Cipher\",\n 190: \"WLAN-RF-Band\",\n 191: \"Unassigned\",\n}\n\n\nclass RadiusAttribute(Packet):\n \"\"\"\n Implements a RADIUS attribute (RFC 2865). 
Every specific RADIUS attribute\n class should inherit from this one.\n \"\"\"\n\n name = \"Radius Attribute\"\n fields_desc = [\n ByteEnumField(\"type\", 1, _radius_attribute_types),\n FieldLenField(\"len\", None, \"value\", \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 2),\n StrLenField(\"value\", \"\", length_from=lambda pkt: pkt.len - 2)\n ]\n\n registered_attributes = {}\n\n @classmethod\n def register_variant(cls):\n \"\"\"\n Registers the RADIUS attributes defined in this module.\n \"\"\"\n\n if hasattr(cls, \"val\"):\n cls.registered_attributes[cls.val] = cls\n else:\n cls.registered_attributes[cls.type.default] = cls\n\n @classmethod\n def dispatch_hook(cls, _pkt=None, *args, **kargs):\n \"\"\"\n Returns the right RadiusAttribute class for the given data.\n \"\"\"\n\n if _pkt:\n attr_type = orb(_pkt[0])\n return cls.registered_attributes.get(attr_type, cls)\n return cls\n\n def haslayer(self, cls):\n if cls == \"RadiusAttribute\":\n if isinstance(self, RadiusAttribute):\n return True\n elif issubtype(cls, RadiusAttribute):\n if isinstance(self, cls):\n return True\n return super(RadiusAttribute, self).haslayer(cls)\n\n def getlayer(self, cls, nb=1, _track=None, _subclass=True, **flt):\n return super(RadiusAttribute, self).getlayer(cls, nb=nb, _track=_track,\n _subclass=True, **flt)\n\n def post_build(self, p, pay):\n length = self.len\n if length is None:\n length = len(p)\n p = p[:1] + struct.pack(\"!B\", length) + p[2:]\n return p\n\n def guess_payload_class(self, _):\n return Padding\n\n\nclass _SpecificRadiusAttr(RadiusAttribute):\n \"\"\"\n Class from which every \"specific\" RADIUS attribute defined in this module\n inherits.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n def __init__(self, _pkt=\"\", post_transform=None, _internal=0, _underlayer=None, **fields): # noqa: E501\n super(_SpecificRadiusAttr, self).__init__(\n _pkt,\n post_transform,\n _internal,\n _underlayer\n )\n self.fields[\"type\"] = self.val\n name_parts = self.__class__.__name__.split('RadiusAttr_')\n if len(name_parts) < 2:\n raise Scapy_Exception(\n \"Invalid class name: {}\".format(self.__class__.__name__)\n )\n self.name = name_parts[1].replace('_', '-')\n\n\n#\n# RADIUS attributes which values are 4 bytes integers\n#\n\nclass _RadiusAttrIntValue(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is 4 bytes long integer.\n \"\"\"\n\n fields_desc = [\n ByteEnumField(\"type\", 5, _radius_attribute_types),\n ByteField(\"len\", 6),\n IntField(\"value\", 0)\n ]\n\n\nclass RadiusAttr_NAS_Port(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 5\n\n\nclass RadiusAttr_Framed_MTU(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 12\n\n\nclass RadiusAttr_Login_TCP_Port(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 16\n\n\nclass RadiusAttr_Session_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 27\n\n\nclass RadiusAttr_Idle_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 28\n\n\nclass RadiusAttr_Framed_AppleTalk_Link(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 37\n\n\nclass RadiusAttr_Framed_AppleTalk_Network(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 38\n\n\nclass RadiusAttr_Acct_Delay_Time(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 41\n\n\nclass RadiusAttr_Acct_Input_Octets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 42\n\n\nclass RadiusAttr_Acct_Output_Octets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 43\n\n\nclass RadiusAttr_Acct_Session_Time(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 
46\n\n\nclass RadiusAttr_Acct_Input_Packets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 47\n\n\nclass RadiusAttr_Acct_Output_Packets(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 48\n\n\nclass RadiusAttr_Acct_Link_Count(_RadiusAttrIntValue):\n \"\"\"RFC 2866\"\"\"\n val = 51\n\n\nclass RadiusAttr_Acct_Input_Gigawords(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 52\n\n\nclass RadiusAttr_Acct_Output_Gigawords(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 53\n\n\nclass RadiusAttr_Egress_VLANID(_RadiusAttrIntValue):\n \"\"\"RFC 4675\"\"\"\n val = 56\n\n\nclass RadiusAttr_Port_Limit(_RadiusAttrIntValue):\n \"\"\"RFC 2865\"\"\"\n val = 62\n\n\nclass RadiusAttr_ARAP_Security(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 73\n\n\nclass RadiusAttr_Password_Retry(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 75\n\n\nclass RadiusAttr_Tunnel_Preference(_RadiusAttrIntValue):\n \"\"\"RFC 2868\"\"\"\n val = 83\n\n\nclass RadiusAttr_Acct_Interim_Interval(_RadiusAttrIntValue):\n \"\"\"RFC 2869\"\"\"\n val = 85\n\n\nclass RadiusAttr_Acct_Tunnel_Packets_Lost(_RadiusAttrIntValue):\n \"\"\"RFC 2867\"\"\"\n val = 86\n\n\nclass RadiusAttr_Management_Privilege_Level(_RadiusAttrIntValue):\n \"\"\"RFC 5607\"\"\"\n val = 136\n\n\nclass RadiusAttr_Mobility_Domain_Id(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 177\n\n\nclass RadiusAttr_Preauth_Timeout(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 178\n\n\nclass RadiusAttr_WLAN_Venue_Info(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 182\n\n\nclass RadiusAttr_WLAN_Reason_Code(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 185\n\n\nclass RadiusAttr_WLAN_Pairwise_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 186\n\n\nclass RadiusAttr_WLAN_Group_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 187\n\n\nclass RadiusAttr_WLAN_AKM_Suite(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 188\n\n\nclass RadiusAttr_WLAN_Group_Mgmt_Cipher(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 189\n\n\nclass RadiusAttr_WLAN_RF_Band(_RadiusAttrIntValue):\n \"\"\"RFC 7268\"\"\"\n val = 190\n\n\n#\n# RADIUS attributes which values are string (displayed as hex)\n#\n\nclass _RadiusAttrHexStringVal(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is a string that will be\n as a hex string.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n def __init__(self, _pkt=\"\", post_transform=None, _internal=0, _underlayer=None, **fields): # noqa: E501\n super(_RadiusAttrHexStringVal, self).__init__(\n _pkt,\n post_transform,\n _internal,\n _underlayer\n )\n self.fields[\"type\"] = self.val\n name_parts = self.__class__.__name__.split('RadiusAttr_')\n if len(name_parts) < 2:\n raise Scapy_Exception(\n \"Invalid class name: {}\".format(self.__class__.__name__)\n )\n self.name = name_parts[1].replace('_', '-')\n\n fields_desc = [\n ByteEnumField(\"type\", 24, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda p, x: len(p.value) + 2\n ),\n XStrLenField(\"value\", \"\", length_from=lambda p: p.len - 2 if p.len else 0) # noqa: E501\n ]\n\n\nclass RadiusAttr_State(_RadiusAttrHexStringVal):\n \"\"\"RFC 2865\"\"\"\n val = 24\n\n\ndef prepare_packed_data(radius_packet, packed_req_authenticator):\n \"\"\"\n Pack RADIUS data prior computing the authentication MAC\n \"\"\"\n\n packed_hdr = struct.pack(\"!B\", radius_packet.code)\n packed_hdr += struct.pack(\"!B\", radius_packet.id)\n packed_hdr += struct.pack(\"!H\", radius_packet.len)\n\n 
packed_attrs = b''\n for attr in radius_packet.attributes:\n packed_attrs += raw(attr)\n\n return packed_hdr + packed_req_authenticator + packed_attrs\n\n\nclass RadiusAttr_Message_Authenticator(_RadiusAttrHexStringVal):\n \"\"\"RFC 2869\"\"\"\n val = 80\n\n fields_desc = [\n ByteEnumField(\"type\", 24, _radius_attribute_types),\n FieldLenField(\n \"len\",\n 18,\n \"value\",\n \"B\",\n ),\n XStrFixedLenField(\"value\", \"\\x00\" * 16, length=16)\n ]\n\n @staticmethod\n def compute_message_authenticator(radius_packet, packed_req_authenticator,\n shared_secret):\n \"\"\"\n Computes the \"Message-Authenticator\" of a given RADIUS packet.\n \"\"\"\n\n data = prepare_packed_data(radius_packet, packed_req_authenticator)\n radius_hmac = hmac.new(shared_secret, data, hashlib.md5)\n\n return radius_hmac.digest()\n\n#\n# RADIUS attributes which values are IPv4 prefixes\n#\n\n\nclass _RadiusAttrIPv4AddrVal(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is an IPv4 address.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n fields_desc = [\n ByteEnumField(\"type\", 4, _radius_attribute_types),\n ByteField(\"len\", 6),\n IPField(\"value\", \"0.0.0.0\")\n ]\n\n\nclass RadiusAttr_NAS_IP_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 4\n\n\nclass RadiusAttr_Framed_IP_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 8\n\n\nclass RadiusAttr_Framed_IP_Netmask(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 9\n\n\nclass RadiusAttr_Login_IP_Host(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 14\n\n\nclass RadiusAttr_Framed_IPX_Network(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 2865\"\"\"\n val = 23\n\n\nclass RadiusAttr_PMIP6_Home_LMA_IPv4_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 149\n\n\nclass RadiusAttr_PMIP6_Visited_LMA_IPv4_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 150\n\n\nclass RadiusAttr_PMIP6_Home_DHCP4_Server_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 157\n\n\nclass RadiusAttr_PMIP6_Visited_DHCP4_Server_Address(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 158\n\n\nclass RadiusAttr_PMIP6_Home_IPv4_Gateway(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 161\n\n\nclass RadiusAttr_PMIP6_Visited_IPv4_Gateway(_RadiusAttrIPv4AddrVal):\n \"\"\"RFC 6572\"\"\"\n val = 162\n\n\n# See IANA registry \"RADIUS Types\"\n_radius_attrs_values = {\n # Service-Type\n 6:\n {\n 1: \"Login\",\n 2: \"Framed\",\n 3: \"Callback Login\",\n 4: \"Callback Framed\",\n 5: \"Outbound\",\n 6: \"Administrative\",\n 7: \"NAS Prompt\",\n 8: \"Authenticate Only\",\n 9: \"Callback NAS Prompt\",\n 10: \"Call Check\",\n 11: \"Callback Administrative\",\n 12: \"Voice\",\n 13: \"Fax\",\n 14: \"Modem Relay\",\n 15: \"IAPP-Register\",\n 16: \"IAPP-AP-Check\",\n 17: \"Authorize Only\",\n 18: \"Framed-Management\",\n 19: \"Additional-Authorization\"\n },\n\n # Framed-Protocol\n 7:\n {\n 1: \"PPP\",\n 2: \"SLIP\",\n 3: \"AppleTalk Remote Access Protocol (ARAP)\",\n 4: \"Gandalf proprietary SingleLink/MultiLink protocol\",\n 5: \"Xylogics proprietary IPX/SLIP\",\n 6: \"X.75 Synchronous\",\n 7: \"GPRS PDP Context\"\n },\n\n # Framed-Routing\n 10:\n {\n 0: \"None\",\n 1: \"Send routing packets\",\n 2: \"Listen for routing packets\",\n 3: \"Send and Listen\"\n },\n\n # Framed-Compression\n 13:\n {\n 0: \"None\",\n 1: \"VJ TCP/IP header compression\",\n 2: \"IPX header compression\",\n 3: \"Stac-LZS compression\"\n },\n\n # Login-Service\n 15:\n {\n 0: \"Telnet\",\n 1: \"Rlogin\",\n 2: \"TCP Clear\",\n 3: 
\"PortMaster (proprietary)\",\n 4: \"LAT\",\n 5: \"X25-PAD\",\n 6: \"X25-T3POS\",\n 7: \"Unassigned\",\n 8: \"TCP Clear Quiet (suppresses any NAS-generated connect string)\"\n },\n\n # Termination-Action\n 29:\n {\n 0: \"Default\",\n 1: \"RADIUS-Request\"\n },\n\n # Acct-Status-Type\n 40:\n {\n 1: \"Start\",\n 2: \"Stop\",\n 3: \"Interim-Update\",\n 4: \"Unassigned\",\n 5: \"Unassigned\",\n 6: \"Unassigned\",\n 7: \"Accounting-On\",\n 8: \"Accounting-Off\",\n 9: \"Tunnel-Start\",\n 10: \"Tunnel-Stop\",\n 11: \"Tunnel-Reject\",\n 12: \"Tunnel-Link-Start\",\n 13: \"Tunnel-Link-Stop\",\n 14: \"Tunnel-Link-Reject\",\n 15: \"Failed\"\n },\n\n # Acct-Authentic\n 45:\n {\n 1: \"RADIUS\",\n 2: \"Local\",\n 3: \"Remote\",\n 4: \"Diameter\"\n },\n\n # Acct-Terminate-Cause\n 49:\n {\n 1: \"User Request\",\n 2: \"Lost Carrier\",\n 3: \"Lost Service\",\n 4: \"Idle Timeout\",\n 5: \"Session Timeout\",\n 6: \"Admin Reset\",\n 7: \"Admin Reboot\",\n 8: \"Port Error\",\n 9: \"NAS Error\",\n 10: \"NAS Request\",\n 11: \"NAS Reboot\",\n 12: \"Port Unneeded\",\n 13: \"Port Preempted\",\n 14: \"Port Suspended\",\n 15: \"Service Unavailable\",\n 16: \"Callback\",\n 17: \"User Error\",\n 18: \"Host Request\",\n 19: \"Supplicant Restart\",\n 20: \"Reauthentication Failure\",\n 21: \"Port Reinitialized\",\n 22: \"Port Administratively Disabled\",\n 23: \"Lost Power\",\n },\n\n # NAS-Port-Type\n 61:\n {\n 0: \"Async\",\n 1: \"Sync\",\n 2: \"ISDN Sync\",\n 3: \"ISDN Async V.120\",\n 4: \"ISDN Async V.110\",\n 5: \"Virtual\",\n 6: \"PIAFS\",\n 7: \"HDLC Clear Channel\",\n 8: \"X.25\",\n 9: \"X.75\",\n 10: \"G.3 Fax\",\n 11: \"SDSL - Symmetric DSL\",\n 12: \"ADSL-CAP - Asymmetric DSL, Carrierless Amplitude Phase Modulation\", # noqa: E501\n 13: \"ADSL-DMT - Asymmetric DSL, Discrete Multi-Tone\",\n 14: \"IDSL - ISDN Digital Subscriber Line\",\n 15: \"Ethernet\",\n 16: \"xDSL - Digital Subscriber Line of unknown type\",\n 17: \"Cable\",\n 18: \"Wireles - Other\",\n 19: \"Wireless - IEEE 802.11\",\n 20: \"Token-Ring\",\n 21: \"FDDI\",\n 22: \"Wireless - CDMA2000\",\n 23: \"Wireless - UMTS\",\n 24: \"Wireless - 1X-EV\",\n 25: \"IAPP\",\n 26: \"FTTP - Fiber to the Premises\",\n 27: \"Wireless - IEEE 802.16\",\n 28: \"Wireless - IEEE 802.20\",\n 29: \"Wireless - IEEE 802.22\",\n 30: \"PPPoA - PPP over ATM\",\n 31: \"PPPoEoA - PPP over Ethernet over ATM\",\n 32: \"PPPoEoE - PPP over Ethernet over Ethernet\",\n 33: \"PPPoEoVLAN - PPP over Ethernet over VLAN\",\n 34: \"PPPoEoQinQ - PPP over Ethernet over IEEE 802.1QinQ\",\n 35: \"xPON - Passive Optical Network\",\n 36: \"Wireless - XGP\",\n 37: \"WiMAX Pre-Release 8 IWK Function\",\n 38: \"WIMAX-WIFI-IWK: WiMAX WIFI Interworking\",\n 39: \"WIMAX-SFF: Signaling Forwarding Function for LTE/3GPP2\",\n 40: \"WIMAX-HA-LMA: WiMAX HA and or LMA function\",\n 41: \"WIMAX-DHCP: WIMAX DHCP service\",\n 42: \"WIMAX-LBS: WiMAX location based service\",\n 43: \"WIMAX-WVS: WiMAX voice service\"\n },\n\n # Tunnel-Type\n 64:\n {\n 1: \"Point-to-Point Tunneling Protocol (PPTP)\",\n 2: \"Layer Two Forwarding (L2F)\",\n 3: \"Layer Two Tunneling Protocol (L2TP)\",\n 4: \"Ascend Tunnel Management Protocol (ATMP)\",\n 5: \"Virtual Tunneling Protocol (VTP)\",\n 6: \"IP Authentication Header in the Tunnel-mode (AH)\",\n 7: \"IP-in-IP Encapsulation (IP-IP)\",\n 8: \"Minimal IP-in-IP Encapsulation (MIN-IP-IP)\",\n 9: \"IP Encapsulating Security Payload in the Tunnel-mode (ESP)\",\n 10: \"Generic Route Encapsulation (GRE)\",\n 11: \"Bay Dial Virtual Services (DVS)\",\n 12: \"IP-in-IP Tunneling\",\n 13: 
\"Virtual LANs (VLAN)\"\n },\n\n # Tunnel-Medium-Type\n 65:\n {\n 1: \"IPv4 (IP version 4)\",\n 2: \"IPv6 (IP version 6)\",\n 3: \"NSAP\",\n 4: \"HDLC (8-bit multidrop)\",\n 5: \"BBN 1822\",\n 6: \"802\",\n 7: \"E.163 (POTS)\",\n 8: \"E.164 (SMDS, Frame Relay, ATM)\",\n 9: \"F.69 (Telex)\",\n 10: \"X.121 (X.25, Frame Relay)\",\n 11: \"IPX\",\n 12: \"Appletalk\",\n 13: \"Decnet IV\",\n 14: \"Banyan Vine\",\n 15: \"E.164 with NSAP format subaddress\"\n },\n\n # ARAP-Zone-Access\n 72:\n {\n 1: \"Only allow access to default zone\",\n 2: \"Use zone filter inclusively\",\n 3: \"Not used\",\n 4: \"Use zone filter exclusively\"\n },\n\n # Prompt\n 76:\n {\n 0: \"No Echo\",\n 1: \"Echo\"\n },\n\n # Error-Cause Attribute\n 101:\n {\n 201: \"Residual Session Context Removed\",\n 202: \"Invalid EAP Packet (Ignored)\",\n 401: \"Unsupported Attribute\",\n 402: \"Missing Attribute\",\n 403: \"NAS Identification Mismatch\",\n 404: \"Invalid Request\",\n 405: \"Unsupported Service\",\n 406: \"Unsupported Extension\",\n 407: \"Invalid Attribute Value\",\n 501: \"Administratively Prohibited\",\n 502: \"Request Not Routable (Proxy)\",\n 503: \"Session Context Not Found\",\n 504: \"Session Context Not Removable\",\n 505: \"Other Proxy Processing Error\",\n 506: \"Resources Unavailable\",\n 507: \"Request Initiated\",\n 508: \"Multiple Session Selection Unsupported\",\n 509: \"Location-Info-Required\",\n 601: \"Response Too Big\"\n },\n\n # Operator Namespace Identifier - Attribute 126\n 126:\n {\n 0x30: \"TADIG\",\n 0x31: \"REALM\",\n 0x32: \"E212\",\n 0x33: \"ICC\",\n 0xFF: \"Reserved\"\n },\n\n # Basic-Location-Policy-Rules\n 129:\n {\n 0: \"Retransmission allowed\",\n },\n\n # Location-Capable\n 131:\n {\n 1: \"CIVIC_LOCATION\",\n 2: \"GEO_LOCATION\",\n 4: \"USERS_LOCATION\",\n 8: \"NAS_LOCATION\"\n },\n\n # Framed-Management-Protocol\n 133:\n {\n 1: \"SNMP\",\n 2: \"Web-based\",\n 3: \"NETCONF\",\n 4: \"FTP\",\n 5: \"TFTP\",\n 6: \"SFTP\",\n 7: \"RCP\",\n 8: \"SCP\"\n },\n\n # Management-Transport-Protection\n 134:\n {\n 1: \"No-Protection\",\n 2: \"Integrity-Protection\",\n 3: \"Integrity-Confidentiality-Protection\",\n },\n}\n\n\nclass _RadiusAttrIntEnumVal(_SpecificRadiusAttr):\n \"\"\"\n Implements a RADIUS attribute which value field is 4 bytes long integer.\n \"\"\"\n\n __slots__ = [\"val\"]\n\n fields_desc = [\n ByteEnumField(\"type\", 6, _radius_attribute_types),\n ByteField(\"len\", 6),\n MultiEnumField(\n \"value\",\n 0,\n _radius_attrs_values,\n depends_on=lambda p: p.type,\n fmt=\"I\"\n )\n ]\n\n\nclass RadiusAttr_Service_Type(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 6\n\n\nclass RadiusAttr_Framed_Protocol(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 7\n\n\nclass RadiusAttr_NAS_Port_Type(_RadiusAttrIntEnumVal):\n \"\"\"RFC 2865\"\"\"\n val = 61\n\n\nclass _EAPPacketField(PacketField):\n\n \"\"\"\n Handles EAP-Message attribute value (the actual EAP packet).\n \"\"\"\n\n def m2i(self, pkt, m):\n ret = None\n eap_packet_len = struct.unpack(\"!H\", m[2:4])[0]\n if eap_packet_len < 254:\n # If the EAP packet has not been fragmented, build a Scapy EAP\n # packet from the data.\n ret = EAP(m)\n else:\n ret = conf.raw_layer(m)\n return ret\n\n\nclass RadiusAttr_EAP_Message(RadiusAttribute):\n \"\"\"\n Implements the \"EAP-Message\" attribute (RFC 3579).\n \"\"\"\n\n name = \"EAP-Message\"\n fields_desc = [\n ByteEnumField(\"type\", 79, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 2\n ),\n 
_EAPPacketField(\"value\", \"\", EAP)\n ]\n\n\nclass RadiusAttr_Vendor_Specific(RadiusAttribute):\n \"\"\"\n Implements the \"Vendor-Specific\" attribute, as described in RFC 2865.\n \"\"\"\n\n name = \"Vendor-Specific\"\n fields_desc = [\n ByteEnumField(\"type\", 26, _radius_attribute_types),\n FieldLenField(\n \"len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda pkt, x: len(pkt.value) + 8\n ),\n IntField(\"vendor_id\", 0),\n ByteField(\"vendor_type\", 0),\n FieldLenField(\n \"vendor_len\",\n None,\n \"value\",\n \"B\",\n adjust=lambda p, x: len(p.value) + 2\n ),\n StrLenField(\"value\", \"\", length_from=lambda p: p.vendor_len - 2)\n ]\n\n\n# See IANA RADIUS Packet Type Codes registry\n_packet_codes = {\n 1: \"Access-Request\",\n 2: \"Access-Accept\",\n 3: \"Access-Reject\",\n 4: \"Accounting-Request\",\n 5: \"Accounting-Response\",\n 6: \"Accounting-Status (now Interim Accounting)\",\n 7: \"Password-Request\",\n 8: \"Password-Ack\",\n 9: \"Password-Reject\",\n 10: \"Accounting-Message\",\n 11: \"Access-Challenge\",\n 12: \"Status-Server (experimental)\",\n 13: \"Status-Client (experimental)\",\n 21: \"Resource-Free-Request\",\n 22: \"Resource-Free-Response\",\n 23: \"Resource-Query-Request\",\n 24: \"Resource-Query-Response\",\n 25: \"Alternate-Resource-Reclaim-Request\",\n 26: \"NAS-Reboot-Request\",\n 27: \"NAS-Reboot-Response\",\n 28: \"Reserved\",\n 29: \"Next-Passcode\",\n 30: \"New-Pin\",\n 31: \"Terminate-Session\",\n 32: \"Password-Expired\",\n 33: \"Event-Request\",\n 34: \"Event-Response\",\n 40: \"Disconnect-Request\",\n 41: \"Disconnect-ACK\",\n 42: \"Disconnect-NAK\",\n 43: \"CoA-Request\",\n 44: \"CoA-ACK\",\n 45: \"CoA-NAK\",\n 50: \"IP-Address-Allocate\",\n 51: \"IP-Address-Release\",\n 52: \"Protocol-Error\",\n 250: \"Experimental Use\",\n 251: \"Experimental Use\",\n 252: \"Experimental Use\",\n 253: \"Experimental Use\",\n 254: \"Reserved\",\n 255: \"Reserved\"\n}\n\n\nclass Radius(Packet):\n \"\"\"\n Implements a RADIUS packet (RFC 2865).\n \"\"\"\n\n name = \"RADIUS\"\n fields_desc = [\n ByteEnumField(\"code\", 1, _packet_codes),\n ByteField(\"id\", 0),\n FieldLenField(\n \"len\",\n None,\n \"attributes\",\n \"H\",\n adjust=lambda pkt, x: len(pkt.attributes) + 20\n ),\n XStrFixedLenField(\"authenticator\", \"\", 16),\n PacketListField(\n \"attributes\",\n [],\n RadiusAttribute,\n length_from=lambda pkt: pkt.len - 20\n )\n ]\n\n def compute_authenticator(self, packed_request_auth, shared_secret):\n \"\"\"\n Computes the authenticator field (RFC 2865 - Section 3)\n \"\"\"\n\n data = prepare_packed_data(self, packed_request_auth)\n radius_mac = hashlib.md5(data + shared_secret)\n return radius_mac.digest()\n\n def post_build(self, p, pay):\n p += pay\n length = self.len\n if length is None:\n length = len(p)\n p = p[:2] + struct.pack(\"!H\", length) + p[4:]\n return p\n\n\nbind_layers(UDP, Radius, sport=1812)\nbind_layers(UDP, Radius, dport=1812)\nbind_layers(UDP, Radius, sport=1813)\nbind_layers(UDP, Radius, dport=1813)\n", "path": "scapy/layers/radius.py" } ]
diff --git a/scapy/layers/radius.py b/scapy/layers/radius.py index 75abffe508d..c7cceeb4c79 100644 --- a/scapy/layers/radius.py +++ b/scapy/layers/radius.py @@ -579,7 +579,7 @@ def compute_message_authenticator(radius_packet, packed_req_authenticator, # -class _RadiusAttrIPv4AddrVal(RadiusAttribute): +class _RadiusAttrIPv4AddrVal(_SpecificRadiusAttr): """ Implements a RADIUS attribute which value field is an IPv4 address. """ diff --git a/test/regression.uts b/test/regression.uts index 6e3d375e383..1f75d7a5bc3 100644 --- a/test/regression.uts +++ b/test/regression.uts @@ -9619,6 +9619,16 @@ assert len(s) == 1 s = b"Z\xa5\xaaUZ\xa5\xaaU\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe9\xc5\x00\x00\x14'\x02\x00\x00\x001\x9a\xe44\xea4" isinstance(Radius(s), Radius) += RADIUS - attributes with IPv4 addresses + +r = raw(RadiusAttr_NAS_IP_Address()) +p = RadiusAttr_NAS_IP_Address(p) +assert p.type == 4 + +r = raw(RadiusAttr_Framed_IP_Address()) +p = RadiusAttr_Framed_IP_Address(p) +assert p.type == 8 + ############ ############
joke2k__faker-1710
[ { "content": "#!/usr/bin/env python\n\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nhere = Path(__file__).resolve().parent\nREADME = (here / \"README.rst\").read_text(encoding=\"utf-8\")\nVERSION = (here / \"VERSION\").read_text(encoding=\"utf-8\").strip()\n\nexcluded_packages = [\"docs\", \"tests\", \"tests.*\"]\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n\n zip_safe = (\n hasattr(zipimport.zipimporter, \"iter_modules\")\n or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\n )\nexcept AttributeError:\n zip_safe = False\n\nsetup(\n name=\"Faker\",\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n \"console_scripts\": [\"faker=faker.cli:execute_from_command_line\"],\n \"pytest11\": [\"faker = faker.contrib.pytest.plugin\"],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords=\"faker fixtures data test mock generator\",\n author=\"joke2k\",\n author_email=\"joke2k@gmail.com\",\n url=\"https://github.com/joke2k/faker\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/joke2k/faker/issues\",\n \"Changes\": \"https://github.com/joke2k/faker/blob/master/CHANGELOG.md\",\n \"Documentation\": \"http://faker.rtfd.org/\",\n \"Source Code\": \"https://github.com/joke2k/faker\",\n },\n license=\"MIT License\",\n packages=find_packages(exclude=excluded_packages),\n package_data={\n \"faker\": [\"py.typed\"],\n },\n platforms=[\"any\"],\n zip_safe=zip_safe,\n python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n \"typing-extensions>=3.10.0.2;python_version<'3.8'\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nhere = Path(__file__).resolve().parent\nREADME = (here / \"README.rst\").read_text(encoding=\"utf-8\")\nVERSION = (here / \"VERSION\").read_text(encoding=\"utf-8\").strip()\n\nexcluded_packages = [\"docs\", \"tests\", \"tests.*\"]\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n\n zip_safe = (\n hasattr(zipimport.zipimporter, \"iter_modules\")\n or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\n )\nexcept AttributeError:\n zip_safe = False\n\nsetup(\n name=\"Faker\",\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n \"console_scripts\": [\"faker=faker.cli:execute_from_command_line\"],\n \"pytest11\": [\"faker = faker.contrib.pytest.plugin\"],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords=\"faker fixtures data test mock generator\",\n author=\"joke2k\",\n author_email=\"joke2k@gmail.com\",\n url=\"https://github.com/joke2k/faker\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/joke2k/faker/issues\",\n \"Changes\": \"https://github.com/joke2k/faker/blob/master/CHANGELOG.md\",\n \"Documentation\": \"http://faker.rtfd.org/\",\n \"Source Code\": \"https://github.com/joke2k/faker\",\n },\n license=\"MIT License\",\n packages=find_packages(exclude=excluded_packages),\n package_data={\n \"faker\": [\"py.typed\"],\n },\n platforms=[\"any\"],\n zip_safe=zip_safe,\n python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n \"typing-extensions>=3.7.4.3;python_version<'3.8'\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 62809a1090..bde881c6df 100644 --- a/setup.py +++ b/setup.py @@ -72,6 +72,6 @@ python_requires=">=3.6", install_requires=[ "python-dateutil>=2.4", - "typing-extensions>=3.10.0.2;python_version<'3.8'", + "typing-extensions>=3.7.4.3;python_version<'3.8'", ], )
yt-project__yt-3238
[ { "content": "import functools\nimport glob\nimport inspect\nimport os\nimport weakref\nfrom functools import wraps\n\nimport numpy as np\nfrom more_itertools import always_iterable\n\nfrom yt._maintenance.deprecation import issue_deprecation_warning\nfrom yt.config import ytcfg\nfrom yt.data_objects.analyzer_objects import AnalysisTask, create_quantity_proxy\nfrom yt.data_objects.particle_trajectories import ParticleTrajectories\nfrom yt.funcs import is_sequence, mylog\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.exceptions import YTException\nfrom yt.utilities.object_registries import (\n analysis_task_registry,\n data_object_registry,\n derived_quantity_registry,\n simulation_time_series_registry,\n)\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import (\n communication_system,\n parallel_objects,\n parallel_root_only,\n)\n\n\nclass AnalysisTaskProxy:\n def __init__(self, time_series):\n self.time_series = time_series\n\n def __getitem__(self, key):\n task_cls = analysis_task_registry[key]\n\n @wraps(task_cls.__init__)\n def func(*args, **kwargs):\n task = task_cls(*args, **kwargs)\n return self.time_series.eval(task)\n\n return func\n\n def keys(self):\n return analysis_task_registry.keys()\n\n def __contains__(self, key):\n return key in analysis_task_registry\n\n\ndef get_ds_prop(propname):\n def _eval(params, ds):\n return getattr(ds, propname)\n\n cls = type(propname, (AnalysisTask,), dict(eval=_eval, _params=tuple()))\n return cls\n\n\nattrs = (\n \"refine_by\",\n \"dimensionality\",\n \"current_time\",\n \"domain_dimensions\",\n \"domain_left_edge\",\n \"domain_right_edge\",\n \"unique_identifier\",\n \"current_redshift\",\n \"cosmological_simulation\",\n \"omega_matter\",\n \"omega_lambda\",\n \"omega_radiation\",\n \"hubble_constant\",\n)\n\n\nclass TimeSeriesParametersContainer:\n def __init__(self, data_object):\n self.data_object = data_object\n\n def __getattr__(self, attr):\n if attr in attrs:\n return self.data_object.eval(get_ds_prop(attr)())\n raise AttributeError(attr)\n\n\nclass DatasetSeries:\n r\"\"\"The DatasetSeries object is a container of multiple datasets,\n allowing easy iteration and computation on them.\n\n DatasetSeries objects are designed to provide easy ways to access,\n analyze, parallelize and visualize multiple datasets sequentially. This is\n primarily expressed through iteration, but can also be constructed via\n analysis tasks (see :ref:`time-series-analysis`).\n\n Note that contained datasets are lazily loaded and weakly referenced. This means\n that in order to perform follow-up operations on data it's best to define handles on\n these datasets during iteration.\n\n Parameters\n ----------\n outputs : list of filenames, or pattern\n A list of filenames, for instance [\"DD0001/DD0001\", \"DD0002/DD0002\"],\n or a glob pattern (i.e. containing wildcards '[]?!*') such as \"DD*/DD*.index\".\n In the latter case, results are sorted automatically.\n Filenames and patterns can be of type str, os.Pathlike or bytes.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True, one processor will be allocated for\n each iteration of the loop. If this is set to an integer, the loop\n will be parallelized over this many workgroups. 
It the integer\n value is less than the total number of available processors,\n more than one processor will be allocated to a given loop iteration,\n causing the functionality within the loop to be run in parallel.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n mixed_dataset_types : True or False, default False\n Set to True if the DatasetSeries will load different dataset types, set\n to False if loading dataset of a single type as this will result in a\n considerable speed up from not having to figure out the dataset type.\n\n Examples\n --------\n\n >>> ts = DatasetSeries(\n \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\")\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n\n \"\"\"\n\n def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n code_name = cls.__name__[: cls.__name__.find(\"Simulation\")]\n if code_name:\n simulation_time_series_registry[code_name] = cls\n mylog.debug(\"Registering simulation: %s as %s\", code_name, cls)\n\n def __new__(cls, outputs, *args, **kwargs):\n try:\n outputs = cls._get_filenames_from_glob_pattern(outputs)\n except TypeError:\n pass\n ret = super().__new__(cls)\n ret._pre_outputs = outputs[:]\n return ret\n\n def __init__(\n self,\n outputs,\n parallel=True,\n setup_function=None,\n mixed_dataset_types=False,\n **kwargs,\n ):\n # This is needed to properly set _pre_outputs for Simulation subclasses.\n self._mixed_dataset_types = mixed_dataset_types\n if is_sequence(outputs) and not isinstance(outputs, str):\n self._pre_outputs = outputs[:]\n self.tasks = AnalysisTaskProxy(self)\n self.params = TimeSeriesParametersContainer(self)\n if setup_function is None:\n\n def _null(x):\n return None\n\n setup_function = _null\n self._setup_function = setup_function\n for type_name in data_object_registry:\n setattr(\n self, type_name, functools.partial(DatasetSeriesObject, self, type_name)\n )\n self.parallel = parallel\n self.kwargs = kwargs\n\n @staticmethod\n def _get_filenames_from_glob_pattern(outputs):\n \"\"\"\n Helper function to DatasetSeries.__new__\n handle a special case where \"outputs\" is assumed to be really a pattern string\n \"\"\"\n pattern = outputs\n epattern = os.path.expanduser(pattern)\n data_dir = ytcfg.get(\"yt\", \"test_data_dir\")\n # if no match if found from the current work dir,\n # we try to match the pattern from the test data dir\n file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern))\n if not file_list:\n raise FileNotFoundError(f\"No match found for pattern : {pattern}\")\n return sorted(file_list)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if isinstance(key.start, float):\n return self.get_range(key.start, key.stop)\n # This will return a sliced up object!\n return DatasetSeries(\n self._pre_outputs[key], parallel=self.parallel, **self.kwargs\n )\n o = self._pre_outputs[key]\n if isinstance(o, (str, os.PathLike)):\n o = self._load(o, **self.kwargs)\n self._setup_function(o)\n return o\n\n def __len__(self):\n return len(self._pre_outputs)\n\n @property\n def outputs(self):\n return self._pre_outputs\n\n def piter(self, storage=None, dynamic=False):\n 
r\"\"\"Iterate over time series components in parallel.\n\n This allows you to iterate over a time series while dispatching\n individual components of that time series to different processors or\n processor groups. If the parallelism strategy was set to be\n multi-processor (by \"parallel = N\" where N is an integer when the\n DatasetSeries was created) this will issue each dataset to an\n N-processor group. For instance, this would allow you to start a 1024\n processor job, loading up 100 datasets in a time series and creating 8\n processor groups of 128 processors each, each of which would be\n assigned a different dataset. This could be accomplished as shown in\n the examples below. The *storage* option is as seen in\n :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`\n which is a mechanism for storing results of analysis on an individual\n dataset and then combining the results at the end, so that the entire\n set of processors have access to those results.\n\n Note that supplying a *store* changes the iteration mechanism; see\n below.\n\n Parameters\n ----------\n storage : dict\n This is a dictionary, which will be filled with results during the\n course of the iteration. The keys will be the dataset\n indices and the values will be whatever is assigned to the *result*\n attribute on the storage during iteration.\n dynamic : boolean\n This governs whether or not dynamic load balancing will be\n enabled. This requires one dedicated processor; if this\n is enabled with a set of 128 processors available, only\n 127 will be available to iterate over objects as one will\n be load balancing the rest.\n\n\n Examples\n --------\n Here is an example of iteration when the results do not need to be\n stored. One processor will be assigned to each dataset.\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\")\n >>> for ds in ts.piter():\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n\n This demonstrates how one might store results:\n\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... setup_function = print_time )\n ...\n >>> my_storage = {}\n >>> for sto, ds in ts.piter(storage=my_storage):\n ... v, c = ds.find_max((\"gas\", \"density\"))\n ... sto.result = (v, c)\n ...\n >>> for i, (v, c) in sorted(my_storage.items()):\n ... print(\"% 4i %0.3e\" % (i, v))\n ...\n\n This shows how to dispatch 4 processors to each dataset:\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... parallel = 4)\n >>> for ds in ts.piter():\n ... 
ProjectionPlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n\n \"\"\"\n if not self.parallel:\n njobs = 1\n elif not dynamic:\n if self.parallel:\n njobs = -1\n else:\n njobs = self.parallel\n else:\n my_communicator = communication_system.communicators[-1]\n nsize = my_communicator.size\n if nsize == 1:\n self.parallel = False\n dynamic = False\n njobs = 1\n else:\n njobs = nsize - 1\n\n for output in parallel_objects(\n self._pre_outputs, njobs=njobs, storage=storage, dynamic=dynamic\n ):\n if storage is not None:\n sto, output = output\n\n if isinstance(output, str):\n ds = self._load(output, **self.kwargs)\n self._setup_function(ds)\n else:\n ds = output\n\n if storage is not None:\n next_ret = (sto, ds)\n else:\n next_ret = ds\n\n yield next_ret\n\n def eval(self, tasks, obj=None):\n return_values = {}\n for store, ds in self.piter(return_values):\n store.result = []\n for task in always_iterable(tasks):\n try:\n style = inspect.getargspec(task.eval)[0][1]\n if style == \"ds\":\n arg = ds\n elif style == \"data_object\":\n if obj is None:\n obj = DatasetSeriesObject(self, \"all_data\")\n arg = obj.get(ds)\n rv = task.eval(arg)\n # We catch and store YT-originating exceptions\n # This fixes the standard problem of having a sphere that's too\n # small.\n except YTException:\n pass\n store.result.append(rv)\n return [v for k, v in sorted(return_values.items())]\n\n @classmethod\n def from_filenames(cls, filenames, parallel=True, setup_function=None, **kwargs):\n r\"\"\"Create a time series from either a filename pattern or a list of\n filenames.\n\n This method provides an easy way to create a\n :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of\n filenames or a pattern that matches them. Additionally, it can set the\n parallelism strategy.\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as [\"DD0001/DD0001\",\n \"DD0002/DD0002\"]) or a pattern to match, such as\n \"DD*/DD*.index\"). If it's the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True or an integer, it will be iterated with\n 1 or that integer number of processors assigned to each parameter\n file provided to the loop.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries.from_filenames(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n\n \"\"\"\n issue_deprecation_warning(\n \"DatasetSeries.from_filenames() is deprecated and will be removed \"\n \"in a future version of yt. 
Use DatasetSeries() directly.\",\n since=\"4.0.0\",\n removal=\"4.1.0\",\n )\n obj = cls(filenames, parallel=parallel, setup_function=setup_function, **kwargs)\n return obj\n\n @classmethod\n def from_output_log(cls, output_log, line_prefix=\"DATASET WRITTEN\", parallel=True):\n filenames = []\n for line in open(output_log):\n if not line.startswith(line_prefix):\n continue\n cut_line = line[len(line_prefix) :].strip()\n fn = cut_line.split()[0]\n filenames.append(fn)\n obj = cls(filenames, parallel=parallel)\n return obj\n\n _dataset_cls = None\n\n def _load(self, output_fn, **kwargs):\n from yt.loaders import load\n\n if self._dataset_cls is not None:\n return self._dataset_cls(output_fn, **kwargs)\n elif self._mixed_dataset_types:\n return load(output_fn, **kwargs)\n ds = load(output_fn, **kwargs)\n self._dataset_cls = ds.__class__\n return ds\n\n def particle_trajectories(\n self, indices, fields=None, suppress_logging=False, ptype=None\n ):\n r\"\"\"Create a collection of particle trajectories in time over a series of\n datasets.\n\n Parameters\n ----------\n indices : array_like\n An integer array of particle indices whose trajectories we\n want to track. If they are not sorted they will be sorted.\n fields : list of strings, optional\n A set of fields that is retrieved when the trajectory\n collection is instantiated. Default: None (will default\n to the fields 'particle_position_x', 'particle_position_y',\n 'particle_position_z')\n suppress_logging : boolean\n Suppress yt's logging when iterating over the simulation time\n series. Default: False\n ptype : str, optional\n Only use this particle type. Default: None, which uses all particle type.\n\n Examples\n --------\n >>> my_fns = glob.glob(\"orbit_hdf5_chk_00[0-9][0-9]\")\n >>> my_fns.sort()\n >>> fields = [(\"all\", \"particle_position_x\"), (\"all\", \"particle_position_y\"),\n >>> (\"all\", \"particle_position_z\"), (\"all\", \"particle_velocity_x\"),\n >>> (\"all\", \"particle_velocity_y\"), (\"all\", \"particle_velocity_z\")]\n >>> ds = load(my_fns[0])\n >>> init_sphere = ds.sphere(ds.domain_center, (.5, \"unitary\"))\n >>> indices = init_sphere[(\"all\", \"particle_index\")].astype(\"int\")\n >>> ts = DatasetSeries(my_fns)\n >>> trajs = ts.particle_trajectories(indices, fields=fields)\n >>> for t in trajs :\n >>> print(t[(\"all\", \"particle_velocity_x\")].max(), t[(\"all\", \"particle_velocity_x\")].min())\n\n Notes\n -----\n This function will fail if there are duplicate particle ids or if some of the\n particle disappear.\n \"\"\"\n return ParticleTrajectories(\n self, indices, fields=fields, suppress_logging=suppress_logging, ptype=ptype\n )\n\n\nclass TimeSeriesQuantitiesContainer:\n def __init__(self, data_object, quantities):\n self.data_object = data_object\n self.quantities = quantities\n\n def __getitem__(self, key):\n if key not in self.quantities:\n raise KeyError(key)\n q = self.quantities[key]\n\n def run_quantity_wrapper(quantity, quantity_name):\n @wraps(derived_quantity_registry[quantity_name][1])\n def run_quantity(*args, **kwargs):\n to_run = quantity(*args, **kwargs)\n return self.data_object.eval(to_run)\n\n return run_quantity\n\n return run_quantity_wrapper(q, key)\n\n\nclass DatasetSeriesObject:\n def __init__(self, time_series, data_object_name, *args, **kwargs):\n self.time_series = weakref.proxy(time_series)\n self.data_object_name = data_object_name\n self._args = args\n self._kwargs = kwargs\n qs = {\n qn: create_quantity_proxy(qv)\n for qn, qv in derived_quantity_registry.items()\n }\n 
self.quantities = TimeSeriesQuantitiesContainer(self, qs)\n\n def eval(self, tasks):\n return self.time_series.eval(tasks, self)\n\n def get(self, ds):\n # We get the type name, which corresponds to an attribute of the\n # index\n cls = getattr(ds, self.data_object_name)\n return cls(*self._args, **self._kwargs)\n\n\nclass SimulationTimeSeries(DatasetSeries):\n def __init__(self, parameter_filename, find_outputs=False):\n \"\"\"\n Base class for generating simulation time series types.\n Principally consists of a *parameter_filename*.\n \"\"\"\n\n if not os.path.exists(parameter_filename):\n raise FileNotFoundError(parameter_filename)\n self.parameter_filename = parameter_filename\n self.basename = os.path.basename(parameter_filename)\n self.directory = os.path.dirname(parameter_filename)\n self.parameters = {}\n self.key_parameters = []\n\n # Set some parameter defaults.\n self._set_parameter_defaults()\n # Read the simulation dataset.\n self._parse_parameter_file()\n # Set units\n self._set_units()\n # Figure out the starting and stopping times and redshift.\n self._calculate_simulation_bounds()\n # Get all possible datasets.\n self._get_all_outputs(find_outputs=find_outputs)\n\n self.print_key_parameters()\n\n def _set_parameter_defaults(self):\n pass\n\n def _parse_parameter_file(self):\n pass\n\n def _set_units(self):\n pass\n\n def _calculate_simulation_bounds(self):\n pass\n\n def _get_all_outputs(**kwargs):\n pass\n\n def __repr__(self):\n return self.parameter_filename\n\n _arr = None\n\n @property\n def arr(self):\n if self._arr is not None:\n return self._arr\n self._arr = functools.partial(YTArray, registry=self.unit_registry)\n return self._arr\n\n _quan = None\n\n @property\n def quan(self):\n if self._quan is not None:\n return self._quan\n self._quan = functools.partial(YTQuantity, registry=self.unit_registry)\n return self._quan\n\n @parallel_root_only\n def print_key_parameters(self):\n \"\"\"\n Print out some key parameters for the simulation.\n \"\"\"\n if self.simulation_type == \"grid\":\n for a in [\"domain_dimensions\", \"domain_left_edge\", \"domain_right_edge\"]:\n self._print_attr(a)\n for a in [\"initial_time\", \"final_time\", \"cosmological_simulation\"]:\n self._print_attr(a)\n if getattr(self, \"cosmological_simulation\", False):\n for a in [\n \"box_size\",\n \"omega_matter\",\n \"omega_lambda\",\n \"omega_radiation\",\n \"hubble_constant\",\n \"initial_redshift\",\n \"final_redshift\",\n ]:\n self._print_attr(a)\n for a in self.key_parameters:\n self._print_attr(a)\n mylog.info(\"Total datasets: %d.\", len(self.all_outputs))\n\n def _print_attr(self, a):\n \"\"\"\n Print the attribute or warn about it missing.\n \"\"\"\n if not hasattr(self, a):\n mylog.error(\"Missing %s in dataset definition!\", a)\n return\n v = getattr(self, a)\n mylog.info(\"Parameters: %-25s = %s\", a, v)\n\n def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):\n r\"\"\"\n Get datasets at or near to given values.\n\n Parameters\n ----------\n key : str\n The key by which to retrieve outputs, usually 'time' or\n 'redshift'.\n values : array_like\n A list of values, given as floats.\n tolerance : float\n If not None, do not return a dataset unless the value is\n within the tolerance value. If None, simply return the\n nearest dataset.\n Default: None.\n outputs : list\n The list of outputs from which to choose. 
If None,\n self.all_outputs is used.\n Default: None.\n\n Examples\n --------\n >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)\n\n \"\"\"\n\n if not isinstance(values, YTArray):\n if isinstance(values, tuple) and len(values) == 2:\n values = self.arr(*values)\n else:\n values = self.arr(values)\n values = values.in_base()\n\n if outputs is None:\n outputs = self.all_outputs\n my_outputs = []\n if not outputs:\n return my_outputs\n for value in values:\n outputs.sort(key=lambda obj: np.abs(value - obj[key]))\n if (\n tolerance is None or np.abs(value - outputs[0][key]) <= tolerance\n ) and outputs[0] not in my_outputs:\n my_outputs.append(outputs[0])\n else:\n mylog.error(\"No dataset added for %s = %f.\", key, value)\n\n outputs.sort(key=lambda obj: obj[\"time\"])\n return my_outputs\n", "path": "yt/data_objects/time_series.py" } ]
[ { "content": "import functools\nimport glob\nimport inspect\nimport os\nimport weakref\nfrom functools import wraps\n\nimport numpy as np\nfrom more_itertools import always_iterable\n\nfrom yt._maintenance.deprecation import issue_deprecation_warning\nfrom yt.config import ytcfg\nfrom yt.data_objects.analyzer_objects import AnalysisTask, create_quantity_proxy\nfrom yt.data_objects.particle_trajectories import ParticleTrajectories\nfrom yt.funcs import is_sequence, mylog\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.exceptions import YTException\nfrom yt.utilities.object_registries import (\n analysis_task_registry,\n data_object_registry,\n derived_quantity_registry,\n simulation_time_series_registry,\n)\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import (\n communication_system,\n parallel_objects,\n parallel_root_only,\n)\n\n\nclass AnalysisTaskProxy:\n def __init__(self, time_series):\n self.time_series = time_series\n\n def __getitem__(self, key):\n task_cls = analysis_task_registry[key]\n\n @wraps(task_cls.__init__)\n def func(*args, **kwargs):\n task = task_cls(*args, **kwargs)\n return self.time_series.eval(task)\n\n return func\n\n def keys(self):\n return analysis_task_registry.keys()\n\n def __contains__(self, key):\n return key in analysis_task_registry\n\n\ndef get_ds_prop(propname):\n def _eval(params, ds):\n return getattr(ds, propname)\n\n cls = type(propname, (AnalysisTask,), dict(eval=_eval, _params=tuple()))\n return cls\n\n\nattrs = (\n \"refine_by\",\n \"dimensionality\",\n \"current_time\",\n \"domain_dimensions\",\n \"domain_left_edge\",\n \"domain_right_edge\",\n \"unique_identifier\",\n \"current_redshift\",\n \"cosmological_simulation\",\n \"omega_matter\",\n \"omega_lambda\",\n \"omega_radiation\",\n \"hubble_constant\",\n)\n\n\nclass TimeSeriesParametersContainer:\n def __init__(self, data_object):\n self.data_object = data_object\n\n def __getattr__(self, attr):\n if attr in attrs:\n return self.data_object.eval(get_ds_prop(attr)())\n raise AttributeError(attr)\n\n\nclass DatasetSeries:\n r\"\"\"The DatasetSeries object is a container of multiple datasets,\n allowing easy iteration and computation on them.\n\n DatasetSeries objects are designed to provide easy ways to access,\n analyze, parallelize and visualize multiple datasets sequentially. This is\n primarily expressed through iteration, but can also be constructed via\n analysis tasks (see :ref:`time-series-analysis`).\n\n Note that contained datasets are lazily loaded and weakly referenced. This means\n that in order to perform follow-up operations on data it's best to define handles on\n these datasets during iteration.\n\n Parameters\n ----------\n outputs : list of filenames, or pattern\n A list of filenames, for instance [\"DD0001/DD0001\", \"DD0002/DD0002\"],\n or a glob pattern (i.e. containing wildcards '[]?!*') such as \"DD*/DD*.index\".\n In the latter case, results are sorted automatically.\n Filenames and patterns can be of type str, os.Pathlike or bytes.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True, one processor will be allocated for\n each iteration of the loop. If this is set to an integer, the loop\n will be parallelized over this many workgroups. 
It the integer\n value is less than the total number of available processors,\n more than one processor will be allocated to a given loop iteration,\n causing the functionality within the loop to be run in parallel.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n mixed_dataset_types : True or False, default False\n Set to True if the DatasetSeries will load different dataset types, set\n to False if loading dataset of a single type as this will result in a\n considerable speed up from not having to figure out the dataset type.\n\n Examples\n --------\n\n >>> ts = DatasetSeries(\n \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\")\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n\n \"\"\"\n\n def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n code_name = cls.__name__[: cls.__name__.find(\"Simulation\")]\n if code_name:\n simulation_time_series_registry[code_name] = cls\n mylog.debug(\"Registering simulation: %s as %s\", code_name, cls)\n\n def __new__(cls, outputs, *args, **kwargs):\n try:\n outputs = cls._get_filenames_from_glob_pattern(outputs)\n except TypeError:\n pass\n ret = super().__new__(cls)\n ret._pre_outputs = outputs[:]\n ret.kwargs = {}\n return ret\n\n def __init__(\n self,\n outputs,\n parallel=True,\n setup_function=None,\n mixed_dataset_types=False,\n **kwargs,\n ):\n # This is needed to properly set _pre_outputs for Simulation subclasses.\n self._mixed_dataset_types = mixed_dataset_types\n if is_sequence(outputs) and not isinstance(outputs, str):\n self._pre_outputs = outputs[:]\n self.tasks = AnalysisTaskProxy(self)\n self.params = TimeSeriesParametersContainer(self)\n if setup_function is None:\n\n def _null(x):\n return None\n\n setup_function = _null\n self._setup_function = setup_function\n for type_name in data_object_registry:\n setattr(\n self, type_name, functools.partial(DatasetSeriesObject, self, type_name)\n )\n self.parallel = parallel\n self.kwargs = kwargs\n\n @staticmethod\n def _get_filenames_from_glob_pattern(outputs):\n \"\"\"\n Helper function to DatasetSeries.__new__\n handle a special case where \"outputs\" is assumed to be really a pattern string\n \"\"\"\n pattern = outputs\n epattern = os.path.expanduser(pattern)\n data_dir = ytcfg.get(\"yt\", \"test_data_dir\")\n # if no match if found from the current work dir,\n # we try to match the pattern from the test data dir\n file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern))\n if not file_list:\n raise FileNotFoundError(f\"No match found for pattern : {pattern}\")\n return sorted(file_list)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if isinstance(key.start, float):\n return self.get_range(key.start, key.stop)\n # This will return a sliced up object!\n return DatasetSeries(\n self._pre_outputs[key], parallel=self.parallel, **self.kwargs\n )\n o = self._pre_outputs[key]\n if isinstance(o, (str, os.PathLike)):\n o = self._load(o, **self.kwargs)\n self._setup_function(o)\n return o\n\n def __len__(self):\n return len(self._pre_outputs)\n\n @property\n def outputs(self):\n return self._pre_outputs\n\n def piter(self, storage=None, 
dynamic=False):\n r\"\"\"Iterate over time series components in parallel.\n\n This allows you to iterate over a time series while dispatching\n individual components of that time series to different processors or\n processor groups. If the parallelism strategy was set to be\n multi-processor (by \"parallel = N\" where N is an integer when the\n DatasetSeries was created) this will issue each dataset to an\n N-processor group. For instance, this would allow you to start a 1024\n processor job, loading up 100 datasets in a time series and creating 8\n processor groups of 128 processors each, each of which would be\n assigned a different dataset. This could be accomplished as shown in\n the examples below. The *storage* option is as seen in\n :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`\n which is a mechanism for storing results of analysis on an individual\n dataset and then combining the results at the end, so that the entire\n set of processors have access to those results.\n\n Note that supplying a *store* changes the iteration mechanism; see\n below.\n\n Parameters\n ----------\n storage : dict\n This is a dictionary, which will be filled with results during the\n course of the iteration. The keys will be the dataset\n indices and the values will be whatever is assigned to the *result*\n attribute on the storage during iteration.\n dynamic : boolean\n This governs whether or not dynamic load balancing will be\n enabled. This requires one dedicated processor; if this\n is enabled with a set of 128 processors available, only\n 127 will be available to iterate over objects as one will\n be load balancing the rest.\n\n\n Examples\n --------\n Here is an example of iteration when the results do not need to be\n stored. One processor will be assigned to each dataset.\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\")\n >>> for ds in ts.piter():\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n\n This demonstrates how one might store results:\n\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... setup_function = print_time )\n ...\n >>> my_storage = {}\n >>> for sto, ds in ts.piter(storage=my_storage):\n ... v, c = ds.find_max((\"gas\", \"density\"))\n ... sto.result = (v, c)\n ...\n >>> for i, (v, c) in sorted(my_storage.items()):\n ... print(\"% 4i %0.3e\" % (i, v))\n ...\n\n This shows how to dispatch 4 processors to each dataset:\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... parallel = 4)\n >>> for ds in ts.piter():\n ... 
ProjectionPlot(ds, \"x\", (\"gas\", \"density\")).save()\n ...\n\n \"\"\"\n if not self.parallel:\n njobs = 1\n elif not dynamic:\n if self.parallel:\n njobs = -1\n else:\n njobs = self.parallel\n else:\n my_communicator = communication_system.communicators[-1]\n nsize = my_communicator.size\n if nsize == 1:\n self.parallel = False\n dynamic = False\n njobs = 1\n else:\n njobs = nsize - 1\n\n for output in parallel_objects(\n self._pre_outputs, njobs=njobs, storage=storage, dynamic=dynamic\n ):\n if storage is not None:\n sto, output = output\n\n if isinstance(output, str):\n ds = self._load(output, **self.kwargs)\n self._setup_function(ds)\n else:\n ds = output\n\n if storage is not None:\n next_ret = (sto, ds)\n else:\n next_ret = ds\n\n yield next_ret\n\n def eval(self, tasks, obj=None):\n return_values = {}\n for store, ds in self.piter(return_values):\n store.result = []\n for task in always_iterable(tasks):\n try:\n style = inspect.getargspec(task.eval)[0][1]\n if style == \"ds\":\n arg = ds\n elif style == \"data_object\":\n if obj is None:\n obj = DatasetSeriesObject(self, \"all_data\")\n arg = obj.get(ds)\n rv = task.eval(arg)\n # We catch and store YT-originating exceptions\n # This fixes the standard problem of having a sphere that's too\n # small.\n except YTException:\n pass\n store.result.append(rv)\n return [v for k, v in sorted(return_values.items())]\n\n @classmethod\n def from_filenames(cls, filenames, parallel=True, setup_function=None, **kwargs):\n r\"\"\"Create a time series from either a filename pattern or a list of\n filenames.\n\n This method provides an easy way to create a\n :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of\n filenames or a pattern that matches them. Additionally, it can set the\n parallelism strategy.\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as [\"DD0001/DD0001\",\n \"DD0002/DD0002\"]) or a pattern to match, such as\n \"DD*/DD*.index\"). If it's the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True or an integer, it will be iterated with\n 1 or that integer number of processors assigned to each parameter\n file provided to the loop.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> def print_time(ds):\n ... print(ds.current_time)\n ...\n >>> ts = DatasetSeries.from_filenames(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", (\"gas\", \"density\")).save()\n\n \"\"\"\n issue_deprecation_warning(\n \"DatasetSeries.from_filenames() is deprecated and will be removed \"\n \"in a future version of yt. 
Use DatasetSeries() directly.\",\n since=\"4.0.0\",\n removal=\"4.1.0\",\n )\n obj = cls(filenames, parallel=parallel, setup_function=setup_function, **kwargs)\n return obj\n\n @classmethod\n def from_output_log(cls, output_log, line_prefix=\"DATASET WRITTEN\", parallel=True):\n filenames = []\n for line in open(output_log):\n if not line.startswith(line_prefix):\n continue\n cut_line = line[len(line_prefix) :].strip()\n fn = cut_line.split()[0]\n filenames.append(fn)\n obj = cls(filenames, parallel=parallel)\n return obj\n\n _dataset_cls = None\n\n def _load(self, output_fn, **kwargs):\n from yt.loaders import load\n\n if self._dataset_cls is not None:\n return self._dataset_cls(output_fn, **kwargs)\n elif self._mixed_dataset_types:\n return load(output_fn, **kwargs)\n ds = load(output_fn, **kwargs)\n self._dataset_cls = ds.__class__\n return ds\n\n def particle_trajectories(\n self, indices, fields=None, suppress_logging=False, ptype=None\n ):\n r\"\"\"Create a collection of particle trajectories in time over a series of\n datasets.\n\n Parameters\n ----------\n indices : array_like\n An integer array of particle indices whose trajectories we\n want to track. If they are not sorted they will be sorted.\n fields : list of strings, optional\n A set of fields that is retrieved when the trajectory\n collection is instantiated. Default: None (will default\n to the fields 'particle_position_x', 'particle_position_y',\n 'particle_position_z')\n suppress_logging : boolean\n Suppress yt's logging when iterating over the simulation time\n series. Default: False\n ptype : str, optional\n Only use this particle type. Default: None, which uses all particle type.\n\n Examples\n --------\n >>> my_fns = glob.glob(\"orbit_hdf5_chk_00[0-9][0-9]\")\n >>> my_fns.sort()\n >>> fields = [(\"all\", \"particle_position_x\"), (\"all\", \"particle_position_y\"),\n >>> (\"all\", \"particle_position_z\"), (\"all\", \"particle_velocity_x\"),\n >>> (\"all\", \"particle_velocity_y\"), (\"all\", \"particle_velocity_z\")]\n >>> ds = load(my_fns[0])\n >>> init_sphere = ds.sphere(ds.domain_center, (.5, \"unitary\"))\n >>> indices = init_sphere[(\"all\", \"particle_index\")].astype(\"int\")\n >>> ts = DatasetSeries(my_fns)\n >>> trajs = ts.particle_trajectories(indices, fields=fields)\n >>> for t in trajs :\n >>> print(t[(\"all\", \"particle_velocity_x\")].max(), t[(\"all\", \"particle_velocity_x\")].min())\n\n Notes\n -----\n This function will fail if there are duplicate particle ids or if some of the\n particle disappear.\n \"\"\"\n return ParticleTrajectories(\n self, indices, fields=fields, suppress_logging=suppress_logging, ptype=ptype\n )\n\n\nclass TimeSeriesQuantitiesContainer:\n def __init__(self, data_object, quantities):\n self.data_object = data_object\n self.quantities = quantities\n\n def __getitem__(self, key):\n if key not in self.quantities:\n raise KeyError(key)\n q = self.quantities[key]\n\n def run_quantity_wrapper(quantity, quantity_name):\n @wraps(derived_quantity_registry[quantity_name][1])\n def run_quantity(*args, **kwargs):\n to_run = quantity(*args, **kwargs)\n return self.data_object.eval(to_run)\n\n return run_quantity\n\n return run_quantity_wrapper(q, key)\n\n\nclass DatasetSeriesObject:\n def __init__(self, time_series, data_object_name, *args, **kwargs):\n self.time_series = weakref.proxy(time_series)\n self.data_object_name = data_object_name\n self._args = args\n self._kwargs = kwargs\n qs = {\n qn: create_quantity_proxy(qv)\n for qn, qv in derived_quantity_registry.items()\n }\n 
self.quantities = TimeSeriesQuantitiesContainer(self, qs)\n\n def eval(self, tasks):\n return self.time_series.eval(tasks, self)\n\n def get(self, ds):\n # We get the type name, which corresponds to an attribute of the\n # index\n cls = getattr(ds, self.data_object_name)\n return cls(*self._args, **self._kwargs)\n\n\nclass SimulationTimeSeries(DatasetSeries):\n def __init__(self, parameter_filename, find_outputs=False):\n \"\"\"\n Base class for generating simulation time series types.\n Principally consists of a *parameter_filename*.\n \"\"\"\n\n if not os.path.exists(parameter_filename):\n raise FileNotFoundError(parameter_filename)\n self.parameter_filename = parameter_filename\n self.basename = os.path.basename(parameter_filename)\n self.directory = os.path.dirname(parameter_filename)\n self.parameters = {}\n self.key_parameters = []\n\n # Set some parameter defaults.\n self._set_parameter_defaults()\n # Read the simulation dataset.\n self._parse_parameter_file()\n # Set units\n self._set_units()\n # Figure out the starting and stopping times and redshift.\n self._calculate_simulation_bounds()\n # Get all possible datasets.\n self._get_all_outputs(find_outputs=find_outputs)\n\n self.print_key_parameters()\n\n def _set_parameter_defaults(self):\n pass\n\n def _parse_parameter_file(self):\n pass\n\n def _set_units(self):\n pass\n\n def _calculate_simulation_bounds(self):\n pass\n\n def _get_all_outputs(**kwargs):\n pass\n\n def __repr__(self):\n return self.parameter_filename\n\n _arr = None\n\n @property\n def arr(self):\n if self._arr is not None:\n return self._arr\n self._arr = functools.partial(YTArray, registry=self.unit_registry)\n return self._arr\n\n _quan = None\n\n @property\n def quan(self):\n if self._quan is not None:\n return self._quan\n self._quan = functools.partial(YTQuantity, registry=self.unit_registry)\n return self._quan\n\n @parallel_root_only\n def print_key_parameters(self):\n \"\"\"\n Print out some key parameters for the simulation.\n \"\"\"\n if self.simulation_type == \"grid\":\n for a in [\"domain_dimensions\", \"domain_left_edge\", \"domain_right_edge\"]:\n self._print_attr(a)\n for a in [\"initial_time\", \"final_time\", \"cosmological_simulation\"]:\n self._print_attr(a)\n if getattr(self, \"cosmological_simulation\", False):\n for a in [\n \"box_size\",\n \"omega_matter\",\n \"omega_lambda\",\n \"omega_radiation\",\n \"hubble_constant\",\n \"initial_redshift\",\n \"final_redshift\",\n ]:\n self._print_attr(a)\n for a in self.key_parameters:\n self._print_attr(a)\n mylog.info(\"Total datasets: %d.\", len(self.all_outputs))\n\n def _print_attr(self, a):\n \"\"\"\n Print the attribute or warn about it missing.\n \"\"\"\n if not hasattr(self, a):\n mylog.error(\"Missing %s in dataset definition!\", a)\n return\n v = getattr(self, a)\n mylog.info(\"Parameters: %-25s = %s\", a, v)\n\n def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):\n r\"\"\"\n Get datasets at or near to given values.\n\n Parameters\n ----------\n key : str\n The key by which to retrieve outputs, usually 'time' or\n 'redshift'.\n values : array_like\n A list of values, given as floats.\n tolerance : float\n If not None, do not return a dataset unless the value is\n within the tolerance value. If None, simply return the\n nearest dataset.\n Default: None.\n outputs : list\n The list of outputs from which to choose. 
If None,\n self.all_outputs is used.\n Default: None.\n\n Examples\n --------\n >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)\n\n \"\"\"\n\n if not isinstance(values, YTArray):\n if isinstance(values, tuple) and len(values) == 2:\n values = self.arr(*values)\n else:\n values = self.arr(values)\n values = values.in_base()\n\n if outputs is None:\n outputs = self.all_outputs\n my_outputs = []\n if not outputs:\n return my_outputs\n for value in values:\n outputs.sort(key=lambda obj: np.abs(value - obj[key]))\n if (\n tolerance is None or np.abs(value - outputs[0][key]) <= tolerance\n ) and outputs[0] not in my_outputs:\n my_outputs.append(outputs[0])\n else:\n mylog.error(\"No dataset added for %s = %f.\", key, value)\n\n outputs.sort(key=lambda obj: obj[\"time\"])\n return my_outputs\n", "path": "yt/data_objects/time_series.py" } ]
diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index e2d3528ddb6..d7aecccdd7f 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -155,6 +155,7 @@ def __new__(cls, outputs, *args, **kwargs): pass ret = super().__new__(cls) ret._pre_outputs = outputs[:] + ret.kwargs = {} return ret def __init__(
rlworkgroup__garage-1759
[ { "content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability',\n 'torch>=1.0.0,!=1.5.0',\n 'torchvision>=0.2.1',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control==0.0.300771433',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability<=0.10.0',\n 'torch>=1.0.0,!=1.5.0',\n 'torchvision>=0.2.1',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control==0.0.300771433',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 54aee5e3c0..8243b6007a 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ 'scipy', 'setproctitle>=1.0', 'tensorflow>=1.14', - 'tensorflow-probability', + 'tensorflow-probability<=0.10.0', 'torch>=1.0.0,!=1.5.0', 'torchvision>=0.2.1', ]
netket__netket-111
[ { "content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='0.1',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='netket@netket.org',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='2.0',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='netket@netket.org',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/CMakeLists.txt b/CMakeLists.txt index 3926b65d38..1568f22ec2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,9 +3,13 @@ cmake_minimum_required(VERSION 3.1) set(CMAKE_DISABLE_SOURCE_CHANGES ON) set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) option(ENABLE_TESTS "Enable unit tests." OFF) +option(NETKET_Sanitizer "Build test suite with Clang sanitizer" OFF) project(NetKet) +add_library(netket_test INTERFACE) +target_link_libraries(netket_test INTERFACE netket_lib Catch2) + set(NETKET_PYTHON_VERSION "" CACHE STRING "Python version to use for compiling modules") if(ENABLE_TESTS) @@ -202,6 +206,21 @@ set_target_properties(netket PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "${PYTHON_MODULE_EXTENSION}") add_dependencies(netket eigen_project pybind11_project) +if(NETKET_Sanitizer) + message(STATUS "Building python library with Clang sanitizer") + if(NOT MSVC) + target_compile_options(netket + INTERFACE + -O2 -g -fno-omit-frame-pointer + -fsanitize=address -fsanitize=undefined + ) + target_link_libraries(netket + INTERFACE + -fsanitize=address -fsanitize=undefined + ) + endif() +endif() + # # Testing # diff --git a/NetKet/Hilbert/bosons.hpp b/NetKet/Hilbert/bosons.hpp index 48ff60bc53..70e6044122 100644 --- a/NetKet/Hilbert/bosons.hpp +++ b/NetKet/Hilbert/bosons.hpp @@ -131,10 +131,10 @@ class Boson : public AbstractHilbert { } } - bool CheckConstraint(const Eigen::VectorXd &v) const { + bool CheckConstraint(Eigen::Ref<const Eigen::VectorXd> v) const { int tot = 0; for (int i = 0; i < v.size(); i++) { - tot += int(v(i)); + tot += std::round(v(i)); } return tot == nbosons_; @@ -150,6 +150,7 @@ class Boson : public AbstractHilbert { v(sf) = newconf[i]; i++; assert(v(sf) <= nmax_); + assert(v(sf) >= 0); } if (constraintN_) { diff --git a/NetKet/Machine/jastrow.hpp b/NetKet/Machine/jastrow.hpp index 31f1cc8db7..f6fd66e380 100644 --- a/NetKet/Machine/jastrow.hpp +++ b/NetKet/Machine/jastrow.hpp @@ -62,7 +62,13 @@ class Jastrow : public AbstractMachine<T> { } void Init() { + if (nv_ < 2) { + throw InvalidInputError( + "Cannot construct Jastrow states with less than two visible units"); + } + W_.resize(nv_, nv_); + W_.setZero(); npar_ = (nv_ * (nv_ - 1)) / 2; @@ -104,10 +110,10 @@ class Jastrow : public AbstractMachine<T> { int k = 0; for (int i = 0; i < nv_; i++) { + W_(i, i) = T(0.); for (int j = i + 1; j < nv_; j++) { W_(i, j) = pars(k); W_(j, i) = W_(i, j); // create the lower triangle - W_(i, i) = T(0); k++; } } diff --git a/NetKet/Machine/jastrow_symm.hpp b/NetKet/Machine/jastrow_symm.hpp index 7e07f6a2c3..11baa33d84 100644 --- a/NetKet/Machine/jastrow_symm.hpp +++ b/NetKet/Machine/jastrow_symm.hpp @@ -75,6 +75,11 @@ class JastrowSymm : public AbstractMachine<T> { } void Init(std::shared_ptr<const AbstractGraph> graph) { + if (nv_ < 2) { + throw InvalidInputError( + "Cannot construct Jastrow states with less than two visible units"); + } + permtable_ = graph->SymmetryTable(); permsize_ = permtable_.size(); @@ -83,6 +88,7 @@ class JastrowSymm : public AbstractMachine<T> { } W_.resize(nv_, nv_); + W_.setZero(); thetas_.resize(nv_); thetasnew_.resize(nv_); diff --git a/Test/Machine/test_machine.py b/Test/Machine/test_machine.py index 1c844d2aa9..20d4ca2a10 100644 --- a/Test/Machine/test_machine.py +++ b/Test/Machine/test_machine.py @@ -99,7 +99,6 @@ def test_log_derivative(): print("Machine test: %s" % name) npar = machine.n_par() - randpars = 0.1 * (np.random.randn(npar) + 1.0j * np.random.randn(npar)) # random visibile state hi = machine.get_hilbert() @@ -109,15 +108,18 @@ def 
test_log_derivative(): for i in range(100): hi.random_vals(v, rg) - grad = (nd.Gradient(log_val_f, step=1.0e-8)) + randpars = 0.1 * (np.random.randn(npar) + + 1.0j * np.random.randn(npar)) machine.set_parameters(randpars) der_log = machine.der_log(v) if("Jastrow" in name): assert(np.max(np.imag(der_log)) == approx(0.)) + grad = (nd.Gradient(log_val_f, step=1.0e-8)) num_der_log = grad(randpars, machine, v) + assert(np.max(np.real(der_log - num_der_log)) == approx(0., rel=1e-4, abs=1e-4)) # The imaginary part is a bit more tricky, there might be an arbitrary phase shift diff --git a/Test/Operator/test_operator.py b/Test/Operator/test_operator.py index a243f5da92..f88477255f 100644 --- a/Test/Operator/test_operator.py +++ b/Test/Operator/test_operator.py @@ -6,19 +6,19 @@ operators = {} # Ising 1D -g_1 = nk.graph.Hypercube(length=20, ndim=1, pbc=True) -hi = nk.hilbert.Spin(s=0.5, graph=g_1) -operators["Ising 1D"] = [nk.operator.Ising(h=1.321, hilbert=hi), hi] +g = nk.graph.Hypercube(length=20, ndim=1, pbc=True) +hi = nk.hilbert.Spin(s=0.5, graph=g) +operators["Ising 1D"] = nk.operator.Ising(h=1.321, hilbert=hi) # Heisenberg 1D -g_2 = nk.graph.Hypercube(length=20, ndim=1, pbc=True) -hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g_1) -operators["Heisenberg 1D"] = [nk.operator.Heisenberg(hilbert=hi), hi] +g = nk.graph.Hypercube(length=20, ndim=1, pbc=True) +hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g) +operators["Heisenberg 1D"] = nk.operator.Heisenberg(hilbert=hi) # Bose Hubbard -g_3 = nk.graph.Hypercube(length=10, ndim=2, pbc=True) -hi = nk.hilbert.Boson(n_max=3, n_bosons=23, graph=g_3) -operators["Bose Hubbard"] = [nk.operator.BoseHubbard(U=4.0, hilbert=hi), hi] +g = nk.graph.Hypercube(length=3, ndim=2, pbc=True) +hi = nk.hilbert.Boson(n_max=3, n_bosons=6, graph=g) +operators["Bose Hubbard"] = nk.operator.BoseHubbard(U=4.0, hilbert=hi) # Graph Hamiltonian # TODO (jamesETsmith) @@ -30,23 +30,26 @@ #sy = np.array([[0,1j],[-1j,0]]) # # operators["Custom"] = +rg = nk.utils.RandomEngine(seed=1234) def test_produce_elements_in_hilbert(): for name, ha in operators.items(): - hi = ha[1] + hi = ha.get_hilbert() print(name, hi) assert (len(hi.local_states()) == hi.local_size()) rstate = np.zeros(hi.size()) - rg = nk.utils.RandomEngine(seed=1234) + local_states = hi.local_states() for i in range(1000): hi.random_vals(rstate, rg) - conns = ha[0].get_conn(rstate) + conns = ha.get_conn(rstate) for connector, newconf in zip(conns[1], conns[2]): - hi.update_conf(rstate, connector, newconf) - for rs in rstate: + rstatet = np.array(rstate) + hi.update_conf(rstatet, connector, newconf) + + for rs in rstatet: assert(rs in local_states) diff --git a/setup.py b/setup.py index c036e75fd2..094a2d65fc 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ def build_extension(self, ext): setup( name='netket', - version='0.1', + version='2.0', author='Giuseppe Carleo et al.', description='NetKet', url='http://github.com/netket/netket',
jupyterhub__jupyterhub-1323
[ { "content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom http.client import responses\n\nfrom jinja2 import TemplateNotFound\nfrom tornado import web, gen\nfrom tornado.httputil import url_concat\n\nfrom .. import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n\n If next argument is passed by single-user server,\n redirect to base_url + single-user page.\n\n If logged in, redirects to:\n\n - single-user server if running\n - hub home, otherwise\n\n Otherwise, renders login page.\n \"\"\"\n def get(self):\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n next_url = ''\n if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')):\n # add /hub/ prefix, to ensure we redirect to the right user's server.\n # The next request will be handled by UserSpawnHandler,\n # ultimately redirecting to the logged-in user's server.\n without_prefix = next_url[len(self.base_url):]\n next_url = url_path_join(self.hub.base_url, without_prefix)\n self.log.warning(\"Redirecting %s to %s. For sharing public links, use /user-redirect/\",\n self.request.uri, next_url,\n )\n self.redirect(next_url)\n return\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.set_login_cookie(user) # set cookie\n else:\n url = url_path_join(self.hub.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n else:\n url = self.settings['login_url']\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n @gen.coroutine\n def get(self):\n user = self.get_current_user()\n if user.running:\n # trigger poll_and_notify event in case of a server that died\n yield user.spawner.poll_and_notify()\n html = self.render_template('home.html',\n user=user,\n url=user.url,\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n\n GET renders the form, POST handles form submission.\n\n Only enabled when Spawner.options_form is defined.\n \"\"\"\n def _render_form(self, message=''):\n user = self.get_current_user()\n return self.render_template('spawn.html',\n user=user,\n spawner_options_form=user.spawner.options_form,\n error_message=message,\n url=self.request.uri,\n )\n\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n self.finish(self._render_form())\n else:\n # not running, no form. 
Trigger spawn.\n self.redirect(user.url)\n\n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n for key, byte_list in self.request.files.items():\n form_options[\"%s_file\"%key] = byte_list\n try:\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n except Exception as e:\n self.log.error(\"Failed to spawn single-user server with form\", exc_info=True)\n self.finish(self._render_form(str(e)))\n return\n self.set_login_cookie(user)\n url = user.url\n\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n elif next_url:\n url = next_url\n\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n\n for bad in set(sorts).difference(available):\n self.log.warning(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warning(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n\n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n\n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n\n users = self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n\n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\nclass TokenPageHandler(BaseHandler):\n \"\"\"Handler for page requesting new API tokens\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('token.html')\n self.finish(html)\n\n\nclass ProxyErrorHandler(BaseHandler):\n \"\"\"Handler for rendering proxy error pages\"\"\"\n \n def get(self, status_code_s):\n status_code = int(status_code_s)\n status_message = responses.get(status_code, 'Unknown HTTP Error')\n # build template namespace\n \n hub_home = url_path_join(self.hub.base_url, 'home')\n message_html = ''\n if status_code == 503:\n message_html = ' '.join([\n \"Your server appears to be down.\",\n \"Try restarting it <a href='%s'>from the hub</a>\" % hub_home\n ])\n ns = dict(\n status_code=status_code,\n status_message=status_message,\n message_html=message_html,\n logo_url=hub_home,\n )\n\n 
self.set_header('Content-Type', 'text/html')\n # render the template\n try:\n html = self.render_template('%s.html' % status_code, **ns)\n except TemplateNotFound:\n self.log.debug(\"No template for %d\", status_code)\n html = self.render_template('error.html', **ns)\n\n self.write(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n (r'/token', TokenPageHandler),\n (r'/error/(\\d+)', ProxyErrorHandler),\n]\n", "path": "jupyterhub/handlers/pages.py" } ]
[ { "content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom http.client import responses\n\nfrom jinja2 import TemplateNotFound\nfrom tornado import web, gen\nfrom tornado.httputil import url_concat\n\nfrom .. import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n\n If next argument is passed by single-user server,\n redirect to base_url + single-user page.\n\n If logged in, redirects to:\n\n - single-user server if running\n - hub home, otherwise\n\n Otherwise, renders login page.\n \"\"\"\n def get(self):\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n next_url = ''\n if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')):\n # add /hub/ prefix, to ensure we redirect to the right user's server.\n # The next request will be handled by UserSpawnHandler,\n # ultimately redirecting to the logged-in user's server.\n without_prefix = next_url[len(self.base_url):]\n next_url = url_path_join(self.hub.base_url, without_prefix)\n self.log.warning(\"Redirecting %s to %s. For sharing public links, use /user-redirect/\",\n self.request.uri, next_url,\n )\n self.redirect(next_url)\n return\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.set_login_cookie(user) # set cookie\n else:\n url = url_path_join(self.hub.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n else:\n url = self.settings['login_url']\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n @gen.coroutine\n def get(self):\n user = self.get_current_user()\n if user.running:\n # trigger poll_and_notify event in case of a server that died\n yield user.spawner.poll_and_notify()\n html = self.render_template('home.html',\n user=user,\n url=user.url,\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n\n GET renders the form, POST handles form submission.\n\n Only enabled when Spawner.options_form is defined.\n \"\"\"\n def _render_form(self, message=''):\n user = self.get_current_user()\n return self.render_template('spawn.html',\n user=user,\n spawner_options_form=user.spawner.options_form,\n error_message=message,\n url=self.request.uri,\n )\n\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n self.finish(self._render_form())\n else:\n # not running, no form. 
Trigger spawn.\n self.redirect(user.url)\n\n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n for key, byte_list in self.request.files.items():\n form_options[\"%s_file\"%key] = byte_list\n try:\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n except Exception as e:\n self.log.error(\"Failed to spawn single-user server with form\", exc_info=True)\n self.finish(self._render_form(str(e)))\n return\n self.set_login_cookie(user)\n url = user.url\n\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n elif next_url:\n url = next_url\n\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n\n for bad in set(sorts).difference(available):\n self.log.warning(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warning(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n\n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n\n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n\n users = self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n\n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\nclass TokenPageHandler(BaseHandler):\n \"\"\"Handler for page requesting new API tokens\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('token.html')\n self.finish(html)\n\n\nclass ProxyErrorHandler(BaseHandler):\n \"\"\"Handler for rendering proxy error pages\"\"\"\n \n def get(self, status_code_s):\n status_code = int(status_code_s)\n status_message = responses.get(status_code, 'Unknown HTTP Error')\n # build template namespace\n \n hub_home = url_path_join(self.hub.base_url, 'home')\n message_html = ''\n if status_code == 503:\n message_html = ' '.join([\n \"Your server appears to be down.\",\n \"Try restarting it <a href='%s'>from the hub</a>\" % hub_home\n ])\n ns = dict(\n status_code=status_code,\n status_message=status_message,\n message_html=message_html,\n logo_url=hub_home,\n )\n\n 
self.set_header('Content-Type', 'text/html')\n # render the template\n try:\n html = self.render_template('%s.html' % status_code, **ns)\n except TemplateNotFound:\n self.log.debug(\"No template for %d\", status_code)\n html = self.render_template('error.html', **ns)\n\n self.write(html)\n\n\ndefault_handlers = [\n (r'/?', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n (r'/token', TokenPageHandler),\n (r'/error/(\\d+)', ProxyErrorHandler),\n]\n", "path": "jupyterhub/handlers/pages.py" } ]
diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py index 7c7126774f..63dd661a95 100644 --- a/jupyterhub/handlers/pages.py +++ b/jupyterhub/handlers/pages.py @@ -237,7 +237,7 @@ def get(self, status_code_s): default_handlers = [ - (r'/', RootHandler), + (r'/?', RootHandler), (r'/home', HomeHandler), (r'/admin', AdminHandler), (r'/spawn', SpawnHandler),
cupy__cupy-1239
[ { "content": "import functools\nimport six\nfrom six.moves import builtins\nimport string\nimport threading\nimport warnings\n\nimport numpy\n\nfrom cupy.core import core\nfrom cupy import creation\nfrom cupy import logic\nfrom cupy import math\nfrom cupy import sorting\nfrom cupy import statistics\n\n\n_thread_local = threading.local()\n\n\nclass FusionOp(object):\n\n def __init__(self, name, operation, param_names,\n nin, nout, in_vars, out_vars, types, num):\n self.name = name\n self.operation = operation\n self.param_names = param_names\n self.nin = nin\n self.nout = nout\n self.in_vars = in_vars\n self.out_vars = out_vars\n self.types = types\n self.num = num\n\n def __repr__(self):\n return \"<FusionOp, name={}, types=[{}]>\".format(\n self.name, ', '.join(_.name for _ in self.types))\n\n\nclass _FusionVar(object):\n\n def __init__(self, num, ty, const=None):\n self.num = num\n self.ty = ty\n self.const = const\n\n def __repr__(self):\n return \"<_FusionVar, num={}, ty={}, const={}>\".format(\n self.num, self.ty, self.const)\n\n\nclass _FusionMem(object):\n\n def __init__(self, var_list):\n self.op_list = []\n self.var_list = var_list[:]\n\n def __repr__(self):\n return \"<_FusionMem, op_list={}, var_list={}>\".format(\n self.op_list,\n self.var_list)\n\n def get_fresh(self, ty, **kwargs):\n n = len(self.var_list)\n ret = _FusionVar(n, ty, **kwargs)\n self.var_list.append(ret)\n return ret\n\n def set_op(self, name, operation, param_names,\n nin, nout, in_vars, out_vars, types):\n num = len(self.op_list)\n op = FusionOp(name, operation, param_names,\n nin, nout, in_vars, out_vars, types, num)\n self.op_list.append(op)\n\n\nclass _FusionRef(object):\n\n def __init__(self, var, mem):\n self._var = var\n self.dtype = var.ty\n self._mem = mem\n\n def __repr__(self):\n return \"<_FusionRef, dtype=%s>\" % self.dtype\n\n def __neg__(self):\n return negative(self)\n\n def __add__(self, other):\n return add(self, other)\n\n def __iadd__(self, other):\n return add(self, other, self)\n\n def __radd__(self, other):\n return add(other, self)\n\n def __sub__(self, other):\n return subtract(self, other)\n\n def __isub__(self, other):\n return subtract(self, other, self)\n\n def __rsub__(self, other):\n return subtract(other, self)\n\n def __mul__(self, other):\n return multiply(self, other)\n\n def __imul__(self, other):\n return multiply(self, other, self)\n\n def __rmul__(self, other):\n return multiply(other, self)\n\n def __div__(self, other):\n return divide(self, other)\n\n def __idiv__(self, other):\n return divide(self, other, self)\n\n def __rdiv__(self, other):\n return divide(other, self)\n\n def __truediv__(self, other):\n return true_divide(self, other)\n\n def __itruediv__(self, other):\n return true_divide(self, other, self)\n\n def __rtruediv__(self, other):\n return true_divide(other, self)\n\n def __floordiv__(self, other):\n return floor_divide(self, other)\n\n def __ifloordiv__(self, other):\n return floor_divide(self, other, self)\n\n def __rfloordiv__(self, other):\n return floor_divide(other, self)\n\n def __mod__(self, other):\n return remainder(self, other)\n\n def __imod__(self, other):\n return remainder(self, other, self)\n\n def __rmod__(self, other):\n return remainder(other, self)\n\n def __pow__(x, y):\n return power(x, y)\n\n def __ipow__(self, other):\n return power(self, other, self)\n\n def __lshift__(self, other):\n return left_shift(self, other)\n\n def __ilshift__(self, other):\n return left_shift(self, other, self)\n\n def __rlshift__(self, other):\n return 
left_shift(other, self)\n\n def __rshift__(self, other):\n return right_shift(self, other)\n\n def __irshift__(self, other):\n return right_shift(self, other, self)\n\n def __rrshift__(self, other):\n return right_shift(other, self)\n\n def __and__(self, other):\n return bitwise_and(self, other)\n\n def __iand__(self, other):\n return bitwise_and(self, other, self)\n\n def __rand__(self, other):\n return bitwise_and(other, self)\n\n def __or__(self, other):\n return bitwise_or(self, other)\n\n def __ior__(self, other):\n return bitwise_or(self, other, self)\n\n def __ror__(self, other):\n return bitwise_or(other, self)\n\n def __xor__(self, other):\n return bitwise_xor(self, other)\n\n def __ixor__(self, other):\n return bitwise_xor(self, other, self)\n\n def __rxor__(self, other):\n return bitwise_xor(other, self)\n\n def __invert__(self):\n return invert(self)\n\n def __lt__(self, other):\n return less(self, other)\n\n def __le__(self, other):\n return less_equal(self, other)\n\n def __eq__(self, other):\n return equal(self, other)\n\n def __ne__(self, other):\n return not_equal(self, other)\n\n def __gt__(self, other):\n return greater(self, other)\n\n def __ge__(self, other):\n return greater_equal(self, other)\n\n def __nonzero__(self):\n raise Exception(\"Can't cast to bool\")\n\n def __bool__(self):\n raise Exception(\"Can't cast to bool\")\n\n def __setitem__(self, slices, value):\n if slices is Ellipsis or (isinstance(slices, slice) and\n slices == slice(None)):\n copy(value, self)\n else:\n raise ValueError('The fusion supports `[...]` or `[:]`.')\n\n def copy(self):\n return copy(self)\n\n\n_kind_score = {\n 'b': 0,\n 'u': 1,\n 'i': 1,\n 'f': 2,\n 'c': 3,\n}\n\n_dtype_to_ctype = {\n numpy.dtype('float64'): 'double',\n numpy.dtype('float32'): 'float',\n numpy.dtype('float16'): 'float16',\n numpy.dtype('complex128'): 'complex<double>',\n numpy.dtype('complex64'): 'complex<float>',\n numpy.dtype('int64'): 'long long',\n numpy.dtype('int32'): 'int',\n numpy.dtype('int16'): 'short',\n numpy.dtype('int8'): 'signed char',\n numpy.dtype('uint64'): 'unsigned long long',\n numpy.dtype('uint32'): 'unsigned int',\n numpy.dtype('uint16'): 'unsigned short',\n numpy.dtype('uint8'): 'unsigned char',\n numpy.dtype('bool'): 'bool',\n}\n\n_dtype_list = [numpy.dtype(_) for _ in '?bhilqBHILQefdFD']\n\n\ndef _normalize_arg(arg, mem):\n arg_type = type(arg)\n if arg_type is _FusionRef:\n return arg._var\n is_scalar = arg_type in six.integer_types + (float, bool, complex)\n is_ndarray = hasattr(arg, 'dtype') and arg.dtype in _dtype_list\n if is_scalar or is_ndarray:\n return mem.get_fresh(numpy.dtype(arg_type), const=arg)\n raise Exception('Unsupported type %s' % arg_type)\n\n\ndef _convert(f):\n if type(f) is core.ufunc:\n return _convert_from_ufunc(f)\n if type(f) is core.ElementwiseKernel:\n return _convert_from_elementwise(f)\n raise Exception(\"Can't convert from %s to FusionOp\" % type(f))\n\n\ndef _should_use_min_scalar(in_args):\n max_array_kind = -2\n max_scalar_kind = -1\n for i in in_args:\n kind = _kind_score[i.ty.kind]\n if i.const is None:\n max_array_kind = max(max_array_kind, kind)\n else:\n max_scalar_kind = max(max_scalar_kind, kind)\n return (max_scalar_kind != -1 and\n max_array_kind >= max_scalar_kind)\n\n\ndef _convert_from_ufunc(ufunc):\n nin = ufunc.nin\n nout = ufunc.nout\n\n def get_mem(args):\n for i in args:\n if type(i) == _FusionRef:\n return i._mem\n raise Exception('number of ndarray arguments must be more than 0')\n\n def can_cast1(args, ty_ins):\n for i in 
six.moves.range(nin):\n if args[i].const is None:\n if not numpy.can_cast(args[i].ty, ty_ins[i]):\n return False\n else:\n if not numpy.can_cast(args[i].const, ty_ins[i]):\n return False\n return True\n\n def can_cast2(args, ty_ins):\n for i in six.moves.range(nin):\n if not numpy.can_cast(args[i].ty, ty_ins[i]):\n return False\n return True\n\n def res(*args, **kwargs):\n mem = get_mem(args)\n var_list = [_normalize_arg(_, mem) for _ in args]\n if 'out' in kwargs:\n var_list.append(_normalize_arg(kwargs.pop('out'), mem))\n if kwargs:\n raise TypeError('Wrong arguments %s' % kwargs)\n assert nin <= len(var_list) <= nin + nout\n in_vars = var_list[:nin]\n out_vars = var_list[nin:]\n can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2\n for ty_ins, ty_outs, op in ufunc._ops:\n ty_ins = [numpy.dtype(_) for _ in ty_ins]\n ty_outs = [numpy.dtype(_) for _ in ty_outs]\n if can_cast(in_vars, ty_ins):\n param_names = (['in%d' % i for i in six.moves.range(nin)] +\n ['out%d' % i for i in six.moves.range(nout)])\n ret = []\n for i in six.moves.range(nout):\n if i >= len(out_vars):\n v = mem.get_fresh(ty_outs[i])\n out_vars.append(v)\n ret.append(_FusionRef(v, mem))\n elif numpy.can_cast(ty_outs[i], out_vars[i].ty,\n \"same_kind\"):\n v = out_vars[i]\n ret.append(_FusionRef(v, mem))\n else:\n raise TypeError(\n 'output (typecode \\'{}\\') could not be coerced '\n 'to provided output parameter (typecode \\'{}\\') '\n 'according to the casting rule '\n '\"same_kind\"'.format(\n ty_outs[i].char, out_vars[i].ty.char))\n mem.set_op(ufunc.name, op, param_names, nin, nout,\n in_vars, out_vars, ty_ins + ty_outs)\n return ret[0] if len(ret) == 1 else tuple(ret)\n raise TypeError('Invalid type cast in \\'{}\\': {} -> {}'.format(\n ufunc.name,\n [_.ty for _ in in_vars],\n [_.ty for _ in out_vars]))\n return res\n\n\ndef _convert_from_elementwise(elem):\n raise Exception('Not Impletmented')\n\n\ndef _gather_submodules(ops):\n return {(op.name, tuple(op.types)): op for op in ops}\n\n\ndef _get_params(var_list):\n return ['%s v%d' % (var.ty, var.num) for var in var_list]\n\n\ndef _get_out_params(var_list):\n return ['%s ret%d' % (var.ty, i) for i, var in enumerate(var_list)]\n\n\ndef _get_declaration_from_var(var):\n if var.const is None:\n return '%s v%d;\\n' % (_dtype_to_ctype[var.ty], var.num)\n\n c = var.const\n val = numpy.asscalar(c) if hasattr(c, 'dtype') else c\n\n if isinstance(val, bool):\n init = '= %s' % str(c).lower()\n elif isinstance(val, complex):\n init = '(%s, %s)' % (c.real, c.imag)\n elif isinstance(val, six.integer_types + (float,)):\n init = '= %s' % str(c)\n else:\n raise TypeError('Invalid constant type: {}'.format(type(c)))\n return 'const %s v%d %s;\\n' % (_dtype_to_ctype[var.ty], var.num, init)\n\n\ndef _get_declaration_from_op(op):\n return ''.join('%s v%d_%d;\\n' % (_dtype_to_ctype[t], op.num, j)\n for j, t in enumerate(op.types))\n\n\ndef _get_operation_code(op):\n code = ''.join('v%d_%d = v%d;\\n' % (op.num, i, v.num)\n for i, v in enumerate(op.in_vars))\n params = ['v%d_%d' % (op.num, i)\n for i in six.moves.range(op.nin + op.nout)]\n code += op.name + '(' + ', '.join(params) + ');\\n'\n code += ''.join('v%d = v%d_%d;\\n' %\n (v.num, op.num, i + op.nin)\n for i, v in enumerate(op.out_vars))\n return code\n\n\ndef _get_submodule_code(op):\n parameters = ', '.join('%s &%s' % (_dtype_to_ctype[t], name)\n for i, (name, t)\n in enumerate(zip(op.param_names, op.types)))\n typedecl = ''.join(('typedef %s in%d_type;\\n' % (_dtype_to_ctype[t], i))\n for i, t in 
enumerate(op.types[:op.nin]))\n typedecl += ''.join(('typedef %s out%d_type;\\n' % (_dtype_to_ctype[t], i))\n for i, t in enumerate(op.types[op.nin:]))\n module_code = string.Template('''\n __device__ void ${name}(${parameters}) {\n ${typedecl}\n ${operation};\n }\n ''').substitute(\n name=op.name,\n parameters=parameters,\n operation=op.operation,\n typedecl=typedecl)\n return module_code + '\\n'\n\n\ndef _get_pre_code(in_vars, out_vars, operation):\n in_params = ', '.join('%s v%s' % (_dtype_to_ctype[v.ty], v.num)\n for v in in_vars)\n out_params = ''.join('%s v%s;\\n' % (_dtype_to_ctype[v.ty], v.num)\n for v in out_vars)\n module_code = string.Template('''\n __device__ ${return_type} _pre_map(${in_params}) {\n ${out_params}\n ${operation};\n return ${return_var};\n }\n ''').substitute(\n return_type=_dtype_to_ctype[out_vars[0].ty],\n in_params=in_params,\n out_params=out_params,\n operation=operation,\n return_var='v%d' % out_vars[0].num)\n return module_code\n\n\ndef _get_reduce_op(ops, dtype):\n for i in ops._ops:\n if numpy.can_cast(dtype.type, i[0][0]):\n return i\n raise TypeError(\"Type is mismatched. %s(...), %s\" % (ops.name, dtype.type))\n\n\ndef _get_post_code(post_vars, operation, post_out):\n module_code = string.Template('''\n __device__ ${return_type} _post_map(${arg_type} v0) {\n ${operation};\n return v${return_var};\n }\n ''').substitute(\n arg_type=_dtype_to_ctype[post_vars[0].ty],\n return_type=_dtype_to_ctype[post_vars[post_out.num].ty],\n operation=operation,\n return_var=post_out.num)\n return module_code\n\n\ndef _get_fix_code(data_type, fixed_type, operation):\n module_code = string.Template('''\n __device__ ${fixed_type} _post_fix(${data_type} a) {\n ${fixed_type} out0;\n ${operation};\n return out0;\n }\n ''').substitute(\n data_type=data_type,\n fixed_type=_dtype_to_ctype[fixed_type],\n operation=operation)\n return module_code\n\n\ndef _get_fusion(func, nin, reduce, post_map, identity, input_types, name):\n in_vars = [_FusionVar(i, t) for i, t in enumerate(input_types)]\n mem = _FusionMem(in_vars)\n in_refs = [_FusionRef(_, mem) for _ in in_vars]\n out_refs = func(*in_refs)\n out_refs = list(out_refs) if type(out_refs) == tuple else [out_refs]\n out_refs = [_ for _ in out_refs if _ is not None]\n out_refs = [_FusionRef(_normalize_arg(_, mem), mem) for _ in out_refs]\n out_vars = [_normalize_arg(copy(_), mem) for _ in out_refs]\n nout = len(out_vars)\n op_list = mem.op_list\n tmpvars = mem.var_list[len(in_vars):]\n if nout > 0:\n tmpvars = tmpvars[:-nout]\n\n in_params = ', '.join(_get_params(in_vars[:nin]))\n out_params = ', '.join(_get_params(out_vars))\n operation = ''.join(_get_declaration_from_var(_) for _ in tmpvars)\n operation += ''.join(_get_declaration_from_op(_) for _ in op_list)\n operation += '\\n'.join(_get_operation_code(_) for _ in op_list)\n\n if reduce is None:\n if not out_params:\n in_params = ', '.join(_get_params(in_vars[:-1]))\n out_params = ', '.join(_get_params([in_vars[-1]]))\n submodules = _gather_submodules(op_list)\n submodule_code = ''.join(_get_submodule_code(_)\n for _ in submodules.values())\n return core.ElementwiseKernel(in_params, out_params,\n operation, preamble=submodule_code,\n name=name)\n else:\n if nout != 1:\n raise Exception(\"Wrong number of number of arguments\")\n # pre-map\n pre_type = out_vars[0].ty\n pre_code = _get_pre_code(in_vars, out_vars, operation)\n\n # reduce\n reduce_op = _get_reduce_op(reduce._raw, pre_type)\n reduce_code = reduce_op[2][1]\n reduce_type = numpy.dtype(reduce_op[1][0])\n rtype = 
reduce_op[2][3]\n post_type = \"type_in0_raw\" if rtype is None else rtype\n pre_code += \"typedef %s type_in0_raw;\\n\" % _dtype_to_ctype[reduce_type]\n\n # post-map\n post_in = [_FusionVar(0, reduce_type)]\n mem = _FusionMem(post_in)\n post_in_ref = [_FusionRef(_, mem) for _ in post_in]\n post_out = _normalize_arg(post_map(*post_in_ref), mem)\n if type(post_out) == tuple:\n raise Exception(\"Can't reduce a tuple\")\n post_vars = mem.var_list\n post_ops = mem.op_list\n post_code = ''.join(_get_declaration_from_var(_)\n for _ in post_vars[1:])\n post_code += ''.join(_get_declaration_from_op(_) for _ in post_ops)\n post_code += '\\n'.join(_get_operation_code(_) for _ in post_ops)\n post_code = _get_post_code(post_vars, post_code, post_out)\n post_code += (\n \"typedef %s type_out0_raw;\\n\" % _dtype_to_ctype[reduce_type])\n post_code += _get_fix_code(post_type, reduce_type, reduce_op[2][2])\n\n submodules = _gather_submodules(op_list + post_ops)\n submodule_code = ''.join(_get_submodule_code(v)\n for v in submodules.values())\n submodule_code += reduce._raw._preamble + pre_code + post_code\n operation_args = ['v' + str(i) for i in six.moves.range(nin)]\n operation = '_pre_map(' + ', '.join(operation_args) + ')'\n out_params = '%s res' % post_out.ty\n return core.ReductionKernel(in_params, out_params, operation,\n reduce_code,\n 'res = _post_map(_post_fix(a))',\n identity,\n name=name,\n reduce_type=post_type,\n preamble=submodule_code)\n\n\nclass Fusion(object):\n\n \"\"\"Function class.\n\n This class can be get by using `fuse` function and\n works like `ElementwiseKernel` or `ReductionKernel`.\n\n Attributes:\n func (function): The function before fusing.\n name (str): The name of the function.\n reduce (ufunc): Reduction ufunc.\n post_map (function): Mapping function for reduced values.\n \"\"\"\n\n def __init__(self, func, input_num, reduce, post_map, name=None):\n self.func = func\n self.name = name or func.__name__\n self.input_num = input_num\n self.reduce = reduce\n self.post_map = post_map\n self.identity = None if reduce is None else self.reduce._raw.identity\n self._memo = {}\n\n def __repr__(self):\n return \"<Fusion '%s'>\" % self.name\n\n def __call__(self, *args, **kwargs):\n _thread_local.in_fusion = True\n try:\n return self._call(*args, **kwargs)\n finally:\n _thread_local.in_fusion = False\n\n def _call(self, *args, **kwargs):\n axis = kwargs['axis'] if 'axis' in kwargs else None\n if len(args) == 0:\n raise Exception('number of arguments must be more than 0')\n if builtins.any(\n not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))\n for _ in args):\n raise TypeError('Invalid argument type for \\'{}\\': ({})'.format(\n self.name,\n ', '.join(repr(type(_)) for _ in args)))\n\n def is_cupy_data(a):\n return isinstance(a, (core.ndarray, numpy.generic))\n if builtins.all(is_cupy_data(_) for _ in args):\n types = [_.dtype for _ in args]\n key = tuple(types)\n if key not in self._memo:\n if self.input_num is not None:\n nin = self.input_num\n else:\n nin = len(args)\n f = _get_fusion(self.func, nin, self.reduce,\n self.post_map, self.identity, types, self.name)\n self._memo[key] = f\n f = self._memo[key]\n if self.reduce is None:\n return f(*args)\n else:\n return f(*args, axis=axis)\n else:\n if builtins.any(type(_) is core.ndarray for _ in args):\n types = '.'.join(repr(type(_)) for _ in args)\n message = \"Can't fuse \\n %s(%s)\" % (self.name, types)\n warnings.warn(message)\n if self.reduce is None:\n return self.func(*args)\n elif axis is None:\n return 
self.post_map(self.reduce(self.func(*args)))\n else:\n return self.post_map(self.reduce(self.func(*args), axis=axis))\n\n\ndef fuse(*args, **kwargs):\n \"\"\"Function fusing decorator.\n\n This decorator can be used to define an elementwise or reduction kernel\n more easily than `ElementwiseKernel` class or `ReductionKernel` class.\n\n This decorator makes `Fusion` class from the given function.\n\n Args:\n input_num (int): Number of input arguments of the given function.\n reduce (function): The reduce function which is applied after\n pre-mapping step. If not assigned, reduction step is skipped.\n post_map (function): Mapping function for reduced values.\n If not assigned, post_map step is skipped.\n kernel_name (str): Name of the fused kernel function.\n If omitted, the name of the decorated function is used.\n\n .. note::\n This API is currently experimental and the interface may be changed in\n the future version.\n\n \"\"\"\n\n def wrapper(\n f, input_num=None, reduce=None, post_map=lambda x: x,\n kernel_name=None):\n return Fusion(f, input_num, reduce, post_map, kernel_name)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return functools.update_wrapper(wrapper(args[0]), args[0])\n else:\n return lambda f: functools.update_wrapper(\n wrapper(f, *args, **kwargs), f)\n\n\nclass ufunc(core.ufunc):\n\n def __init__(self, fusion_op, cupy_op, numpy_op):\n self.name = fusion_op.name\n self.nin = fusion_op.nin\n self.nout = fusion_op.nout\n self.nargs = fusion_op.nargs\n self._ops = fusion_op._ops\n self._preamble = fusion_op._preamble\n self.__doc__ = cupy_op.__doc__\n self._params = fusion_op._params\n self._routine_cache = fusion_op._routine_cache\n\n self._fusion_op = fusion_op\n self._cupy_op = cupy_op\n self._numpy_op = numpy_op\n\n def __repr__(self):\n return repr(self._cupy_op)\n\n def __call__(self, *args, **kwargs):\n in_fusion = getattr(_thread_local, 'in_fusion', False)\n if in_fusion:\n if builtins.any(isinstance(_, _FusionRef) for _ in args):\n return _convert(self._fusion_op)(*args, **kwargs)\n elif builtins.any(isinstance(_, numpy.ndarray) for _ in args):\n return self._numpy_op(*args, **kwargs)\n\n return self._cupy_op(*args, **kwargs)\n\n __doc__ = core.ufunc.__doc__\n __call__.__doc__ = core.ufunc.__call__.__doc__\n\n\ndef _create_ufunc(cupy_ufunc, numpy_ufunc):\n return ufunc(cupy_ufunc, cupy_ufunc, numpy_ufunc)\n\n\nwhere = ufunc(sorting.search._where_ufunc,\n sorting.search.where, numpy.where)\n\nclip = ufunc(core._clip, math.misc.clip, numpy.clip)\n\ncopy = ufunc(core.elementwise_copy,\n creation.from_data.copy, numpy.copy)\n\nbitwise_and = _create_ufunc(core.bitwise_and, numpy.bitwise_and)\nbitwise_or = _create_ufunc(core.bitwise_or, numpy.bitwise_or)\nbitwise_xor = _create_ufunc(core.bitwise_xor, numpy.bitwise_xor)\ninvert = _create_ufunc(core.invert, numpy.invert)\nleft_shift = _create_ufunc(core.left_shift, numpy.left_shift)\nright_shift = _create_ufunc(core.right_shift, numpy.right_shift)\n\ngreater = _create_ufunc(core.greater, numpy.greater)\ngreater_equal = _create_ufunc(core.greater_equal, numpy.greater_equal)\nless = _create_ufunc(core.less, numpy.less)\nless_equal = _create_ufunc(core.less_equal, numpy.less_equal)\nequal = _create_ufunc(core.equal, numpy.equal)\nnot_equal = _create_ufunc(core.not_equal, numpy.not_equal)\n\nisfinite = _create_ufunc(logic.content.isfinite, numpy.isfinite)\nisinf = _create_ufunc(logic.content.isinf, numpy.isinf)\nisnan = _create_ufunc(logic.content.isnan, numpy.isnan)\n\nlogical_and = 
_create_ufunc(logic.ops.logical_and, numpy.logical_and)\nlogical_or = _create_ufunc(logic.ops.logical_or, numpy.logical_or)\nlogical_not = _create_ufunc(logic.ops.logical_not, numpy.logical_not)\nlogical_xor = _create_ufunc(logic.ops.logical_xor, numpy.logical_xor)\n\nsin = _create_ufunc(math.trigonometric.sin, numpy.sin)\ncos = _create_ufunc(math.trigonometric.cos, numpy.cos)\ntan = _create_ufunc(math.trigonometric.tan, numpy.tan)\narcsin = _create_ufunc(math.trigonometric.arcsin, numpy.arcsin)\narccos = _create_ufunc(math.trigonometric.arccos, numpy.arccos)\narctan = _create_ufunc(math.trigonometric.arctan, numpy.arctan)\narctan2 = _create_ufunc(math.trigonometric.arctan2, numpy.arctan2)\nhypot = _create_ufunc(math.trigonometric.hypot, numpy.hypot)\ndeg2rad = _create_ufunc(math.trigonometric.deg2rad, numpy.deg2rad)\nrad2deg = _create_ufunc(math.trigonometric.rad2deg, numpy.rad2deg)\ndegrees = _create_ufunc(math.trigonometric.degrees, numpy.degrees)\nradians = _create_ufunc(math.trigonometric.radians, numpy.radians)\n\nsinh = _create_ufunc(math.hyperbolic.sinh, numpy.sinh)\ncosh = _create_ufunc(math.hyperbolic.cosh, numpy.cosh)\ntanh = _create_ufunc(math.hyperbolic.tanh, numpy.tanh)\narcsinh = _create_ufunc(math.hyperbolic.arcsinh, numpy.arcsinh)\narccosh = _create_ufunc(math.hyperbolic.arccosh, numpy.arccosh)\narctanh = _create_ufunc(math.hyperbolic.arctanh, numpy.arctanh)\n\nrint = _create_ufunc(math.rounding.rint, numpy.rint)\nfloor = _create_ufunc(math.rounding.floor, numpy.floor)\nceil = _create_ufunc(math.rounding.ceil, numpy.ceil)\ntrunc = _create_ufunc(math.rounding.trunc, numpy.trunc)\nfix = _create_ufunc(math.rounding.fix, numpy.fix)\n\nexp = _create_ufunc(math.explog.exp, numpy.exp)\nexpm1 = _create_ufunc(math.explog.expm1, numpy.expm1)\nexp2 = _create_ufunc(math.explog.exp2, numpy.exp2)\nlog = _create_ufunc(math.explog.log, numpy.log)\nlog10 = _create_ufunc(math.explog.log10, numpy.log10)\nlog2 = _create_ufunc(math.explog.log2, numpy.log2)\nlog1p = _create_ufunc(math.explog.log1p, numpy.log1p)\nlogaddexp = _create_ufunc(math.explog.logaddexp, numpy.logaddexp)\nlogaddexp2 = _create_ufunc(math.explog.logaddexp2, numpy.logaddexp2)\n\nsignbit = _create_ufunc(math.floating.signbit, numpy.signbit)\ncopysign = _create_ufunc(math.floating.copysign, numpy.copysign)\nldexp = _create_ufunc(math.floating.ldexp, numpy.ldexp)\nfrexp = _create_ufunc(math.floating.frexp, numpy.frexp)\nnextafter = _create_ufunc(math.floating.nextafter, numpy.nextafter)\n\nadd = _create_ufunc(math.arithmetic.add, numpy.add)\nreciprocal = _create_ufunc(math.arithmetic.reciprocal, numpy.reciprocal)\nnegative = _create_ufunc(math.arithmetic.negative, numpy.negative)\nangle = _create_ufunc(math.arithmetic.angle, numpy.angle)\nconj = _create_ufunc(math.arithmetic.conj, numpy.conj)\nreal = _create_ufunc(math.arithmetic.real, numpy.real)\nimag = _create_ufunc(math.arithmetic.imag, numpy.imag)\nmultiply = _create_ufunc(math.arithmetic.multiply, numpy.multiply)\ndivide = _create_ufunc(math.arithmetic.divide, numpy.divide)\npower = _create_ufunc(math.arithmetic.power, numpy.power)\nsubtract = _create_ufunc(math.arithmetic.subtract, numpy.subtract)\ntrue_divide = _create_ufunc(math.arithmetic.true_divide, numpy.true_divide)\nfloor_divide = _create_ufunc(math.arithmetic.floor_divide, numpy.floor_divide)\nfmod = _create_ufunc(math.arithmetic.fmod, numpy.fmod)\nmod = _create_ufunc(math.arithmetic.remainder, numpy.mod)\nmodf = _create_ufunc(math.arithmetic.modf, numpy.modf)\nremainder = 
_create_ufunc(math.arithmetic.remainder, numpy.remainder)\n\nsqrt = _create_ufunc(math.misc.sqrt, numpy.sqrt)\nsqrt_fixed = _create_ufunc(math.misc.sqrt_fixed, numpy.sqrt)\nsquare = _create_ufunc(math.misc.square, numpy.square)\nabsolute = _create_ufunc(math.misc.absolute, numpy.absolute)\nabs = _create_ufunc(math.misc.absolute, numpy.abs)\nsign = _create_ufunc(math.misc.sign, numpy.sign)\nmaximum = _create_ufunc(math.misc.maximum, numpy.maximum)\nminimum = _create_ufunc(math.misc.minimum, numpy.minimum)\nfmax = _create_ufunc(math.misc.fmax, numpy.fmax)\nfmin = _create_ufunc(math.misc.fmin, numpy.fmin)\n\n\nclass reduction(object):\n\n def __init__(self, cupy_op, numpy_op):\n self._cupy_op = cupy_op\n self._numpy_op = numpy_op\n self.__doc__ = cupy_op.__doc__\n\n def __call__(self, *args, **kwargs):\n if builtins.any(type(_) == numpy.ndarray for _ in args):\n return self._numpy_op(*args, **kwargs)\n else:\n return self._cupy_op(*args, **kwargs)\n\n\nall = reduction(logic.truth.all, numpy.all)\nany = reduction(logic.truth.any, numpy.any)\nsum = reduction(math.sumprod.sum, numpy.sum)\nprod = reduction(math.sumprod.prod, numpy.prod)\namax = reduction(statistics.order.amax, numpy.amax)\namin = reduction(statistics.order.amin, numpy.amin)\n\n\nall._raw = core._all\nany._raw = core._any\nsum._raw = core._sum\nprod._raw = core._prod\namax._raw = core._amax\namin._raw = core._amin\n", "path": "cupy/core/fusion.py" } ]
[ { "content": "import functools\nimport six\nfrom six.moves import builtins\nimport string\nimport threading\nimport warnings\n\nimport numpy\n\nfrom cupy.core import core\nfrom cupy import creation\nfrom cupy import logic\nfrom cupy import math\nfrom cupy import sorting\nfrom cupy import statistics\n\n\n_thread_local = threading.local()\n\n\nclass FusionOp(object):\n\n def __init__(self, name, operation, param_names,\n nin, nout, in_vars, out_vars, types, num):\n self.name = name\n self.operation = operation\n self.param_names = param_names\n self.nin = nin\n self.nout = nout\n self.in_vars = in_vars\n self.out_vars = out_vars\n self.types = types\n self.num = num\n\n def __repr__(self):\n return \"<FusionOp, name={}, types=[{}]>\".format(\n self.name, ', '.join(_.name for _ in self.types))\n\n\nclass _FusionVar(object):\n\n def __init__(self, num, ty, const=None):\n self.num = num\n self.ty = ty\n self.const = const\n\n def __repr__(self):\n return \"<_FusionVar, num={}, ty={}, const={}>\".format(\n self.num, self.ty, self.const)\n\n\nclass _FusionMem(object):\n\n def __init__(self, var_list):\n self.op_list = []\n self.var_list = var_list[:]\n\n def __repr__(self):\n return \"<_FusionMem, op_list={}, var_list={}>\".format(\n self.op_list,\n self.var_list)\n\n def get_fresh(self, ty, **kwargs):\n n = len(self.var_list)\n ret = _FusionVar(n, ty, **kwargs)\n self.var_list.append(ret)\n return ret\n\n def set_op(self, name, operation, param_names,\n nin, nout, in_vars, out_vars, types):\n num = len(self.op_list)\n op = FusionOp(name, operation, param_names,\n nin, nout, in_vars, out_vars, types, num)\n self.op_list.append(op)\n\n\nclass _FusionRef(object):\n\n def __init__(self, var, mem):\n self._var = var\n self.dtype = var.ty\n self._mem = mem\n\n def __repr__(self):\n return \"<_FusionRef, dtype=%s>\" % self.dtype\n\n def __neg__(self):\n return negative(self)\n\n def __add__(self, other):\n return add(self, other)\n\n def __iadd__(self, other):\n return add(self, other, self)\n\n def __radd__(self, other):\n return add(other, self)\n\n def __sub__(self, other):\n return subtract(self, other)\n\n def __isub__(self, other):\n return subtract(self, other, self)\n\n def __rsub__(self, other):\n return subtract(other, self)\n\n def __mul__(self, other):\n return multiply(self, other)\n\n def __imul__(self, other):\n return multiply(self, other, self)\n\n def __rmul__(self, other):\n return multiply(other, self)\n\n def __div__(self, other):\n return divide(self, other)\n\n def __idiv__(self, other):\n return divide(self, other, self)\n\n def __rdiv__(self, other):\n return divide(other, self)\n\n def __truediv__(self, other):\n return true_divide(self, other)\n\n def __itruediv__(self, other):\n return true_divide(self, other, self)\n\n def __rtruediv__(self, other):\n return true_divide(other, self)\n\n def __floordiv__(self, other):\n return floor_divide(self, other)\n\n def __ifloordiv__(self, other):\n return floor_divide(self, other, self)\n\n def __rfloordiv__(self, other):\n return floor_divide(other, self)\n\n def __mod__(self, other):\n return remainder(self, other)\n\n def __imod__(self, other):\n return remainder(self, other, self)\n\n def __rmod__(self, other):\n return remainder(other, self)\n\n def __pow__(x, y):\n return power(x, y)\n\n def __ipow__(self, other):\n return power(self, other, self)\n\n def __lshift__(self, other):\n return left_shift(self, other)\n\n def __ilshift__(self, other):\n return left_shift(self, other, self)\n\n def __rlshift__(self, other):\n return 
left_shift(other, self)\n\n def __rshift__(self, other):\n return right_shift(self, other)\n\n def __irshift__(self, other):\n return right_shift(self, other, self)\n\n def __rrshift__(self, other):\n return right_shift(other, self)\n\n def __and__(self, other):\n return bitwise_and(self, other)\n\n def __iand__(self, other):\n return bitwise_and(self, other, self)\n\n def __rand__(self, other):\n return bitwise_and(other, self)\n\n def __or__(self, other):\n return bitwise_or(self, other)\n\n def __ior__(self, other):\n return bitwise_or(self, other, self)\n\n def __ror__(self, other):\n return bitwise_or(other, self)\n\n def __xor__(self, other):\n return bitwise_xor(self, other)\n\n def __ixor__(self, other):\n return bitwise_xor(self, other, self)\n\n def __rxor__(self, other):\n return bitwise_xor(other, self)\n\n def __invert__(self):\n return invert(self)\n\n def __lt__(self, other):\n return less(self, other)\n\n def __le__(self, other):\n return less_equal(self, other)\n\n def __eq__(self, other):\n return equal(self, other)\n\n def __ne__(self, other):\n return not_equal(self, other)\n\n def __gt__(self, other):\n return greater(self, other)\n\n def __ge__(self, other):\n return greater_equal(self, other)\n\n def __nonzero__(self):\n raise Exception(\"Can't cast to bool\")\n\n def __bool__(self):\n raise Exception(\"Can't cast to bool\")\n\n def __setitem__(self, slices, value):\n if slices is Ellipsis or (isinstance(slices, slice) and\n slices == slice(None)):\n copy(value, self)\n else:\n raise ValueError('The fusion supports `[...]` or `[:]`.')\n\n def copy(self):\n return copy(self)\n\n\n_kind_score = {\n 'b': 0,\n 'u': 1,\n 'i': 1,\n 'f': 2,\n 'c': 3,\n}\n\n_dtype_to_ctype = {\n numpy.dtype('float64'): 'double',\n numpy.dtype('float32'): 'float',\n numpy.dtype('float16'): 'float16',\n numpy.dtype('complex128'): 'complex<double>',\n numpy.dtype('complex64'): 'complex<float>',\n numpy.dtype('int64'): 'long long',\n numpy.dtype('int32'): 'int',\n numpy.dtype('int16'): 'short',\n numpy.dtype('int8'): 'signed char',\n numpy.dtype('uint64'): 'unsigned long long',\n numpy.dtype('uint32'): 'unsigned int',\n numpy.dtype('uint16'): 'unsigned short',\n numpy.dtype('uint8'): 'unsigned char',\n numpy.dtype('bool'): 'bool',\n}\n\n_dtype_list = [numpy.dtype(_) for _ in '?bhilqBHILQefdFD']\n\n\ndef _normalize_arg(arg, mem):\n arg_type = type(arg)\n if arg_type is _FusionRef:\n return arg._var\n is_scalar = arg_type in six.integer_types + (float, bool, complex)\n is_ndarray = hasattr(arg, 'dtype') and arg.dtype in _dtype_list\n if is_scalar or is_ndarray:\n return mem.get_fresh(numpy.dtype(arg_type), const=arg)\n raise Exception('Unsupported type %s' % arg_type)\n\n\ndef _convert(f):\n if type(f) is core.ufunc:\n return _convert_from_ufunc(f)\n if type(f) is core.ElementwiseKernel:\n return _convert_from_elementwise(f)\n raise Exception(\"Can't convert from %s to FusionOp\" % type(f))\n\n\ndef _should_use_min_scalar(in_args):\n max_array_kind = -2\n max_scalar_kind = -1\n for i in in_args:\n kind = _kind_score[i.ty.kind]\n if i.const is None:\n max_array_kind = max(max_array_kind, kind)\n else:\n max_scalar_kind = max(max_scalar_kind, kind)\n return (max_scalar_kind != -1 and\n max_array_kind >= max_scalar_kind)\n\n\ndef _convert_from_ufunc(ufunc):\n nin = ufunc.nin\n nout = ufunc.nout\n\n def get_mem(args):\n for i in args:\n if type(i) == _FusionRef:\n return i._mem\n raise Exception('number of ndarray arguments must be more than 0')\n\n def can_cast1(args, ty_ins):\n for i in 
six.moves.range(nin):\n if args[i].const is None:\n if not numpy.can_cast(args[i].ty, ty_ins[i]):\n return False\n else:\n if not numpy.can_cast(args[i].const, ty_ins[i]):\n return False\n return True\n\n def can_cast2(args, ty_ins):\n for i in six.moves.range(nin):\n if not numpy.can_cast(args[i].ty, ty_ins[i]):\n return False\n return True\n\n def res(*args, **kwargs):\n mem = get_mem(args)\n var_list = [_normalize_arg(_, mem) for _ in args]\n if 'out' in kwargs:\n var_list.append(_normalize_arg(kwargs.pop('out'), mem))\n if kwargs:\n raise TypeError('Wrong arguments %s' % kwargs)\n assert nin <= len(var_list) <= nin + nout\n in_vars = var_list[:nin]\n out_vars = var_list[nin:]\n can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2\n for ty_ins, ty_outs, op in ufunc._ops:\n ty_ins = [numpy.dtype(_) for _ in ty_ins]\n ty_outs = [numpy.dtype(_) for _ in ty_outs]\n if can_cast(in_vars, ty_ins):\n param_names = (['in%d' % i for i in six.moves.range(nin)] +\n ['out%d' % i for i in six.moves.range(nout)])\n ret = []\n for i in six.moves.range(nout):\n if i >= len(out_vars):\n v = mem.get_fresh(ty_outs[i])\n out_vars.append(v)\n ret.append(_FusionRef(v, mem))\n elif numpy.can_cast(ty_outs[i], out_vars[i].ty,\n \"same_kind\"):\n v = out_vars[i]\n ret.append(_FusionRef(v, mem))\n else:\n raise TypeError(\n 'output (typecode \\'{}\\') could not be coerced '\n 'to provided output parameter (typecode \\'{}\\') '\n 'according to the casting rule '\n '\"same_kind\"'.format(\n ty_outs[i].char, out_vars[i].ty.char))\n mem.set_op(ufunc.name, op, param_names, nin, nout,\n in_vars, out_vars, ty_ins + ty_outs)\n return ret[0] if len(ret) == 1 else tuple(ret)\n raise TypeError('Invalid type cast in \\'{}\\': {} -> {}'.format(\n ufunc.name,\n [_.ty for _ in in_vars],\n [_.ty for _ in out_vars]))\n return res\n\n\ndef _convert_from_elementwise(elem):\n raise Exception('Not Impletmented')\n\n\ndef _gather_submodules(ops):\n return {(op.name, tuple(op.types)): op for op in ops}\n\n\ndef _get_params(var_list):\n return ['%s v%d' % (var.ty, var.num) for var in var_list]\n\n\ndef _get_out_params(var_list):\n return ['%s ret%d' % (var.ty, i) for i, var in enumerate(var_list)]\n\n\ndef _get_declaration_from_var(var):\n if var.const is None:\n return '%s v%d;\\n' % (_dtype_to_ctype[var.ty], var.num)\n\n c = var.const\n val = numpy.asscalar(c) if hasattr(c, 'dtype') else c\n\n if isinstance(val, bool):\n init = '= %s' % str(c).lower()\n elif isinstance(val, complex):\n init = '(%s, %s)' % (c.real, c.imag)\n elif isinstance(val, six.integer_types + (float,)):\n init = '= %s' % str(c)\n else:\n raise TypeError('Invalid constant type: {}'.format(type(c)))\n return 'const %s v%d %s;\\n' % (_dtype_to_ctype[var.ty], var.num, init)\n\n\ndef _get_declaration_from_op(op):\n return ''.join('%s v%d_%d;\\n' % (_dtype_to_ctype[t], op.num, j)\n for j, t in enumerate(op.types))\n\n\ndef _get_operation_code(op):\n code = ''.join('v%d_%d = v%d;\\n' % (op.num, i, v.num)\n for i, v in enumerate(op.in_vars))\n params = ['v%d_%d' % (op.num, i)\n for i in six.moves.range(op.nin + op.nout)]\n code += op.name + '(' + ', '.join(params) + ');\\n'\n code += ''.join('v%d = v%d_%d;\\n' %\n (v.num, op.num, i + op.nin)\n for i, v in enumerate(op.out_vars))\n return code\n\n\ndef _get_submodule_code(op):\n parameters = ', '.join('%s &%s' % (_dtype_to_ctype[t], name)\n for i, (name, t)\n in enumerate(zip(op.param_names, op.types)))\n typedecl = ''.join(('typedef %s in%d_type;\\n' % (_dtype_to_ctype[t], i))\n for i, t in 
enumerate(op.types[:op.nin]))\n typedecl += ''.join(('typedef %s out%d_type;\\n' % (_dtype_to_ctype[t], i))\n for i, t in enumerate(op.types[op.nin:]))\n module_code = string.Template('''\n __device__ void ${name}(${parameters}) {\n ${typedecl}\n ${operation};\n }\n ''').substitute(\n name=op.name,\n parameters=parameters,\n operation=op.operation,\n typedecl=typedecl)\n return module_code + '\\n'\n\n\ndef _get_pre_code(in_vars, out_vars, operation):\n in_params = ', '.join('%s v%s' % (_dtype_to_ctype[v.ty], v.num)\n for v in in_vars)\n out_params = ''.join('%s v%s;\\n' % (_dtype_to_ctype[v.ty], v.num)\n for v in out_vars)\n module_code = string.Template('''\n __device__ ${return_type} _pre_map(${in_params}) {\n ${out_params}\n ${operation};\n return ${return_var};\n }\n ''').substitute(\n return_type=_dtype_to_ctype[out_vars[0].ty],\n in_params=in_params,\n out_params=out_params,\n operation=operation,\n return_var='v%d' % out_vars[0].num)\n return module_code\n\n\ndef _get_reduce_op(ops, dtype):\n for i in ops._ops:\n if numpy.can_cast(dtype.type, i[0][0]):\n return i\n raise TypeError(\"Type is mismatched. %s(...), %s\" % (ops.name, dtype.type))\n\n\ndef _get_post_code(post_vars, operation, post_out):\n module_code = string.Template('''\n __device__ ${return_type} _post_map(${arg_type} v0) {\n ${operation};\n return v${return_var};\n }\n ''').substitute(\n arg_type=_dtype_to_ctype[post_vars[0].ty],\n return_type=_dtype_to_ctype[post_vars[post_out.num].ty],\n operation=operation,\n return_var=post_out.num)\n return module_code\n\n\ndef _get_fix_code(data_type, fixed_type, operation):\n module_code = string.Template('''\n __device__ ${fixed_type} _post_fix(${data_type} a) {\n ${fixed_type} out0;\n ${operation};\n return out0;\n }\n ''').substitute(\n data_type=data_type,\n fixed_type=_dtype_to_ctype[fixed_type],\n operation=operation)\n return module_code\n\n\ndef _get_fusion(func, nin, reduce, post_map, identity, input_types, name):\n in_vars = [_FusionVar(i, t) for i, t in enumerate(input_types)]\n mem = _FusionMem(in_vars)\n in_refs = [_FusionRef(_, mem) for _ in in_vars]\n out_refs = func(*in_refs)\n out_refs = list(out_refs) if type(out_refs) == tuple else [out_refs]\n out_refs = [_ for _ in out_refs if _ is not None]\n out_refs = [_FusionRef(_normalize_arg(_, mem), mem) for _ in out_refs]\n out_vars = [_normalize_arg(copy(_), mem) for _ in out_refs]\n nout = len(out_vars)\n op_list = mem.op_list\n tmpvars = mem.var_list[len(in_vars):]\n if nout > 0:\n tmpvars = tmpvars[:-nout]\n\n in_params = ', '.join(_get_params(in_vars[:nin]))\n out_params = ', '.join(_get_params(out_vars))\n operation = ''.join(_get_declaration_from_var(_) for _ in tmpvars)\n operation += ''.join(_get_declaration_from_op(_) for _ in op_list)\n operation += '\\n'.join(_get_operation_code(_) for _ in op_list)\n\n if reduce is None:\n if not out_params:\n in_params = ', '.join(_get_params(in_vars[:-1]))\n out_params = ', '.join(_get_params([in_vars[-1]]))\n submodules = _gather_submodules(op_list)\n submodule_code = ''.join(_get_submodule_code(_)\n for _ in submodules.values())\n return core.ElementwiseKernel(in_params, out_params,\n operation, preamble=submodule_code,\n name=name)\n else:\n if nout != 1:\n raise Exception(\"Wrong number of number of arguments\")\n # pre-map\n pre_type = out_vars[0].ty\n pre_code = _get_pre_code(in_vars, out_vars, operation)\n\n # reduce\n reduce_op = _get_reduce_op(reduce._raw, pre_type)\n reduce_code = reduce_op[2][1]\n reduce_type = numpy.dtype(reduce_op[1][0])\n rtype = 
reduce_op[2][3]\n post_type = \"type_in0_raw\" if rtype is None else rtype\n pre_code += \"typedef %s type_in0_raw;\\n\" % _dtype_to_ctype[reduce_type]\n\n # post-map\n post_in = [_FusionVar(0, reduce_type)]\n mem = _FusionMem(post_in)\n post_in_ref = [_FusionRef(_, mem) for _ in post_in]\n post_out = _normalize_arg(post_map(*post_in_ref), mem)\n if type(post_out) == tuple:\n raise Exception(\"Can't reduce a tuple\")\n post_vars = mem.var_list\n post_ops = mem.op_list\n post_code = ''.join(_get_declaration_from_var(_)\n for _ in post_vars[1:])\n post_code += ''.join(_get_declaration_from_op(_) for _ in post_ops)\n post_code += '\\n'.join(_get_operation_code(_) for _ in post_ops)\n post_code = _get_post_code(post_vars, post_code, post_out)\n post_code += (\n \"typedef %s type_out0_raw;\\n\" % _dtype_to_ctype[reduce_type])\n post_code += _get_fix_code(post_type, reduce_type, reduce_op[2][2])\n\n submodules = _gather_submodules(op_list + post_ops)\n submodule_code = ''.join(_get_submodule_code(v)\n for v in submodules.values())\n submodule_code += reduce._raw._preamble + pre_code + post_code\n operation_args = ['v' + str(i) for i in six.moves.range(nin)]\n operation = '_pre_map(' + ', '.join(operation_args) + ')'\n out_params = '%s res' % post_out.ty\n return core.ReductionKernel(in_params, out_params, operation,\n reduce_code,\n 'res = _post_map(_post_fix(a))',\n identity,\n name=name,\n reduce_type=post_type,\n preamble=submodule_code)\n\n\nclass Fusion(object):\n\n \"\"\"Function class.\n\n This class can be get by using `fuse` function and\n works like `ElementwiseKernel` or `ReductionKernel`.\n\n Attributes:\n func (function): The function before fusing.\n name (str): The name of the function.\n reduce (ufunc): Reduction ufunc.\n post_map (function): Mapping function for reduced values.\n \"\"\"\n\n def __init__(self, func, input_num, reduce, post_map, name=None):\n self.func = func\n self.name = name or func.__name__\n self.input_num = input_num\n self.reduce = reduce\n self.post_map = post_map\n self.identity = None if reduce is None else self.reduce._raw.identity\n self._memo = {}\n\n def __repr__(self):\n return \"<Fusion '%s'>\" % self.name\n\n def __call__(self, *args, **kwargs):\n _thread_local.in_fusion = True\n try:\n return self._call(*args, **kwargs)\n finally:\n _thread_local.in_fusion = False\n\n def _call(self, *args, **kwargs):\n axis = kwargs['axis'] if 'axis' in kwargs else None\n if len(args) == 0:\n raise Exception('number of arguments must be more than 0')\n if builtins.any(\n not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))\n for _ in args):\n raise TypeError('Invalid argument type for \\'{}\\': ({})'.format(\n self.name,\n ', '.join(repr(type(_)) for _ in args)))\n\n def is_cupy_data(a):\n return isinstance(a, (core.ndarray, numpy.generic))\n if builtins.all(is_cupy_data(_) for _ in args):\n types = [_.dtype for _ in args]\n key = tuple(types)\n if key not in self._memo:\n if self.input_num is not None:\n nin = self.input_num\n else:\n nin = len(args)\n f = _get_fusion(self.func, nin, self.reduce,\n self.post_map, self.identity, types, self.name)\n self._memo[key] = f\n f = self._memo[key]\n if self.reduce is None:\n return f(*args)\n else:\n return f(*args, axis=axis)\n else:\n if builtins.any(type(_) is core.ndarray for _ in args):\n types = '.'.join(repr(type(_)) for _ in args)\n message = \"Can't fuse \\n %s(%s)\" % (self.name, types)\n warnings.warn(message)\n if self.reduce is None:\n return self.func(*args)\n elif axis is None:\n return 
self.post_map(self.reduce(self.func(*args)))\n else:\n return self.post_map(self.reduce(self.func(*args), axis=axis))\n\n\ndef fuse(*args, **kwargs):\n \"\"\"Function fusing decorator.\n\n This decorator can be used to define an elementwise or reduction kernel\n more easily than `ElementwiseKernel` class or `ReductionKernel` class.\n\n This decorator makes `Fusion` class from the given function.\n\n Args:\n input_num (int): Number of input arguments of the given function.\n reduce (function): The reduce function which is applied after\n pre-mapping step. If not assigned, reduction step is skipped.\n post_map (function): Mapping function for reduced values.\n If not assigned, post_map step is skipped.\n kernel_name (str): Name of the fused kernel function.\n If omitted, the name of the decorated function is used.\n\n .. note::\n This API is currently experimental and the interface may be changed in\n the future version.\n\n \"\"\"\n\n def wrapper(\n f, input_num=None, reduce=None, post_map=lambda x: x,\n kernel_name=None):\n return Fusion(f, input_num, reduce, post_map, kernel_name)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return functools.update_wrapper(wrapper(args[0]), args[0])\n else:\n return lambda f: functools.update_wrapper(\n wrapper(f, *args, **kwargs), f)\n\n\nclass ufunc(core.ufunc):\n\n def __init__(self, fusion_op, cupy_op, numpy_op):\n self.name = fusion_op.name\n self.nin = fusion_op.nin\n self.nout = fusion_op.nout\n self.nargs = fusion_op.nargs\n self._ops = fusion_op._ops\n self._preamble = fusion_op._preamble\n self.__doc__ = cupy_op.__doc__\n self._params = fusion_op._params\n self._routine_cache = fusion_op._routine_cache\n\n self._fusion_op = fusion_op\n self._cupy_op = cupy_op\n self._numpy_op = numpy_op\n\n def __repr__(self):\n return repr(self._cupy_op)\n\n def __call__(self, *args, **kwargs):\n in_fusion = getattr(_thread_local, 'in_fusion', False)\n if in_fusion:\n if builtins.any(isinstance(_, _FusionRef) for _ in args):\n return _convert(self._fusion_op)(*args, **kwargs)\n elif builtins.any(isinstance(_, numpy.ndarray) for _ in args):\n return self._numpy_op(*args, **kwargs)\n\n return self._cupy_op(*args, **kwargs)\n\n __doc__ = core.ufunc.__doc__\n __call__.__doc__ = core.ufunc.__call__.__doc__\n\n\ndef _create_ufunc(cupy_ufunc, numpy_ufunc):\n return ufunc(cupy_ufunc, cupy_ufunc, numpy_ufunc)\n\n\nwhere = ufunc(sorting.search._where_ufunc,\n sorting.search.where, numpy.where)\n\nclip = ufunc(core._clip, math.misc.clip, numpy.clip)\n\ncopy = ufunc(core.elementwise_copy,\n creation.from_data.copy, numpy.copy)\n\nbitwise_and = _create_ufunc(core.bitwise_and, numpy.bitwise_and)\nbitwise_or = _create_ufunc(core.bitwise_or, numpy.bitwise_or)\nbitwise_xor = _create_ufunc(core.bitwise_xor, numpy.bitwise_xor)\ninvert = _create_ufunc(core.invert, numpy.invert)\nleft_shift = _create_ufunc(core.left_shift, numpy.left_shift)\nright_shift = _create_ufunc(core.right_shift, numpy.right_shift)\n\ngreater = _create_ufunc(core.greater, numpy.greater)\ngreater_equal = _create_ufunc(core.greater_equal, numpy.greater_equal)\nless = _create_ufunc(core.less, numpy.less)\nless_equal = _create_ufunc(core.less_equal, numpy.less_equal)\nequal = _create_ufunc(core.equal, numpy.equal)\nnot_equal = _create_ufunc(core.not_equal, numpy.not_equal)\n\nisfinite = _create_ufunc(logic.content.isfinite, numpy.isfinite)\nisinf = _create_ufunc(logic.content.isinf, numpy.isinf)\nisnan = _create_ufunc(logic.content.isnan, numpy.isnan)\n\nlogical_and = 
_create_ufunc(logic.ops.logical_and, numpy.logical_and)\nlogical_or = _create_ufunc(logic.ops.logical_or, numpy.logical_or)\nlogical_not = _create_ufunc(logic.ops.logical_not, numpy.logical_not)\nlogical_xor = _create_ufunc(logic.ops.logical_xor, numpy.logical_xor)\n\nsin = _create_ufunc(math.trigonometric.sin, numpy.sin)\ncos = _create_ufunc(math.trigonometric.cos, numpy.cos)\ntan = _create_ufunc(math.trigonometric.tan, numpy.tan)\narcsin = _create_ufunc(math.trigonometric.arcsin, numpy.arcsin)\narccos = _create_ufunc(math.trigonometric.arccos, numpy.arccos)\narctan = _create_ufunc(math.trigonometric.arctan, numpy.arctan)\narctan2 = _create_ufunc(math.trigonometric.arctan2, numpy.arctan2)\nhypot = _create_ufunc(math.trigonometric.hypot, numpy.hypot)\ndeg2rad = _create_ufunc(math.trigonometric.deg2rad, numpy.deg2rad)\nrad2deg = _create_ufunc(math.trigonometric.rad2deg, numpy.rad2deg)\ndegrees = _create_ufunc(math.trigonometric.degrees, numpy.degrees)\nradians = _create_ufunc(math.trigonometric.radians, numpy.radians)\n\nsinh = _create_ufunc(math.hyperbolic.sinh, numpy.sinh)\ncosh = _create_ufunc(math.hyperbolic.cosh, numpy.cosh)\ntanh = _create_ufunc(math.hyperbolic.tanh, numpy.tanh)\narcsinh = _create_ufunc(math.hyperbolic.arcsinh, numpy.arcsinh)\narccosh = _create_ufunc(math.hyperbolic.arccosh, numpy.arccosh)\narctanh = _create_ufunc(math.hyperbolic.arctanh, numpy.arctanh)\n\nrint = _create_ufunc(math.rounding.rint, numpy.rint)\nfloor = _create_ufunc(math.rounding.floor, numpy.floor)\nceil = _create_ufunc(math.rounding.ceil, numpy.ceil)\ntrunc = _create_ufunc(math.rounding.trunc, numpy.trunc)\nfix = _create_ufunc(math.rounding.fix, numpy.fix)\n\nexp = _create_ufunc(math.explog.exp, numpy.exp)\nexpm1 = _create_ufunc(math.explog.expm1, numpy.expm1)\nexp2 = _create_ufunc(math.explog.exp2, numpy.exp2)\nlog = _create_ufunc(math.explog.log, numpy.log)\nlog10 = _create_ufunc(math.explog.log10, numpy.log10)\nlog2 = _create_ufunc(math.explog.log2, numpy.log2)\nlog1p = _create_ufunc(math.explog.log1p, numpy.log1p)\nlogaddexp = _create_ufunc(math.explog.logaddexp, numpy.logaddexp)\nlogaddexp2 = _create_ufunc(math.explog.logaddexp2, numpy.logaddexp2)\n\nsignbit = _create_ufunc(math.floating.signbit, numpy.signbit)\ncopysign = _create_ufunc(math.floating.copysign, numpy.copysign)\nldexp = _create_ufunc(math.floating.ldexp, numpy.ldexp)\nfrexp = _create_ufunc(math.floating.frexp, numpy.frexp)\nnextafter = _create_ufunc(math.floating.nextafter, numpy.nextafter)\n\nadd = _create_ufunc(math.arithmetic.add, numpy.add)\nreciprocal = _create_ufunc(math.arithmetic.reciprocal, numpy.reciprocal)\nnegative = _create_ufunc(math.arithmetic.negative, numpy.negative)\nangle = _create_ufunc(math.arithmetic.angle, numpy.angle)\nconj = _create_ufunc(math.arithmetic.conj, numpy.conj)\nreal = _create_ufunc(math.arithmetic.real, numpy.real)\nimag = _create_ufunc(math.arithmetic.imag, numpy.imag)\nmultiply = _create_ufunc(math.arithmetic.multiply, numpy.multiply)\ndivide = _create_ufunc(math.arithmetic.divide, numpy.divide)\npower = _create_ufunc(math.arithmetic.power, numpy.power)\nsubtract = _create_ufunc(math.arithmetic.subtract, numpy.subtract)\ntrue_divide = _create_ufunc(math.arithmetic.true_divide, numpy.true_divide)\nfloor_divide = _create_ufunc(math.arithmetic.floor_divide, numpy.floor_divide)\nfmod = _create_ufunc(math.arithmetic.fmod, numpy.fmod)\nmod = _create_ufunc(math.arithmetic.remainder, numpy.mod)\nmodf = _create_ufunc(math.arithmetic.modf, numpy.modf)\nremainder = 
_create_ufunc(math.arithmetic.remainder, numpy.remainder)\n\nsqrt = _create_ufunc(math.misc.sqrt, numpy.sqrt)\nsqrt_fixed = _create_ufunc(math.misc.sqrt_fixed, numpy.sqrt)\nsquare = _create_ufunc(math.misc.square, numpy.square)\nabsolute = _create_ufunc(math.misc.absolute, numpy.absolute)\nabs = _create_ufunc(math.misc.absolute, numpy.abs)\nsign = _create_ufunc(math.misc.sign, numpy.sign)\nmaximum = _create_ufunc(math.misc.maximum, numpy.maximum)\nminimum = _create_ufunc(math.misc.minimum, numpy.minimum)\nfmax = _create_ufunc(math.misc.fmax, numpy.fmax)\nfmin = _create_ufunc(math.misc.fmin, numpy.fmin)\n\n\nclass reduction(object):\n\n def __init__(self, cupy_op, numpy_op):\n self._cupy_op = cupy_op\n self._numpy_op = numpy_op\n self.__doc__ = cupy_op.__doc__\n\n def __call__(self, *args, **kwargs):\n if builtins.any(type(_) == numpy.ndarray for _ in args):\n return self._numpy_op(*args, **kwargs)\n else:\n return self._cupy_op(*args, **kwargs)\n\n\nall = reduction(logic.truth.all, numpy.all)\nany = reduction(logic.truth.any, numpy.any)\nsum = reduction(math.sumprod.sum, numpy.sum)\nprod = reduction(math.sumprod.prod, numpy.prod)\namax = reduction(statistics.order.amax, numpy.amax)\namin = reduction(statistics.order.amin, numpy.amin)\n\n\nall._raw = core._all\nany._raw = core._any\nsum._raw = core._sum_auto_dtype\nprod._raw = core._prod_auto_dtype\namax._raw = core._amax\namin._raw = core._amin\n", "path": "cupy/core/fusion.py" } ]
diff --git a/cupy/core/core.pyx b/cupy/core/core.pyx index 03f901e72d4..90516fd647a 100644 --- a/cupy/core/core.pyx +++ b/cupy/core/core.pyx @@ -1144,7 +1144,10 @@ cdef class ndarray: :meth:`numpy.ndarray.sum` """ - return _sum(self, axis, dtype, out, keepdims) + if dtype is None: + return _sum_auto_dtype(self, axis, dtype, out, keepdims) + else: + return _sum_keep_dtype(self, axis, dtype, out, keepdims) # TODO(okuta): Implement cumsum @@ -1190,7 +1193,10 @@ cdef class ndarray: :meth:`numpy.ndarray.prod` """ - return _prod(self, axis, dtype, out, keepdims) + if dtype is None: + return _prod_auto_dtype(self, axis, dtype, out, keepdims) + else: + return _prod_keep_dtype(self, axis, dtype, out, keepdims) # TODO(okuta): Implement cumprod @@ -3341,7 +3347,7 @@ cpdef _prepare_multiple_array_indexing(ndarray a, list slices): [index._reshape((1,) + index.shape) for index in flattened_indexes], axis=0, shape=concat_shape, dtype=flattened_indexes[0].dtype) - reduced_idx = _sum(flattened_indexes, axis=0) + reduced_idx = _sum_auto_dtype(flattened_indexes, axis=0) return a_interm, reduced_idx, li, ri @@ -3966,21 +3972,39 @@ _any = create_reduction_func( # Mathematical functions # ----------------------------------------------------------------------------- -_sum = create_reduction_func( +_sum_auto_dtype = create_reduction_func( 'cupy_sum', - ('?->l', 'B->L', 'h->l', 'H->L', 'i->l', 'I->L', 'l->l', 'L->L', + ('?->l', 'b->l', 'B->L', 'h->l', 'H->L', 'i->l', 'I->L', 'l->l', 'L->L', 'q->q', 'Q->Q', ('e->e', (None, None, None, 'float')), 'f->f', 'd->d', 'F->F', 'D->D'), ('in0', 'a + b', 'out0 = type_out0_raw(a)', None), 0) -_prod = create_reduction_func( +_sum_keep_dtype = create_reduction_func( + 'cupy_sum_with_dtype', + ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', + 'q->q', 'Q->Q', + ('e->e', (None, None, None, 'float')), + 'f->f', 'd->d', 'F->F', 'D->D'), + ('in0', 'a + b', 'out0 = type_out0_raw(a)', None), 0) + + +_prod_auto_dtype = create_reduction_func( 'cupy_prod', - ['?->l', 'B->L', 'h->l', 'H->L', 'i->l', 'I->L', 'l->l', 'L->L', + ('?->l', 'b->l', 'B->L', 'h->l', 'H->L', 'i->l', 'I->L', 'l->l', 'L->L', 'q->q', 'Q->Q', ('e->e', (None, None, None, 'float')), - 'f->f', 'd->d', 'F->F', 'D->D'], + 'f->f', 'd->d', 'F->F', 'D->D'), + ('in0', 'a * b', 'out0 = type_out0_raw(a)', None), 1) + + +_prod_keep_dtype = create_reduction_func( + 'cupy_prod_with_dtype', + ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', + 'q->q', 'Q->Q', + ('e->e', (None, None, None, 'float')), + 'f->f', 'd->d', 'F->F', 'D->D'), ('in0', 'a * b', 'out0 = type_out0_raw(a)', None), 1) diff --git a/cupy/core/fusion.py b/cupy/core/fusion.py index 8815c5a2bb4..07a3dd1c2a0 100644 --- a/cupy/core/fusion.py +++ b/cupy/core/fusion.py @@ -851,7 +851,7 @@ def __call__(self, *args, **kwargs): all._raw = core._all any._raw = core._any -sum._raw = core._sum -prod._raw = core._prod +sum._raw = core._sum_auto_dtype +prod._raw = core._prod_auto_dtype amax._raw = core._amax amin._raw = core._amin diff --git a/tests/cupy_tests/math_tests/test_sumprod.py b/tests/cupy_tests/math_tests/test_sumprod.py index b98521dd344..1697427c129 100644 --- a/tests/cupy_tests/math_tests/test_sumprod.py +++ b/tests/cupy_tests/math_tests/test_sumprod.py @@ -118,6 +118,14 @@ def test_sum_axes4(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) return a.sum(axis=(0, 2, 3)) + @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype']) + @testing.numpy_cupy_allclose() + def test_sum_dtype(self, 
xp, src_dtype, dst_dtype): + if not xp.can_cast(src_dtype, dst_dtype): + return xp.array([]) # skip + a = testing.shaped_arange((2, 3, 4), xp, src_dtype) + return a.sum(dtype=dst_dtype) + @testing.numpy_cupy_allclose() def test_sum_keepdims(self, xp): a = testing.shaped_arange((2, 3, 4), xp) @@ -161,6 +169,14 @@ def test_external_prod_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return xp.prod(a, axis=1) + @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype']) + @testing.numpy_cupy_allclose() + def test_prod_dtype(self, xp, src_dtype, dst_dtype): + if not xp.can_cast(src_dtype, dst_dtype): + return xp.array([]) # skip + a = testing.shaped_arange((2, 3), xp, src_dtype) + return a.prod(dtype=dst_dtype) + axes = [0, 1, 2]
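A minimal NumPy sketch of the reference behaviour the new _sum_keep_dtype / _prod_keep_dtype paths are tested against above (the tests compare cupy results to numpy): without an explicit dtype, small integer inputs are accumulated in the platform default integer, while an explicit dtype= is kept. The promoted width mentioned in the comment assumes a 64-bit Linux/macOS platform.

    import numpy as np

    a = np.arange(6, dtype=np.int8).reshape(2, 3)

    # No explicit dtype: the accumulator is promoted to the default integer
    # (typically int64 on 64-bit Linux/macOS, int32 on Windows).
    print(a.sum().dtype)

    # Explicit dtype: the requested type is kept for the result.
    print(a.sum(dtype=np.int8).dtype)   # int8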
mdn__kuma-7256
[ { "content": "import json\nimport os\nfrom functools import lru_cache\n\nimport requests\nimport requests.exceptions\nfrom django.conf import settings\nfrom django_jinja import library\n\n\n@lru_cache()\ndef get_localization_data(locale):\n \"\"\"\n Read the frontend string catalog for the specified locale, parse\n it as JSON, and return the resulting dict. The returned values\n are cached so that we don't have to read files all the time.\n \"\"\"\n path = os.path.join(settings.BASE_DIR, \"static\", \"jsi18n\", locale, \"react.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\n\n@library.global_function\ndef render_react(component_name, locale, url, document_data, ssr=True):\n \"\"\"\n Render a script tag to define the data and any other HTML tags needed\n to enable the display of a React-based UI. By default, this does\n server side rendering, falling back to client-side rendering if\n the SSR attempt fails. Pass False as the second argument to do\n client-side rendering unconditionally.\n\n Note that we are not defining a generic Jinja template tag here.\n The code in this file is specific to Kuma's React-based UI.\n \"\"\"\n localization_data = get_localization_data(locale)\n\n data = {\n \"locale\": locale,\n \"stringCatalog\": localization_data[\"catalog\"],\n \"pluralExpression\": localization_data[\"plural\"],\n \"url\": url,\n \"documentData\": document_data,\n }\n\n if ssr:\n return server_side_render(component_name, data)\n else:\n return client_side_render(component_name, data)\n\n\ndef _render(component_name, html, script, needs_serialization=False):\n \"\"\"A utility function used by both client side and server side rendering.\n Returns a string that includes the specified HTML and a serialized\n form of the state dict, in the format expected by the client-side code\n in kuma/javascript/src/index.jsx.\n \"\"\"\n if needs_serialization:\n assert isinstance(script, dict), type(script)\n script = json.dumps(script).replace(\"</\", \"<\\\\/\")\n else:\n script = \"JSON.parse({})\".format(script)\n\n return (\n '<div id=\"react-container\" data-component-name=\"{}\">{}</div>\\n'\n \"<script>window._react_data = {};</script>\\n\"\n ).format(component_name, html, script)\n\n\ndef client_side_render(component_name, data):\n \"\"\"\n Output an empty <div> and a script with complete state so that\n the UI can be rendered on the client-side.\n \"\"\"\n return _render(component_name, \"\", data, needs_serialization=True)\n\n\ndef server_side_render(component_name, data):\n \"\"\"\n Pre-render the React UI to HTML and output it in a <div>, and then\n also pass the necessary serialized state in a <script> so that\n React on the client side can sync itself with the pre-rendred HTML.\n\n If any exceptions are thrown during the server-side rendering, we\n fall back to client-side rendering instead.\n \"\"\"\n url = \"{}/{}\".format(settings.SSR_URL, component_name)\n timeout = settings.SSR_TIMEOUT\n # Try server side rendering\n try:\n # POST the document data as JSON to the SSR server and we\n # should get HTML text (encoded as plain text) in the body\n # of the response\n response = requests.post(\n url,\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(data).encode(\"utf8\"),\n timeout=timeout,\n )\n\n # Even though we've got fully rendered HTML now, we still need to\n # send the document data along with it so that React can sync its\n # state on the client side with what is in the HTML. 
When rendering\n # a document page, the data includes long strings of HTML that\n # we can get away without duplicating. So as an optimization when\n # component_name is \"document\", we're going to make a copy of the\n # data (because the original belongs to our caller) and delete those\n # strings from the copy.\n #\n # WARNING: This optimization can save 20kb in data transfer\n # for typical pages, but it requires us to be very careful on\n # the frontend. If any components render conditionally based on\n # the state of bodyHTML, tocHTML or quickLinkHTML, then they will\n # render differently on the client than during SSR, and the hydrate\n # will not just work cleanly, and those components will re-render\n # with empty strings. This has already caused Bug 1558308, and\n # I've commented it out because the benefit in file size doesn't\n # seem worth the risk of client-side bugs.\n #\n # As an alternative, it ought to be possible to extract the HTML\n # strings from the SSR'ed document and rebuild the document object\n # on the client right before we call hydrate(). So if you uncomment\n # the lines below, you should also edit kuma/javascript/src/index.jsx\n # to extract the HTML from the document as well.\n #\n # if component_name == 'document':\n # data = data.copy()\n # data['documentData'] = data['documentData'].copy()\n # data['documentData'].update(bodyHTML='',\n # tocHTML='',\n # quickLinksHTML='')\n response.raise_for_status()\n result = response.json()\n return _render(component_name, result[\"html\"], result[\"script\"])\n\n except requests.exceptions.RequestException as exception:\n print(f\"{exception.__class__} error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "path": "kuma/wiki/templatetags/ssr.py" } ]
[ { "content": "import json\nimport os\nfrom functools import lru_cache\n\nimport requests\nimport requests.exceptions\nfrom django.conf import settings\nfrom django_jinja import library\n\n\n@lru_cache()\ndef get_localization_data(locale):\n \"\"\"\n Read the frontend string catalog for the specified locale, parse\n it as JSON, and return the resulting dict. The returned values\n are cached so that we don't have to read files all the time.\n \"\"\"\n path = os.path.join(settings.BASE_DIR, \"static\", \"jsi18n\", locale, \"react.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\n\n@library.global_function\ndef render_react(component_name, locale, url, document_data, ssr=True):\n \"\"\"\n Render a script tag to define the data and any other HTML tags needed\n to enable the display of a React-based UI. By default, this does\n server side rendering, falling back to client-side rendering if\n the SSR attempt fails. Pass False as the second argument to do\n client-side rendering unconditionally.\n\n Note that we are not defining a generic Jinja template tag here.\n The code in this file is specific to Kuma's React-based UI.\n \"\"\"\n localization_data = get_localization_data(locale)\n\n data = {\n \"locale\": locale,\n \"stringCatalog\": localization_data[\"catalog\"],\n \"pluralExpression\": localization_data[\"plural\"],\n \"url\": url,\n \"documentData\": document_data,\n }\n if ssr:\n return server_side_render(component_name, data)\n else:\n return client_side_render(component_name, data)\n\n\ndef _render(component_name, html, script, needs_serialization=False):\n \"\"\"A utility function used by both client side and server side rendering.\n Returns a string that includes the specified HTML and a serialized\n form of the state dict, in the format expected by the client-side code\n in kuma/javascript/src/index.jsx.\n \"\"\"\n if needs_serialization:\n assert isinstance(script, dict), type(script)\n script = json.dumps(script).replace(\"</\", \"<\\\\/\")\n else:\n script = \"JSON.parse({})\".format(script)\n\n return (\n '<div id=\"react-container\" data-component-name=\"{}\">{}</div>\\n'\n \"<script>window._react_data = {};</script>\\n\"\n ).format(component_name, html, script)\n\n\ndef client_side_render(component_name, data):\n \"\"\"\n Output an empty <div> and a script with complete state so that\n the UI can be rendered on the client-side.\n \"\"\"\n return _render(component_name, \"\", data, needs_serialization=True)\n\n\ndef server_side_render(component_name, data):\n \"\"\"\n Pre-render the React UI to HTML and output it in a <div>, and then\n also pass the necessary serialized state in a <script> so that\n React on the client side can sync itself with the pre-rendred HTML.\n\n If any exceptions are thrown during the server-side rendering, we\n fall back to client-side rendering instead.\n \"\"\"\n url = \"{}/{}\".format(settings.SSR_URL, component_name)\n timeout = settings.SSR_TIMEOUT\n # Try server side rendering\n try:\n # POST the document data as JSON to the SSR server and we\n # should get HTML text (encoded as plain text) in the body\n # of the response\n response = requests.post(\n url,\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(data).encode(\"utf8\"),\n timeout=timeout,\n )\n\n # Even though we've got fully rendered HTML now, we still need to\n # send the document data along with it so that React can sync its\n # state on the client side with what is in the HTML. 
When rendering\n # a document page, the data includes long strings of HTML that\n # we can get away without duplicating. So as an optimization when\n # component_name is \"document\", we're going to make a copy of the\n # data (because the original belongs to our caller) and delete those\n # strings from the copy.\n #\n # WARNING: This optimization can save 20kb in data transfer\n # for typical pages, but it requires us to be very careful on\n # the frontend. If any components render conditionally based on\n # the state of bodyHTML, tocHTML or quickLinkHTML, then they will\n # render differently on the client than during SSR, and the hydrate\n # will not just work cleanly, and those components will re-render\n # with empty strings. This has already caused Bug 1558308, and\n # I've commented it out because the benefit in file size doesn't\n # seem worth the risk of client-side bugs.\n #\n # As an alternative, it ought to be possible to extract the HTML\n # strings from the SSR'ed document and rebuild the document object\n # on the client right before we call hydrate(). So if you uncomment\n # the lines below, you should also edit kuma/javascript/src/index.jsx\n # to extract the HTML from the document as well.\n #\n # if component_name == 'document':\n # data = data.copy()\n # data['documentData'] = data['documentData'].copy()\n # data['documentData'].update(bodyHTML='',\n # tocHTML='',\n # quickLinksHTML='')\n response.raise_for_status()\n result = response.json()\n return _render(component_name, result[\"html\"], result[\"script\"])\n\n except requests.exceptions.RequestException as exception:\n print(f\"{exception.__class__} error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "path": "kuma/wiki/templatetags/ssr.py" } ]
diff --git a/kuma/javascript/src/header/__snapshots__/header.test.js.snap b/kuma/javascript/src/header/__snapshots__/header.test.js.snap index 53fcfa25986..7445c77870f 100644 --- a/kuma/javascript/src/header/__snapshots__/header.test.js.snap +++ b/kuma/javascript/src/header/__snapshots__/header.test.js.snap @@ -329,7 +329,7 @@ exports[`Header snapshot 1`] = ` role="menuitem" > <a - href="https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report&title=%2Fen-US" + href="https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report&title=http://localhost/[fake%20absolute%20url]" onClick={[Function]} onContextMenu={[Function]} rel="noopener noreferrer" diff --git a/kuma/javascript/src/header/header.jsx b/kuma/javascript/src/header/header.jsx index a019dd65e27..7ccb55d264b 100644 --- a/kuma/javascript/src/header/header.jsx +++ b/kuma/javascript/src/header/header.jsx @@ -26,7 +26,7 @@ export default function Header(props: Props): React.Node { > <Logo /> </a> - <MainMenu document={props.document} locale={locale} /> + <MainMenu documentData={props.document} locale={locale} /> <Search initialQuery={props.searchQuery || ''} /> <Login /> </header> diff --git a/kuma/javascript/src/header/main-menu.jsx b/kuma/javascript/src/header/main-menu.jsx index 165ce95c66e..dfb041ae0f4 100644 --- a/kuma/javascript/src/header/main-menu.jsx +++ b/kuma/javascript/src/header/main-menu.jsx @@ -8,7 +8,7 @@ import { gettext } from '../l10n.js'; import type { DocumentData } from '../document.jsx'; type Props = { - documentData?: ?DocumentData, + documentData: ?DocumentData, locale: string, }; @@ -190,10 +190,15 @@ const _MainMenu = ({ documentData, locale }: Props) => { ); // One of the menu items has a URL that we need to substitute - // the current document path into. Compute that now. - let path = encodeURIComponent( - `/${locale}` + (documentData ? `/docs/${documentData.slug}` : '') + // the current document path into. Compute that now, if possible. + // In SPAs (e.g. the home page) there is no `documentData` but the + // useEffect below will take care of it anyway. + const [currentAbsoluteUrl, setCurrentAbsoluteUrl] = useState( + documentData ? documentData.absoluteURL : '' ); + useEffect(() => { + setCurrentAbsoluteUrl(window.location.href); + }, []); return ( <nav className="main-nav" aria-label="Main menu"> @@ -248,7 +253,7 @@ const _MainMenu = ({ documentData, locale }: Props) => { rel="noopener noreferrer" href={item.url.replace( '{{PATH}}', - path + currentAbsoluteUrl )} onClick={sendMenuItemInteraction} onContextMenu={ diff --git a/kuma/wiki/templatetags/ssr.py b/kuma/wiki/templatetags/ssr.py index 1a716ea50f2..620427925d2 100644 --- a/kuma/wiki/templatetags/ssr.py +++ b/kuma/wiki/templatetags/ssr.py @@ -41,7 +41,6 @@ def render_react(component_name, locale, url, document_data, ssr=True): "url": url, "documentData": document_data, } - if ssr: return server_side_render(component_name, data) else:
bookwyrm-social__bookwyrm-404
[ { "content": "''' note serializer and children thereof '''\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List\n\nfrom .base_activity import ActivityObject, Link\nfrom .image import Image\n\n@dataclass(init=False)\nclass Tombstone(ActivityObject):\n ''' the placeholder for a deleted status '''\n published: str\n deleted: str\n type: str = 'Tombstone'\n\n\n@dataclass(init=False)\nclass Note(ActivityObject):\n ''' Note activity '''\n published: str\n attributedTo: str\n content: str\n to: List[str] = field(default_factory=lambda: [])\n cc: List[str] = field(default_factory=lambda: [])\n replies: Dict = field(default_factory=lambda: {})\n inReplyTo: str = ''\n summary: str = ''\n tag: List[Link] = field(default_factory=lambda: [])\n attachment: List[Image] = field(default_factory=lambda: [])\n sensitive: bool = False\n type: str = 'Note'\n\n\n@dataclass(init=False)\nclass Article(Note):\n ''' what's an article except a note with more fields '''\n name: str\n type: str = 'Article'\n\n\n@dataclass(init=False)\nclass GeneratedNote(Note):\n ''' just a re-typed note '''\n type: str = 'GeneratedNote'\n\n\n@dataclass(init=False)\nclass Comment(Note):\n ''' like a note but with a book '''\n inReplyToBook: str\n type: str = 'Comment'\n\n\n@dataclass(init=False)\nclass Review(Comment):\n ''' a full book review '''\n name: str\n rating: int = None\n type: str = 'Review'\n\n\n@dataclass(init=False)\nclass Quotation(Comment):\n ''' a quote and commentary on a book '''\n quote: str\n type: str = 'Quotation'\n", "path": "bookwyrm/activitypub/note.py" } ]
[ { "content": "''' note serializer and children thereof '''\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List\n\nfrom .base_activity import ActivityObject, Link\nfrom .image import Image\n\n@dataclass(init=False)\nclass Tombstone(ActivityObject):\n ''' the placeholder for a deleted status '''\n published: str\n deleted: str\n type: str = 'Tombstone'\n\n\n@dataclass(init=False)\nclass Note(ActivityObject):\n ''' Note activity '''\n published: str\n attributedTo: str\n content: str\n to: List[str] = field(default_factory=lambda: [])\n cc: List[str] = field(default_factory=lambda: [])\n replies: Dict = field(default_factory=lambda: {})\n inReplyTo: str = ''\n summary: str = ''\n tag: List[Link] = field(default_factory=lambda: [])\n attachment: List[Image] = field(default_factory=lambda: [])\n sensitive: bool = False\n type: str = 'Note'\n\n\n@dataclass(init=False)\nclass Article(Note):\n ''' what's an article except a note with more fields '''\n name: str\n type: str = 'Article'\n\n\n@dataclass(init=False)\nclass GeneratedNote(Note):\n ''' just a re-typed note '''\n type: str = 'GeneratedNote'\n\n\n@dataclass(init=False)\nclass Comment(Note):\n ''' like a note but with a book '''\n inReplyToBook: str\n type: str = 'Comment'\n\n\n@dataclass(init=False)\nclass Review(Comment):\n ''' a full book review '''\n name: str = None\n rating: int = None\n type: str = 'Review'\n\n\n@dataclass(init=False)\nclass Quotation(Comment):\n ''' a quote and commentary on a book '''\n quote: str\n type: str = 'Quotation'\n", "path": "bookwyrm/activitypub/note.py" } ]
diff --git a/bookwyrm/activitypub/note.py b/bookwyrm/activitypub/note.py index b478c96dda..72fbe5fc2d 100644 --- a/bookwyrm/activitypub/note.py +++ b/bookwyrm/activitypub/note.py @@ -53,7 +53,7 @@ class Comment(Note): @dataclass(init=False) class Review(Comment): ''' a full book review ''' - name: str + name: str = None rating: int = None type: str = 'Review' diff --git a/bookwyrm/templates/snippets/content_warning_field.html b/bookwyrm/templates/snippets/content_warning_field.html index e2f561f241..9249602af1 100644 --- a/bookwyrm/templates/snippets/content_warning_field.html +++ b/bookwyrm/templates/snippets/content_warning_field.html @@ -1,4 +1,5 @@ -{% with uuid as uuid %} +{% load bookwyrm_tags %} +{% with 0|uuid as uuid %} <div class="control"> <div> <input type="radio" class="toggle-control" name="sensitive" value="false" id="hide-spoilers-{{ uuid }}" {% if not parent_status.content_warning %}checked{% endif %}> diff --git a/bookwyrm/templates/snippets/rate_action.html b/bookwyrm/templates/snippets/rate_action.html index b9c443ce57..49cb87ed69 100644 --- a/bookwyrm/templates/snippets/rate_action.html +++ b/bookwyrm/templates/snippets/rate_action.html @@ -4,7 +4,9 @@ {% for i in '12345'|make_list %} <form name="rate" action="/rate/" method="POST" onsubmit="return rate_stars(event)"> {% csrf_token %} + <input type="hidden" name="user" value="{{ request.user.id }}"> <input type="hidden" name="book" value="{{ book.id }}"> + <input type="hidden" name="privacy" value="public"> <input type="hidden" name="rating" value="{{ forloop.counter }}"> <button type="submit" class="icon icon-star-{% if book|rating:user < forloop.counter %}empty{% else %}full{% endif %}"> <span class="is-sr-only">{{ forloop.counter }} star{{ forloop.counter | pluralize }}</span>
PennyLaneAI__pennylane-2766
[ { "content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule defines the symbolic operation that indicates the adjoint of an operator.\n\"\"\"\nfrom pennylane.operation import Operator, Operation, AdjointUndefinedError, Observable\nfrom pennylane.queuing import QueuingContext\nfrom pennylane.math import transpose, conj\n\n\n# pylint: disable=no-member\nclass AdjointOperation(Operation):\n \"\"\"This mixin class is dynamically added to an ``Adjoint`` instance if the provided base class is an ``Operation``.\n\n .. warning::\n This mixin class should never be initialized independent of ``Adjoint``.\n\n Overriding the dunder method ``__new__`` in ``Adjoint`` allows us to customize the creation of an instance and dynamically\n add in parent classes.\n\n .. note:: Once the ``Operation`` class does not contain any unique logic any more, this mixin class can be removed.\n \"\"\"\n\n # This inverse behavior only needs to temporarily patch behavior until in-place inversion is removed.\n\n @property\n def _inverse(self):\n return self.base._inverse # pylint: disable=protected-access\n\n @_inverse.setter\n def _inverse(self, boolean):\n self.base._inverse = boolean # pylint: disable=protected-access\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n\n def inv(self):\n self.base.inv()\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n return self\n\n @property\n def base_name(self):\n return self._name\n\n @property\n def name(self):\n return self._name\n\n # pylint: disable=missing-function-docstring\n @property\n def basis(self):\n return self.base.basis\n\n @property\n def control_wires(self):\n return self.base.control_wires\n\n def single_qubit_rot_angles(self):\n omega, theta, phi = self.base.single_qubit_rot_angles()\n return [-phi, -theta, -omega]\n\n @property\n def grad_method(self):\n return self.base.grad_method\n\n # pylint: disable=missing-function-docstring\n @property\n def grad_recipe(self):\n return self.base.grad_recipe\n\n def get_parameter_shift(self, idx):\n return self.base.get_parameter_shift(idx)\n\n @property\n def parameter_frequencies(self):\n return self.base.parameter_frequencies\n\n def generator(self):\n return -1.0 * self.base.generator()\n\n\n# pylint: disable=too-many-public-methods\nclass Adjoint(Operator):\n \"\"\"\n The Adjoint of an operator.\n\n Args:\n base (~.operation.Operator): The operator that is adjointed.\n\n .. seealso:: :func:`~.adjoint`, :meth:`~.operation.Operator.adjoint`\n\n This is a *developer*-facing class, and the :func:`~.adjoint` transform should be used to construct instances\n of this class.\n\n **Example**\n\n >>> op = Adjoint(qml.S(0))\n >>> op.name\n 'Adjoint(S)'\n >>> qml.matrix(op)\n array([[1.-0.j, 0.-0.j],\n [0.-0.j, 0.-1.j]])\n >>> qml.generator(Adjoint(qml.RX(1.0, wires=0)))\n (PauliX(wires=[0]), 0.5)\n >>> Adjoint(qml.RX(1.234, wires=0)).data\n [1.234]\n\n .. 
details::\n :title: Developer Details\n\n This class mixes in parent classes based on the inheritance tree of the provided ``Operator``. For example, when\n provided an ``Operation``, the instance will inherit from ``Operation`` and the ``AdjointOperation`` mixin.\n\n >>> op = Adjoint(qml.RX(1.234, wires=0))\n >>> isinstance(op, qml.operation.Operation)\n True\n >>> isinstance(op, AdjointOperation)\n True\n >>> op.grad_method\n 'A'\n\n If the base class is an ``Observable`` instead, the ``Adjoint`` will be an ``Observable`` as well.\n\n >>> op = Adjoint(1.0 * qml.PauliX(0))\n >>> isinstance(op, qml.operation.Observable)\n True\n >>> isinstance(op, qml.operation.Operation)\n False\n >>> Adjoint(qml.PauliX(0)) @ qml.PauliY(1)\n Adjoint(PauliX)(wires=[0]) @ PauliY(wires=[1])\n\n \"\"\"\n\n _operation_type = None # type if base inherits from operation and not observable\n _operation_observable_type = None # type if base inherits from both operation and observable\n _observable_type = None # type if base inherits from observable and not operation\n\n # pylint: disable=unused-argument\n def __new__(cls, base=None, do_queue=True, id=None):\n \"\"\"Mixes in parents based on inheritance structure of base.\n\n Though all the types will be named \"Adjoint\", their *identity* and location in memory will be different\n based on ``base``'s inheritance. We cache the different types in private class variables so that:\n\n >>> Adjoint(op).__class__ is Adjoint(op).__class__\n True\n >>> type(Adjoint(op)) == type(Adjoint(op))\n True\n >>> Adjoint(qml.RX(1.2, wires=0)).__class__ is Adjoint._operation_type\n True\n >>> Adjoint(qml.PauliX(0)).__class__ is Adjoint._operation_observable_type\n True\n\n \"\"\"\n\n if isinstance(base, Operation):\n if isinstance(base, Observable):\n if cls._operation_observable_type is None:\n class_bases = (AdjointOperation, Adjoint, Observable, Operation)\n cls._operation_observable_type = type(\n \"Adjoint\", class_bases, dict(cls.__dict__)\n )\n return object.__new__(cls._operation_observable_type)\n\n # not an observable\n if cls._operation_type is None:\n class_bases = (AdjointOperation, Adjoint, Operation)\n cls._operation_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._operation_type)\n\n if isinstance(base, Observable):\n if cls._observable_type is None:\n class_bases = (Adjoint, Observable)\n cls._observable_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._observable_type)\n\n return object.__new__(Adjoint)\n\n # pylint: disable=attribute-defined-outside-init\n def __copy__(self):\n # this method needs to be overwritten becuase the base must be copied too.\n copied_op = object.__new__(type(self))\n # copied_op must maintain inheritance structure of self\n # For example, it must keep AdjointOperation if self has it\n # this way preserves inheritance structure\n\n copied_base = self.base.__copy__()\n copied_op._hyperparameters = {\"base\": copied_base}\n for attr, value in vars(self).items():\n if attr not in {\"data\", \"base\", \"_hyperparameters\"}:\n setattr(copied_op, attr, value)\n\n return copied_op\n\n # pylint: disable=super-init-not-called\n def __init__(self, base=None, do_queue=True, id=None):\n self.hyperparameters[\"base\"] = base\n self._id = id\n self.queue_idx = None\n\n self._name = f\"Adjoint({self.base.name})\"\n\n if do_queue:\n self.queue()\n\n @property\n def base(self):\n \"\"\"The operator that is adjointed.\"\"\"\n return self.hyperparameters[\"base\"]\n\n @property\n def 
data(self):\n \"\"\"Trainable parameters that the operator depends on.\"\"\"\n return self.base.data\n\n @data.setter\n def data(self, new_data):\n \"\"\"Allows us to set base operation parameters.\"\"\"\n self.base.data = new_data\n\n @property\n def parameters(self):\n return self.base.parameters\n\n @property\n def num_params(self):\n return self.base.num_params\n\n @property\n def wires(self):\n return self.base.wires\n\n # pylint: disable=protected-access\n @property\n def _wires(self):\n return self.base._wires\n\n # pylint: disable=protected-access\n @_wires.setter\n def _wires(self, new_wires):\n # we should have a better way of updating wires than accessing a private attribute.\n self.base._wires = new_wires\n\n @property\n def num_wires(self):\n return self.base.num_wires\n\n @property\n def batch_size(self):\n return self.base.batch_size\n\n @property\n def ndim_params(self):\n return self.base.ndim_params\n\n @property\n def is_hermitian(self):\n return self.base.is_hermitian\n\n def queue(self, context=QueuingContext):\n context.safe_update_info(self.base, owner=self)\n context.append(self, owns=self.base)\n\n return self\n\n def label(self, decimals=None, base_label=None, cache=None):\n return self.base.label(decimals, base_label, cache=cache) + \"†\"\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_matrix(*params, base=None):\n base_matrix = base.compute_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix))\n\n def decomposition(self):\n try:\n return [self.base.adjoint()]\n except AdjointUndefinedError:\n base_decomp = self.base.decomposition()\n return [Adjoint(op) for op in reversed(base_decomp)]\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_sparse_matrix(*params, base=None):\n base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix)).tocsr()\n\n def eigvals(self):\n # Cannot define ``compute_eigvals`` because Hermitian only defines ``eigvals``\n return conj(self.base.eigvals())\n\n def diagonalizing_gates(self):\n return self.base.diagonalizing_gates()\n\n # pylint: disable=arguments-renamed, invalid-overridden-method\n @property\n def has_matrix(self):\n return self.base.has_matrix\n\n def adjoint(self):\n return self.base\n\n @property\n def _queue_category(self):\n \"\"\"Used for sorting objects into their respective lists in `QuantumTape` objects.\n\n This property is a temporary solution that should not exist long-term and should not be\n used outside of ``QuantumTape._process_queue``.\n\n Returns ``_queue_cateogory`` for base operator.\n \"\"\"\n return self.base._queue_category # pylint: disable=protected-access\n", "path": "pennylane/ops/op_math/adjoint_class.py" } ]
[ { "content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule defines the symbolic operation that indicates the adjoint of an operator.\n\"\"\"\nfrom pennylane.operation import Operator, Operation, AdjointUndefinedError, Observable\nfrom pennylane.queuing import QueuingContext\nfrom pennylane.math import transpose, conj\n\n\n# pylint: disable=no-member\nclass AdjointOperation(Operation):\n \"\"\"This mixin class is dynamically added to an ``Adjoint`` instance if the provided base class is an ``Operation``.\n\n .. warning::\n This mixin class should never be initialized independent of ``Adjoint``.\n\n Overriding the dunder method ``__new__`` in ``Adjoint`` allows us to customize the creation of an instance and dynamically\n add in parent classes.\n\n .. note:: Once the ``Operation`` class does not contain any unique logic any more, this mixin class can be removed.\n \"\"\"\n\n # This inverse behavior only needs to temporarily patch behavior until in-place inversion is removed.\n\n @property\n def _inverse(self):\n return self.base._inverse # pylint: disable=protected-access\n\n @_inverse.setter\n def _inverse(self, boolean):\n self.base._inverse = boolean # pylint: disable=protected-access\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n\n def inv(self):\n self.base.inv()\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n return self\n\n @property\n def base_name(self):\n return self._name\n\n @property\n def name(self):\n return self._name\n\n # pylint: disable=missing-function-docstring\n @property\n def basis(self):\n return self.base.basis\n\n @property\n def control_wires(self):\n return self.base.control_wires\n\n def single_qubit_rot_angles(self):\n omega, theta, phi = self.base.single_qubit_rot_angles()\n return [-phi, -theta, -omega]\n\n @property\n def grad_method(self):\n return self.base.grad_method\n\n # pylint: disable=missing-function-docstring\n @property\n def grad_recipe(self):\n return self.base.grad_recipe\n\n def get_parameter_shift(self, idx):\n return self.base.get_parameter_shift(idx)\n\n @property\n def parameter_frequencies(self):\n return self.base.parameter_frequencies\n\n def generator(self):\n return -1.0 * self.base.generator()\n\n\n# pylint: disable=too-many-public-methods\nclass Adjoint(Operator):\n \"\"\"\n The Adjoint of an operator.\n\n Args:\n base (~.operation.Operator): The operator that is adjointed.\n\n .. seealso:: :func:`~.adjoint`, :meth:`~.operation.Operator.adjoint`\n\n This is a *developer*-facing class, and the :func:`~.adjoint` transform should be used to construct instances\n of this class.\n\n **Example**\n\n >>> op = Adjoint(qml.S(0))\n >>> op.name\n 'Adjoint(S)'\n >>> qml.matrix(op)\n array([[1.-0.j, 0.-0.j],\n [0.-0.j, 0.-1.j]])\n >>> qml.generator(Adjoint(qml.RX(1.0, wires=0)))\n (PauliX(wires=[0]), 0.5)\n >>> Adjoint(qml.RX(1.234, wires=0)).data\n [1.234]\n\n .. 
details::\n :title: Developer Details\n\n This class mixes in parent classes based on the inheritance tree of the provided ``Operator``. For example, when\n provided an ``Operation``, the instance will inherit from ``Operation`` and the ``AdjointOperation`` mixin.\n\n >>> op = Adjoint(qml.RX(1.234, wires=0))\n >>> isinstance(op, qml.operation.Operation)\n True\n >>> isinstance(op, AdjointOperation)\n True\n >>> op.grad_method\n 'A'\n\n If the base class is an ``Observable`` instead, the ``Adjoint`` will be an ``Observable`` as well.\n\n >>> op = Adjoint(1.0 * qml.PauliX(0))\n >>> isinstance(op, qml.operation.Observable)\n True\n >>> isinstance(op, qml.operation.Operation)\n False\n >>> Adjoint(qml.PauliX(0)) @ qml.PauliY(1)\n Adjoint(PauliX)(wires=[0]) @ PauliY(wires=[1])\n\n \"\"\"\n\n _operation_type = None # type if base inherits from operation and not observable\n _operation_observable_type = None # type if base inherits from both operation and observable\n _observable_type = None # type if base inherits from observable and not operation\n\n # pylint: disable=unused-argument\n def __new__(cls, base=None, do_queue=True, id=None):\n \"\"\"Mixes in parents based on inheritance structure of base.\n\n Though all the types will be named \"Adjoint\", their *identity* and location in memory will be different\n based on ``base``'s inheritance. We cache the different types in private class variables so that:\n\n >>> Adjoint(op).__class__ is Adjoint(op).__class__\n True\n >>> type(Adjoint(op)) == type(Adjoint(op))\n True\n >>> Adjoint(qml.RX(1.2, wires=0)).__class__ is Adjoint._operation_type\n True\n >>> Adjoint(qml.PauliX(0)).__class__ is Adjoint._operation_observable_type\n True\n\n \"\"\"\n\n if isinstance(base, Operation):\n if isinstance(base, Observable):\n if cls._operation_observable_type is None:\n class_bases = (AdjointOperation, Adjoint, Observable, Operation)\n cls._operation_observable_type = type(\n \"Adjoint\", class_bases, dict(cls.__dict__)\n )\n return object.__new__(cls._operation_observable_type)\n\n # not an observable\n if cls._operation_type is None:\n class_bases = (AdjointOperation, Adjoint, Operation)\n cls._operation_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._operation_type)\n\n if isinstance(base, Observable):\n if cls._observable_type is None:\n class_bases = (Adjoint, Observable)\n cls._observable_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._observable_type)\n\n return object.__new__(Adjoint)\n\n # pylint: disable=attribute-defined-outside-init\n def __copy__(self):\n # this method needs to be overwritten becuase the base must be copied too.\n copied_op = object.__new__(type(self))\n # copied_op must maintain inheritance structure of self\n # For example, it must keep AdjointOperation if self has it\n # this way preserves inheritance structure\n\n copied_base = self.base.__copy__()\n copied_op._hyperparameters = {\"base\": copied_base}\n for attr, value in vars(self).items():\n if attr not in {\"data\", \"base\", \"_hyperparameters\"}:\n setattr(copied_op, attr, value)\n\n return copied_op\n\n # pylint: disable=super-init-not-called\n def __init__(self, base=None, do_queue=True, id=None):\n self.hyperparameters[\"base\"] = base\n self._id = id\n self.queue_idx = None\n\n self._name = f\"Adjoint({self.base.name})\"\n\n if do_queue:\n self.queue()\n\n @property\n def base(self):\n \"\"\"The operator that is adjointed.\"\"\"\n return self.hyperparameters[\"base\"]\n\n @property\n def 
data(self):\n \"\"\"Trainable parameters that the operator depends on.\"\"\"\n return self.base.data\n\n @data.setter\n def data(self, new_data):\n \"\"\"Allows us to set base operation parameters.\"\"\"\n self.base.data = new_data\n\n @property\n def parameters(self):\n return self.base.parameters\n\n @property\n def num_params(self):\n return self.base.num_params\n\n @property\n def wires(self):\n return self.base.wires\n\n # pylint: disable=protected-access\n @property\n def _wires(self):\n return self.base._wires\n\n # pylint: disable=protected-access\n @_wires.setter\n def _wires(self, new_wires):\n # we should have a better way of updating wires than accessing a private attribute.\n self.base._wires = new_wires\n\n @property\n def num_wires(self):\n return self.base.num_wires\n\n @property\n def batch_size(self):\n return self.base.batch_size\n\n @property\n def ndim_params(self):\n return self.base.ndim_params\n\n @property\n def is_hermitian(self):\n return self.base.is_hermitian\n\n def queue(self, context=QueuingContext):\n context.safe_update_info(self.base, owner=self)\n context.append(self, owns=self.base)\n\n return self\n\n def label(self, decimals=None, base_label=None, cache=None):\n return self.base.label(decimals, base_label, cache=cache) + \"†\"\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_matrix(*params, base=None):\n base_matrix = base.compute_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix))\n\n def decomposition(self):\n try:\n return [self.base.adjoint()]\n except AdjointUndefinedError:\n base_decomp = self.base.decomposition()\n return [Adjoint(op) for op in reversed(base_decomp)]\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_sparse_matrix(*params, base=None):\n base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix)).tocsr()\n\n def eigvals(self):\n # Cannot define ``compute_eigvals`` because Hermitian only defines ``eigvals``\n return conj(self.base.eigvals())\n\n def diagonalizing_gates(self):\n return self.base.diagonalizing_gates()\n\n # pylint: disable=arguments-renamed, invalid-overridden-method\n @property\n def has_matrix(self):\n return self.base.has_matrix\n\n def adjoint(self):\n return self.base.queue()\n\n @property\n def _queue_category(self):\n \"\"\"Used for sorting objects into their respective lists in `QuantumTape` objects.\n\n This property is a temporary solution that should not exist long-term and should not be\n used outside of ``QuantumTape._process_queue``.\n\n Returns ``_queue_cateogory`` for base operator.\n \"\"\"\n return self.base._queue_category # pylint: disable=protected-access\n", "path": "pennylane/ops/op_math/adjoint_class.py" } ]
diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 96f04e78dde..b254d833c3d 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -60,6 +60,11 @@ <h3>Documentation</h3> +<h3>Bug fixes</h3> + +* The adjoint of an adjoint has a correct `expand` result. + [(#2766)](https://github.com/PennyLaneAI/pennylane/pull/2766) + <h3>Contributors</h3> This release contains contributions from (in alphabetical order): diff --git a/pennylane/ops/op_math/adjoint_class.py b/pennylane/ops/op_math/adjoint_class.py index 750e515abcb..4de4e13d78d 100644 --- a/pennylane/ops/op_math/adjoint_class.py +++ b/pennylane/ops/op_math/adjoint_class.py @@ -310,7 +310,7 @@ def has_matrix(self): return self.base.has_matrix def adjoint(self): - return self.base + return self.base.queue() @property def _queue_category(self): diff --git a/tests/ops/op_math/test_adjoint_op.py b/tests/ops/op_math/test_adjoint_op.py index de81cb5025e..bfac5eb6dbc 100644 --- a/tests/ops/op_math/test_adjoint_op.py +++ b/tests/ops/op_math/test_adjoint_op.py @@ -608,6 +608,18 @@ def test_no_base_gate_decomposition(self): with pytest.raises(qml.operation.DecompositionUndefinedError): Adjoint(base).decomposition() + def test_adjoint_of_adjoint(self): + """Test that the adjoint an adjoint returns the base operator through both decomposition and expand.""" + + base = qml.PauliX(0) + adj1 = Adjoint(base) + adj2 = Adjoint(adj1) + + assert adj2.decomposition()[0] is base + + tape = adj2.expand() + assert tape.circuit[0] is base + class TestIntegration: """Test the integration of the Adjoint class with qnodes and gradients."""
crytic__slither-1110
[ { "content": "from slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue\n\n\nclass Phi(OperationWithLValue):\n def __init__(self, left_variable, nodes):\n # When Phi operations are created the\n # correct indexes of the variables are not yet computed\n # We store the nodes where the variables are written\n # so we can update the rvalues of the Phi operation\n # after its instantiation\n assert is_valid_lvalue(left_variable)\n assert isinstance(nodes, set)\n super().__init__()\n self._lvalue = left_variable\n self._rvalues = []\n self._nodes = nodes\n\n @property\n def read(self):\n return self.rvalues\n\n @property\n def rvalues(self):\n return self._rvalues\n\n @rvalues.setter\n def rvalues(self, vals):\n self._rvalues = vals\n\n @property\n def nodes(self):\n return self._nodes\n\n def __str__(self):\n return \"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "path": "slither/slithir/operations/phi.py" } ]
[ { "content": "from slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue\n\n\nclass Phi(OperationWithLValue):\n def __init__(self, left_variable, nodes):\n # When Phi operations are created the\n # correct indexes of the variables are not yet computed\n # We store the nodes where the variables are written\n # so we can update the rvalues of the Phi operation\n # after its instantiation\n assert is_valid_lvalue(left_variable)\n assert isinstance(nodes, set)\n super().__init__()\n self._lvalue = left_variable\n self._rvalues = []\n self._nodes = nodes\n\n @property\n def read(self):\n return self.rvalues\n\n @property\n def rvalues(self):\n return self._rvalues\n\n @rvalues.setter\n def rvalues(self, vals):\n self._rvalues = vals\n\n @property\n def nodes(self):\n return self._nodes\n\n def __str__(self):\n return f\"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "path": "slither/slithir/operations/phi.py" } ]
diff --git a/slither/slithir/operations/phi.py b/slither/slithir/operations/phi.py index 341d90305a..a4fa0217e6 100644 --- a/slither/slithir/operations/phi.py +++ b/slither/slithir/operations/phi.py @@ -33,4 +33,4 @@ def nodes(self): return self._nodes def __str__(self): - return "{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})" + return f"{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})"
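The one-character fix above turns a plain string into an f-string; without the f prefix the braces are emitted literally instead of being interpolated. A minimal sketch of the difference, using made-up placeholder values:

    lvalue = "x_1"
    rvalues = ["x_0", "x_2"]

    plain = "{lvalue}({rvalues})"          # no f prefix: braces stay as literal text
    interpolated = f"{lvalue}({rvalues})"  # f-string: names are substituted

    print(plain)         # {lvalue}({rvalues})
    print(interpolated)  # x_1(['x_0', 'x_2'])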
acl-org__acl-anthology-3109
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"\nTakes a list of XML files on STDIN, and prints all the volumes\nwithin each of those files. e.g.,\n\n git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH\n\nUsed to find the list of volumes to generate previews for.\n\"\"\"\n\nimport sys\nimport argparse\nimport lxml.etree as etree\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url_root\")\nargs = parser.parse_args()\n\nvolumes = []\nfor filepath in sys.stdin:\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n\n try:\n tree = etree.parse(filepath.rstrip())\n except Exception:\n continue\n\n root = tree.getroot()\n collection_id = root.attrib[\"id\"]\n for volume in root.findall(\"./volume\"):\n volume_name = volume.attrib[\"id\"]\n volume_id = f\"{collection_id}-{volume_name}\"\n volumes.append(f\"[{volume_id}]({args.url_root}/{volume_id})\")\n\nif len(volumes) > 50:\n volumes = volumes[0:50] + [f\"(plus {len(volumes)-50} more...)\"]\n\nprint(\", \".join(volumes))\n", "path": "bin/volumes_from_diff.py" } ]
[ { "content": "#!/usr/bin/env python3\n\n\"\"\"\nTakes a list of XML files on STDIN, and prints all the volumes\nwithin each of those files. e.g.,\n\n git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH\n\nUsed to find the list of volumes to generate previews for.\n\"\"\"\n\nimport sys\nimport argparse\nimport lxml.etree as etree\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url_root\")\nargs = parser.parse_args()\n\nvolumes = []\nfor filepath in sys.stdin:\n filepath = filepath.rstrip()\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n\n try:\n tree = etree.parse(filepath.rstrip())\n except Exception:\n continue\n\n root = tree.getroot()\n collection_id = root.attrib[\"id\"]\n for volume in root.findall(\"./volume\"):\n volume_name = volume.attrib[\"id\"]\n volume_id = f\"{collection_id}-{volume_name}\"\n volumes.append(f\"[{volume_id}]({args.url_root}/{volume_id})\")\n\nif len(volumes) > 50:\n volumes = volumes[0:50] + [f\"(plus {len(volumes)-50} more...)\"]\n\nprint(\", \".join(volumes))\n", "path": "bin/volumes_from_diff.py" } ]
diff --git a/bin/volumes_from_diff.py b/bin/volumes_from_diff.py index 4f73ab59d4..07a1cedf4a 100755 --- a/bin/volumes_from_diff.py +++ b/bin/volumes_from_diff.py @@ -20,6 +20,7 @@ volumes = [] for filepath in sys.stdin: + filepath = filepath.rstrip() if filepath.startswith("python/") or not filepath.endswith(".xml"): continue diff --git a/data/xml/2023.rocling.xml b/data/xml/2023.rocling.xml index e8e181ee87..74e1eaf9d8 100644 --- a/data/xml/2023.rocling.xml +++ b/data/xml/2023.rocling.xml @@ -1,6 +1,6 @@ <?xml version='1.0' encoding='UTF-8'?> <collection id="2023.rocling"> - <volume id="1" ingest-date="2023-10-21" type="proceedings"> + <volume id="1" ingest-date="2024-03-06" type="proceedings"> <meta> <booktitle>Proceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)</booktitle> <editor><first>Jheng-Long</first><last>Wu</last></editor> @@ -9,7 +9,7 @@ <address>Taipei City, Taiwan</address> <month>October</month> <year>2023</year> - <url hash="450e9262">2023.rocling-1</url> + <url hash="0e12f821">2023.rocling-1</url> <venue>rocling</venue> </meta> <frontmatter> @@ -22,7 +22,7 @@ <author><first>Canasai</first><last>Kruengkrai</last></author> <author><first>Junichi</first><last>Yamagishi</last></author> <pages>1–11</pages> - <url hash="df74e918">2023.rocling-1.1</url> + <url hash="ef739e00">2023.rocling-1.1</url> <bibkey>chang-etal-2023-xfever</bibkey> </paper> <paper id="2"> @@ -30,25 +30,25 @@ <author><first>Yu-Kai</first><last>Lee</last></author> <author><first>Chia-Hui</first><last>Chang</last></author> <pages>12–20</pages> - <url hash="636e7be5">2023.rocling-1.2</url> - <bibkey>yu-kai-lee-2023-story</bibkey> + <url hash="baf67443">2023.rocling-1.2</url> + <bibkey>lee-chang-2023-story-co</bibkey> </paper> <paper id="3"> <title>Improving End-to-end <fixed-case>T</fixed-case>aiwanese-Speech-to-<fixed-case>C</fixed-case>hinese-Text Translation by Semi-supervised Learning</title> <author><first>Yu-Chun</first><last>Lin</last></author> <author><first>Chung-Che</first><last>Wang</last></author> - <author><first>Jyh-Shing Roger</first><last>Jang</last></author> + <author><first>Jyh-Shing</first><last>Jang</last></author> <pages>21–28</pages> - <url hash="e7e35fa4">2023.rocling-1.3</url> - <bibkey>yu-chun-lin-jang-2023-improving</bibkey> + <url hash="34b3d727">2023.rocling-1.3</url> + <bibkey>lin-etal-2023-improving-end</bibkey> </paper> <paper id="4"> <title>Construction of Message Deliver Service Dialog Systems</title> <author><first>Cheng-Hung</first><last>Yeh</last></author> <author><first>Chia-Hui</first><last>Chang</last></author> <pages>29–37</pages> - <url hash="3b774aec">2023.rocling-1.4</url> - <bibkey>cheng-hung-yeh-2023-construction</bibkey> + <url hash="d6d5c217">2023.rocling-1.4</url> + <bibkey>yeh-chang-2023-construction</bibkey> </paper> <paper id="5"> <title>Auxiliary loss to attention head for end to end speaker diarization</title> @@ -56,7 +56,7 @@ <author><first>Jiun-Ting</first><last>Li</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>38–43</pages> - <url hash="57890d61">2023.rocling-1.5</url> + <url hash="f7060427">2023.rocling-1.5</url> <bibkey>yang-etal-2023-auxiliary</bibkey> </paper> <paper id="6"> @@ -65,7 +65,7 @@ <author><first>Huei-Ling</first><last>Lai</last></author> <author><first>Jyi-Shane</first><last>Liu</last></author> <pages>44–53</pages> - <url hash="6f3b36ac">2023.rocling-1.6</url> + <url hash="24aad126">2023.rocling-1.6</url> <bibkey>yeh-etal-2023-pilot</bibkey> </paper> <paper 
id="7"> @@ -74,7 +74,7 @@ <author><first>Tien-Hong</first><last>Lo</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>54–62</pages> - <url hash="9aeabbca">2023.rocling-1.7</url> + <url hash="2a2b0536">2023.rocling-1.7</url> <bibkey>huang-etal-2023-leveraging</bibkey> </paper> <paper id="8"> @@ -82,8 +82,8 @@ <author><first>Karol</first><last>Nowakowski</last></author> <author><first>Michal</first><last>Ptaszynski</last></author> <pages>63–70</pages> - <url hash="91e158e1">2023.rocling-1.8</url> - <bibkey>karol-nowakowski-2023-improving</bibkey> + <url hash="f6541bb2">2023.rocling-1.8</url> + <bibkey>nowakowski-ptaszynski-2023-improving</bibkey> </paper> <paper id="9"> <title><fixed-case>A</fixed-case>a<fixed-case>WL</fixed-case>oss: An Artifact-aware Weighted Loss Function for Speech Enhancement</title> @@ -91,7 +91,7 @@ <author><first>Kuan-Hsun</first><last>Ho</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>71–78</pages> - <url hash="1d12a483">2023.rocling-1.9</url> + <url hash="badd5d48">2023.rocling-1.9</url> <bibkey>yu-etal-2023-aawloss</bibkey> </paper> <paper id="10"> @@ -99,9 +99,9 @@ <author><first>Hsiao-Wei</first><last>Chou</last></author> <author><first>Ping-Yen</first><last>Wu</last></author> <author><first>Jia-Jang</first><last>Tu</last></author> - <author><first>Kuanyu</first><last>Chen</last></author> + <author><first>Kuan-yu</first><last>Chen</last></author> <pages>79–88</pages> - <url hash="31abf04c">2023.rocling-1.10</url> + <url hash="24387905">2023.rocling-1.10</url> <bibkey>chou-etal-2023-wordrank</bibkey> </paper> <paper id="11"> @@ -111,7 +111,7 @@ <author><first>Ming-Ju</first><last>Tsai</last></author> <author><first>Hong-Jie</first><last>Dai</last></author> <pages>89–97</pages> - <url hash="cb761fcd">2023.rocling-1.11</url> + <url hash="da00dcfe">2023.rocling-1.11</url> <bibkey>zhang-etal-2023-investigating-cross</bibkey> </paper> <paper id="12"> @@ -121,7 +121,7 @@ <author><first>Sally</first><last>Chen</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>98–105</pages> - <url hash="08ed9f9b">2023.rocling-1.12</url> + <url hash="ca66fa23">2023.rocling-1.12</url> <bibkey>peng-etal-2023-enhancing</bibkey> </paper> <paper id="13"> @@ -130,7 +130,7 @@ <author><first>Hou-Chiang</first><last>Tseng</last></author> <author><first>Yao-Ting</first><last>Sung</last></author> <pages>106–115</pages> - <url hash="d038f3d4">2023.rocling-1.13</url> + <url hash="c3a7b4e5">2023.rocling-1.13</url> <bibkey>tai-etal-2023-impact</bibkey> </paper> <paper id="14"> @@ -139,7 +139,7 @@ <author><first>Kun-Chuan</first><last>Tseng</last></author> <author><first>Men-Ching</first><last>Lei</last></author> <pages>116–123</pages> - <url hash="88c7eec9">2023.rocling-1.14</url> + <url hash="48e58879">2023.rocling-1.14</url> <bibkey>ho-etal-2023-multimodal</bibkey> </paper> <paper id="15"> @@ -148,8 +148,8 @@ <author><first>Eric</first><last>Atwell</last></author> <author><first>Mohammad Ammar</first><last>Alsalka</last></author> <pages>124–133</pages> - <url hash="105de88a">2023.rocling-1.15</url> - <bibkey>sarah-alnefaie-alsalka-2023-gpt</bibkey> + <url hash="ee07f1e3">2023.rocling-1.15</url> + <bibkey>alnefaie-etal-2023-gpt</bibkey> </paper> <paper id="16"> <title>Addressing the issue of Data Imbalance in Multi-granularity Pronunciation Assessment</title> @@ -159,7 +159,7 @@ <author><first>Berlin</first><last>Chen</last></author> <author><first>Wei-Cheng</first><last>Chao</last></author> <pages>134–140</pages> 
- <url hash="736be558">2023.rocling-1.16</url> + <url hash="862fb976">2023.rocling-1.16</url> <bibkey>lin-etal-2023-addressing</bibkey> </paper> <paper id="17"> @@ -169,9 +169,9 @@ <author><first>Te-Lun</first><last>Yang</last></author> <author><first>Yu-Meng</first><last>Tang</last></author> <author><first>Ta-Lin</first><last>Chen</last></author> - <author><first>Jyh-Shing</first><last>Jang</last></author> + <author><first>Jyh-Shing Roger</first><last>Jang</last></author> <pages>141–156</pages> - <url hash="0a63bc4b">2023.rocling-1.17</url> + <url hash="6d8c1681">2023.rocling-1.17</url> <bibkey>zhang-etal-2023-category</bibkey> </paper> <paper id="18"> @@ -180,7 +180,7 @@ <author><first>Aye Nyein</first><last>Aung</last></author> <author><first>Jeih-Weih</first><last>Hung</last></author> <pages>157–161</pages> - <url hash="6c09b4e1">2023.rocling-1.18</url> + <url hash="11b661a4">2023.rocling-1.18</url> <bibkey>liao-etal-2023-esc</bibkey> </paper> <paper id="19"> @@ -189,7 +189,7 @@ <author><first>Meng-Heng</first><last>Zheng</last></author> <author><first>Jheng-Long</first><last>Wu</last></author> <pages>162–170</pages> - <url hash="0c9cbdb6">2023.rocling-1.19</url> + <url hash="155922c7">2023.rocling-1.19</url> <bibkey>liang-etal-2023-comparative</bibkey> </paper> <paper id="20"> @@ -201,30 +201,30 @@ <author><first>Ming-Hsiang</first><last>Su</last></author> <author><first>Yuan-Fu</first><last>Liao</last></author> <pages>171–178</pages> - <url hash="b165f767">2023.rocling-1.20</url> + <url hash="fc8e4ac8">2023.rocling-1.20</url> <bibkey>megela-etal-2023-fine</bibkey> </paper> <paper id="21"> <title>Phonotactic Constraints on Zhangzhou Onsets</title> <author><first>Yishan</first><last>Huang</last></author> <pages>179–187</pages> - <url hash="10cb3b77">2023.rocling-1.21</url> + <url hash="923a603f">2023.rocling-1.21</url> <bibkey>huang-2023-phonotactic</bibkey> </paper> <paper id="22"> <title>Analyzing <fixed-case>C</fixed-case>hat<fixed-case>GPT</fixed-case>’s Mathematical Deficiencies: Insights and Contributions</title> <author><first>Vincent</first><last>Cheng</last></author> - <author><first>Yu</first><last>Zhang</last></author> + <author><first>Zhang</first><last>Yu</last></author> <pages>188–193</pages> - <url hash="12d3d68f">2023.rocling-1.22</url> - <bibkey>cheng-zhang-2023-analyzing</bibkey> + <url hash="6cb29a21">2023.rocling-1.22</url> + <bibkey>cheng-yu-2023-analyzing</bibkey> </paper> <paper id="23"> <title>An Analysis of β€œ<fixed-case>X</fixed-case> shi <fixed-case>Y</fixed-case>” Metaphors in <fixed-case>M</fixed-case>andarin Corpora and Learning Materials</title> <author><first>Yu-Hsiang</first><last>Shen</last></author> <author><first>Siaw-Fong</first><last>Chung</last></author> <pages>194–201</pages> - <url hash="bba445d1">2023.rocling-1.23</url> + <url hash="22358e8b">2023.rocling-1.23</url> <bibkey>shen-chung-2023-analysis</bibkey> </paper> <paper id="24"> @@ -232,16 +232,16 @@ <author><first>Zhendong</first><last>Du</last></author> <author><first>Kenji</first><last>Hashimoto</last></author> <pages>202–209</pages> - <url hash="5bacd876">2023.rocling-1.24</url> - <bibkey>zhendong-du-2023-sentence</bibkey> + <url hash="b3efdd46">2023.rocling-1.24</url> + <bibkey>du-hashimoto-2023-sentence</bibkey> </paper> <paper id="25"> <title><fixed-case>T</fixed-case>aiwanese/<fixed-case>M</fixed-case>andarin Speech Recognition using <fixed-case>O</fixed-case>pen<fixed-case>AI</fixed-case>’s Whisper Multilingual Speech Recognition Engine Based on Generative Pretrained Transformer 
Architecture</title> <author><first>Yueh-Che</first><last>Hsieh</last></author> - <author><first>Renyuan</first><last>Lyu</last></author> - <author><first>Keming</first><last>Lyu</last></author> + <author><first>Ke-ming</first><last>Lyu</last></author> + <author><first>Ren-yuan</first><last>Lyu</last></author> <pages>210–214</pages> - <url hash="d13b2b32">2023.rocling-1.25</url> + <url hash="d5aeed76">2023.rocling-1.25</url> <bibkey>hsieh-etal-2023-taiwanese</bibkey> </paper> <paper id="26"> @@ -251,15 +251,15 @@ <author><first>Yue-Yang</first><last>He</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>215–221</pages> - <url hash="2e43adcf">2023.rocling-1.26</url> + <url hash="b9e6078e">2023.rocling-1.26</url> <bibkey>wu-etal-2023-knot</bibkey> </paper> <paper id="27"> <title>Compact <fixed-case>CNN</fixed-case>s for End-to-End Keyword Spotting on Resource-Constrained Edge <fixed-case>AI</fixed-case> Devices</title> <author><first>Joseph</first><last>Lin</last></author> - <author><first>Renyuan</first><last>Lyu</last></author> + <author><first>Ren-yuan</first><last>Lyu</last></author> <pages>222–226</pages> - <url hash="6daeb2d3">2023.rocling-1.27</url> + <url hash="6b451ea3">2023.rocling-1.27</url> <bibkey>lin-lyu-2023-compact</bibkey> </paper> <paper id="28"> @@ -268,7 +268,7 @@ <author><first>Chao-Min</first><last>Wu</last></author> <author><first>Yu</first><last>Tsao</last></author> <pages>227–232</pages> - <url hash="0856363d">2023.rocling-1.28</url> + <url hash="488aa6f0">2023.rocling-1.28</url> <bibkey>huang-etal-2023-sound</bibkey> </paper> <paper id="29"> @@ -277,7 +277,7 @@ <author><first>Hsin-Yun</first><last>Hsu</last></author> <author><first>Jheng-Long</first><last>Wu</last></author> <pages>233–241</pages> - <url hash="0740e03a">2023.rocling-1.29</url> + <url hash="151b7b2d">2023.rocling-1.29</url> <bibkey>chen-etal-2023-analyzing</bibkey> </paper> <paper id="30"> @@ -288,7 +288,7 @@ <author><first>Heng-Yu</first><last>Lin</last></author> <author><first>Yung-Chun</first><last>Chang</last></author> <pages>242–249</pages> - <url hash="1a6e6f44">2023.rocling-1.30</url> + <url hash="dd33b228">2023.rocling-1.30</url> <bibkey>sy-etal-2023-fine</bibkey> </paper> <paper id="31"> @@ -296,7 +296,7 @@ <author><first>Yi-Lin</first><last>Hsieh</last></author> <author><first>Ming-Hsiang</first><last>Su</last></author> <pages>250–254</pages> - <url hash="2141d0b6">2023.rocling-1.31</url> + <url hash="91119115">2023.rocling-1.31</url> <bibkey>hsieh-su-2023-application</bibkey> </paper> <paper id="32"> @@ -305,7 +305,7 @@ <author><first>Ya-Mien</first><last>Cheng</last></author> <author><first>Jheng-Long</first><last>Wu</last></author> <pages>255–261</pages> - <url hash="25ef3fbe">2023.rocling-1.32</url> + <url hash="d08a143f">2023.rocling-1.32</url> <bibkey>ho-etal-2023-relevance</bibkey> </paper> <paper id="33"> @@ -316,7 +316,7 @@ <author><first>Deborah</first><last>Watty</last></author> <author><first>Shu-Kai</first><last>Hsieh</last></author> <pages>262–269</pages> - <url hash="34e242c4">2023.rocling-1.33</url> + <url hash="2d6c7b34">2023.rocling-1.33</url> <bibkey>lin-etal-2023-solving</bibkey> </paper> <paper id="34"> @@ -324,7 +324,7 @@ <author><first>Yuan-Shiang</first><last>Tsai</last></author> <author><first>Yu-Yun</first><last>Chang</last></author> <pages>270–278</pages> - <url hash="f9607fbe">2023.rocling-1.34</url> + <url hash="60ab7a5f">2023.rocling-1.34</url> <bibkey>tsai-chang-2023-generative</bibkey> </paper> <paper id="35"> @@ -333,7 +333,7 @@ 
<author><first>Jin-Jian</first><last>Li</last></author> <author><first>Shu-Chang</first><last>Lin</last></author> <pages>279–287</pages> - <url hash="f96612b3">2023.rocling-1.35</url> + <url hash="deecb70a">2023.rocling-1.35</url> <bibkey>yang-etal-2023-lexical</bibkey> </paper> <paper id="36"> @@ -341,7 +341,7 @@ <author><first>Pin-Wen</first><last>Wang</last></author> <author><first>Siaw-Fong</first><last>Chung</last></author> <pages>288–291</pages> - <url hash="307fba02">2023.rocling-1.36</url> + <url hash="4aa6f7fb">2023.rocling-1.36</url> <bibkey>wang-chung-2023-analysis</bibkey> </paper> <paper id="37"> @@ -351,7 +351,7 @@ <author><first>Da-Chen</first><last>Lian</last></author> <author><first>Shu-Kai</first><last>Hsieh</last></author> <pages>292–299</pages> - <url hash="072b5711">2023.rocling-1.37</url> + <url hash="c117fcff">2023.rocling-1.37</url> <bibkey>yeh-etal-2023-evaluating</bibkey> </paper> <paper id="38"> @@ -362,17 +362,17 @@ <author><first>Matus</first><last>Pleva</last></author> <author><first>Daniel</first><last>Hladek</last></author> <pages>300–310</pages> - <url hash="8d892c94">2023.rocling-1.38</url> + <url hash="41859877">2023.rocling-1.38</url> <bibkey>su-etal-2023-novel</bibkey> </paper> <paper id="39"> <title><fixed-case>SCU</fixed-case>-<fixed-case>MESCL</fixed-case>ab at <fixed-case>ROCLING</fixed-case>-2023 Shared Task:Named Entity Recognition Using Multiple Classifier Model</title> <author><first>Tzu-En</first><last>Su</last></author> <author><first>Ruei-Cyuan</first><last>Su</last></author> - <author><first>Tsung-Hsien</first><last>Yang</last></author> <author><first>Ming-Hsiang</first><last>Su</last></author> + <author><first>Tsung-Hsien</first><last>Yang</last></author> <pages>311–316</pages> - <url hash="0719a0d0">2023.rocling-1.39</url> + <url hash="c068ec2a">2023.rocling-1.39</url> <bibkey>su-etal-2023-scu</bibkey> </paper> <paper id="40"> @@ -381,7 +381,7 @@ <author><first>You</first><last>Zhang</last></author> <author><first>Xiaobing</first><last>Zhou</last></author> <pages>317–324</pages> - <url hash="0607b184">2023.rocling-1.40</url> + <url hash="3e4ecc58">2023.rocling-1.40</url> <bibkey>pang-etal-2023-ynu</bibkey> </paper> <paper id="41"> @@ -390,7 +390,7 @@ <author><first>Jin</first><last>Wang</last></author> <author><first>Xuejie</first><last>Zhang</last></author> <pages>325–332</pages> - <url hash="f640b24d">2023.rocling-1.41</url> + <url hash="a7633400">2023.rocling-1.41</url> <bibkey>zhang-etal-2023-ynu</bibkey> </paper> <paper id="42"> @@ -399,7 +399,7 @@ <author><first>Tzu-Mi</first><last>Lin</last></author> <author><first>Chao-Yi</first><last>Chen</last></author> <pages>333–338</pages> - <url hash="55365a19">2023.rocling-1.42</url> + <url hash="f2fe8b43">2023.rocling-1.42</url> <bibkey>lee-etal-2023-overview</bibkey> </paper> <paper id="43"> @@ -413,7 +413,7 @@ <author><first>Sheh</first><last>Chen</last></author> <author><first>Jyh-Shing Roger</first><last>Jang</last></author> <pages>339–349</pages> - <url hash="b7e5a565">2023.rocling-1.43</url> + <url hash="470b5f3f">2023.rocling-1.43</url> <bibkey>wang-etal-2023-crowner</bibkey> </paper> <paper id="44"> @@ -421,7 +421,7 @@ <author><first>Xuelin</first><last>Wang</last></author> <author><first>Qihao</first><last>Yang</last></author> <pages>350–358</pages> - <url hash="a7f7c7c2">2023.rocling-1.44</url> + <url hash="8879d921">2023.rocling-1.44</url> <bibkey>wang-yang-2023-lingx</bibkey> </paper> <paper id="45"> @@ -430,16 +430,16 @@ 
<author><first>Tao-Hsing</first><last>Chang</last></author> <author><first>Fu-Yuan</first><last>Hsu</last></author> <pages>359–366</pages> - <url hash="397be2fe">2023.rocling-1.45</url> + <url hash="1e474173">2023.rocling-1.45</url> <bibkey>wu-etal-2023-islab</bibkey> </paper> <paper id="46"> <title>Accelerating <fixed-case>H</fixed-case>akka Speech Recognition Research and Development Using the Whisper Model</title> <author><first>Ching-Yuan</first><last>Chen</last></author> <author><first>Yun-Hsiang</first><last>Hsu</last></author> - <author><first>Chenchi</first><last>Chang</last></author> + <author><first>Chen-chi</first><last>Chang</last></author> <pages>367–370</pages> - <url hash="19a59d13">2023.rocling-1.46</url> + <url hash="25491996">2023.rocling-1.46</url> <bibkey>chen-etal-2023-accelerating</bibkey> </paper> <paper id="47"> @@ -450,15 +450,15 @@ <author><first>Hsin-Min</first><last>Wang</last></author> <author><first>Jia-Ching</first><last>Wang</last></author> <pages>371–376</pages> - <url hash="ed16d09b">2023.rocling-1.47</url> + <url hash="d31a2c13">2023.rocling-1.47</url> <bibkey>chen-etal-2023-enhancing</bibkey> </paper> <paper id="48"> <title>The <fixed-case>DMS</fixed-case>-<fixed-case>ASR</fixed-case> System for the <fixed-case>F</fixed-case>ormosa Speech Recognition Challenge 2023</title> - <author><first>Hsiu Jui</first><last>Chang</last></author> - <author><first>Wei Yuan</first><last>Chen</last></author> + <author><first>Hsiu-Jui</first><last>Chang</last></author> + <author><first>Wei-Yuan</first><last>Chen</last></author> <pages>377–379</pages> - <url hash="f7986f55">2023.rocling-1.48</url> + <url hash="b4991fec">2023.rocling-1.48</url> <bibkey>chang-chen-2023-dms</bibkey> </paper> <paper id="49"> @@ -466,16 +466,16 @@ <author><first>Hong-Jie</first><last>Hu</last></author> <author><first>Chia-Ping</first><last>Chen</last></author> <pages>380–385</pages> - <url hash="dbd3eb6f">2023.rocling-1.49</url> + <url hash="58fb8c8e">2023.rocling-1.49</url> <bibkey>hu-chen-2023-nsysu</bibkey> </paper> <paper id="50"> <title>The North System for <fixed-case>F</fixed-case>ormosa Speech Recognition Challenge 2023</title> <author><first>Li-Wei</first><last>Chen</last></author> - <author><first>Hung-Shin</first><last>Lee</last></author> <author><first>Kai-Chen</first><last>Cheng</last></author> + <author><first>Hung-Shin</first><last>Lee</last></author> <pages>386–389</pages> - <url hash="ae38e0c4">2023.rocling-1.50</url> + <url hash="7e3f2d9d">2023.rocling-1.50</url> <bibkey>chen-etal-2023-north</bibkey> </paper> <paper id="51"> @@ -484,7 +484,7 @@ <author><first>Chien-Hung</first><last>Lai</last></author> <author><first>Hsuan-Sheng</first><last>Chiu</last></author> <pages>390–396</pages> - <url hash="17cf3d1c">2023.rocling-1.51</url> + <url hash="93218f6e">2023.rocling-1.51</url> <bibkey>chiang-etal-2023-whisperhakka</bibkey> </paper> <paper id="52"> @@ -494,7 +494,7 @@ <author><first>Jhen-Ke</first><last>Lin</last></author> <author><first>Tien-Hong</first><last>Lo</last></author> <pages>397–402</pages> - <url hash="9594e55a">2023.rocling-1.52</url> + <url hash="057652d9">2023.rocling-1.52</url> <bibkey>lu-etal-2023-ntnu</bibkey> </paper> <paper id="53"> @@ -503,7 +503,7 @@ <author><first>Chung-Yi</first><last>Li</last></author> <author><first>Zih-Wei</first><last>Lin</last></author> <pages>403–408</pages> - <url hash="b37dcf95">2023.rocling-1.53</url> + <url hash="789d26fc">2023.rocling-1.53</url> <bibkey>lu-etal-2023-taiwan</bibkey> </paper> <paper id="54"> @@ -512,7 +512,7 @@ 
<author><first>Dong-Min</first><last>Li</last></author> <author><first>Chen-Yu</first><last>Chiang</last></author> <pages>409–413</pages> - <url hash="186af3e7">2023.rocling-1.54</url> + <url hash="1cb628f4">2023.rocling-1.54</url> <bibkey>su-etal-2023-preliminary</bibkey> </paper> <paper id="55"> @@ -522,7 +522,7 @@ <author><first>Meng-Ting</first><last>Tsai</last></author> <author><first>Berlin</first><last>Chen</last></author> <pages>414–422</pages> - <url hash="9e2ed6a8">2023.rocling-1.55</url> + <url hash="0622effb">2023.rocling-1.55</url> <bibkey>yang-etal-2023-ntnu</bibkey> </paper> <paper id="56"> @@ -530,7 +530,7 @@ <author><first>Yi-Chin</first><last>Huang</last></author> <author><first>Ji-Qian</first><last>Tsai</last></author> <pages>423–427</pages> - <url hash="1f0b65d8">2023.rocling-1.56</url> + <url hash="d65dbb9a">2023.rocling-1.56</url> <bibkey>huang-tsai-2023-whisper</bibkey> </paper> </volume>
bookwyrm-social__bookwyrm-3224
[ { "content": "\"\"\" template filters for really common utilities \"\"\"\nimport os\nimport re\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django.templatetags.static import static\n\nfrom bookwyrm.models import User\nfrom bookwyrm.settings import INSTANCE_ACTOR_USERNAME\n\nregister = template.Library()\n\n\n@register.filter(name=\"uuid\")\ndef get_uuid(identifier):\n \"\"\"for avoiding clashing ids when there are many forms\"\"\"\n return f\"{identifier}{uuid4()}\"\n\n\n@register.simple_tag(takes_context=False)\ndef join(*args):\n \"\"\"concatenate an arbitrary set of values\"\"\"\n return \"_\".join(str(a) for a in args)\n\n\n@register.filter(name=\"username\")\ndef get_user_identifier(user):\n \"\"\"use localname for local users, username for remote\"\"\"\n return user.localname if user.localname else user.username\n\n\n@register.filter(name=\"user_from_remote_id\")\ndef get_user_identifier_from_remote_id(remote_id):\n \"\"\"get the local user id from their remote id\"\"\"\n user = User.objects.get(remote_id=remote_id)\n return user if user else None\n\n\n@register.filter(name=\"book_title\")\ndef get_title(book, too_short=5):\n \"\"\"display the subtitle if the title is short\"\"\"\n if not book:\n return \"\"\n title = book.title\n if len(title) <= too_short and book.subtitle:\n title = _(\"%(title)s: %(subtitle)s\") % {\n \"title\": title,\n \"subtitle\": book.subtitle,\n }\n return title\n\n\n@register.simple_tag(takes_context=False)\ndef comparison_bool(str1, str2, reverse=False):\n \"\"\"idk why I need to write a tag for this, it returns a bool\"\"\"\n if reverse:\n return str1 != str2\n return str1 == str2\n\n\n@register.filter(is_safe=True)\ndef truncatepath(value, arg):\n \"\"\"Truncate a path by removing all directories except the first and truncating\"\"\"\n path = os.path.normpath(value.name)\n path_list = path.split(os.sep)\n try:\n length = int(arg)\n except ValueError: # invalid literal for int()\n return path_list[-1] # Fail silently.\n return f\"{path_list[0]}/…{path_list[-1][-length:]}\"\n\n\n@register.simple_tag(takes_context=False)\ndef get_book_cover_thumbnail(book, size=\"medium\", ext=\"jpg\"):\n \"\"\"Returns a book thumbnail at the specified size and extension,\n with fallback if needed\"\"\"\n if size == \"\":\n size = \"medium\"\n try:\n cover_thumbnail = getattr(book, f\"cover_bw_book_{size}_{ext}\")\n return cover_thumbnail.url\n except OSError:\n return static(\"images/no_cover.jpg\")\n\n\n@register.filter(name=\"get_isni_bio\")\ndef get_isni_bio(existing, author):\n \"\"\"Returns the isni bio string if an existing author has an isni listed\"\"\"\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"bio\") and auth_isni == re.sub(r\"\\D\", \"\", str(value.isni)):\n return mark_safe(f\"Author of <em>{value.bio}</em>\")\n\n return \"\"\n\n\n# pylint: disable=unused-argument\n@register.filter(name=\"get_isni\", needs_autoescape=True)\ndef get_isni(existing, author, autoescape=True):\n \"\"\"Returns the isni ID if an existing author has an ISNI listing\"\"\"\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"isni\") and auth_isni == re.sub(r\"\\D\", \"\", str(value.isni)):\n isni = value.isni\n return mark_safe(\n f'<input type=\"text\" 
name=\"isni-for-{author.id}\" value=\"{isni}\" hidden>'\n )\n return \"\"\n\n\n@register.simple_tag(takes_context=False)\ndef id_to_username(user_id):\n \"\"\"given an arbitrary remote id, return the username\"\"\"\n if user_id:\n url = urlparse(user_id)\n domain = url.netloc\n parts = url.path.split(\"/\")\n name = parts[-1]\n value = f\"{name}@{domain}\"\n\n return value\n\n\n@register.filter(name=\"get_file_size\")\ndef get_file_size(file):\n \"\"\"display the size of a file in human readable terms\"\"\"\n\n try:\n raw_size = os.stat(file.path).st_size\n if raw_size < 1024:\n return f\"{raw_size} bytes\"\n if raw_size < 1024**2:\n return f\"{raw_size/1024:.2f} KB\"\n if raw_size < 1024**3:\n return f\"{raw_size/1024**2:.2f} MB\"\n return f\"{raw_size/1024**3:.2f} GB\"\n except Exception: # pylint: disable=broad-except\n return \"\"\n\n\n@register.filter(name=\"get_user_permission\")\ndef get_user_permission(user):\n \"\"\"given a user, return their permission level\"\"\"\n\n return user.groups.first() or \"User\"\n\n\n@register.filter(name=\"is_instance_admin\")\ndef is_instance_admin(localname):\n \"\"\"Returns a boolean indicating whether the user is the instance admin account\"\"\"\n return localname == INSTANCE_ACTOR_USERNAME\n", "path": "bookwyrm/templatetags/utilities.py" } ]
[ { "content": "\"\"\" template filters for really common utilities \"\"\"\nimport os\nimport re\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django.templatetags.static import static\n\nfrom bookwyrm.models import User\nfrom bookwyrm.settings import INSTANCE_ACTOR_USERNAME\n\nregister = template.Library()\n\n\n@register.filter(name=\"uuid\")\ndef get_uuid(identifier):\n \"\"\"for avoiding clashing ids when there are many forms\"\"\"\n return f\"{identifier}{uuid4()}\"\n\n\n@register.simple_tag(takes_context=False)\ndef join(*args):\n \"\"\"concatenate an arbitrary set of values\"\"\"\n return \"_\".join(str(a) for a in args)\n\n\n@register.filter(name=\"username\")\ndef get_user_identifier(user):\n \"\"\"use localname for local users, username for remote\"\"\"\n return user.localname if user.localname else user.username\n\n\n@register.filter(name=\"user_from_remote_id\")\ndef get_user_identifier_from_remote_id(remote_id):\n \"\"\"get the local user id from their remote id\"\"\"\n user = User.objects.get(remote_id=remote_id)\n return user if user else None\n\n\n@register.filter(name=\"book_title\")\ndef get_title(book, too_short=5):\n \"\"\"display the subtitle if the title is short\"\"\"\n if not book:\n return \"\"\n title = book.title\n if len(title) <= too_short and book.subtitle:\n title = _(\"%(title)s: %(subtitle)s\") % {\n \"title\": title,\n \"subtitle\": book.subtitle,\n }\n return title\n\n\n@register.simple_tag(takes_context=False)\ndef comparison_bool(str1, str2, reverse=False):\n \"\"\"idk why I need to write a tag for this, it returns a bool\"\"\"\n if reverse:\n return str1 != str2\n return str1 == str2\n\n\n@register.filter(is_safe=True)\ndef truncatepath(value, arg):\n \"\"\"Truncate a path by removing all directories except the first and truncating\"\"\"\n path = os.path.normpath(value.name)\n path_list = path.split(os.sep)\n try:\n length = int(arg)\n except ValueError: # invalid literal for int()\n return path_list[-1] # Fail silently.\n return f\"{path_list[0]}/…{path_list[-1][-length:]}\"\n\n\n@register.simple_tag(takes_context=False)\ndef get_book_cover_thumbnail(book, size=\"medium\", ext=\"jpg\"):\n \"\"\"Returns a book thumbnail at the specified size and extension,\n with fallback if needed\"\"\"\n if size == \"\":\n size = \"medium\"\n try:\n cover_thumbnail = getattr(book, f\"cover_bw_book_{size}_{ext}\")\n return cover_thumbnail.url\n except OSError:\n return static(\"images/no_cover.jpg\")\n\n\n@register.filter(name=\"get_isni_bio\")\ndef get_isni_bio(existing, author):\n \"\"\"Returns the isni bio string if an existing author has an isni listed\"\"\"\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"bio\") and auth_isni == re.sub(r\"\\D\", \"\", str(value.isni)):\n return mark_safe(f\"Author of <em>{value.bio}</em>\")\n\n return \"\"\n\n\n# pylint: disable=unused-argument\n@register.filter(name=\"get_isni\", needs_autoescape=True)\ndef get_isni(existing, author, autoescape=True):\n \"\"\"Returns the isni ID if an existing author has an ISNI listing\"\"\"\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"isni\") and auth_isni == re.sub(r\"\\D\", \"\", str(value.isni)):\n isni = value.isni\n return mark_safe(\n f'<input type=\"text\" 
name=\"isni-for-{author.id}\" value=\"{isni}\" hidden>'\n )\n return \"\"\n\n\n@register.simple_tag(takes_context=False)\ndef id_to_username(user_id):\n \"\"\"given an arbitrary remote id, return the username\"\"\"\n if user_id:\n url = urlparse(user_id)\n domain = url.netloc\n parts = url.path.split(\"/\")\n name = parts[-1]\n value = f\"{name}@{domain}\"\n\n return value\n return \"a new user account\"\n\n\n@register.filter(name=\"get_file_size\")\ndef get_file_size(file):\n \"\"\"display the size of a file in human readable terms\"\"\"\n\n try:\n raw_size = os.stat(file.path).st_size\n if raw_size < 1024:\n return f\"{raw_size} bytes\"\n if raw_size < 1024**2:\n return f\"{raw_size/1024:.2f} KB\"\n if raw_size < 1024**3:\n return f\"{raw_size/1024**2:.2f} MB\"\n return f\"{raw_size/1024**3:.2f} GB\"\n except Exception: # pylint: disable=broad-except\n return \"\"\n\n\n@register.filter(name=\"get_user_permission\")\ndef get_user_permission(user):\n \"\"\"given a user, return their permission level\"\"\"\n\n return user.groups.first() or \"User\"\n\n\n@register.filter(name=\"is_instance_admin\")\ndef is_instance_admin(localname):\n \"\"\"Returns a boolean indicating whether the user is the instance admin account\"\"\"\n return localname == INSTANCE_ACTOR_USERNAME\n", "path": "bookwyrm/templatetags/utilities.py" } ]
diff --git a/bookwyrm/templates/moved.html b/bookwyrm/templates/moved.html index 545fc3d872..382b752be4 100644 --- a/bookwyrm/templates/moved.html +++ b/bookwyrm/templates/moved.html @@ -23,7 +23,7 @@ <div class="notification is-warning"> <p> - {% id_to_username request.user.moved_to as username %} + {% id_to_username request.user.moved_to as username %} {% blocktrans trimmed with moved_to=user.moved_to %} <strong>You have moved your account</strong> to <a href="{{ moved_to }}">{{ username }}</a> {% endblocktrans %} diff --git a/bookwyrm/templates/notifications/items/move_user.html b/bookwyrm/templates/notifications/items/move_user.html index b94d96dc49..3121d3f45b 100644 --- a/bookwyrm/templates/notifications/items/move_user.html +++ b/bookwyrm/templates/notifications/items/move_user.html @@ -14,7 +14,7 @@ {% block description %} {% if related_user_moved_to %} - {% id_to_username request.user.moved_to as username %} + {% id_to_username related_user_moved_to as username %} {% blocktrans trimmed %} {{ related_user }} has moved to <a href="{{ related_user_moved_to }}">{{ username }}</a> {% endblocktrans %} diff --git a/bookwyrm/templatetags/utilities.py b/bookwyrm/templatetags/utilities.py index fca66688ac..230db366e3 100644 --- a/bookwyrm/templatetags/utilities.py +++ b/bookwyrm/templatetags/utilities.py @@ -125,7 +125,8 @@ def id_to_username(user_id): name = parts[-1] value = f"{name}@{domain}" - return value + return value + return "a new user account" @register.filter(name="get_file_size")