Datasets:

zip
stringlengths
19
109
filename
stringlengths
4
185
contents
stringlengths
0
30.1M
type_annotations
listlengths
0
1.97k
type_annotation_starts
listlengths
0
1.97k
type_annotation_ends
listlengths
0
1.97k
archives/1346520853_-.zip
tests/data/src/sample/sample/__init__.py
__version__ = '1.2.0' def main(): """Entry point for the application script""" print("Call your main application code here")
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/sample/setup.py
import codecs import os import re from setuptools import find_packages, setup here = os.path.abspath(os.path.dirname(__file__)) # Read the version number from a source file. # Why read it, and not import? # see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion def find_version(*file_paths): # Open in Latin-1 so that we avoid encoding errors. # Use codecs.open for Python 2 compatibility with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f: version_file = f.read() # The version line must have the form # __version__ = 'ver' version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") # Get the long description from the relevant file with codecs.open('DESCRIPTION.rst', encoding='utf-8') as f: long_description = f.read() setup( name="sample", version=find_version('sample', '__init__.py'), description="A sample Python project", long_description=long_description, # The project URL. url='https://github.com/pypa/sampleproject', # Author details author='The Python Packaging Authority', author_email='pypa-dev@googlegroups.com', # Choose your license license='MIT', classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 
'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', ], # What does your project relate to? keywords='sample setuptools development', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages. packages=find_packages(exclude=["contrib", "docs", "tests*"]), # List run-time dependencies here. These will be installed by pip when your # project is installed. install_requires=['peppercorn'], # If there are data files included in your packages that need to be # installed, specify them here. If using Python 2.6 or less, then these # have to be included in MANIFEST.in as well. package_data={ 'sample': ['package_data.dat'], }, # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. # see https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' data_files=[('my_data', ['data/data_file'])], # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # pip to create the appropriate form of executable for the target platform. entry_points={ 'console_scripts': [ 'sample=sample:main', ], }, )
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simple_namespace/setup.py
from setuptools import setup setup( name='simple_namespace', version='1.0', namespace_packages=['simple_namespace'], packages=['simple_namespace.module'], )
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simple_namespace/simple_namespace/__init__.py
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simple_namespace/simple_namespace/module/__init__.py
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simplewheel-1.0/setup.py
#!/usr/bin/env python from setuptools import setup import simplewheel setup(name='simplewheel', version=simplewheel.__version__, packages=['simplewheel'], )
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simplewheel-1.0/simplewheel/__init__.py
__version__ = '1.0'
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simplewheel-2.0/setup.py
#!/usr/bin/env python from setuptools import setup import simplewheel setup(name='simplewheel', version=simplewheel.__version__, packages=['simplewheel'], )
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/simplewheel-2.0/simplewheel/__init__.py
__version__ = '2.0'
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/singlemodule/setup.py
from setuptools import setup setup( name="singlemodule", version='0.0.1', description="A sample Python project with a single module", py_modules=['singlemodule'], )
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/singlemodule/singlemodule.py
def main(): """Entry point for the application script""" print("Call your main application code here")
[]
[]
[]
archives/1346520853_-.zip
tests/data/src/withpyproject/setup.py
from setuptools import setup setup(name='withpyproject', version='0.0.1')
[]
[]
[]
archives/1346520853_-.zip
tests/functional/__init__.py
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_broken_stdout.py
import os import subprocess import sys if sys.version_info < (3, 6): _BROKEN_STDOUT_RETURN_CODE = 1 else: # The new exit status was added in Python 3.6 as a result of: # https://bugs.python.org/issue5319 _BROKEN_STDOUT_RETURN_CODE = 120 def setup_broken_stdout_test(args, deprecated_python): proc = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) # Call close() on stdout to cause a broken pipe. proc.stdout.close() returncode = proc.wait() stderr = proc.stderr.read().decode('utf-8') expected_msg = 'ERROR: Pipe to stdout was broken' if deprecated_python: assert expected_msg in stderr else: assert stderr.startswith(expected_msg) return stderr, returncode def test_broken_stdout_pipe(deprecated_python): """ Test a broken pipe to stdout. """ stderr, returncode = setup_broken_stdout_test( ['pip', 'list'], deprecated_python=deprecated_python, ) # Check that no traceback occurs. assert 'raise BrokenStdoutLoggingError()' not in stderr assert stderr.count('Traceback') == 0 assert returncode == _BROKEN_STDOUT_RETURN_CODE def test_broken_stdout_pipe__log_option(deprecated_python, tmpdir): """ Test a broken pipe to stdout when --log is passed. """ log_path = os.path.join(str(tmpdir), 'log.txt') stderr, returncode = setup_broken_stdout_test( ['pip', '--log', log_path, 'list'], deprecated_python=deprecated_python, ) # Check that no traceback occurs. assert 'raise BrokenStdoutLoggingError()' not in stderr assert stderr.count('Traceback') == 0 assert returncode == _BROKEN_STDOUT_RETURN_CODE def test_broken_stdout_pipe__verbose(deprecated_python): """ Test a broken pipe to stdout with verbose logging enabled. """ stderr, returncode = setup_broken_stdout_test( ['pip', '-v', 'list'], deprecated_python=deprecated_python, ) # Check that a traceback occurs and that it occurs at most once. # We permit up to two because the exception can be chained. 
assert 'raise BrokenStdoutLoggingError()' in stderr assert 1 <= stderr.count('Traceback') <= 2 assert returncode == _BROKEN_STDOUT_RETURN_CODE
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_check.py
from tests.lib import create_test_package_with_setup def matches_expected_lines(string, expected_lines): # Ignore empty lines output_lines = set(filter(None, string.splitlines())) # Match regardless of order return set(output_lines) == set(expected_lines) def test_basic_check_clean(script): """On a clean environment, check should print a helpful message. """ result = script.pip('check') expected_lines = ( "No broken requirements found.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 0 def test_basic_check_missing_dependency(script): # Setup a small project pkga_path = create_test_package_with_setup( script, name='pkga', version='1.0', install_requires=['missing==0.1'], ) # Let's install pkga without its dependency res = script.pip('install', '--no-index', pkga_path, '--no-deps') assert "Successfully installed pkga-1.0" in res.stdout, str(res) result = script.pip('check', expect_error=True) expected_lines = ( "pkga 1.0 requires missing, which is not installed.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 def test_basic_check_broken_dependency(script): # Setup pkga depending on pkgb>=1.0 pkga_path = create_test_package_with_setup( script, name='pkga', version='1.0', install_requires=['broken>=1.0'], ) # Let's install pkga without its dependency res = script.pip('install', '--no-index', pkga_path, '--no-deps') assert "Successfully installed pkga-1.0" in res.stdout, str(res) # Setup broken==0.1 broken_path = create_test_package_with_setup( script, name='broken', version='0.1', ) # Let's install broken==0.1 res = script.pip( 'install', '--no-index', broken_path, '--no-warn-conflicts', ) assert "Successfully installed broken-0.1" in res.stdout, str(res) result = script.pip('check', expect_error=True) expected_lines = ( "pkga 1.0 has requirement broken>=1.0, but you have broken 0.1.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 
def test_basic_check_broken_dependency_and_missing_dependency(script): pkga_path = create_test_package_with_setup( script, name='pkga', version='1.0', install_requires=['broken>=1.0'], ) # Let's install pkga without its dependency res = script.pip('install', '--no-index', pkga_path, '--no-deps') assert "Successfully installed pkga-1.0" in res.stdout, str(res) # Setup broken==0.1 broken_path = create_test_package_with_setup( script, name='broken', version='0.1', install_requires=['missing'], ) # Let's install broken==0.1 res = script.pip('install', '--no-index', broken_path, '--no-deps') assert "Successfully installed broken-0.1" in res.stdout, str(res) result = script.pip('check', expect_error=True) expected_lines = ( "broken 0.1 requires missing, which is not installed.", "pkga 1.0 has requirement broken>=1.0, but you have broken 0.1." ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 def test_check_complicated_name_missing(script): package_a_path = create_test_package_with_setup( script, name='package_A', version='1.0', install_requires=['Dependency-B>=1.0'], ) # Without dependency result = script.pip('install', '--no-index', package_a_path, '--no-deps') assert "Successfully installed package-A-1.0" in result.stdout, str(result) result = script.pip('check', expect_error=True) expected_lines = ( "package-a 1.0 requires dependency-b, which is not installed.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 def test_check_complicated_name_broken(script): package_a_path = create_test_package_with_setup( script, name='package_A', version='1.0', install_requires=['Dependency-B>=1.0'], ) dependency_b_path_incompatible = create_test_package_with_setup( script, name='dependency-b', version='0.1', ) # With broken dependency result = script.pip('install', '--no-index', package_a_path, '--no-deps') assert "Successfully installed package-A-1.0" in result.stdout, str(result) result = 
script.pip( 'install', '--no-index', dependency_b_path_incompatible, '--no-deps', ) assert "Successfully installed dependency-b-0.1" in result.stdout result = script.pip('check', expect_error=True) expected_lines = ( "package-a 1.0 has requirement Dependency-B>=1.0, but you have " "dependency-b 0.1.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 def test_check_complicated_name_clean(script): package_a_path = create_test_package_with_setup( script, name='package_A', version='1.0', install_requires=['Dependency-B>=1.0'], ) dependency_b_path = create_test_package_with_setup( script, name='dependency-b', version='1.0', ) result = script.pip('install', '--no-index', package_a_path, '--no-deps') assert "Successfully installed package-A-1.0" in result.stdout, str(result) result = script.pip( 'install', '--no-index', dependency_b_path, '--no-deps', ) assert "Successfully installed dependency-b-1.0" in result.stdout result = script.pip('check', expect_error=True) expected_lines = ( "No broken requirements found.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 0 def test_check_considers_conditional_reqs(script): package_a_path = create_test_package_with_setup( script, name='package_A', version='1.0', install_requires=[ "Dependency-B>=1.0; python_version != '2.7'", "Dependency-B>=2.0; python_version == '2.7'", ], ) result = script.pip('install', '--no-index', package_a_path, '--no-deps') assert "Successfully installed package-A-1.0" in result.stdout, str(result) result = script.pip('check', expect_error=True) expected_lines = ( "package-a 1.0 requires dependency-b, which is not installed.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 1 def test_check_development_versions_are_also_considered(script): # Setup pkga depending on pkgb>=1.0 pkga_path = create_test_package_with_setup( script, name='pkga', version='1.0', 
install_requires=['depend>=1.0'], ) # Let's install pkga without its dependency res = script.pip('install', '--no-index', pkga_path, '--no-deps') assert "Successfully installed pkga-1.0" in res.stdout, str(res) # Setup depend==1.1.0.dev0 depend_path = create_test_package_with_setup( script, name='depend', version='1.1.0.dev0', ) # Let's install depend==1.1.0.dev0 res = script.pip( 'install', '--no-index', depend_path, '--no-warn-conflicts', ) assert "Successfully installed depend-1.1.0.dev0" in res.stdout, str(res) result = script.pip('check') expected_lines = ( "No broken requirements found.", ) assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 0 def test_basic_check_broken_metadata(script): # Create some corrupt metadata dist_info_dir = script.site_packages_path / 'pkga-1.0.dist-info' dist_info_dir.mkdir() with open(dist_info_dir / 'METADATA', 'w') as f: f.write('Metadata-Version: 2.1\n' 'Name: pkga\n' 'Version: 1.0\n' 'Requires-Dist: pip; python_version == "3.4";extra == "test"\n' ) result = script.pip('check', expect_error=True) assert 'Error parsing requirements' in result.stderr assert result.returncode == 1
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_completion.py
import os import sys import pytest COMPLETION_FOR_SUPPORTED_SHELLS_TESTS = ( ('bash', """\ _pip_completion() { COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ PIP_AUTO_COMPLETE=1 $1 ) ) } complete -o default -F _pip_completion pip"""), ('fish', """\ function __fish_complete_pip set -lx COMP_WORDS (commandline -o) "" set -lx COMP_CWORD ( \\ math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\ ) set -lx PIP_AUTO_COMPLETE 1 string split \\ -- (eval $COMP_WORDS[1]) end complete -fa "(__fish_complete_pip)" -c pip"""), ('zsh', """\ function _pip_completion { local words cword read -Ac words read -cn cword reply=( $( COMP_WORDS="$words[*]" \\ COMP_CWORD=$(( cword-1 )) \\ PIP_AUTO_COMPLETE=1 $words[1] ) ) } compctl -K _pip_completion pip"""), ) @pytest.mark.parametrize( 'shell, completion', COMPLETION_FOR_SUPPORTED_SHELLS_TESTS, ids=[t[0] for t in COMPLETION_FOR_SUPPORTED_SHELLS_TESTS], ) def test_completion_for_supported_shells(script, pip_src, common_wheels, shell, completion): """ Test getting completion for bash shell """ # Re-install pip so we get the launchers. 
script.pip_install_local('-f', common_wheels, pip_src) result = script.pip('completion', '--' + shell, use_module=False) assert completion in result.stdout, str(result.stdout) def test_completion_for_unknown_shell(script): """ Test getting completion for an unknown shell """ error_msg = 'no such option: --myfooshell' result = script.pip('completion', '--myfooshell', expect_error=True) assert error_msg in result.stderr, 'tests for an unknown shell failed' def test_completion_alone(script): """ Test getting completion for none shell, just pip completion """ result = script.pip('completion', expect_error=True) assert 'ERROR: You must pass --bash or --fish or --zsh' in result.stderr, \ 'completion alone failed -- ' + result.stderr def setup_completion(script, words, cword, cwd=None): script.environ = os.environ.copy() script.environ['PIP_AUTO_COMPLETE'] = '1' script.environ['COMP_WORDS'] = words script.environ['COMP_CWORD'] = cword # expect_error is True because autocomplete exists with 1 status code result = script.run( 'python', '-c', 'import pip._internal;pip._internal.autocomplete()', expect_error=True, cwd=cwd, ) return result, script def test_completion_for_un_snippet(script): """ Test getting completion for ``un`` should return uninstall """ res, env = setup_completion(script, 'pip un', '1') assert res.stdout.strip().split() == ['uninstall'], res.stdout def test_completion_for_default_parameters(script): """ Test getting completion for ``--`` should contain --help """ res, env = setup_completion(script, 'pip --', '1') assert '--help' in res.stdout,\ "autocomplete function could not complete ``--``" def test_completion_option_for_command(script): """ Test getting completion for ``--`` in command (e.g. 
``pip search --``) """ res, env = setup_completion(script, 'pip search --', '2') assert '--help' in res.stdout,\ "autocomplete function could not complete ``--``" def test_completion_short_option(script): """ Test getting completion for short options after ``-`` (eg. pip -) """ res, env = setup_completion(script, 'pip -', '1') assert '-h' in res.stdout.split(),\ "autocomplete function could not complete short options after ``-``" def test_completion_short_option_for_command(script): """ Test getting completion for short options after ``-`` in command (eg. pip search -) """ res, env = setup_completion(script, 'pip search -', '2') assert '-h' in res.stdout.split(),\ "autocomplete function could not complete short options after ``-``" def test_completion_files_after_option(script, data): """ Test getting completion for <file> or <dir> after options in command (e.g. ``pip install -r``) """ res, env = setup_completion( script=script, words=('pip install -r r'), cword='3', cwd=data.completion_paths, ) assert 'requirements.txt' in res.stdout, ( "autocomplete function could not complete <file> " "after options in command" ) assert os.path.join('resources', '') in res.stdout, ( "autocomplete function could not complete <dir> " "after options in command" ) assert not any(out in res.stdout for out in (os.path.join('REPLAY', ''), 'README.txt')), ( "autocomplete function completed <file> or <dir> that " "should not be completed" ) if sys.platform != 'win32': return assert 'readme.txt' in res.stdout, ( "autocomplete function could not complete <file> " "after options in command" ) assert os.path.join('replay', '') in res.stdout, ( "autocomplete function could not complete <dir> " "after options in command" ) def test_completion_not_files_after_option(script, data): """ Test not getting completion files after options which not applicable (e.g. 
``pip install``) """ res, env = setup_completion( script=script, words=('pip install r'), cword='2', cwd=data.completion_paths, ) assert not any(out in res.stdout for out in ('requirements.txt', 'readme.txt',)), ( "autocomplete function completed <file> when " "it should not complete" ) assert not any(os.path.join(out, '') in res.stdout for out in ('replay', 'resources')), ( "autocomplete function completed <dir> when " "it should not complete" ) @pytest.mark.parametrize("cl_opts", ["-U", "--user", "-h"]) def test_completion_not_files_after_nonexpecting_option(script, data, cl_opts): """ Test not getting completion files after options which not applicable (e.g. ``pip install``) """ res, env = setup_completion( script=script, words=('pip install %s r' % cl_opts), cword='2', cwd=data.completion_paths, ) assert not any(out in res.stdout for out in ('requirements.txt', 'readme.txt',)), ( "autocomplete function completed <file> when " "it should not complete" ) assert not any(os.path.join(out, '') in res.stdout for out in ('replay', 'resources')), ( "autocomplete function completed <dir> when " "it should not complete" ) def test_completion_directories_after_option(script, data): """ Test getting completion <dir> after options in command (e.g. 
``pip --cache-dir``) """ res, env = setup_completion( script=script, words=('pip --cache-dir r'), cword='2', cwd=data.completion_paths, ) assert os.path.join('resources', '') in res.stdout, ( "autocomplete function could not complete <dir> after options" ) assert not any(out in res.stdout for out in ( 'requirements.txt', 'README.txt', os.path.join('REPLAY', ''))), ( "autocomplete function completed <dir> when " "it should not complete" ) if sys.platform == 'win32': assert os.path.join('replay', '') in res.stdout, ( "autocomplete function could not complete <dir> after options" ) def test_completion_subdirectories_after_option(script, data): """ Test getting completion <dir> after options in command given path of a directory """ res, env = setup_completion( script=script, words=('pip --cache-dir ' + os.path.join('resources', '')), cword='2', cwd=data.completion_paths, ) assert os.path.join('resources', os.path.join('images', '')) in res.stdout, ( "autocomplete function could not complete <dir> " "given path of a directory after options" ) def test_completion_path_after_option(script, data): """ Test getting completion <path> after options in command given absolute path """ res, env = setup_completion( script=script, words=('pip install -e ' + os.path.join(data.completion_paths, 'R')), cword='3', ) assert all(os.path.normcase(os.path.join(data.completion_paths, out)) in res.stdout for out in ( 'README.txt', os.path.join('REPLAY', ''))), ( "autocomplete function could not complete <path> " "after options in command given absolute path" ) @pytest.mark.parametrize('flag', ['--bash', '--zsh', '--fish']) def test_completion_uses_same_executable_name(script, flag, deprecated_python): executable_name = 'pip{}'.format(sys.version_info[0]) # Deprecated python versions produce an extra deprecation warning result = script.run( executable_name, 'completion', flag, expect_stderr=deprecated_python, ) assert executable_name in result.stdout
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_configuration.py
"""Tests for the config command """ import textwrap import pytest from pip._internal.cli.status_codes import ERROR from tests.lib.configuration_helpers import ConfigurationMixin, kinds def test_no_options_passed_should_error(script): result = script.pip('config', expect_error=True) assert result.returncode == ERROR class TestBasicLoading(ConfigurationMixin): @pytest.mark.skip("Can't modify underlying file for any mode") def test_reads_file_appropriately(self, script): contents = """ [test] hello = 1 """ with self.patched_file(kinds.USER, contents): result = script.pip("config", "list") assert "test.hello=1" in result.stdout def test_basic_modification_pipeline(self, script): script.pip("config", "get", "test.blah", expect_error=True) script.pip("config", "set", "test.blah", "1") result = script.pip("config", "get", "test.blah") assert result.stdout.strip() == "1" script.pip("config", "unset", "test.blah") script.pip("config", "get", "test.blah", expect_error=True) def test_listing_is_correct(self, script): script.pip("config", "set", "test.listing-beta", "2") script.pip("config", "set", "test.listing-alpha", "1") script.pip("config", "set", "test.listing-gamma", "3") result = script.pip("config", "list") lines = list(filter( lambda x: x.startswith("test.listing-"), result.stdout.splitlines() )) expected = """ test.listing-alpha='1' test.listing-beta='2' test.listing-gamma='3' """ assert lines == textwrap.dedent(expected).strip().splitlines()
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_download.py
import textwrap import pytest from pip._internal.cli.status_codes import ERROR from tests.lib.path import Path def fake_wheel(data, wheel_path): data.packages.join( 'simple.dist-0.1-py2.py3-none-any.whl' ).copy(data.packages.join(wheel_path)) @pytest.mark.network def test_download_if_requested(script): """ It should download (in the scratch path) and not install if requested. """ result = script.pip( 'download', '-d', 'pip_downloads', 'INITools==0.1', expect_error=True ) assert Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz' \ in result.files_created assert script.site_packages / 'initools' not in result.files_created @pytest.mark.network def test_basic_download_setuptools(script): """ It should download (in the scratch path) and not install if requested. """ result = script.pip('download', 'setuptools') setuptools_prefix = str(Path('scratch') / 'setuptools') assert any( path.startswith(setuptools_prefix) for path in result.files_created ) def test_download_wheel(script, data): """ Test using "pip download" to download a *.whl archive. 
""" result = script.pip( 'download', '--no-index', '-f', data.packages, '-d', '.', 'meta' ) assert ( Path('scratch') / 'meta-1.0-py2.py3-none-any.whl' in result.files_created ) assert script.site_packages / 'piptestpackage' not in result.files_created @pytest.mark.network def test_single_download_from_requirements_file(script): """ It should support download (in the scratch path) from PyPI from a requirements file """ script.scratch_path.join("test-req.txt").write(textwrap.dedent(""" INITools==0.1 """)) result = script.pip( 'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.', expect_error=True, ) assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created assert script.site_packages / 'initools' not in result.files_created @pytest.mark.network def test_basic_download_should_download_dependencies(script): """ It should download dependencies (in the scratch path) """ result = script.pip( 'download', 'Paste[openid]==1.7.5.1', '-d', '.', expect_error=True, ) assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in result.files_created openid_tarball_prefix = str(Path('scratch') / 'python-openid-') assert any( path.startswith(openid_tarball_prefix) for path in result.files_created ) assert script.site_packages / 'openid' not in result.files_created def test_download_wheel_archive(script, data): """ It should download a wheel archive path """ wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl' wheel_path = '/'.join((data.find_links, wheel_filename)) result = script.pip( 'download', wheel_path, '-d', '.', '--no-deps' ) assert Path('scratch') / wheel_filename in result.files_created def test_download_should_download_wheel_deps(script, data): """ It should download dependencies for wheels(in the scratch path) """ wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl' dep_filename = 'translationstring-1.1.tar.gz' wheel_path = '/'.join((data.find_links, wheel_filename)) result = script.pip( 'download', wheel_path, '-d', '.', '--find-links', 
data.find_links, '--no-index' ) assert Path('scratch') / wheel_filename in result.files_created assert Path('scratch') / dep_filename in result.files_created @pytest.mark.network def test_download_should_skip_existing_files(script): """ It should not download files already existing in the scratch dir """ script.scratch_path.join("test-req.txt").write(textwrap.dedent(""" INITools==0.1 """)) result = script.pip( 'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.', expect_error=True, ) assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created assert script.site_packages / 'initools' not in result.files_created # adding second package to test-req.txt script.scratch_path.join("test-req.txt").write(textwrap.dedent(""" INITools==0.1 python-openid==2.2.5 """)) # only the second package should be downloaded result = script.pip( 'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.', expect_error=True, ) openid_tarball_prefix = str(Path('scratch') / 'python-openid-') assert any( path.startswith(openid_tarball_prefix) for path in result.files_created ) assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created assert script.site_packages / 'initools' not in result.files_created assert script.site_packages / 'openid' not in result.files_created @pytest.mark.network def test_download_vcs_link(script): """ It should allow -d flag for vcs links, regression test for issue #798. """ result = script.pip( 'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git' ) assert ( Path('scratch') / 'pip-test-package-0.1.1.zip' in result.files_created ) assert script.site_packages / 'piptestpackage' not in result.files_created def test_only_binary_set_then_download_specific_platform(script, data): """ Confirm that specifying an interpreter/platform constraint is allowed when ``--only-binary=:all:`` is set. 
""" fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'linux_x86_64', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) def test_no_deps_set_then_download_specific_platform(script, data): """ Confirm that specifying an interpreter/platform constraint is allowed when ``--no-deps`` is set. """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--no-deps', '--dest', '.', '--platform', 'linux_x86_64', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) def test_download_specific_platform_fails(script, data): """ Confirm that specifying an interpreter/platform constraint enforces that ``--no-deps`` or ``--only-binary=:all:`` is set. """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--dest', '.', '--platform', 'linux_x86_64', 'fake', expect_error=True, ) assert '--only-binary=:all:' in result.stderr def test_no_binary_set_then_download_specific_platform_fails(script, data): """ Confirm that specifying an interpreter/platform constraint enforces that ``--only-binary=:all:`` is set without ``--no-binary``. """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--no-binary=fake', '--dest', '.', '--platform', 'linux_x86_64', 'fake', expect_error=True, ) assert '--only-binary=:all:' in result.stderr def test_download_specify_platform(script, data): """ Test using "pip download --platform" to download a .whl archive supported for a specific platform """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') # Confirm that universal wheels are returned even for specific # platforms. 
result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'linux_x86_64', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'macosx_10_9_x86_64', 'fake' ) data.reset() fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl') fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'macosx_10_10_x86_64', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl' in result.files_created ) # OSX platform wheels are not backward-compatible. result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'macosx_10_8_x86_64', 'fake', expect_error=True, ) # No linux wheel provided for this version. result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'linux_x86_64', 'fake==1', expect_error=True, ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'linux_x86_64', 'fake==2' ) assert ( Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl' in result.files_created ) class TestDownloadPlatformManylinuxes(object): """ "pip download --platform" downloads a .whl archive supported for manylinux platforms. """ @pytest.mark.parametrize("platform", [ "linux_x86_64", "manylinux1_x86_64", "manylinux2010_x86_64", ]) def test_download_universal(self, platform, script, data): """ Universal wheels are returned even for specific platforms. 
""" fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', platform, 'fake', ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) @pytest.mark.parametrize("wheel_abi,platform", [ ("manylinux1_x86_64", "manylinux1_x86_64"), ("manylinux1_x86_64", "manylinux2010_x86_64"), ("manylinux2010_x86_64", "manylinux2010_x86_64"), ]) def test_download_compatible_manylinuxes( self, wheel_abi, platform, script, data, ): """ Earlier manylinuxes are compatible with later manylinuxes. """ wheel = 'fake-1.0-py2.py3-none-{}.whl'.format(wheel_abi) fake_wheel(data, wheel) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', platform, 'fake', ) assert Path('scratch') / wheel in result.files_created def test_explicit_platform_only(self, data, script): """ When specifying the platform, manylinux1 needs to be the explicit platform--it won't ever be added to the compatible tags. 
""" fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl') script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--platform', 'linux_x86_64', 'fake', expect_error=True, ) def test_download_specify_python_version(script, data): """ Test using "pip download --python-version" to download a .whl archive supported for a specific interpreter """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '2', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '3', 'fake' ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '27', 'fake' ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '33', 'fake' ) data.reset() fake_wheel(data, 'fake-1.0-py2-none-any.whl') fake_wheel(data, 'fake-2.0-py3-none-any.whl') # No py3 provided for version 1. 
result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '3', 'fake==1.0', expect_error=True, ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '2', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2-none-any.whl' in result.files_created ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '26', 'fake' ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '3', 'fake' ) assert ( Path('scratch') / 'fake-2.0-py3-none-any.whl' in result.files_created ) def test_download_specify_abi(script, data): """ Test using "pip download --abi" to download a .whl archive supported for a specific abi """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', '--abi', 'fake_abi', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', '--abi', 'none', 'fake' ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--abi', 'cp27m', 'fake', expect_error=True, ) data.reset() fake_wheel(data, 'fake-1.0-fk2-fakeabi-fake_platform.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--python-version', '2', '--implementation', 'fk', '--platform', 'fake_platform', '--abi', 'fakeabi', 'fake' ) assert ( Path('scratch') / 'fake-1.0-fk2-fakeabi-fake_platform.whl' in result.files_created ) result = script.pip( 
'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', '--platform', 'fake_platform', '--abi', 'none', 'fake', expect_error=True, ) def test_download_specify_implementation(script, data): """ Test using "pip download --abi" to download a .whl archive supported for a specific abi """ fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', 'fake' ) assert ( Path('scratch') / 'fake-1.0-py2.py3-none-any.whl' in result.files_created ) data.reset() fake_wheel(data, 'fake-1.0-fk2.fk3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', 'fake' ) assert ( Path('scratch') / 'fake-1.0-fk2.fk3-none-any.whl' in result.files_created ) data.reset() fake_wheel(data, 'fake-1.0-fk3-none-any.whl') result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', '--python-version', '3', 'fake' ) assert ( Path('scratch') / 'fake-1.0-fk3-none-any.whl' in result.files_created ) result = script.pip( 'download', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--dest', '.', '--implementation', 'fk', '--python-version', '2', 'fake', expect_error=True, ) def test_download_exit_status_code_when_no_requirements(script): """ Test download exit status code when no requirements specified """ result = script.pip('download', expect_error=True) assert ( "You must give at least one requirement to download" in result.stderr ) assert result.returncode == ERROR def test_download_exit_status_code_when_blank_requirements_file(script): """ Test download exit status code when blank requirements file specified """ script.scratch_path.join("blank.txt").write("\n") script.pip('download', '-r', 
'blank.txt') def test_download_prefer_binary_when_tarball_higher_than_wheel(script, data): fake_wheel(data, 'source-0.8-py2.py3-none-any.whl') result = script.pip( 'download', '--prefer-binary', '--no-index', '-f', data.packages, '-d', '.', 'source' ) assert ( Path('scratch') / 'source-0.8-py2.py3-none-any.whl' in result.files_created ) assert ( Path('scratch') / 'source-1.0.tar.gz' not in result.files_created ) def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(script, data): fake_wheel(data, 'source-0.8-py2.py3-none-any.whl') script.scratch_path.join("test-req.txt").write(textwrap.dedent(""" source>0.9 """)) result = script.pip( 'download', '--prefer-binary', '--no-index', '-f', data.packages, '-d', '.', '-r', script.scratch_path / 'test-req.txt' ) assert ( Path('scratch') / 'source-1.0.tar.gz' in result.files_created ) assert ( Path('scratch') / 'source-0.8-py2.py3-none-any.whl' not in result.files_created ) def test_download_prefer_binary_when_only_tarball_exists(script, data): result = script.pip( 'download', '--prefer-binary', '--no-index', '-f', data.packages, '-d', '.', 'source' ) assert ( Path('scratch') / 'source-1.0.tar.gz' in result.files_created )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_freeze.py
import os import re import sys import textwrap from doctest import ELLIPSIS, OutputChecker import pytest from tests.lib import ( _create_test_package, _create_test_package_with_srcdir, _git_commit, need_bzr, need_mercurial, path_to_url, ) distribute_re = re.compile('^distribute==[0-9.]+\n', re.MULTILINE) def _check_output(result, expected): checker = OutputChecker() actual = str(result) # FIXME! The following is a TOTAL hack. For some reason the # __str__ result for pkg_resources.Requirement gets downcased on # Windows. Since INITools is the only package we're installing # in this file with funky case requirements, I'm forcibly # upcasing it. You can also normalize everything to lowercase, # but then you have to remember to upcase <BLANKLINE>. The right # thing to do in the end is probably to find out how to report # the proper fully-cased package name in our error message. if sys.platform == 'win32': actual = actual.replace('initools', 'INITools') # This allows our existing tests to work when run in a context # with distribute installed. actual = distribute_re.sub('', actual) def banner(msg): return '\n========== %s ==========\n' % msg assert checker.check_output(expected, actual, ELLIPSIS), ( banner('EXPECTED') + expected + banner('ACTUAL') + actual + banner(6 * '=') ) def test_basic_freeze(script): """ Some tests of freeze, first we have to install some stuff. Note that the test is a little crude at the end because Python 2.5+ adds egg info to the standard library, so stuff like wsgiref will show up in the freezing. (Probably that should be accounted for in pip, but currently it is not). """ script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\ simple==2.0 # and something else to test out: simple2<=3.0 """)) script.pip_install_local( '-r', script.scratch_path / 'initools-req.txt', ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent("""\ ...simple==2.0 simple2==3.0... 
<BLANKLINE>""") _check_output(result.stdout, expected) def test_freeze_with_pip(script): """Test pip shows itself""" result = script.pip('freeze', '--all') assert 'pip==' in result.stdout def test_freeze_with_invalid_names(script): """ Test that invalid names produce warnings and are passed over gracefully. """ def fake_install(pkgname, dest): egg_info_path = os.path.join( dest, '{}-1.0-py{}.{}.egg-info'.format( pkgname.replace('-', '_'), sys.version_info[0], sys.version_info[1] ) ) with open(egg_info_path, 'w') as egg_info_file: egg_info_file.write(textwrap.dedent("""\ Metadata-Version: 1.0 Name: {} Version: 1.0 """.format(pkgname) )) valid_pkgnames = ('middle-dash', 'middle_underscore', 'middle.dot') invalid_pkgnames = ( '-leadingdash', '_leadingunderscore', '.leadingdot', 'trailingdash-', 'trailingunderscore_', 'trailingdot.' ) for pkgname in valid_pkgnames + invalid_pkgnames: fake_install(pkgname, script.site_packages_path) result = script.pip('freeze', expect_stderr=True) for pkgname in valid_pkgnames: _check_output( result.stdout, '...{}==1.0...'.format(pkgname.replace('_', '-')) ) for pkgname in invalid_pkgnames: _check_output( result.stderr, '...Could not parse requirement: {}\n...'.format( pkgname.replace('_', '-') ) ) @pytest.mark.git def test_freeze_editable_not_vcs(script, tmpdir): """ Test an editable install that is not version controlled. """ pkg_path = _create_test_package(script) # Rename the .git directory so the directory is no longer recognized # as a VCS directory. os.rename(os.path.join(pkg_path, '.git'), os.path.join(pkg_path, '.bak')) script.pip('install', '-e', pkg_path) result = script.pip('freeze', expect_stderr=True) # We need to apply os.path.normcase() to the path since that is what # the freeze code does. 
expected = textwrap.dedent("""\ ...# Editable install with no version control (version-pkg==0.1) -e {} ...""".format(os.path.normcase(pkg_path))) _check_output(result.stdout, expected) @pytest.mark.git def test_freeze_editable_git_with_no_remote(script, tmpdir, deprecated_python): """ Test an editable Git install with no remote url. """ pkg_path = _create_test_package(script) script.pip('install', '-e', pkg_path) result = script.pip('freeze') if not deprecated_python: assert result.stderr == '' # We need to apply os.path.normcase() to the path since that is what # the freeze code does. expected = textwrap.dedent("""\ ...# Editable Git install with no remote (version-pkg==0.1) -e {} ...""".format(os.path.normcase(pkg_path))) _check_output(result.stdout, expected) @pytest.mark.svn def test_freeze_svn(script, tmpdir): """Test freezing a svn checkout""" checkout_path = _create_test_package(script, vcs='svn') # Install with develop script.run( 'python', 'setup.py', 'develop', cwd=checkout_path, expect_stderr=True ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent("""\ ...-e svn+...#egg=version_pkg ...""") _check_output(result.stdout, expected) @pytest.mark.git @pytest.mark.xfail def test_freeze_exclude_editable(script, tmpdir): """ Test excluding editable from freezing list. """ # Returns path to a generated package called "version_pkg" pkg_version = _create_test_package(script) result = script.run( 'git', 'clone', pkg_version, 'pip-test-package', expect_stderr=True, ) repo_dir = script.scratch_path / 'pip-test-package' result = script.run( 'python', 'setup.py', 'develop', cwd=repo_dir, expect_stderr=True, ) result = script.pip('freeze', '--exclude-editable', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+...#egg=version_pkg ... """ ).strip() _check_output(result.stdout, expected) @pytest.mark.git def test_freeze_git_clone(script, tmpdir): """ Test freezing a Git clone. 
""" # Returns path to a generated package called "version_pkg" pkg_version = _create_test_package(script) result = script.run( 'git', 'clone', pkg_version, 'pip-test-package', expect_stderr=True, ) repo_dir = script.scratch_path / 'pip-test-package' result = script.run( 'python', 'setup.py', 'develop', cwd=repo_dir, expect_stderr=True, ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+...#egg=version_pkg ... """ ).strip() _check_output(result.stdout, expected) result = script.pip( 'freeze', '-f', '%s#egg=pip_test_package' % repo_dir, expect_stderr=True, ) expected = textwrap.dedent( """ -f %(repo)s#egg=pip_test_package... -e git+...#egg=version_pkg ... """ % {'repo': repo_dir}, ).strip() _check_output(result.stdout, expected) # Check that slashes in branch or tag names are translated. # See also issue #1083: https://github.com/pypa/pip/issues/1083 script.run( 'git', 'checkout', '-b', 'branch/name/with/slash', cwd=repo_dir, expect_stderr=True, ) # Create a new commit to ensure that the commit has only one branch # or tag name associated to it (to avoid the non-determinism reported # in issue #1867). script.run('touch', 'newfile', cwd=repo_dir) script.run('git', 'add', 'newfile', cwd=repo_dir) _git_commit(script, repo_dir, message='...') result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e ...@...#egg=version_pkg ... """ ).strip() _check_output(result.stdout, expected) @pytest.mark.git def test_freeze_git_clone_srcdir(script, tmpdir): """ Test freezing a Git clone where setup.py is in a subdirectory relative the repo root and the source code is in a subdirectory relative to setup.py. 
""" # Returns path to a generated package called "version_pkg" pkg_version = _create_test_package_with_srcdir(script) result = script.run( 'git', 'clone', pkg_version, 'pip-test-package', expect_stderr=True, ) repo_dir = script.scratch_path / 'pip-test-package' result = script.run( 'python', 'setup.py', 'develop', cwd=repo_dir / 'subdir', expect_stderr=True, ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+...#egg=version_pkg&subdirectory=subdir ... """ ).strip() _check_output(result.stdout, expected) result = script.pip( 'freeze', '-f', '%s#egg=pip_test_package' % repo_dir, expect_stderr=True, ) expected = textwrap.dedent( """ -f %(repo)s#egg=pip_test_package... -e git+...#egg=version_pkg&subdirectory=subdir ... """ % {'repo': repo_dir}, ).strip() _check_output(result.stdout, expected) @pytest.mark.git def test_freeze_git_remote(script, tmpdir): """ Test freezing a Git clone. """ # Returns path to a generated package called "version_pkg" pkg_version = _create_test_package(script) result = script.run( 'git', 'clone', pkg_version, 'pip-test-package', expect_stderr=True, ) repo_dir = script.scratch_path / 'pip-test-package' result = script.run( 'python', 'setup.py', 'develop', cwd=repo_dir, expect_stderr=True, ) origin_remote = pkg_version other_remote = pkg_version + '-other' # check frozen remote after clone result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+{remote}@...#egg=version_pkg ... """ ).format(remote=origin_remote).strip() _check_output(result.stdout, expected) # check frozen remote when there is no remote named origin script.run('git', 'remote', 'remove', 'origin', cwd=repo_dir) script.run('git', 'remote', 'add', 'other', other_remote, cwd=repo_dir) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+{remote}@...#egg=version_pkg ... 
""" ).format(remote=other_remote).strip() _check_output(result.stdout, expected) # when there are more than one origin, priority is given to the # remote named origin script.run('git', 'remote', 'add', 'origin', origin_remote, cwd=repo_dir) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e git+{remote}@...#egg=version_pkg ... """ ).format(remote=origin_remote).strip() _check_output(result.stdout, expected) @need_mercurial def test_freeze_mercurial_clone(script, tmpdir): """ Test freezing a Mercurial clone. """ # Returns path to a generated package called "version_pkg" pkg_version = _create_test_package(script, vcs='hg') result = script.run( 'hg', 'clone', pkg_version, 'pip-test-package', expect_stderr=True, ) repo_dir = script.scratch_path / 'pip-test-package' result = script.run( 'python', 'setup.py', 'develop', cwd=repo_dir, expect_stderr=True, ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent( """ ...-e hg+...#egg=version_pkg ... """ ).strip() _check_output(result.stdout, expected) result = script.pip( 'freeze', '-f', '%s#egg=pip_test_package' % repo_dir, expect_stderr=True, ) expected = textwrap.dedent( """ -f %(repo)s#egg=pip_test_package... ...-e hg+...#egg=version_pkg ... """ % {'repo': repo_dir}, ).strip() _check_output(result.stdout, expected) @need_bzr def test_freeze_bazaar_clone(script, tmpdir): """ Test freezing a Bazaar clone. 
""" try: checkout_path = _create_test_package(script, vcs='bazaar') except OSError as e: pytest.fail('Invoking `bzr` failed: %s' % e) result = script.run( 'bzr', 'checkout', checkout_path, 'bzr-package' ) result = script.run( 'python', 'setup.py', 'develop', cwd=script.scratch_path / 'bzr-package', expect_stderr=True, ) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent("""\ ...-e bzr+file://...@1#egg=version_pkg ...""") _check_output(result.stdout, expected) result = script.pip( 'freeze', '-f', '%s/#egg=django-wikiapp' % checkout_path, expect_stderr=True, ) expected = textwrap.dedent("""\ -f %(repo)s/#egg=django-wikiapp ...-e bzr+file://...@...#egg=version_pkg ...""" % {'repo': checkout_path}) _check_output(result.stdout, expected) # used by the test_freeze_with_requirement_* tests below _freeze_req_opts = textwrap.dedent("""\ # Unchanged requirements below this line -r ignore.txt --requirement ignore.txt -Z ignore --always-unzip ignore -f http://ignore -i http://ignore --pre --trusted-host url --process-dependency-links --extra-index-url http://ignore --find-links http://ignore --index-url http://ignore """) def test_freeze_with_requirement_option_file_url_egg_not_installed( script, deprecated_python): """ Test "freeze -r requirements.txt" with a local file URL whose egg name is not installed. 
""" url = path_to_url('my-package.tar.gz') + '#egg=Does.Not-Exist' requirements_path = script.scratch_path.join('requirements.txt') requirements_path.write(url + '\n') result = script.pip( 'freeze', '--requirement', 'requirements.txt', expect_stderr=True, ) expected_err = ( 'WARNING: Requirement file [requirements.txt] contains {}, ' "but package 'Does.Not-Exist' is not installed\n" ).format(url) if deprecated_python: assert expected_err in result.stderr else: assert expected_err == result.stderr def test_freeze_with_requirement_option(script): """ Test that new requirements are created correctly with --requirement hints """ script.scratch_path.join("hint.txt").write(textwrap.dedent("""\ INITools==0.1 NoExist==4.2 # A comment that ensures end of line comments work. simple==3.0; python_version > '1.0' """) + _freeze_req_opts) result = script.pip_install_local('initools==0.2') result = script.pip_install_local('simple') result = script.pip( 'freeze', '--requirement', 'hint.txt', expect_stderr=True, ) expected = textwrap.dedent("""\ INITools==0.2 simple==3.0 """) expected += _freeze_req_opts expected += "## The following requirements were added by pip freeze:..." 
_check_output(result.stdout, expected) assert ( "Requirement file [hint.txt] contains NoExist==4.2, but package " "'NoExist' is not installed" ) in result.stderr def test_freeze_with_requirement_option_multiple(script): """ Test that new requirements are created correctly with multiple --requirement hints """ script.scratch_path.join('hint1.txt').write(textwrap.dedent("""\ INITools==0.1 NoExist==4.2 simple==3.0; python_version > '1.0' """) + _freeze_req_opts) script.scratch_path.join('hint2.txt').write(textwrap.dedent("""\ NoExist2==2.0 simple2==1.0 """) + _freeze_req_opts) result = script.pip_install_local('initools==0.2') result = script.pip_install_local('simple') result = script.pip_install_local('simple2==1.0') result = script.pip_install_local('meta') result = script.pip( 'freeze', '--requirement', 'hint1.txt', '--requirement', 'hint2.txt', expect_stderr=True, ) expected = textwrap.dedent("""\ INITools==0.2 simple==1.0 """) expected += _freeze_req_opts expected += textwrap.dedent("""\ simple2==1.0 """) expected += "## The following requirements were added by pip freeze:" expected += '\n' + textwrap.dedent("""\ ...meta==1.0... 
""") _check_output(result.stdout, expected) assert ( "Requirement file [hint1.txt] contains NoExist==4.2, but package " "'NoExist' is not installed" ) in result.stderr assert ( "Requirement file [hint2.txt] contains NoExist2==2.0, but package " "'NoExist2' is not installed" ) in result.stderr # any options like '--index-url http://ignore' should only be emitted once # even if they are listed in multiple requirements files assert result.stdout.count("--index-url http://ignore") == 1 def test_freeze_with_requirement_option_package_repeated_one_file(script): """ Test freezing with single requirements file that contains a package multiple times """ script.scratch_path.join('hint1.txt').write(textwrap.dedent("""\ simple2 simple2 NoExist """) + _freeze_req_opts) result = script.pip_install_local('simple2==1.0') result = script.pip_install_local('meta') result = script.pip( 'freeze', '--requirement', 'hint1.txt', expect_stderr=True, ) expected_out = textwrap.dedent("""\ simple2==1.0 """) expected_out += _freeze_req_opts expected_out += "## The following requirements were added by pip freeze:" expected_out += '\n' + textwrap.dedent("""\ ...meta==1.0... 
""") _check_output(result.stdout, expected_out) err1 = ("Requirement file [hint1.txt] contains NoExist, " "but package 'NoExist' is not installed\n") err2 = "Requirement simple2 included multiple times [hint1.txt]\n" assert err1 in result.stderr assert err2 in result.stderr # there shouldn't be any other 'is not installed' warnings assert result.stderr.count('is not installed') == 1 def test_freeze_with_requirement_option_package_repeated_multi_file(script): """ Test freezing with multiple requirements file that contain a package """ script.scratch_path.join('hint1.txt').write(textwrap.dedent("""\ simple """) + _freeze_req_opts) script.scratch_path.join('hint2.txt').write(textwrap.dedent("""\ simple NoExist """) + _freeze_req_opts) result = script.pip_install_local('simple==1.0') result = script.pip_install_local('meta') result = script.pip( 'freeze', '--requirement', 'hint1.txt', '--requirement', 'hint2.txt', expect_stderr=True, ) expected_out = textwrap.dedent("""\ simple==1.0 """) expected_out += _freeze_req_opts expected_out += "## The following requirements were added by pip freeze:" expected_out += '\n' + textwrap.dedent("""\ ...meta==1.0... """) _check_output(result.stdout, expected_out) err1 = ("Requirement file [hint2.txt] contains NoExist, but package " "'NoExist' is not installed\n") err2 = ("Requirement simple included multiple times " "[hint1.txt, hint2.txt]\n") assert err1 in result.stderr assert err2 in result.stderr # there shouldn't be any other 'is not installed' warnings assert result.stderr.count('is not installed') == 1 @pytest.mark.network def test_freeze_user(script, virtualenv, data): """ Testing freeze with --user, first we have to install some stuff. 
""" script.pip('download', 'setuptools', 'wheel', '-d', data.packages) script.pip_install_local('--find-links', data.find_links, '--user', 'simple==2.0') script.pip_install_local('--find-links', data.find_links, 'simple2==3.0') result = script.pip('freeze', '--user', expect_stderr=True) expected = textwrap.dedent("""\ simple==2.0 <BLANKLINE>""") _check_output(result.stdout, expected) assert 'simple2' not in result.stdout
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_hash.py
"""Tests for the ``pip hash`` command""" def test_basic_hash(script, tmpdir): """Run 'pip hash' through its default behavior.""" expected = ('--hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425' 'e73043362938b9824') result = script.pip('hash', _hello_file(tmpdir)) assert expected in str(result) def test_good_algo_option(script, tmpdir): """Make sure the -a option works.""" expected = ('--hash=sha512:9b71d224bd62f3785d96d46ad3ea3d73319bfbc2890caad' 'ae2dff72519673ca72323c3d99ba5c11d7c7acc6e14b8c5da0c4663475c2e' '5c3adef46f73bcdec043') result = script.pip('hash', '-a', 'sha512', _hello_file(tmpdir)) assert expected in str(result) def test_bad_algo_option(script, tmpdir): """Make sure the -a option raises an error when given a bad operand.""" result = script.pip('hash', '-a', 'invalidname', _hello_file(tmpdir), expect_error=True) assert "invalid choice: 'invalidname'" in str(result) def _hello_file(tmpdir): """Return a temp file to hash containing "hello".""" file = tmpdir / 'hashable' file.write('hello') return file
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_help.py
import pytest from mock import Mock from pip._internal.cli.base_command import ERROR, SUCCESS from pip._internal.commands import commands_dict as commands from pip._internal.commands.help import HelpCommand from pip._internal.exceptions import CommandError def test_run_method_should_return_success_when_finds_command_name(): """ Test HelpCommand.run for existing command """ options_mock = Mock() args = ('freeze',) help_cmd = HelpCommand() status = help_cmd.run(options_mock, args) assert status == SUCCESS def test_run_method_should_return_success_when_command_name_not_specified(): """ Test HelpCommand.run when there are no args """ options_mock = Mock() args = () help_cmd = HelpCommand() status = help_cmd.run(options_mock, args) assert status == SUCCESS def test_run_method_should_raise_command_error_when_command_does_not_exist(): """ Test HelpCommand.run for non-existing command """ options_mock = Mock() args = ('mycommand',) help_cmd = HelpCommand() with pytest.raises(CommandError): help_cmd.run(options_mock, args) def test_help_command_should_exit_status_ok_when_command_exists(script): """ Test `help` command for existing command """ result = script.pip('help', 'freeze') assert result.returncode == SUCCESS def test_help_command_should_exit_status_ok_when_no_cmd_is_specified(script): """ Test `help` command for no command """ result = script.pip('help') assert result.returncode == SUCCESS def test_help_command_should_exit_status_error_when_cmd_does_not_exist(script): """ Test `help` command for non-existing command """ result = script.pip('help', 'mycommand', expect_error=True) assert result.returncode == ERROR def test_help_commands_equally_functional(in_memory_pip): """ Test if `pip help` and 'pip --help' behave the same way. 
""" results = list(map(in_memory_pip.pip, ('help', '--help'))) results.append(in_memory_pip.pip()) out = map(lambda x: x.stdout, results) ret = map(lambda x: x.returncode, results) msg = '"pip --help" != "pip help" != "pip"' assert len(set(out)) == 1, 'output of: ' + msg assert sum(ret) == 0, 'exit codes of: ' + msg assert all(len(o) > 0 for o in out) for name, cls in commands.items(): assert ( in_memory_pip.pip('help', name).stdout == in_memory_pip.pip(name, '--help').stdout != "" )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install.py
import distutils import glob import os import sys import textwrap from os.path import curdir, join, pardir import pytest from pip._internal import pep425tags from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.models.index import PyPI, TestPyPI from pip._internal.utils.misc import rmtree from tests.lib import ( _create_svn_repo, _create_test_package, create_basic_wheel_for_package, create_test_package_with_setup, need_bzr, need_mercurial, path_to_url, pyversion, pyversion_tuple, requirements_file, ) from tests.lib.local_repos import local_checkout from tests.lib.path import Path @pytest.mark.parametrize('command', ('install', 'wheel')) @pytest.mark.parametrize('variant', ('missing_setuptools', 'bad_setuptools')) def test_pep518_uses_build_env(script, data, common_wheels, command, variant): if variant == 'missing_setuptools': script.pip("uninstall", "-y", "setuptools") elif variant == 'bad_setuptools': setuptools_mod = script.site_packages_path.join("setuptools.py") with open(setuptools_mod, 'a') as f: f.write('\nraise ImportError("toto")') else: raise ValueError(variant) script.pip( command, '--no-index', '-f', common_wheels, '-f', data.packages, data.src.join("pep518-3.0"), ) def test_pep518_build_env_uses_same_pip( script, data, pip_src, common_wheels, deprecated_python): """Ensure the subprocess call to pip for installing the build dependencies is using the same version of pip. 
""" with open(script.scratch_path / 'pip.py', 'w') as fp: fp.write('raise ImportError') script.run( 'python', pip_src / 'src/pip', 'install', '--no-index', '-f', common_wheels, '-f', data.packages, data.src.join("pep518-3.0"), expect_stderr=deprecated_python, ) def test_pep518_refuses_conflicting_requires(script, data): create_basic_wheel_for_package(script, 'setuptools', '1.0') create_basic_wheel_for_package(script, 'wheel', '1.0') project_dir = data.src.join("pep518_conflicting_requires") result = script.pip_install_local('-f', script.scratch_path, project_dir, expect_error=True) assert ( result.returncode != 0 and ('Some build dependencies for %s conflict with PEP 517/518 supported ' 'requirements: setuptools==1.0 is incompatible with ' 'setuptools>=40.8.0.' % path_to_url(project_dir)) in result.stderr ), str(result) def test_pep518_refuses_invalid_requires(script, data, common_wheels): result = script.pip( 'install', '-f', common_wheels, data.src.join("pep518_invalid_requires"), expect_error=True ) assert result.returncode == 1 assert "does not comply with PEP 518" in result.stderr def test_pep518_refuses_invalid_build_system(script, data, common_wheels): result = script.pip( 'install', '-f', common_wheels, data.src.join("pep518_invalid_build_system"), expect_error=True ) assert result.returncode == 1 assert "does not comply with PEP 518" in result.stderr def test_pep518_allows_missing_requires(script, data, common_wheels): result = script.pip( 'install', '-f', common_wheels, data.src.join("pep518_missing_requires"), expect_stderr=True ) # Make sure we don't warn when this occurs. assert "does not comply with PEP 518" not in result.stderr # We want it to go through isolation for now. 
assert "Installing build dependencies" in result.stdout, result.stdout assert result.returncode == 0 assert result.files_created def test_pep518_with_user_pip(script, pip_src, data, common_wheels): """ Check that build dependencies are installed into the build environment without using build isolation for the pip invocation. To ensure that we're not using build isolation when installing the build dependencies, we install a user copy of pip in the non-isolated environment, and break pip in the system site-packages, so that isolated uses of pip will fail. """ script.pip("install", "--ignore-installed", "-f", common_wheels, "--user", pip_src) system_pip_dir = script.site_packages_path / 'pip' system_pip_dir.rmtree() system_pip_dir.mkdir() with open(system_pip_dir / '__init__.py', 'w') as fp: fp.write('raise ImportError\n') script.pip( 'wheel', '--no-index', '-f', common_wheels, '-f', data.packages, data.src.join("pep518-3.0"), ) def test_pep518_with_extra_and_markers(script, data, common_wheels): script.pip( 'wheel', '--no-index', '-f', common_wheels, '-f', data.find_links, data.src.join("pep518_with_extra_and_markers-1.0"), ) def test_pep518_with_namespace_package(script, data, common_wheels): script.pip( 'wheel', '--no-index', '-f', common_wheels, '-f', data.find_links, data.src.join("pep518_with_namespace_package-1.0"), use_module=True, ) @pytest.mark.timeout(60) @pytest.mark.parametrize('command', ('install', 'wheel')) @pytest.mark.parametrize('package', ('pep518_forkbomb', 'pep518_twin_forkbombs_first', 'pep518_twin_forkbombs_second')) def test_pep518_forkbombs(script, data, common_wheels, command, package): package_source = next(data.packages.glob(package + '-[0-9]*.tar.gz')) result = script.pip( 'wheel', '--no-index', '-v', '-f', common_wheels, '-f', data.find_links, package, expect_error=True, ) assert '{1} is already being built: {0} from {1}'.format( package, path_to_url(package_source), ) in result.stdout, str(result) @pytest.mark.network def 
test_pip_second_command_line_interface_works( script, pip_src, data, common_wheels, deprecated_python): """ Check if ``pip<PYVERSION>`` commands behaves equally """ # Re-install pip so we get the launchers. script.pip_install_local('-f', common_wheels, pip_src) # On old versions of Python, urllib3/requests will raise a warning about # the lack of an SSLContext. kwargs = {'expect_stderr': deprecated_python} if pyversion_tuple < (2, 7, 9): kwargs['expect_stderr'] = True args = ['pip%s' % pyversion] args.extend(['install', 'INITools==0.2']) args.extend(['-f', data.packages]) result = script.run(*args, **kwargs) egg_info_folder = ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'initools' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) def test_install_exit_status_code_when_no_requirements(script): """ Test install exit status code when no requirements specified """ result = script.pip('install', expect_error=True) assert "You must give at least one requirement to install" in result.stderr assert result.returncode == ERROR def test_install_exit_status_code_when_blank_requirements_file(script): """ Test install exit status code when blank requirements file specified """ script.scratch_path.join("blank.txt").write("\n") script.pip('install', '-r', 'blank.txt') @pytest.mark.network def test_basic_install_from_pypi(script): """ Test installing a package from PyPI. 
""" result = script.pip('install', '-vvv', 'INITools==0.2') egg_info_folder = ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'initools' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) # Should not display where it's looking for files assert "Looking in indexes: " not in result.stdout assert "Looking in links: " not in result.stdout def test_basic_editable_install(script): """ Test editable installation. """ result = script.pip('install', '-e', 'INITools==0.2', expect_error=True) assert ( "INITools==0.2 should either be a path to a local project or a VCS url" in result.stderr ) assert not result.files_created assert not result.files_updated @pytest.mark.svn def test_basic_install_editable_from_svn(script): """ Test checking out from svn. """ checkout_path = _create_test_package(script) repo_url = _create_svn_repo(script, checkout_path) result = script.pip( 'install', '-e', 'svn+' + repo_url + '#egg=version-pkg' ) result.assert_installed('version-pkg', with_files=['.svn']) def _test_install_editable_from_git(script, tmpdir): """Test cloning from Git.""" pkg_path = _create_test_package(script, name='testpackage', vcs='git') args = ['install', '-e', 'git+%s#egg=testpackage' % path_to_url(pkg_path)] result = script.pip(*args, **{"expect_error": True}) result.assert_installed('testpackage', with_files=['.git']) def test_basic_install_editable_from_git(script, tmpdir): _test_install_editable_from_git(script, tmpdir) def test_install_editable_from_git_autobuild_wheel( script, tmpdir, with_wheel): _test_install_editable_from_git(script, tmpdir) @pytest.mark.network def test_install_editable_uninstalls_existing(data, script, tmpdir): """ Test that installing an editable uninstalls a previously installed non-editable version. 
https://github.com/pypa/pip/issues/1548 https://github.com/pypa/pip/pull/1552 """ to_install = data.packages.join("pip-test-package-0.1.tar.gz") result = script.pip_install_local(to_install) assert 'Successfully installed pip-test-package' in result.stdout result.assert_installed('piptestpackage', editable=False) result = script.pip( 'install', '-e', '%s#egg=pip-test-package' % local_checkout( 'git+https://github.com/pypa/pip-test-package.git', tmpdir.join("cache"), ), ) result.assert_installed('pip-test-package', with_files=['.git']) assert 'Found existing installation: pip-test-package 0.1' in result.stdout assert 'Uninstalling pip-test-package-' in result.stdout assert 'Successfully uninstalled pip-test-package' in result.stdout def test_install_editable_uninstalls_existing_from_path(script, data): """ Test that installing an editable uninstalls a previously installed non-editable version from path """ to_install = data.src.join('simplewheel-1.0') result = script.pip_install_local(to_install) assert 'Successfully installed simplewheel' in result.stdout simple_folder = script.site_packages / 'simplewheel' result.assert_installed('simplewheel', editable=False) assert simple_folder in result.files_created, str(result.stdout) result = script.pip( 'install', '-e', to_install, ) install_path = script.site_packages / 'simplewheel.egg-link' assert install_path in result.files_created, str(result) assert 'Found existing installation: simplewheel 1.0' in result.stdout assert 'Uninstalling simplewheel-' in result.stdout assert 'Successfully uninstalled simplewheel' in result.stdout assert simple_folder in result.files_deleted, str(result.stdout) @need_mercurial def test_basic_install_editable_from_hg(script, tmpdir): """Test cloning from Mercurial.""" pkg_path = _create_test_package(script, name='testpackage', vcs='hg') args = ['install', '-e', 'hg+%s#egg=testpackage' % path_to_url(pkg_path)] result = script.pip(*args, **{"expect_error": True}) 
result.assert_installed('testpackage', with_files=['.hg']) @need_mercurial def test_vcs_url_final_slash_normalization(script, tmpdir): """ Test that presence or absence of final slash in VCS URL is normalized. """ pkg_path = _create_test_package(script, name='testpackage', vcs='hg') args = ['install', '-e', 'hg+%s/#egg=testpackage' % path_to_url(pkg_path)] result = script.pip(*args, **{"expect_error": True}) result.assert_installed('testpackage', with_files=['.hg']) @need_bzr def test_install_editable_from_bazaar(script, tmpdir): """Test checking out from Bazaar.""" pkg_path = _create_test_package(script, name='testpackage', vcs='bazaar') args = ['install', '-e', 'bzr+%s/#egg=testpackage' % path_to_url(pkg_path)] result = script.pip(*args, **{"expect_error": True}) result.assert_installed('testpackage', with_files=['.bzr']) @pytest.mark.network @need_bzr def test_vcs_url_urlquote_normalization(script, tmpdir): """ Test that urlquoted characters are normalized for repo URL comparison. """ script.pip( 'install', '-e', '%s/#egg=django-wikiapp' % local_checkout( 'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp' '/release-0.1', tmpdir.join("cache"), ), ) def test_basic_install_from_local_directory(script, data): """ Test installing from a local directory. """ to_install = data.packages.join("FSPkg") result = script.pip('install', to_install, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_basic_install_relative_directory(script, data): """ Test installing a requirement using a relative path. 
""" egg_info_file = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) egg_link_file = ( script.site_packages / 'FSPkg.egg-link' ) package_folder = script.site_packages / 'fspkg' # Compute relative install path to FSPkg from scratch path. full_rel_path = data.packages.join('FSPkg') - script.scratch_path full_rel_url = ( 'file:' + full_rel_path.replace(os.path.sep, '/') + '#egg=FSPkg' ) embedded_rel_path = script.scratch_path.join(full_rel_path) # For each relative path, install as either editable or not using either # URLs with egg links or not. for req_path in (full_rel_path, full_rel_url, embedded_rel_path): # Regular install. result = script.pip('install', req_path, cwd=script.scratch_path) assert egg_info_file in result.files_created, str(result) assert package_folder in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') # Editable install. result = script.pip('install', '-e' + req_path, cwd=script.scratch_path) assert egg_link_file in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') def test_install_quiet(script, data): """ Test that install -q is actually quiet. """ # Apparently if pip install -q is not actually quiet, then it breaks # everything. See: # https://github.com/pypa/pip/issues/3418 # https://github.com/docker-library/python/issues/83 to_install = data.packages.join("FSPkg") result = script.pip('install', '-qqq', to_install, expect_error=False) assert result.stdout == "" assert result.stderr == "" def test_hashed_install_success(script, data, tmpdir): """ Test that installing various sorts of requirements with correct hashes works. Test file URLs and index packages (which become HTTP URLs behind the scenes). 
""" file_url = path_to_url( (data.packages / 'simple-1.0.tar.gz').abspath) with requirements_file( 'simple2==1.0 --hash=sha256:9336af72ca661e6336eb87bc7de3e8844d853e' '3848c2b9bbd2e8bf01db88c2c7\n' '{simple} --hash=sha256:393043e672415891885c9a2a0929b1af95fb866d6c' 'a016b42d2e6ce53619b653'.format(simple=file_url), tmpdir) as reqs_file: script.pip_install_local('-r', reqs_file.abspath, expect_error=False) def test_hashed_install_failure(script, tmpdir): """Test that wrong hashes stop installation. This makes sure prepare_files() is called in the course of installation and so has the opportunity to halt if hashes are wrong. Checks on various kinds of hashes are in test_req.py. """ with requirements_file('simple2==1.0 --hash=sha256:9336af72ca661e6336eb87b' 'c7de3e8844d853e3848c2b9bbd2e8bf01db88c2c\n', tmpdir) as reqs_file: result = script.pip_install_local('-r', reqs_file.abspath, expect_error=True) assert len(result.files_created) == 0 def test_install_from_local_directory_with_symlinks_to_directories( script, data): """ Test installing from a local directory containing symlinks to directories. """ to_install = data.packages.join("symlinks") result = script.pip('install', to_install, expect_error=False) pkg_folder = script.site_packages / 'symlinks' egg_info_folder = ( script.site_packages / 'symlinks-0.1.dev0-py%s.egg-info' % pyversion ) assert pkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_from_local_directory_with_no_setup_py(script, data): """ Test installing from a local directory with no 'setup.py'. """ result = script.pip('install', data.root, expect_error=True) assert not result.files_created assert "is not installable." in result.stderr assert "Neither 'setup.py' nor 'pyproject.toml' found." in result.stderr def test_editable_install__local_dir_no_setup_py( script, data, deprecated_python): """ Test installing in editable mode from a local directory with no setup.py. 
""" result = script.pip('install', '-e', data.root, expect_error=True) assert not result.files_created msg = result.stderr if deprecated_python: assert 'File "setup.py" not found. ' in msg else: assert msg.startswith('ERROR: File "setup.py" not found. ') assert 'pyproject.toml' not in msg def test_editable_install__local_dir_no_setup_py_with_pyproject( script, deprecated_python): """ Test installing in editable mode from a local directory with no setup.py but that does have pyproject.toml. """ local_dir = script.scratch_path.join('temp').mkdir() pyproject_path = local_dir.join('pyproject.toml') pyproject_path.write('') result = script.pip('install', '-e', local_dir, expect_error=True) assert not result.files_created msg = result.stderr if deprecated_python: assert 'File "setup.py" not found. ' in msg else: assert msg.startswith('ERROR: File "setup.py" not found. ') assert 'A "pyproject.toml" file was found' in msg @pytest.mark.skipif("sys.version_info >= (3,4)") @pytest.mark.xfail def test_install_argparse_shadowed(script): # When argparse is in the stdlib, we support installing it # even though that's pretty useless because older packages did need to # depend on it, and not having its metadata will cause pkg_resources # requirements checks to fail // trigger easy-install, both of which are # bad. # XXX: Note, this test hits the outside-environment check, not the # in-stdlib check, because our tests run in virtualenvs... result = script.pip('install', 'argparse>=1.4') assert "Not uninstalling argparse" in result.stdout @pytest.mark.network @pytest.mark.skipif("sys.version_info < (3,4)") def test_upgrade_argparse_shadowed(script): # If argparse is installed - even if shadowed for imported - we support # upgrading it and properly remove the older versions files. 
script.pip('install', 'argparse==1.3') result = script.pip('install', 'argparse>=1.4') assert "Not uninstalling argparse" not in result.stdout def test_install_curdir(script, data): """ Test installing current directory ('.'). """ run_from = data.packages.join("FSPkg") # Python 2.4 Windows balks if this exists already egg_info = join(run_from, "FSPkg.egg-info") if os.path.isdir(egg_info): rmtree(egg_info) result = script.pip('install', curdir, cwd=run_from, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_pardir(script, data): """ Test installing parent directory ('..'). """ run_from = data.packages.join("FSPkg", "fspkg") result = script.pip('install', pardir, cwd=run_from, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) @pytest.mark.network def test_install_global_option(script): """ Test using global distutils options. 
(In particular those that disable the actual install action) """ result = script.pip( 'install', '--global-option=--version', "INITools==0.1", expect_stderr=True) assert '0.1\n' in result.stdout def test_install_with_hacked_egg_info(script, data): """ test installing a package which defines its own egg_info class """ run_from = data.packages.join("HackedEggInfo") result = script.pip('install', '.', cwd=run_from) assert 'Successfully installed hackedegginfo-0.0.0\n' in result.stdout @pytest.mark.network def test_install_using_install_option_and_editable(script, tmpdir): """ Test installing a tool using -e and --install-option """ folder = 'script_folder' script.scratch_path.join(folder).mkdir() url = 'git+git://github.com/pypa/pip-test-package' result = script.pip( 'install', '-e', '%s#egg=pip-test-package' % local_checkout(url, tmpdir.join("cache")), '--install-option=--script-dir=%s' % folder, expect_stderr=True) script_file = ( script.venv / 'src' / 'pip-test-package' / folder / 'pip-test-package' + script.exe ) assert script_file in result.files_created @pytest.mark.network @need_mercurial def test_install_global_option_using_editable(script, tmpdir): """ Test using global distutils options, but in an editable installation """ url = 'hg+http://bitbucket.org/runeh/anyjson' result = script.pip( 'install', '--global-option=--version', '-e', '%s@0.2.5#egg=anyjson' % local_checkout(url, tmpdir.join("cache")), expect_stderr=True) assert 'Successfully installed anyjson' in result.stdout @pytest.mark.network def test_install_package_with_same_name_in_curdir(script): """ Test installing a package with the same name of a local folder """ script.scratch_path.join("mock==0.6").mkdir() result = script.pip('install', 'mock==0.6') egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) mock100_setup_py = textwrap.dedent('''\ from setuptools import setup setup(name='mock', version='100.1')''') def 
test_install_folder_using_dot_slash(script): """ Test installing a folder using pip install ./foldername """ script.scratch_path.join("mock").mkdir() pkg_path = script.scratch_path / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', './mock') egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_install_folder_using_slash_in_the_end(script): r""" Test installing a folder using pip install foldername/ or foldername\ """ script.scratch_path.join("mock").mkdir() pkg_path = script.scratch_path / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', 'mock' + os.path.sep) egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_install_folder_using_relative_path(script): """ Test installing a folder using pip install folder1/folder2 """ script.scratch_path.join("initools").mkdir() script.scratch_path.join("initools", "mock").mkdir() pkg_path = script.scratch_path / 'initools' / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', Path('initools') / 'mock') egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) @pytest.mark.network def test_install_package_which_contains_dev_in_name(script): """ Test installing package from PyPI which contains 'dev' in name """ result = script.pip('install', 'django-devserver==0.0.4') devserver_folder = script.site_packages / 'devserver' egg_info_folder = ( script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' % pyversion ) assert devserver_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_package_with_target(script): """ Test installing a package using pip install --target """ target_dir = script.scratch_path / 
'target' result = script.pip_install_local('-t', target_dir, "simple==1.0") assert Path('scratch') / 'target' / 'simple' in result.files_created, ( str(result) ) # Test repeated call without --upgrade, no files should have changed result = script.pip_install_local( '-t', target_dir, "simple==1.0", expect_stderr=True, ) assert not Path('scratch') / 'target' / 'simple' in result.files_updated # Test upgrade call, check that new version is installed result = script.pip_install_local('--upgrade', '-t', target_dir, "simple==2.0") assert Path('scratch') / 'target' / 'simple' in result.files_updated, ( str(result) ) egg_folder = ( Path('scratch') / 'target' / 'simple-2.0-py%s.egg-info' % pyversion) assert egg_folder in result.files_created, ( str(result) ) # Test install and upgrade of single-module package result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0') singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py' assert singlemodule_py in result.files_created, str(result) result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1', '--upgrade') assert singlemodule_py in result.files_updated, str(result) def test_install_nonlocal_compatible_wheel(script, data): target_dir = script.scratch_path / 'target' # Test install with --target result = script.pip( 'install', '-t', target_dir, '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--python', '3', '--platform', 'fakeplat', '--abi', 'fakeabi', 'simplewheel', ) assert result.returncode == SUCCESS distinfo = Path('scratch') / 'target' / 'simplewheel-2.0-1.dist-info' assert distinfo in result.files_created # Test install without --target result = script.pip( 'install', '--no-index', '--find-links', data.find_links, '--only-binary=:all:', '--python', '3', '--platform', 'fakeplat', '--abi', 'fakeabi', 'simplewheel', expect_error=True ) assert result.returncode == ERROR def test_install_nonlocal_compatible_wheel_path(script, data): target_dir = script.scratch_path 
/ 'target' # Test a full path requirement result = script.pip( 'install', '-t', target_dir, '--no-index', '--only-binary=:all:', Path(data.packages) / 'simplewheel-2.0-py3-fakeabi-fakeplat.whl' ) assert result.returncode == SUCCESS distinfo = Path('scratch') / 'target' / 'simplewheel-2.0.dist-info' assert distinfo in result.files_created # Test a full path requirement (without --target) result = script.pip( 'install', '--no-index', '--only-binary=:all:', Path(data.packages) / 'simplewheel-2.0-py3-fakeabi-fakeplat.whl', expect_error=True ) assert result.returncode == ERROR def test_install_with_target_and_scripts_no_warning(script, with_wheel): """ Test that installing with --target does not trigger the "script not in PATH" warning (issue #5201) """ target_dir = script.scratch_path / 'target' pkga_path = script.scratch_path / 'pkga' pkga_path.mkdir() pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', version='0.1', py_modules=["pkga"], entry_points={ 'console_scripts': ['pkga=pkga:main'] } ) """)) pkga_path.join("pkga.py").write(textwrap.dedent(""" def main(): pass """)) result = script.pip('install', '--target', target_dir, pkga_path) # This assertion isn't actually needed, if we get the script warning # the script.pip() call will fail with "stderr not expected". But we # leave the assertion to make the intention of the code clearer. 
assert "--no-warn-script-location" not in result.stderr, str(result) def test_install_package_with_root(script, data): """ Test installing a package using pip install --root """ root_dir = script.scratch_path / 'root' result = script.pip( 'install', '--root', root_dir, '-f', data.find_links, '--no-index', 'simple==1.0', ) normal_install_path = ( script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion ) # use distutils to change the root exactly how the --root option does it from distutils.util import change_root root_path = change_root( os.path.join(script.scratch, 'root'), normal_install_path ) assert root_path in result.files_created, str(result) # Should show find-links location in output assert "Looking in indexes: " not in result.stdout assert "Looking in links: " in result.stdout def test_install_package_with_prefix(script, data): """ Test installing a package using pip install --prefix """ prefix_path = script.scratch_path / 'prefix' result = script.pip( 'install', '--prefix', prefix_path, '-f', data.find_links, '--no-binary', 'simple', '--no-index', 'simple==1.0', ) rel_prefix_path = script.scratch / 'prefix' install_path = ( distutils.sysconfig.get_python_lib(prefix=rel_prefix_path) / 'simple-1.0-py{}.egg-info'.format(pyversion) ) assert install_path in result.files_created, str(result) def test_install_editable_with_prefix(script): # make a dummy project pkga_path = script.scratch_path / 'pkga' pkga_path.mkdir() pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', version='0.1') """)) if hasattr(sys, "pypy_version_info"): site_packages = os.path.join( 'prefix', 'lib', 'python{}'.format(pyversion), 'site-packages') else: site_packages = distutils.sysconfig.get_python_lib(prefix='prefix') # make sure target path is in PYTHONPATH pythonpath = script.scratch_path / site_packages pythonpath.makedirs() script.environ["PYTHONPATH"] = pythonpath # install pkga package into the absolute 
prefix directory prefix_path = script.scratch_path / 'prefix' result = script.pip( 'install', '--editable', pkga_path, '--prefix', prefix_path) # assert pkga is installed at correct location install_path = script.scratch / site_packages / 'pkga.egg-link' assert install_path in result.files_created, str(result) def test_install_package_conflict_prefix_and_user(script, data): """ Test installing a package using pip install --prefix --user errors out """ prefix_path = script.scratch_path / 'prefix' result = script.pip( 'install', '-f', data.find_links, '--no-index', '--user', '--prefix', prefix_path, 'simple==1.0', expect_error=True, quiet=True, ) assert ( "Can not combine '--user' and '--prefix'" in result.stderr ) # skip on win/py3 for now, see issue #782 @pytest.mark.skipif("sys.platform == 'win32' and sys.version_info >= (3,)") def test_install_package_that_emits_unicode(script, data): """ Install a package with a setup.py that emits UTF-8 output and then fails. Refs https://github.com/pypa/pip/issues/326 """ to_install = data.packages.join("BrokenEmitsUTF8") result = script.pip( 'install', to_install, expect_error=True, expect_temp=True, quiet=True, ) assert ( 'FakeError: this package designed to fail on install' in result.stdout ) assert 'UnicodeDecodeError' not in result.stdout def test_install_package_with_utf8_setup(script, data): """Install a package with a setup.py that declares a utf-8 encoding.""" to_install = data.packages.join("SetupPyUTF8") script.pip('install', to_install) def test_install_package_with_latin1_setup(script, data): """Install a package with a setup.py that declares a latin-1 encoding.""" to_install = data.packages.join("SetupPyLatin1") script.pip('install', to_install) def test_url_req_case_mismatch_no_index(script, data): """ tar ball url requirements (with no egg fragment), that happen to have upper case project names, should be considered equal to later requirements that reference the project name using lower case. 
tests/data/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz 'requiresupper' has install_requires = ['upper'] """ Upper = '/'.join((data.find_links, 'Upper-1.0.tar.gz')) result = script.pip( 'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper' ) # only Upper-1.0.tar.gz should get installed. egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) def test_url_req_case_mismatch_file_index(script, data): """ tar ball url requirements (with no egg fragment), that happen to have upper case project names, should be considered equal to later requirements that reference the project name using lower case. tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz 'requiredinner' has install_requires = ['dinner'] This test is similar to test_url_req_case_mismatch_no_index; that test tests behaviour when using "--no-index -f", while this one does the same test when using "--index-url". Unfortunately this requires a different set of packages as it requires a prepared index.html file and subdirectory-per-package structure. """ Dinner = '/'.join((data.find_links3, 'dinner', 'Dinner-1.0.tar.gz')) result = script.pip( 'install', '--index-url', data.find_links3, Dinner, 'requiredinner' ) # only Upper-1.0.tar.gz should get installed. egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) def test_url_incorrect_case_no_index(script, data): """ Same as test_url_req_case_mismatch_no_index, except testing for the case where the incorrect case is given in the name of the package to install rather than in a requirements file. 
""" result = script.pip( 'install', '--no-index', '-f', data.find_links, "upper", ) # only Upper-2.0.tar.gz should get installed. egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_url_incorrect_case_file_index(script, data): """ Same as test_url_req_case_mismatch_file_index, except testing for the case where the incorrect case is given in the name of the package to install rather than in a requirements file. """ result = script.pip( 'install', '--index-url', data.find_links3, "dinner", expect_stderr=True, ) # only Upper-2.0.tar.gz should get installed. egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) # Should show index-url location in output assert "Looking in indexes: " in result.stdout assert "Looking in links: " not in result.stdout @pytest.mark.network def test_compiles_pyc(script): """ Test installing with --compile on """ del script.environ["PYTHONDONTWRITEBYTECODE"] script.pip("install", "--compile", "--no-binary=:all:", "INITools==0.2") # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "initools/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "initools/__pycache__/__init__*.pyc" ) assert any(exists) @pytest.mark.network def test_no_compiles_pyc(script): """ Test installing from wheel with --compile on """ del script.environ["PYTHONDONTWRITEBYTECODE"] script.pip("install", "--no-compile", "--no-binary=:all:", "INITools==0.2") # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ 
os.path.exists(script.site_packages_path / "initools/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "initools/__pycache__/__init__*.pyc" ) assert not any(exists) def test_install_upgrade_editable_depending_on_other_editable(script): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', version='0.1') """)) script.pip('install', '--editable', pkga_path) result = script.pip('list', '--format=freeze') assert "pkga==0.1" in result.stdout script.scratch_path.join("pkgb").mkdir() pkgb_path = script.scratch_path / 'pkgb' pkgb_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkgb', version='0.1', install_requires=['pkga']) """)) script.pip('install', '--upgrade', '--editable', pkgb_path, '--no-index') result = script.pip('list', '--format=freeze') assert "pkgb==0.1" in result.stdout def test_install_subprocess_output_handling(script, data): args = ['install', data.src.join('chattymodule')] # Regular install should not show output from the chatty setup.py result = script.pip(*args) assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE") script.pip("uninstall", "-y", "chattymodule") # With --verbose we should show the output. # Only count examples with sys.argv[1] == egg_info, because we call # setup.py multiple times, which should not count as duplicate output. result = script.pip(*(args + ["--verbose"])) assert 1 == result.stdout.count("HELLO FROM CHATTYMODULE egg_info") script.pip("uninstall", "-y", "chattymodule") # If the install fails, then we *should* show the output... but only once, # even if --verbose is given. 
result = script.pip(*(args + ["--global-option=--fail"]), expect_error=True) assert 1 == result.stdout.count("I DIE, I DIE") result = script.pip(*(args + ["--global-option=--fail", "--verbose"]), expect_error=True) assert 1 == result.stdout.count("I DIE, I DIE") def test_install_log(script, data, tmpdir): # test that verbose logs go to "--log" file f = tmpdir.join("log.txt") args = ['--log=%s' % f, 'install', data.src.join('chattymodule')] result = script.pip(*args) assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE") with open(f, 'r') as fp: # one from egg_info, one from install assert 2 == fp.read().count("HELLO FROM CHATTYMODULE") def test_install_topological_sort(script, data): args = ['install', 'TopoRequires4', '--no-index', '-f', data.packages] res = str(script.pip(*args, expect_error=False)) order1 = 'TopoRequires, TopoRequires2, TopoRequires3, TopoRequires4' order2 = 'TopoRequires, TopoRequires3, TopoRequires2, TopoRequires4' assert order1 in res or order2 in res, res def test_install_wheel_broken(script, with_wheel): res = script.pip_install_local('wheelbroken', expect_stderr=True) assert "Successfully installed wheelbroken-0.1" in str(res), str(res) def test_cleanup_after_failed_wheel(script, with_wheel): res = script.pip_install_local('wheelbrokenafter', expect_stderr=True) # One of the effects of not cleaning up is broken scripts: script_py = script.bin_path / "script.py" assert script_py.exists, script_py shebang = open(script_py, 'r').readline().strip() assert shebang != '#!python', shebang # OK, assert that we *said* we were cleaning up: assert "Running setup.py clean for wheelbrokenafter" in str(res), str(res) def test_install_builds_wheels(script, data, with_wheel): # We need to use a subprocess to get the right value on Windows. 
res = script.run('python', '-c', ( 'from pip._internal.utils import appdirs; ' 'print(appdirs.user_cache_dir("pip"))' )) wheels_cache = os.path.join(res.stdout.rstrip('\n'), 'wheels') # NB This incidentally tests a local tree + tarball inputs # see test_install_editable_from_git_autobuild_wheel for editable # vcs coverage. to_install = data.packages.join('requires_wheelbroken_upper') res = script.pip( 'install', '--no-index', '-f', data.find_links, to_install, expect_stderr=True) expected = ("Successfully installed requires-wheelbroken-upper-0" " upper-2.0 wheelbroken-0.1") # Must have installed it all assert expected in str(res), str(res) wheels = [] for top, dirs, files in os.walk(wheels_cache): wheels.extend(files) # and built wheels for upper and wheelbroken assert "Building wheel for upper" in str(res), str(res) assert "Building wheel for wheelb" in str(res), str(res) # Wheels are built for local directories, but not cached. assert "Building wheel for requir" in str(res), str(res) # wheelbroken has to run install # into the cache assert wheels != [], str(res) # and installed from the wheel assert "Running setup.py install for upper" not in str(res), str(res) # Wheels are built for local directories, but not cached. 
assert "Running setup.py install for requir" not in str(res), str(res) # wheelbroken has to run install assert "Running setup.py install for wheelb" in str(res), str(res) # We want to make sure we used the correct implementation tag assert wheels == [ "Upper-2.0-{}-none-any.whl".format(pep425tags.implementation_tag), ] def test_install_no_binary_disables_building_wheels(script, data, with_wheel): to_install = data.packages.join('requires_wheelbroken_upper') res = script.pip( 'install', '--no-index', '--no-binary=upper', '-f', data.find_links, to_install, expect_stderr=True) expected = ("Successfully installed requires-wheelbroken-upper-0" " upper-2.0 wheelbroken-0.1") # Must have installed it all assert expected in str(res), str(res) # and built wheels for wheelbroken only assert "Building wheel for wheelb" in str(res), str(res) # Wheels are built for local directories, but not cached across runs assert "Building wheel for requir" in str(res), str(res) # Don't build wheel for upper which was blacklisted assert "Building wheel for upper" not in str(res), str(res) # Wheels are built for local directories, but not cached across runs assert "Running setup.py install for requir" not in str(res), str(res) # And these two fell back to sdist based installed. assert "Running setup.py install for wheelb" in str(res), str(res) assert "Running setup.py install for upper" in str(res), str(res) def test_install_no_binary_disables_cached_wheels(script, data, with_wheel): # Seed the cache script.pip( 'install', '--no-index', '-f', data.find_links, 'upper') script.pip('uninstall', 'upper', '-y') res = script.pip( 'install', '--no-index', '--no-binary=:all:', '-f', data.find_links, 'upper', expect_stderr=True) assert "Successfully installed upper-2.0" in str(res), str(res) # No wheel building for upper, which was blacklisted assert "Building wheel for upper" not in str(res), str(res) # Must have used source, not a cached wheel to install upper. 
assert "Running setup.py install for upper" in str(res), str(res) def test_install_editable_with_wrong_egg_name(script): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', version='0.1') """)) result = script.pip( 'install', '--editable', 'file://%s#egg=pkgb' % pkga_path, expect_error=True) assert ("Generating metadata for package pkgb produced metadata " "for project name pkga. Fix your #egg=pkgb " "fragments.") in result.stderr assert "Successfully installed pkga" in str(result), str(result) def test_install_tar_xz(script, data): try: import lzma # noqa except ImportError: pytest.skip("No lzma support") res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.xz') assert "Successfully installed singlemodule-0.0.1" in res.stdout, res def test_install_tar_lzma(script, data): try: import lzma # noqa except ImportError: pytest.skip("No lzma support") res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.lzma') assert "Successfully installed singlemodule-0.0.1" in res.stdout, res def test_double_install(script): """ Test double install passing with two same version requirements """ result = script.pip('install', 'pip', 'pip', expect_error=False) msg = "Double requirement given: pip (already in pip, name='pip')" assert msg not in result.stderr def test_double_install_fail(script): """ Test double install failing with two different version requirements """ result = script.pip('install', 'pip==*', 'pip==7.1.2', expect_error=True) msg = ("Double requirement given: pip==7.1.2 (already in pip==*, " "name='pip')") assert msg in result.stderr def test_install_incompatible_python_requires(script): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', python_requires='<1.0', version='0.1') """)) 
result = script.pip('install', pkga_path, expect_error=True) assert ("pkga requires Python '<1.0' " "but the running Python is ") in result.stderr, str(result) def test_install_incompatible_python_requires_editable(script): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', python_requires='<1.0', version='0.1') """)) result = script.pip( 'install', '--editable=%s' % pkga_path, expect_error=True) assert ("pkga requires Python '<1.0' " "but the running Python is ") in result.stderr, str(result) def test_install_incompatible_python_requires_wheel(script, with_wheel): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', python_requires='<1.0', version='0.1') """)) script.run( 'python', 'setup.py', 'bdist_wheel', '--universal', cwd=pkga_path) result = script.pip('install', './pkga/dist/pkga-0.1-py2.py3-none-any.whl', expect_error=True) assert ("pkga requires Python '<1.0' " "but the running Python is ") in result.stderr def test_install_compatible_python_requires(script): script.scratch_path.join("pkga").mkdir() pkga_path = script.scratch_path / 'pkga' pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', python_requires='>1.0', version='0.1') """)) res = script.pip('install', pkga_path, expect_error=True) assert "Successfully installed pkga-0.1" in res.stdout, res @pytest.mark.network def test_install_pep508_with_url(script): res = script.pip( 'install', '--no-index', 'packaging@https://files.pythonhosted.org/packages/2f/2b/' 'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/' 'packaging-15.3-py2.py3-none-any.whl#sha256=' 'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4' ) assert "Successfully installed packaging-15.3" in str(res), 
str(res) @pytest.mark.network def test_install_pep508_with_url_in_install_requires(script): pkga_path = create_test_package_with_setup( script, name='pkga', version='1.0', install_requires=[ 'packaging@https://files.pythonhosted.org/packages/2f/2b/' 'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/' 'packaging-15.3-py2.py3-none-any.whl#sha256=' 'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4' ], ) res = script.pip('install', pkga_path) assert "Successfully installed packaging-15.3" in str(res), str(res) @pytest.mark.network @pytest.mark.parametrize('index', (PyPI.simple_url, TestPyPI.simple_url)) def test_install_from_test_pypi_with_ext_url_dep_is_blocked(script, index): res = script.pip( 'install', '--index-url', index, 'pep-508-url-deps', expect_error=True, ) error_message = ( "Packages installed from PyPI cannot depend on packages " "which are not also hosted on PyPI." ) error_cause = ( "pep-508-url-deps depends on sampleproject@ " "https://github.com/pypa/sampleproject/archive/master.zip" ) assert res.returncode == 1 assert error_message in res.stderr, str(res) assert error_cause in res.stderr, str(res) def test_installing_scripts_outside_path_prints_warning(script): result = script.pip_install_local( "--prefix", script.scratch_path, "script_wheel1", expect_error=True ) assert "Successfully installed script-wheel1" in result.stdout, str(result) assert "--no-warn-script-location" in result.stderr def test_installing_scripts_outside_path_can_suppress_warning(script): result = script.pip_install_local( "--prefix", script.scratch_path, "--no-warn-script-location", "script_wheel1" ) assert "Successfully installed script-wheel1" in result.stdout, str(result) assert "--no-warn-script-location" not in result.stderr def test_installing_scripts_on_path_does_not_print_warning(script): result = script.pip_install_local("script_wheel1") assert "Successfully installed script-wheel1" in result.stdout, str(result) assert 
"--no-warn-script-location" not in result.stderr def test_installed_files_recorded_in_deterministic_order(script, data): """ Ensure that we record the files installed by a package in a deterministic order, to make installs reproducible. """ to_install = data.packages.join("FSPkg") result = script.pip('install', to_install, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info = 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion installed_files_path = ( script.site_packages / egg_info / 'installed-files.txt' ) assert fspkg_folder in result.files_created, str(result.stdout) assert installed_files_path in result.files_created, str(result) installed_files_path = result.files_created[installed_files_path].full installed_files_lines = [ p for p in Path(installed_files_path).read_text().split('\n') if p ] assert installed_files_lines == sorted(installed_files_lines) def test_install_conflict_results_in_warning(script, data): pkgA_path = create_test_package_with_setup( script, name='pkgA', version='1.0', install_requires=['pkgb == 1.0'], ) pkgB_path = create_test_package_with_setup( script, name='pkgB', version='2.0', ) # Install pkgA without its dependency result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps') assert "Successfully installed pkgA-1.0" in result1.stdout, str(result1) # Then install an incorrect version of the dependency result2 = script.pip( 'install', '--no-index', pkgB_path, expect_stderr=True, ) assert "pkga 1.0 has requirement pkgb==1.0" in result2.stderr, str(result2) assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2) def test_install_conflict_warning_can_be_suppressed(script, data): pkgA_path = create_test_package_with_setup( script, name='pkgA', version='1.0', install_requires=['pkgb == 1.0'], ) pkgB_path = create_test_package_with_setup( script, name='pkgB', version='2.0', ) # Install pkgA without its dependency result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps') assert 
"Successfully installed pkgA-1.0" in result1.stdout, str(result1) # Then install an incorrect version of the dependency; suppressing warning result2 = script.pip( 'install', '--no-index', pkgB_path, '--no-warn-conflicts' ) assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_check.py
from tests.lib import create_test_package_with_setup def matches_expected_lines(string, expected_lines, exact=True): if exact: return set(string.splitlines()) == set(expected_lines) # If not exact, check that all expected lines are present return set(expected_lines) <= set(string.splitlines()) def test_check_install_canonicalization(script, deprecated_python): pkga_path = create_test_package_with_setup( script, name='pkgA', version='1.0', install_requires=['normal-missing', 'SPECIAL.missing'], ) normal_path = create_test_package_with_setup( script, name='normal-missing', version='0.1', ) special_path = create_test_package_with_setup( script, name='SPECIAL.missing', version='0.1', ) # Let's install pkgA without its dependency result = script.pip('install', '--no-index', pkga_path, '--no-deps') assert "Successfully installed pkgA-1.0" in result.stdout, str(result) # Install the first missing dependency. Only an error for the # second dependency should remain. result = script.pip( 'install', '--no-index', normal_path, '--quiet', expect_error=True ) expected_lines = [ "ERROR: pkga 1.0 requires SPECIAL.missing, which is not installed.", ] # Deprecated python versions produce an extra warning on stderr assert matches_expected_lines( result.stderr, expected_lines, exact=not deprecated_python) assert result.returncode == 0 # Install the second missing package and expect that there is no warning # during the installation. 
This is special as the package name requires # name normalization (as in https://github.com/pypa/pip/issues/5134) result = script.pip( 'install', '--no-index', special_path, '--quiet', ) assert matches_expected_lines( result.stderr, [], exact=not deprecated_python) assert result.returncode == 0 # Double check that all errors are resolved in the end result = script.pip('check') expected_lines = [ "No broken requirements found.", ] assert matches_expected_lines(result.stdout, expected_lines) assert result.returncode == 0 def test_check_install_does_not_warn_for_out_of_graph_issues( script, deprecated_python): pkg_broken_path = create_test_package_with_setup( script, name='broken', version='1.0', install_requires=['missing', 'conflict < 1.0'], ) pkg_unrelated_path = create_test_package_with_setup( script, name='unrelated', version='1.0', ) pkg_conflict_path = create_test_package_with_setup( script, name='conflict', version='1.0', ) # Install a package without it's dependencies result = script.pip('install', '--no-index', pkg_broken_path, '--no-deps') # Deprecated python versions produce an extra warning on stderr assert matches_expected_lines( result.stderr, [], exact=not deprecated_python) # Install conflict package result = script.pip( 'install', '--no-index', pkg_conflict_path, expect_error=True, ) assert matches_expected_lines(result.stderr, [ "ERROR: broken 1.0 requires missing, which is not installed.", ( "ERROR: broken 1.0 has requirement conflict<1.0, but " "you'll have conflict 1.0 which is incompatible." 
), ], exact=not deprecated_python) # Install unrelated package result = script.pip( 'install', '--no-index', pkg_unrelated_path, '--quiet', ) # should not warn about broken's deps when installing unrelated package assert matches_expected_lines( result.stderr, [], exact=not deprecated_python) result = script.pip('check', expect_error=True) expected_lines = [ "broken 1.0 requires missing, which is not installed.", "broken 1.0 has requirement conflict<1.0, but you have conflict 1.0.", ] assert matches_expected_lines(result.stdout, expected_lines)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_cleanup.py
import os from os.path import exists import pytest from pip._internal.cli.status_codes import PREVIOUS_BUILD_DIR_ERROR from pip._internal.locations import write_delete_marker_file from tests.lib import need_mercurial from tests.lib.local_repos import local_checkout def test_cleanup_after_install(script, data): """ Test clean up after installing a package. """ script.pip( 'install', '--no-index', '--find-links=%s' % data.find_links, 'simple' ) build = script.venv_path / "build" src = script.venv_path / "src" assert not exists(build), "build/ dir still exists: %s" % build assert not exists(src), "unexpected src/ dir exists: %s" % src script.assert_no_temp() @pytest.mark.network def test_no_clean_option_blocks_cleaning_after_install(script, data): """ Test --no-clean option blocks cleaning after install """ build = script.base_path / 'pip-build' script.pip( 'install', '--no-clean', '--no-index', '--build', build, '--find-links=%s' % data.find_links, 'simple', expect_temp=True, ) assert exists(build) @pytest.mark.network @need_mercurial def test_cleanup_after_install_editable_from_hg(script, tmpdir): """ Test clean up after cloning from Mercurial. """ script.pip( 'install', '-e', '%s#egg=ScriptTest' % local_checkout( 'hg+https://bitbucket.org/ianb/scripttest', tmpdir.join("cache"), ), expect_error=True, ) build = script.venv_path / 'build' src = script.venv_path / 'src' assert not exists(build), "build/ dir still exists: %s" % build assert exists(src), "expected src/ dir doesn't exist: %s" % src script.assert_no_temp() def test_cleanup_after_install_from_local_directory(script, data): """ Test clean up after installing from a local directory. 
""" to_install = data.packages.join("FSPkg") script.pip('install', to_install, expect_error=False) build = script.venv_path / 'build' src = script.venv_path / 'src' assert not exists(build), "unexpected build/ dir exists: %s" % build assert not exists(src), "unexpected src/ dir exist: %s" % src script.assert_no_temp() def test_cleanup_req_satisifed_no_name(script, data): """ Test cleanup when req is already satisfied, and req has no 'name' """ # this test confirms Issue #420 is fixed # reqs with no 'name' that were already satisfied were leaving behind tmp # build dirs # 2 examples of reqs that would do this # 1) https://bitbucket.org/ianb/initools/get/tip.zip # 2) parent-0.1.tar.gz dist = data.packages.join("parent-0.1.tar.gz") script.pip('install', dist) script.pip('install', dist) build = script.venv_path / 'build' assert not exists(build), "unexpected build/ dir exists: %s" % build script.assert_no_temp() def test_cleanup_after_install_exception(script, data): """ Test clean up after a 'setup.py install' exception. """ # broken==0.2broken fails during install; see packages readme file result = script.pip( 'install', '-f', data.find_links, '--no-index', 'broken==0.2broken', expect_error=True, ) build = script.venv_path / 'build' assert not exists(build), "build/ dir still exists: %s" % result.stdout script.assert_no_temp() def test_cleanup_after_egg_info_exception(script, data): """ Test clean up after a 'setup.py egg_info' exception. 
""" # brokenegginfo fails during egg_info; see packages readme file result = script.pip( 'install', '-f', data.find_links, '--no-index', 'brokenegginfo==0.1', expect_error=True, ) build = script.venv_path / 'build' assert not exists(build), "build/ dir still exists: %s" % result.stdout script.assert_no_temp() @pytest.mark.network def test_cleanup_prevented_upon_build_dir_exception(script, data): """ Test no cleanup occurs after a PreviousBuildDirError """ build = script.venv_path / 'build' build_simple = build / 'simple' os.makedirs(build_simple) write_delete_marker_file(build_simple) build_simple.join("setup.py").write("#") result = script.pip( 'install', '-f', data.find_links, '--no-index', 'simple', '--build', build, expect_error=True, expect_temp=True, ) assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, str(result) assert "pip can't proceed" in result.stderr, str(result) assert exists(build_simple), str(result)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_compat.py
""" Tests for compatibility workarounds. """ import os import pytest from tests.lib import assert_all_changes, pyversion @pytest.mark.network def test_debian_egg_name_workaround(script): """ We can uninstall packages installed with the pyversion removed from the egg-info metadata directory name. Refs: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367 https://bugs.launchpad.net/ubuntu/+source/distribute/+bug/725178 https://bitbucket.org/ianb/pip/issue/104/pip-uninstall-on-ubuntu-linux """ result = script.pip('install', 'INITools==0.2', expect_error=True) egg_info = os.path.join( script.site_packages, "INITools-0.2-py%s.egg-info" % pyversion) # Debian only removes pyversion for global installs, not inside a venv # so even if this test runs on a Debian/Ubuntu system with broken # setuptools, since our test runs inside a venv we'll still have the normal # .egg-info assert egg_info in result.files_created, "Couldn't find %s" % egg_info # The Debian no-pyversion version of the .egg-info mangled = os.path.join(script.site_packages, "INITools-0.2.egg-info") assert mangled not in result.files_created, "Found unexpected %s" % mangled # Simulate a Debian install by copying the .egg-info to their name for it full_egg_info = os.path.join(script.base_path, egg_info) assert os.path.isdir(full_egg_info) full_mangled = os.path.join(script.base_path, mangled) os.renames(full_egg_info, full_mangled) assert os.path.isdir(full_mangled) # Try the uninstall and verify that everything is removed. result2 = script.pip("uninstall", "INITools", "-y") assert_all_changes(result, result2, [script.venv / 'build', 'cache']) def test_setup_py_with_dos_line_endings(script, data): """ It doesn't choke on a setup.py file that uses DOS line endings (\\r\\n). Refs https://github.com/pypa/pip/issues/237 """ to_install = data.packages.join("LineEndings") script.pip('install', to_install, expect_error=False)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_config.py
import os import tempfile import textwrap import pytest def test_options_from_env_vars(script): """ Test if ConfigOptionParser reads env vars (e.g. not using PyPI here) """ script.environ['PIP_NO_INDEX'] = '1' result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert "Ignoring indexes:" in result.stdout, str(result) assert ( "DistributionNotFound: No matching distribution found for INITools" in result.stdout ) def test_command_line_options_override_env_vars(script, virtualenv): """ Test that command line options override environmental variables. """ script.environ['PIP_INDEX_URL'] = 'https://example.com/simple/' result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert ( "Getting page https://example.com/simple/initools" in result.stdout ) virtualenv.clear() result = script.pip( 'install', '-vvv', '--index-url', 'https://download.zope.org/ppix', 'INITools', expect_error=True, ) assert "example.com" not in result.stdout assert "Getting page https://download.zope.org/ppix" in result.stdout @pytest.mark.network def test_env_vars_override_config_file(script, virtualenv): """ Test that environmental variables override settings in config files. """ fd, config_file = tempfile.mkstemp('-pip.cfg', 'test-') try: _test_env_vars_override_config_file(script, virtualenv, config_file) finally: # `os.close` is a workaround for a bug in subprocess # https://bugs.python.org/issue3210 os.close(fd) os.remove(config_file) def _test_env_vars_override_config_file(script, virtualenv, config_file): # set this to make pip load it script.environ['PIP_CONFIG_FILE'] = config_file # It's important that we test this particular config value ('no-index') # because there is/was a bug which only shows up in cases in which # 'config-item' and 'config_item' hash to the same value modulo the size # of the config dictionary. 
(script.scratch_path / config_file).write(textwrap.dedent("""\ [global] no-index = 1 """)) result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert ( "DistributionNotFound: No matching distribution found for INITools" in result.stdout ) script.environ['PIP_NO_INDEX'] = '0' virtualenv.clear() result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert "Successfully installed INITools" in result.stdout @pytest.mark.network def test_command_line_append_flags(script, virtualenv, data): """ Test command line flags that append to defaults set by environmental variables. """ script.environ['PIP_FIND_LINKS'] = 'https://test.pypi.org' result = script.pip( 'install', '-vvv', 'INITools', '--trusted-host', 'test.pypi.org', expect_error=True, ) assert ( "Analyzing links from page https://test.pypi.org" in result.stdout ), str(result) virtualenv.clear() result = script.pip( 'install', '-vvv', '--find-links', data.find_links, 'INITools', '--trusted-host', 'test.pypi.org', expect_error=True, ) assert ( "Analyzing links from page https://test.pypi.org" in result.stdout ) assert "Skipping link %s" % data.find_links in result.stdout @pytest.mark.network def test_command_line_appends_correctly(script, data): """ Test multiple appending options set by environmental variables. """ script.environ['PIP_FIND_LINKS'] = ( 'https://test.pypi.org %s' % data.find_links ) result = script.pip( 'install', '-vvv', 'INITools', '--trusted-host', 'test.pypi.org', expect_error=True, ) assert ( "Analyzing links from page https://test.pypi.org" in result.stdout ), result.stdout assert "Skipping link %s" % data.find_links in result.stdout def test_config_file_override_stack(script, virtualenv): """ Test config files (global, overriding a global config with a local, overriding all with a command line flag). 
""" fd, config_file = tempfile.mkstemp('-pip.cfg', 'test-') try: _test_config_file_override_stack(script, virtualenv, config_file) finally: # `os.close` is a workaround for a bug in subprocess # https://bugs.python.org/issue3210 os.close(fd) os.remove(config_file) def _test_config_file_override_stack(script, virtualenv, config_file): # set this to make pip load it script.environ['PIP_CONFIG_FILE'] = config_file (script.scratch_path / config_file).write(textwrap.dedent("""\ [global] index-url = https://download.zope.org/ppix """)) result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert ( "Getting page https://download.zope.org/ppix/initools" in result.stdout ) virtualenv.clear() (script.scratch_path / config_file).write(textwrap.dedent("""\ [global] index-url = https://download.zope.org/ppix [install] index-url = https://pypi.gocept.com/ """)) result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert "Getting page https://pypi.gocept.com/initools" in result.stdout result = script.pip( 'install', '-vvv', '--index-url', 'https://pypi.org/simple/', 'INITools', expect_error=True, ) assert ( "Getting page http://download.zope.org/ppix/INITools" not in result.stdout ) assert "Getting page https://pypi.gocept.com/INITools" not in result.stdout assert ( "Getting page https://pypi.org/simple/initools" in result.stdout ) def test_options_from_venv_config(script, virtualenv): """ Test if ConfigOptionParser reads a virtualenv-local config file """ from pip._internal.locations import config_basename conf = "[global]\nno-index = true" ini = virtualenv.location / config_basename with open(ini, 'w') as f: f.write(conf) result = script.pip('install', '-vvv', 'INITools', expect_error=True) assert "Ignoring indexes:" in result.stdout, str(result) assert ( "DistributionNotFound: No matching distribution found for INITools" in result.stdout ) def test_install_no_binary_via_config_disables_cached_wheels( script, data, with_wheel): config_file = 
tempfile.NamedTemporaryFile(mode='wt', delete=False) try: script.environ['PIP_CONFIG_FILE'] = config_file.name config_file.write(textwrap.dedent("""\ [global] no-binary = :all: """)) config_file.close() res = script.pip( 'install', '--no-index', '-f', data.find_links, 'upper', expect_stderr=True) finally: os.unlink(config_file.name) assert "Successfully installed upper-2.0" in str(res), str(res) # No wheel building for upper, which was blacklisted assert "Building wheel for upper" not in str(res), str(res) # Must have used source, not a cached wheel to install upper. assert "Running setup.py install for upper" in str(res), str(res)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_extras.py
import textwrap from os.path import join import pytest @pytest.mark.network def test_simple_extras_install_from_pypi(script): """ Test installing a package from PyPI using extras dependency Paste[openid]. """ result = script.pip( 'install', 'Paste[openid]==1.7.5.1', expect_stderr=True, ) initools_folder = script.site_packages / 'openid' assert initools_folder in result.files_created, result.files_created def test_extras_after_wheel(script, data): """ Test installing a package with extras after installing from a wheel. """ simple = script.site_packages / 'simple' no_extra = script.pip( 'install', '--no-index', '-f', data.find_links, 'requires_simple_extra', expect_stderr=True, ) assert simple not in no_extra.files_created, no_extra.files_created extra = script.pip( 'install', '--no-index', '-f', data.find_links, 'requires_simple_extra[extra]', expect_stderr=True, ) assert simple in extra.files_created, extra.files_created @pytest.mark.network def test_no_extras_uninstall(script): """ No extras dependency gets uninstalled when the root package is uninstalled """ result = script.pip( 'install', 'Paste[openid]==1.7.5.1', expect_stderr=True, ) assert join(script.site_packages, 'paste') in result.files_created, ( sorted(result.files_created.keys()) ) assert join(script.site_packages, 'openid') in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'Paste', '-y') # openid should not be uninstalled initools_folder = script.site_packages / 'openid' assert initools_folder not in result2.files_deleted, result.files_deleted def test_nonexistent_extra_warns_user_no_wheel(script, data): """ A warning is logged telling the user that the extra option they requested does not exist in the project they are wishing to install. This exercises source installs. 
""" result = script.pip( 'install', '--no-binary=:all:', '--no-index', '--find-links=' + data.find_links, 'simple[nonexistent]', expect_stderr=True, ) assert ( "simple 3.0 does not provide the extra 'nonexistent'" in result.stderr ), str(result) def test_nonexistent_extra_warns_user_with_wheel(script, data): """ A warning is logged telling the user that the extra option they requested does not exist in the project they are wishing to install. This exercises wheel installs. """ result = script.pip( 'install', '--no-index', '--find-links=' + data.find_links, 'simplewheel[nonexistent]', expect_stderr=True, ) assert ( "simplewheel 2.0 does not provide the extra 'nonexistent'" in result.stderr ) def test_nonexistent_options_listed_in_order(script, data): """ Warn the user for each extra that doesn't exist. """ result = script.pip( 'install', '--no-index', '--find-links=' + data.find_links, 'simplewheel[nonexistent, nope]', expect_stderr=True, ) msg = ( " WARNING: simplewheel 2.0 does not provide the extra 'nonexistent'\n" " WARNING: simplewheel 2.0 does not provide the extra 'nope'" ) assert msg in result.stderr def test_install_special_extra(script): # Check that uppercase letters and '-' are dealt with # make a dummy project pkga_path = script.scratch_path / 'pkga' pkga_path.mkdir() pkga_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup setup(name='pkga', version='0.1', extras_require={'Hop_hOp-hoP': ['missing_pkg']}, ) """)) result = script.pip( 'install', '--no-index', '%s[Hop_hOp-hoP]' % pkga_path, expect_error=True) assert ( "Could not find a version that satisfies the requirement missing_pkg" ) in result.stderr, str(result)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_force_reinstall.py
from tests.lib import assert_all_changes def check_installed_version(script, package, expected): result = script.pip('show', package) lines = result.stdout.splitlines() version = None for line in lines: if line.startswith('Version: '): version = line.split()[-1] break assert version == expected, 'version {} != {}'.format(version, expected) def check_force_reinstall(script, specifier, expected): """ Args: specifier: the requirement specifier to force-reinstall. expected: the expected version after force-reinstalling. """ result = script.pip_install_local('simplewheel==1.0') check_installed_version(script, 'simplewheel', '1.0') result2 = script.pip_install_local('--force-reinstall', specifier) assert result2.files_updated, 'force-reinstall failed' check_installed_version(script, 'simplewheel', expected) result3 = script.pip('uninstall', 'simplewheel', '-y', expect_error=True) assert_all_changes(result, result3, [script.venv / 'build', 'cache']) def test_force_reinstall_with_no_version_specifier(script): """ Check --force-reinstall when there is no version specifier and the installed version is not the newest version. """ check_force_reinstall(script, 'simplewheel', '2.0') def test_force_reinstall_with_same_version_specifier(script): """ Check --force-reinstall when the version specifier equals the installed version and the installed version is not the newest version. """ check_force_reinstall(script, 'simplewheel==1.0', '1.0')
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_index.py
import os import textwrap from pip._vendor.six.moves.urllib import parse as urllib_parse from tests.lib import pyversion def test_find_links_relative_path(script, data): """Test find-links as a relative path.""" result = script.pip( 'install', 'parent==0.1', '--no-index', '--find-links', 'packages/', cwd=data.root, ) egg_info_folder = ( script.site_packages / 'parent-0.1-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'parent' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) def test_find_links_requirements_file_relative_path(script, data): """Test find-links as a relative path to a reqs file.""" script.scratch_path.join("test-req.txt").write(textwrap.dedent(""" --no-index --find-links=%s parent==0.1 """ % data.packages.replace(os.path.sep, '/'))) result = script.pip( 'install', '-r', script.scratch_path / "test-req.txt", cwd=data.root, ) egg_info_folder = ( script.site_packages / 'parent-0.1-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'parent' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) def test_install_from_file_index_hash_link(script, data): """ Test that a pkg can be installed from a file:// index using a link with a hash """ result = script.pip('install', '-i', data.index_url(), 'simple==1.0') egg_info_folder = ( script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion ) assert egg_info_folder in result.files_created, str(result) def test_file_index_url_quoting(script, data): """ Test url quoting of file index url with a space """ index_url = data.index_url(urllib_parse.quote("in dex")) result = script.pip( 'install', '-vvv', '--index-url', index_url, 'simple', expect_error=False, ) assert (script.site_packages / 'simple') in result.files_created, ( str(result.stdout) ) assert ( script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion ) in 
result.files_created, str(result)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_reqs.py
import os import textwrap import pytest from tests.lib import ( _create_test_package_with_subdirectory, path_to_url, pyversion, requirements_file, ) from tests.lib.local_repos import local_checkout @pytest.mark.network def test_requirements_file(script): """ Test installing from a requirements file. """ other_lib_name, other_lib_version = 'anyjson', '0.3' script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\ INITools==0.2 # and something else to test out: %s<=%s """ % (other_lib_name, other_lib_version))) result = script.pip( 'install', '-r', script.scratch_path / 'initools-req.txt' ) assert ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion in result.files_created ) assert script.site_packages / 'initools' in result.files_created assert result.files_created[script.site_packages / other_lib_name].dir fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion) assert result.files_created[script.site_packages / fn].dir def test_schema_check_in_requirements_file(script): """ Test installing from a requirements file with an invalid vcs schema.. """ script.scratch_path.join("file-egg-req.txt").write( "\n%s\n" % ( "git://github.com/alex/django-fixture-generator.git" "#egg=fixture_generator" ) ) with pytest.raises(AssertionError): script.pip( "install", "-vvv", "-r", script.scratch_path / "file-egg-req.txt" ) def test_relative_requirements_file(script, data): """ Test installing from a requirements file with a relative path. For path URLs, use an egg= definition. """ egg_info_file = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) egg_link_file = ( script.site_packages / 'FSPkg.egg-link' ) package_folder = script.site_packages / 'fspkg' # Compute relative install path to FSPkg from scratch path. 
full_rel_path = data.packages.join('FSPkg') - script.scratch_path full_rel_url = 'file:' + full_rel_path + '#egg=FSPkg' embedded_rel_path = script.scratch_path.join(full_rel_path) # For each relative path, install as either editable or not using either # URLs with egg links or not. for req_path in (full_rel_path, full_rel_url, embedded_rel_path): req_path = req_path.replace(os.path.sep, '/') # Regular install. with requirements_file(req_path + '\n', script.scratch_path) as reqs_file: result = script.pip('install', '-vvv', '-r', reqs_file.name, cwd=script.scratch_path) assert egg_info_file in result.files_created, str(result) assert package_folder in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') # Editable install. with requirements_file('-e ' + req_path + '\n', script.scratch_path) as reqs_file: result = script.pip('install', '-vvv', '-r', reqs_file.name, cwd=script.scratch_path) assert egg_link_file in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') @pytest.mark.network @pytest.mark.svn def test_multiple_requirements_files(script, tmpdir): """ Test installing from multiple nested requirements files. 
""" other_lib_name, other_lib_version = 'anyjson', '0.3' script.scratch_path.join("initools-req.txt").write( textwrap.dedent(""" -e %s@10#egg=INITools -r %s-req.txt """) % ( local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), other_lib_name ), ) script.scratch_path.join("%s-req.txt" % other_lib_name).write( "%s<=%s" % (other_lib_name, other_lib_version) ) result = script.pip( 'install', '-r', script.scratch_path / 'initools-req.txt' ) assert result.files_created[script.site_packages / other_lib_name].dir fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion) assert result.files_created[script.site_packages / fn].dir assert script.venv / 'src' / 'initools' in result.files_created def test_package_in_constraints_and_dependencies(script, data): script.scratch_path.join("constraints.txt").write( "TopoRequires2==0.0.1\nTopoRequires==0.0.1" ) result = script.pip('install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'TopoRequires2') assert 'installed TopoRequires-0.0.1' in result.stdout def test_multiple_constraints_files(script, data): script.scratch_path.join("outer.txt").write("-c inner.txt") script.scratch_path.join("inner.txt").write( "Upper==1.0") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'outer.txt', 'Upper') assert 'installed Upper-1.0' in result.stdout def test_respect_order_in_requirements_file(script, data): script.scratch_path.join("frameworks-req.txt").write(textwrap.dedent("""\ parent child simple """)) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', script.scratch_path / 'frameworks-req.txt' ) downloaded = [line for line in result.stdout.split('\n') if 'Collecting' in line] assert 'parent' in downloaded[0], ( 'First download should be "parent" but was "%s"' % downloaded[0] ) assert 'child' in downloaded[1], ( 'Second download should be "child" but was "%s"' % downloaded[1] ) 
assert 'simple' in downloaded[2], ( 'Third download should be "simple" but was "%s"' % downloaded[2] ) def test_install_local_editable_with_extras(script, data): to_install = data.packages.join("LocalExtras") res = script.pip_install_local( '-e', to_install + '[bar]', expect_error=False, expect_stderr=True, ) assert script.site_packages / 'easy-install.pth' in res.files_updated, ( str(res) ) assert ( script.site_packages / 'LocalExtras.egg-link' in res.files_created ), str(res) assert script.site_packages / 'simple' in res.files_created, str(res) def test_install_collected_dependencies_first(script): result = script.pip_install_local( 'toporequires2', ) text = [line for line in result.stdout.split('\n') if 'Installing' in line][0] assert text.endswith('toporequires2') @pytest.mark.network def test_install_local_editable_with_subdirectory(script): version_pkg_path = _create_test_package_with_subdirectory(script, 'version_subdir') result = script.pip( 'install', '-e', '%s#egg=version_subpkg&subdirectory=version_subdir' % ('git+%s' % path_to_url(version_pkg_path),) ) result.assert_installed('version-subpkg', sub_dir='version_subdir') @pytest.mark.network def test_install_local_with_subdirectory(script): version_pkg_path = _create_test_package_with_subdirectory(script, 'version_subdir') result = script.pip( 'install', '%s#egg=version_subpkg&subdirectory=version_subdir' % ('git+' + path_to_url(version_pkg_path),) ) result.assert_installed('version_subpkg.py', editable=False) def test_wheel_user_with_prefix_in_pydistutils_cfg( script, data, with_wheel): if os.name == 'posix': user_filename = ".pydistutils.cfg" else: user_filename = "pydistutils.cfg" user_cfg = os.path.join(os.path.expanduser('~'), user_filename) script.scratch_path.join("bin").mkdir() with open(user_cfg, "w") as cfg: cfg.write(textwrap.dedent(""" [install] prefix=%s""" % script.scratch_path)) result = script.pip( 'install', '--user', '--no-index', '-f', data.find_links, 'requiresupper') # Check that we 
are really installing a wheel assert 'Running setup.py install for requiresupper' not in result.stdout assert 'installed requiresupper' in result.stdout def test_install_option_in_requirements_file(script, data, virtualenv): """ Test --install-option in requirements file overrides same option in cli """ script.scratch_path.join("home1").mkdir() script.scratch_path.join("home2").mkdir() script.scratch_path.join("reqs.txt").write( textwrap.dedent( """simple --install-option='--home=%s'""" % script.scratch_path.join("home1"))) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', script.scratch_path / 'reqs.txt', '--install-option=--home=%s' % script.scratch_path.join("home2"), expect_stderr=True) package_dir = script.scratch / 'home1' / 'lib' / 'python' / 'simple' assert package_dir in result.files_created def test_constraints_not_installed_by_default(script, data): script.scratch_path.join("c.txt").write("requiresupper") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'c.txt', 'Upper') assert 'requiresupper' not in result.stdout def test_constraints_only_causes_error(script, data): script.scratch_path.join("c.txt").write("requiresupper") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'c.txt', expect_error=True) assert 'installed requiresupper' not in result.stdout def test_constraints_local_editable_install_causes_error(script, data): script.scratch_path.join("constraints.txt").write( "singlemodule==0.0.0" ) to_install = data.src.join("singlemodule") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', '-e', to_install, expect_error=True) assert 'Could not satisfy constraints for' in result.stderr @pytest.mark.network def test_constraints_local_editable_install_pep518(script, data): to_install = data.src.join("pep518-3.0") script.pip('download', 'setuptools', 'wheel', '-d', 
data.packages) script.pip( 'install', '--no-index', '-f', data.find_links, '-e', to_install) def test_constraints_local_install_causes_error(script, data): script.scratch_path.join("constraints.txt").write( "singlemodule==0.0.0" ) to_install = data.src.join("singlemodule") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', to_install, expect_error=True) assert 'Could not satisfy constraints for' in result.stderr def test_constraints_constrain_to_local_editable(script, data): to_install = data.src.join("singlemodule") script.scratch_path.join("constraints.txt").write( "-e %s#egg=singlemodule" % path_to_url(to_install) ) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'singlemodule') assert 'Running setup.py develop for singlemodule' in result.stdout def test_constraints_constrain_to_local(script, data): to_install = data.src.join("singlemodule") script.scratch_path.join("constraints.txt").write( "%s#egg=singlemodule" % path_to_url(to_install) ) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'singlemodule') assert 'Running setup.py install for singlemodule' in result.stdout def test_constrained_to_url_install_same_url(script, data): to_install = data.src.join("singlemodule") constraints = path_to_url(to_install) + "#egg=singlemodule" script.scratch_path.join("constraints.txt").write(constraints) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', to_install) assert ('Running setup.py install for singlemodule' in result.stdout), str(result) def test_double_install_spurious_hash_mismatch( script, tmpdir, data, with_wheel): """Make sure installing the same hashed sdist twice doesn't throw hash mismatch errors. Really, this is a test that we disable reads from the wheel cache in hash-checking mode. 
Locally, implicitly built wheels of sdists obviously have different hashes from the original archives. Comparing against those causes spurious mismatch errors. """ # Install wheel package, otherwise, it won't try to build wheels. with requirements_file('simple==1.0 --hash=sha256:393043e672415891885c9a2a' '0929b1af95fb866d6ca016b42d2e6ce53619b653', tmpdir) as reqs_file: # Install a package (and build its wheel): result = script.pip_install_local( '--find-links', data.find_links, '-r', reqs_file.abspath, expect_error=False) assert 'Successfully installed simple-1.0' in str(result) # Uninstall it: script.pip('uninstall', '-y', 'simple', expect_error=False) # Then install it again. We should not hit a hash mismatch, and the # package should install happily. result = script.pip_install_local( '--find-links', data.find_links, '-r', reqs_file.abspath, expect_error=False) assert 'Successfully installed simple-1.0' in str(result) def test_install_with_extras_from_constraints(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras') assert script.site_packages / 'simple' in result.files_created def test_install_with_extras_from_install(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]') assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_with_extras_joined(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]' ) assert 
script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_with_extras_editable_joined(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "-e %s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]') assert script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_distribution_full_union(script, data): to_install = data.packages.join("LocalExtras") result = script.pip_install_local( to_install, to_install + "[bar]", to_install + "[baz]") assert 'Running setup.py install for LocalExtras' in result.stdout assert script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py' in result.files_created def test_install_distribution_duplicate_extras(script, data): to_install = data.packages.join("LocalExtras") package_name = to_install + "[bar]" with pytest.raises(AssertionError): result = script.pip_install_local(package_name, package_name) assert 'Double requirement given: %s' % package_name in result.stderr def test_install_distribution_union_with_constraints(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s[bar]" % to_install) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', to_install + '[baz]') assert 'Running setup.py install for LocalExtras' in result.stdout assert script.site_packages / 'singlemodule.py' in result.files_created def test_install_distribution_union_with_versions(script, data): to_install_001 = data.packages.join("LocalExtras") to_install_002 = data.packages.join("LocalExtras-0.0.2") result = script.pip_install_local( to_install_001 + "[bar]", to_install_002 + "[baz]") assert 
("Successfully installed LocalExtras-0.0.1 simple-3.0 " + "singlemodule-0.0.1" in result.stdout) @pytest.mark.xfail def test_install_distribution_union_conflicting_extras(script, data): # LocalExtras requires simple==1.0, LocalExtras[bar] requires simple==2.0; # without a resolver, pip does not detect the conflict between simple==1.0 # and simple==2.0. Once a resolver is added, this conflict should be # detected. to_install = data.packages.join("LocalExtras-0.0.2") result = script.pip_install_local(to_install, to_install + "[bar]", expect_error=True) assert 'installed' not in result.stdout assert "Conflict" in result.stderr def test_install_unsupported_wheel_link_with_marker(script): script.scratch_path.join("with-marker.txt").write( textwrap.dedent("""\ %s; %s """) % ( 'https://github.com/a/b/c/asdf-1.5.2-cp27-none-xyz.whl', 'sys_platform == "xyz"' ) ) result = script.pip( 'install', '-r', script.scratch_path / 'with-marker.txt', expect_error=False, ) assert ("Ignoring asdf: markers 'sys_platform == \"xyz\"' don't match " "your environment") in result.stdout assert len(result.files_created) == 0 def test_install_unsupported_wheel_file(script, data): # Trying to install a local wheel with an incompatible version/type # should fail. script.scratch_path.join("wheel-file.txt").write(textwrap.dedent("""\ %s """ % data.packages.join("simple.dist-0.1-py1-none-invalid.whl"))) result = script.pip( 'install', '-r', script.scratch_path / 'wheel-file.txt', expect_error=True, expect_stderr=True, ) assert ("simple.dist-0.1-py1-none-invalid.whl is not a supported " + "wheel on this platform" in result.stderr) assert len(result.files_created) == 0 def test_install_options_local_to_package(script, data): """Make sure --install-options does not leak across packages. A requirements.txt file can have per-package --install-options; these should be isolated to just the package instead of leaking to subsequent packages. 
This needs to be a functional test because the bug was around cross-contamination at install time. """ home_simple = script.scratch_path.join("for-simple") test_simple = script.scratch.join("for-simple") home_simple.mkdir() reqs_file = script.scratch_path.join("reqs.txt") reqs_file.write( textwrap.dedent(""" simple --install-option='--home=%s' INITools """ % home_simple)) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', reqs_file, expect_error=True, ) simple = test_simple / 'lib' / 'python' / 'simple' bad = test_simple / 'lib' / 'python' / 'initools' good = script.site_packages / 'initools' assert simple in result.files_created assert result.files_created[simple].dir assert bad not in result.files_created assert good in result.files_created assert result.files_created[good].dir
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_upgrade.py
import os import sys import textwrap import pytest from tests.lib import assert_all_changes, pyversion from tests.lib.local_repos import local_checkout def test_no_upgrade_unless_requested(script): """ No upgrade if not specifically requested. """ script.pip('install', 'INITools==0.1', expect_error=True) result = script.pip('install', 'INITools', expect_error=True) assert not result.files_created, ( 'pip install INITools upgraded when it should not have' ) def test_invalid_upgrade_strategy_causes_error(script): """ It errors out when the upgrade-strategy is an invalid/unrecognised one """ result = script.pip_install_local( '--upgrade', '--upgrade-strategy=bazinga', 'simple', expect_error=True ) assert result.returncode assert "invalid choice" in result.stderr def test_only_if_needed_does_not_upgrade_deps_when_satisfied(script): """ It doesn't upgrade a dependency if it already satisfies the requirements. """ script.pip_install_local('simple==2.0', expect_error=True) result = script.pip_install_local( '--upgrade', '--upgrade-strategy=only-if-needed', 'require_simple', expect_error=True ) assert ( (script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion) not in result.files_deleted ), "should have installed require_simple==1.0" assert ( (script.site_packages / 'simple-2.0-py%s.egg-info' % pyversion) not in result.files_deleted ), "should not have uninstalled simple==2.0" assert ( "Requirement already satisfied, skipping upgrade: simple" in result.stdout ), "did not print correct message for not-upgraded requirement" def test_only_if_needed_does_upgrade_deps_when_no_longer_satisfied(script): """ It does upgrade a dependency if it no longer satisfies the requirements. 
""" script.pip_install_local('simple==1.0', expect_error=True) result = script.pip_install_local( '--upgrade', '--upgrade-strategy=only-if-needed', 'require_simple', expect_error=True ) assert ( (script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion) not in result.files_deleted ), "should have installed require_simple==1.0" assert ( script.site_packages / 'simple-3.0-py%s.egg-info' % pyversion in result.files_created ), "should have installed simple==3.0" assert ( script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion in result.files_deleted ), "should have uninstalled simple==1.0" def test_eager_does_upgrade_dependecies_when_currently_satisfied(script): """ It does upgrade a dependency even if it already satisfies the requirements. """ script.pip_install_local('simple==2.0', expect_error=True) result = script.pip_install_local( '--upgrade', '--upgrade-strategy=eager', 'require_simple', expect_error=True ) assert ( (script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion) not in result.files_deleted ), "should have installed require_simple==1.0" assert ( (script.site_packages / 'simple-2.0-py%s.egg-info' % pyversion) in result.files_deleted ), "should have uninstalled simple==2.0" def test_eager_does_upgrade_dependecies_when_no_longer_satisfied(script): """ It does upgrade a dependency if it no longer satisfies the requirements. 
""" script.pip_install_local('simple==1.0', expect_error=True) result = script.pip_install_local( '--upgrade', '--upgrade-strategy=eager', 'require_simple', expect_error=True ) assert ( (script.site_packages / 'require_simple-1.0-py%s.egg-info' % pyversion) not in result.files_deleted ), "should have installed require_simple==1.0" assert ( script.site_packages / 'simple-3.0-py%s.egg-info' % pyversion in result.files_created ), "should have installed simple==3.0" assert ( script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion in result.files_deleted ), "should have uninstalled simple==1.0" @pytest.mark.network def test_upgrade_to_specific_version(script): """ It does upgrade to specific version requested. """ script.pip('install', 'INITools==0.1', expect_error=True) result = script.pip('install', 'INITools==0.2', expect_error=True) assert result.files_created, ( 'pip install with specific version did not upgrade' ) assert ( script.site_packages / 'INITools-0.1-py%s.egg-info' % pyversion in result.files_deleted ) assert ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion in result.files_created ) @pytest.mark.network def test_upgrade_if_requested(script): """ And it does upgrade if requested. """ script.pip('install', 'INITools==0.1', expect_error=True) result = script.pip('install', '--upgrade', 'INITools', expect_error=True) assert result.files_created, 'pip install --upgrade did not upgrade' assert ( script.site_packages / 'INITools-0.1-py%s.egg-info' % pyversion not in result.files_created ) def test_upgrade_with_newest_already_installed(script, data): """ If the newest version of a package is already installed, the package should not be reinstalled and the user should be informed. 
""" script.pip('install', '-f', data.find_links, '--no-index', 'simple') result = script.pip( 'install', '--upgrade', '-f', data.find_links, '--no-index', 'simple' ) assert not result.files_created, 'simple upgraded when it should not have' assert 'already up-to-date' in result.stdout, result.stdout @pytest.mark.network def test_upgrade_force_reinstall_newest(script): """ Force reinstallation of a package even if it is already at its newest version if --force-reinstall is supplied. """ result = script.pip('install', 'INITools') assert script.site_packages / 'initools' in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip( 'install', '--upgrade', '--force-reinstall', 'INITools' ) assert result2.files_updated, 'upgrade to INITools 0.3 failed' result3 = script.pip('uninstall', 'initools', '-y', expect_error=True) assert_all_changes(result, result3, [script.venv / 'build', 'cache']) @pytest.mark.network def test_uninstall_before_upgrade(script): """ Automatic uninstall-before-upgrade. """ result = script.pip('install', 'INITools==0.2', expect_error=True) assert script.site_packages / 'initools' in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('install', 'INITools==0.3', expect_error=True) assert result2.files_created, 'upgrade to INITools 0.3 failed' result3 = script.pip('uninstall', 'initools', '-y', expect_error=True) assert_all_changes(result, result3, [script.venv / 'build', 'cache']) @pytest.mark.network def test_uninstall_before_upgrade_from_url(script): """ Automatic uninstall-before-upgrade from URL. 
""" result = script.pip('install', 'INITools==0.2', expect_error=True) assert script.site_packages / 'initools' in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip( 'install', 'https://files.pythonhosted.org/packages/source/I/INITools/INITools-' '0.3.tar.gz', expect_error=True, ) assert result2.files_created, 'upgrade to INITools 0.3 failed' result3 = script.pip('uninstall', 'initools', '-y', expect_error=True) assert_all_changes(result, result3, [script.venv / 'build', 'cache']) @pytest.mark.network def test_upgrade_to_same_version_from_url(script): """ When installing from a URL the same version that is already installed, no need to uninstall and reinstall if --upgrade is not specified. """ result = script.pip('install', 'INITools==0.3', expect_error=True) assert script.site_packages / 'initools' in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip( 'install', 'https://files.pythonhosted.org/packages/source/I/INITools/INITools-' '0.3.tar.gz', expect_error=True, ) assert not result2.files_updated, 'INITools 0.3 reinstalled same version' result3 = script.pip('uninstall', 'initools', '-y', expect_error=True) assert_all_changes(result, result3, [script.venv / 'build', 'cache']) @pytest.mark.network def test_upgrade_from_reqs_file(script): """ Upgrade from a requirements file. 
""" script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\ PyLogo<0.4 # and something else to test out: INITools==0.3 """)) install_result = script.pip( 'install', '-r', script.scratch_path / 'test-req.txt' ) script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\ PyLogo # and something else to test out: INITools """)) script.pip( 'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt' ) uninstall_result = script.pip( 'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y' ) assert_all_changes( install_result, uninstall_result, [script.venv / 'build', 'cache', script.scratch / 'test-req.txt'], ) def test_uninstall_rollback(script, data): """ Test uninstall-rollback (using test package with a setup.py crafted to fail on install). """ result = script.pip( 'install', '-f', data.find_links, '--no-index', 'broken==0.1' ) assert script.site_packages / 'broken.py' in result.files_created, list( result.files_created.keys() ) result2 = script.pip( 'install', '-f', data.find_links, '--no-index', 'broken===0.2broken', expect_error=True, ) assert result2.returncode == 1, str(result2) assert script.run( 'python', '-c', "import broken; print(broken.VERSION)" ).stdout == '0.1\n' assert_all_changes( result.files_after, result2, [script.venv / 'build'], ) @pytest.mark.network def test_should_not_install_always_from_cache(script): """ If there is an old cached package, pip should download the newer version Related to issue #175 """ script.pip('install', 'INITools==0.2', expect_error=True) script.pip('uninstall', '-y', 'INITools') result = script.pip('install', 'INITools==0.1', expect_error=True) assert ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion not in result.files_created ) assert ( script.site_packages / 'INITools-0.1-py%s.egg-info' % pyversion in result.files_created ) @pytest.mark.network def test_install_with_ignoreinstalled_requested(script): """ Test old conflicting package is completely ignored """ 
script.pip('install', 'INITools==0.1', expect_error=True) result = script.pip('install', '-I', 'INITools==0.3', expect_error=True) assert result.files_created, 'pip install -I did not install' # both the old and new metadata should be present. assert os.path.exists( script.site_packages_path / 'INITools-0.1-py%s.egg-info' % pyversion ) assert os.path.exists( script.site_packages_path / 'INITools-0.3-py%s.egg-info' % pyversion ) @pytest.mark.network def test_upgrade_vcs_req_with_no_dists_found(script, tmpdir): """It can upgrade a VCS requirement that has no distributions otherwise.""" req = "%s#egg=pip-test-package" % local_checkout( "git+https://github.com/pypa/pip-test-package.git", tmpdir.join("cache"), ) script.pip("install", req) result = script.pip("install", "-U", req) assert not result.returncode @pytest.mark.network def test_upgrade_vcs_req_with_dist_found(script): """It can upgrade a VCS requirement that has distributions on the index.""" # TODO(pnasrat) Using local_checkout fails on windows - oddness with the # test path urls/git. req = ( "%s#egg=pretend" % ( "git+git://github.com/alex/pretend@e7f26ad7dbcb4a02a4995aade4" "743aad47656b27" ) ) script.pip("install", req, expect_stderr=True) result = script.pip("install", "-U", req, expect_stderr=True) assert "pypi.org" not in result.stdout, result.stdout class TestUpgradeDistributeToSetuptools(object): """ From pip1.4 to pip6, pip supported a set of "hacks" (see Issue #1122) to allow distribute to conflict with setuptools, so that the following would work to upgrade distribute: ``pip install -U setuptools`` In pip7, the hacks were removed. This test remains to at least confirm pip can upgrade distribute to setuptools using: ``pip install -U distribute`` The reason this works is that a final version of distribute (v0.7.3) was released that is simple wrapper with: install_requires=['setuptools>=0.7'] The test use a fixed set of packages from our test packages dir. 
Note that virtualenv-1.9.1 contains distribute-0.6.34 and virtualenv-1.10 contains setuptools-0.9.7 """ def prep_ve(self, script, version, pip_src, distribute=False): self.script = script self.script.pip_install_local('virtualenv==%s' % version) args = ['virtualenv', self.script.scratch_path / 'VE'] if distribute: args.insert(1, '--distribute') if version == "1.9.1" and not distribute: # setuptools 0.6 didn't support PYTHONDONTWRITEBYTECODE del self.script.environ["PYTHONDONTWRITEBYTECODE"] self.script.run(*args) if sys.platform == 'win32': bindir = "Scripts" else: bindir = "bin" self.ve_bin = self.script.scratch_path / 'VE' / bindir self.script.run(self.ve_bin / 'pip', 'uninstall', '-y', 'pip') self.script.run( self.ve_bin / 'python', 'setup.py', 'install', cwd=pip_src, expect_stderr=True, )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_user.py
""" tests specific to "pip install --user" """ import textwrap from os.path import curdir, isdir, isfile import pytest from tests.lib import pyversion from tests.lib.local_repos import local_checkout def _patch_dist_in_site_packages(virtualenv): # Since the tests are run from a virtualenv, and to avoid the "Will not # install to the usersite because it will lack sys.path precedence..." # error: Monkey patch `pip._internal.req.req_install.dist_in_site_packages` # so it's possible to install a conflicting distribution in the user site. virtualenv.sitecustomize = textwrap.dedent(""" def dist_in_site_packages(dist): return False from pip._internal.req import req_install req_install.dist_in_site_packages = dist_in_site_packages """) class Tests_UserSite: @pytest.mark.network def test_reset_env_system_site_packages_usersite(self, script): """ Check user site works as expected. """ script.pip('install', '--user', 'INITools==0.2') result = script.run( 'python', '-c', "import pkg_resources; print(pkg_resources.get_distribution" "('initools').project_name)", ) project_name = result.stdout.strip() assert 'INITools' == project_name, project_name @pytest.mark.network @pytest.mark.svn def test_install_subversion_usersite_editable_with_distribute( self, script, tmpdir): """ Test installing current directory ('.') into usersite after installing distribute """ result = script.pip( 'install', '--user', '-e', '%s#egg=initools' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ) ) result.assert_installed('INITools', use_user_site=True) def test_install_from_current_directory_into_usersite( self, script, data, with_wheel): """ Test installing current directory ('.') into usersite """ run_from = data.packages.join("FSPkg") result = script.pip( 'install', '-vvv', '--user', curdir, cwd=run_from, expect_error=False, ) fspkg_folder = script.user_site / 'fspkg' assert fspkg_folder in result.files_created, result.stdout dist_info_folder = ( 
script.user_site / 'FSPkg-0.1.dev0.dist-info' ) assert dist_info_folder in result.files_created @pytest.mark.incompatible_with_test_venv def test_install_user_venv_nositepkgs_fails(self, virtualenv, script, data): """ user install in virtualenv (with no system packages) fails with message """ # We can't use PYTHONNOUSERSITE, as it's not # honoured by virtualenv's custom site.py. virtualenv.user_site_packages = False run_from = data.packages.join("FSPkg") result = script.pip( 'install', '--user', curdir, cwd=run_from, expect_error=True, ) assert ( "Can not perform a '--user' install. User site-packages are not " "visible in this virtualenv." in result.stderr ) @pytest.mark.network def test_install_user_conflict_in_usersite(self, script): """ Test user install with conflict in usersite updates usersite. """ script.pip('install', '--user', 'INITools==0.3', '--no-binary=:all:') result2 = script.pip( 'install', '--user', 'INITools==0.1', '--no-binary=:all:') # usersite has 0.1 egg_info_folder = ( script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion ) initools_v3_file = ( # file only in 0.3 script.base_path / script.user_site / 'initools' / 'configparser.py' ) assert egg_info_folder in result2.files_created, str(result2) assert not isfile(initools_v3_file), initools_v3_file @pytest.mark.network def test_install_user_conflict_in_globalsite(self, virtualenv, script): """ Test user install with conflict in global site ignores site and installs to usersite """ _patch_dist_in_site_packages(virtualenv) script.pip('install', 'INITools==0.2', '--no-binary=:all:') result2 = script.pip( 'install', '--user', 'INITools==0.1', '--no-binary=:all:') # usersite has 0.1 egg_info_folder = ( script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion ) initools_folder = script.user_site / 'initools' assert egg_info_folder in result2.files_created, str(result2) assert initools_folder in result2.files_created, str(result2) # site still has 0.2 (can't look in result1; have to check) 
egg_info_folder = ( script.base_path / script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.base_path / script.site_packages / 'initools' assert isdir(egg_info_folder) assert isdir(initools_folder) @pytest.mark.network def test_upgrade_user_conflict_in_globalsite(self, virtualenv, script): """ Test user install/upgrade with conflict in global site ignores site and installs to usersite """ _patch_dist_in_site_packages(virtualenv) script.pip('install', 'INITools==0.2', '--no-binary=:all:') result2 = script.pip( 'install', '--user', '--upgrade', 'INITools', '--no-binary=:all:') # usersite has 0.3.1 egg_info_folder = ( script.user_site / 'INITools-0.3.1-py%s.egg-info' % pyversion ) initools_folder = script.user_site / 'initools' assert egg_info_folder in result2.files_created, str(result2) assert initools_folder in result2.files_created, str(result2) # site still has 0.2 (can't look in result1; have to check) egg_info_folder = ( script.base_path / script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.base_path / script.site_packages / 'initools' assert isdir(egg_info_folder), result2.stdout assert isdir(initools_folder) @pytest.mark.network def test_install_user_conflict_in_globalsite_and_usersite( self, virtualenv, script): """ Test user install with conflict in globalsite and usersite ignores global site and updates usersite. 
""" _patch_dist_in_site_packages(virtualenv) script.pip('install', 'INITools==0.2', '--no-binary=:all:') script.pip('install', '--user', 'INITools==0.3', '--no-binary=:all:') result3 = script.pip( 'install', '--user', 'INITools==0.1', '--no-binary=:all:') # usersite has 0.1 egg_info_folder = ( script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion ) initools_v3_file = ( # file only in 0.3 script.base_path / script.user_site / 'initools' / 'configparser.py' ) assert egg_info_folder in result3.files_created, str(result3) assert not isfile(initools_v3_file), initools_v3_file # site still has 0.2 (can't just look in result1; have to check) egg_info_folder = ( script.base_path / script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.base_path / script.site_packages / 'initools' assert isdir(egg_info_folder) assert isdir(initools_folder) @pytest.mark.network def test_install_user_in_global_virtualenv_with_conflict_fails( self, script): """ Test user install in --system-site-packages virtualenv with conflict in site fails. """ script.pip('install', 'INITools==0.2') result2 = script.pip( 'install', '--user', 'INITools==0.1', expect_error=True, ) resultp = script.run( 'python', '-c', "import pkg_resources; print(pkg_resources.get_distribution" "('initools').location)", ) dist_location = resultp.stdout.strip() assert ( "Will not install to the user site because it will lack sys.path " "precedence to %s in %s" % ('INITools', dist_location) in result2.stderr )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_vcs_git.py
import pytest from tests.lib import ( _change_test_package_version, _create_test_package, _test_path_to_file_url, pyversion, ) from tests.lib.git_submodule_helpers import ( _change_test_package_submodule, _create_test_package_with_submodule, _pull_in_submodule_changes_to_module, ) from tests.lib.local_repos import local_checkout def _get_editable_repo_dir(script, package_name): """ Return the repository directory for an editable install. """ return script.venv_path / 'src' / package_name def _get_editable_branch(script, package_name): """ Return the current branch of an editable install. """ repo_dir = _get_editable_repo_dir(script, package_name) result = script.run( 'git', 'rev-parse', '--abbrev-ref', 'HEAD', cwd=repo_dir ) return result.stdout.strip() def _get_branch_remote(script, package_name, branch): """ """ repo_dir = _get_editable_repo_dir(script, package_name) result = script.run( 'git', 'config', 'branch.{}.remote'.format(branch), cwd=repo_dir ) return result.stdout.strip() def _github_checkout(url_path, temp_dir, egg=None, scheme=None): """ Call local_checkout() with a GitHub URL, and return the resulting URL. Args: url_path: the string used to create the package URL by filling in the format string "git+{scheme}://github.com/{url_path}". temp_dir: the pytest tmpdir value. egg: an optional project name to append to the URL as the egg fragment, prior to returning. scheme: the scheme without the "git+" prefix. Defaults to "https". """ if scheme is None: scheme = 'https' url = 'git+{}://github.com/{}'.format(scheme, url_path) local_url = local_checkout(url, temp_dir.join('cache')) if egg is not None: local_url += '#egg={}'.format(egg) return local_url def _make_version_pkg_url(path, rev=None): """ Return a "git+file://" URL to the version_pkg test package. Args: path: a tests.lib.path.Path object pointing to a Git repository containing the version_pkg package. rev: an optional revision to install like a branch name, tag, or SHA. 
""" file_url = _test_path_to_file_url(path) url_rev = '' if rev is None else '@{}'.format(rev) url = 'git+{}{}#egg=version_pkg'.format(file_url, url_rev) return url def _install_version_pkg_only(script, path, rev=None, expect_stderr=False): """ Install the version_pkg package in editable mode (without returning the version). Args: path: a tests.lib.path.Path object pointing to a Git repository containing the package. rev: an optional revision to install like a branch name or tag. """ version_pkg_url = _make_version_pkg_url(path, rev=rev) script.pip('install', '-e', version_pkg_url, expect_stderr=expect_stderr) def _install_version_pkg(script, path, rev=None, expect_stderr=False): """ Install the version_pkg package in editable mode, and return the version installed. Args: path: a tests.lib.path.Path object pointing to a Git repository containing the package. rev: an optional revision to install like a branch name or tag. """ _install_version_pkg_only( script, path, rev=rev, expect_stderr=expect_stderr, ) result = script.run('version_pkg') version = result.stdout.strip() return version def test_git_install_again_after_changes(script): """ Test installing a repository a second time without specifying a revision, and after updates to the remote repository. This test also checks that no warning message like the following gets logged on the update: "Did not find branch or tag ..., assuming ref or revision." """ version_pkg_path = _create_test_package(script) version = _install_version_pkg(script, version_pkg_path) assert version == '0.1' _change_test_package_version(script, version_pkg_path) version = _install_version_pkg(script, version_pkg_path) assert version == 'some different version' def test_git_install_branch_again_after_branch_changes(script): """ Test installing a branch again after the branch is updated in the remote repository. 
""" version_pkg_path = _create_test_package(script) version = _install_version_pkg(script, version_pkg_path, rev='master') assert version == '0.1' _change_test_package_version(script, version_pkg_path) version = _install_version_pkg(script, version_pkg_path, rev='master') assert version == 'some different version' @pytest.mark.network def test_install_editable_from_git_with_https(script, tmpdir): """ Test cloning from Git with https. """ url_path = 'pypa/pip-test-package.git' local_url = _github_checkout(url_path, tmpdir, egg='pip-test-package') result = script.pip('install', '-e', local_url, expect_error=True) result.assert_installed('pip-test-package', with_files=['.git']) @pytest.mark.network def test_install_noneditable_git(script, tmpdir): """ Test installing from a non-editable git URL with a given tag. """ result = script.pip( 'install', 'git+https://github.com/pypa/pip-test-package.git' '@0.1.1#egg=pip-test-package' ) egg_info_folder = ( script.site_packages / 'pip_test_package-0.1.1-py%s.egg-info' % pyversion ) result.assert_installed('piptestpackage', without_egg_link=True, editable=False) assert egg_info_folder in result.files_created, str(result) def test_git_with_sha1_revisions(script): """ Git backend should be able to install from SHA1 revisions """ version_pkg_path = _create_test_package(script) _change_test_package_version(script, version_pkg_path) sha1 = script.run( 'git', 'rev-parse', 'HEAD~1', cwd=version_pkg_path, ).stdout.strip() version = _install_version_pkg( script, version_pkg_path, rev=sha1, expect_stderr=True, ) assert '0.1' == version def test_git_with_short_sha1_revisions(script): """ Git backend should be able to install from SHA1 revisions """ version_pkg_path = _create_test_package(script) _change_test_package_version(script, version_pkg_path) sha1 = script.run( 'git', 'rev-parse', 'HEAD~1', cwd=version_pkg_path, ).stdout.strip()[:7] version = _install_version_pkg( script, version_pkg_path, rev=sha1, expect_stderr=True, ) assert 
'0.1' == version def test_git_with_branch_name_as_revision(script): """ Git backend should be able to install from branch names """ version_pkg_path = _create_test_package(script) branch = 'test_branch' script.run( 'git', 'checkout', '-b', branch, expect_stderr=True, cwd=version_pkg_path, ) _change_test_package_version(script, version_pkg_path) version = _install_version_pkg(script, version_pkg_path, rev=branch) assert 'some different version' == version def test_git_with_tag_name_as_revision(script): """ Git backend should be able to install from tag names """ version_pkg_path = _create_test_package(script) script.run( 'git', 'tag', 'test_tag', expect_stderr=True, cwd=version_pkg_path, ) _change_test_package_version(script, version_pkg_path) version = _install_version_pkg(script, version_pkg_path, rev='test_tag') assert '0.1' == version def _add_ref(script, path, ref): """ Add a new ref to a repository at the given path. """ script.run('git', 'update-ref', ref, 'HEAD', expect_stderr=True, cwd=path) def test_git_install_ref(script): """ The Git backend should be able to install a ref with the first install. """ version_pkg_path = _create_test_package(script) _add_ref(script, version_pkg_path, 'refs/foo/bar') _change_test_package_version(script, version_pkg_path) version = _install_version_pkg( script, version_pkg_path, rev='refs/foo/bar', expect_stderr=True, ) assert '0.1' == version def test_git_install_then_install_ref(script): """ The Git backend should be able to install a ref after a package has already been installed. """ version_pkg_path = _create_test_package(script) _add_ref(script, version_pkg_path, 'refs/foo/bar') _change_test_package_version(script, version_pkg_path) version = _install_version_pkg( script, version_pkg_path, expect_stderr=True, ) assert 'some different version' == version # Now install the ref. 
version = _install_version_pkg( script, version_pkg_path, rev='refs/foo/bar', expect_stderr=True, ) assert '0.1' == version @pytest.mark.network def test_git_with_tag_name_and_update(script, tmpdir): """ Test cloning a git repository and updating to a different version. """ url_path = 'pypa/pip-test-package.git' local_url = _github_checkout(url_path, tmpdir, egg='pip-test-package') result = script.pip('install', '-e', local_url, expect_error=True) result.assert_installed('pip-test-package', with_files=['.git']) new_local_url = _github_checkout(url_path, tmpdir) new_local_url += '@0.1.2#egg=pip-test-package' result = script.pip( 'install', '--global-option=--version', '-e', new_local_url, expect_error=True, ) assert '0.1.2' in result.stdout @pytest.mark.network def test_git_branch_should_not_be_changed(script, tmpdir): """ Editable installations should not change branch related to issue #32 and #161 """ url_path = 'pypa/pip-test-package.git' local_url = _github_checkout(url_path, tmpdir, egg='pip-test-package') script.pip('install', '-e', local_url, expect_error=True) branch = _get_editable_branch(script, 'pip-test-package') assert 'master' == branch @pytest.mark.network def test_git_with_non_editable_unpacking(script, tmpdir): """ Test cloning a git repository from a non-editable URL with a given tag. 
""" url_path = 'pypa/pip-test-package.git@0.1.2#egg=pip-test-package' local_url = _github_checkout(url_path, tmpdir) result = script.pip( 'install', '--global-option=--version', local_url, expect_error=True, ) assert '0.1.2' in result.stdout @pytest.mark.network def test_git_with_editable_where_egg_contains_dev_string(script, tmpdir): """ Test cloning a git repository from an editable url which contains "dev" string """ url_path = 'dcramer/django-devserver.git' local_url = _github_checkout( url_path, tmpdir, egg='django-devserver', scheme='git', ) result = script.pip('install', '-e', local_url) result.assert_installed('django-devserver', with_files=['.git']) @pytest.mark.network def test_git_with_non_editable_where_egg_contains_dev_string(script, tmpdir): """ Test cloning a git repository from a non-editable url which contains "dev" string """ url_path = 'dcramer/django-devserver.git' local_url = _github_checkout( url_path, tmpdir, egg='django-devserver', scheme='git', ) result = script.pip('install', local_url) devserver_folder = script.site_packages / 'devserver' assert devserver_folder in result.files_created, str(result) def test_git_with_ambiguous_revs(script): """ Test git with two "names" (tag/branch) pointing to the same commit """ version_pkg_path = _create_test_package(script) version_pkg_url = _make_version_pkg_url(version_pkg_path, rev='0.1') script.run('git', 'tag', '0.1', cwd=version_pkg_path) result = script.pip('install', '-e', version_pkg_url) assert 'Could not find a tag or branch' not in result.stdout # it is 'version-pkg' instead of 'version_pkg' because # egg-link name is version-pkg.egg-link because it is a single .py module result.assert_installed('version-pkg', with_files=['.git']) def test_editable__no_revision(script): """ Test a basic install in editable mode specifying no revision. 
""" version_pkg_path = _create_test_package(script) _install_version_pkg_only(script, version_pkg_path) branch = _get_editable_branch(script, 'version-pkg') assert branch == 'master' remote = _get_branch_remote(script, 'version-pkg', 'master') assert remote == 'origin' def test_editable__branch_with_sha_same_as_default(script): """ Test installing in editable mode a branch whose sha matches the sha of the default branch, but is different from the default branch. """ version_pkg_path = _create_test_package(script) # Create a second branch with the same SHA. script.run( 'git', 'branch', 'develop', expect_stderr=True, cwd=version_pkg_path, ) _install_version_pkg_only( script, version_pkg_path, rev='develop', expect_stderr=True ) branch = _get_editable_branch(script, 'version-pkg') assert branch == 'develop' remote = _get_branch_remote(script, 'version-pkg', 'develop') assert remote == 'origin' def test_editable__branch_with_sha_different_from_default(script): """ Test installing in editable mode a branch whose sha is different from the sha of the default branch. """ version_pkg_path = _create_test_package(script) # Create a second branch. script.run( 'git', 'branch', 'develop', expect_stderr=True, cwd=version_pkg_path, ) # Add another commit to the master branch to give it a different sha. _change_test_package_version(script, version_pkg_path) version = _install_version_pkg( script, version_pkg_path, rev='develop', expect_stderr=True ) assert version == '0.1' branch = _get_editable_branch(script, 'version-pkg') assert branch == 'develop' remote = _get_branch_remote(script, 'version-pkg', 'develop') assert remote == 'origin' def test_editable__non_master_default_branch(script): """ Test the branch you get after an editable install from a remote repo with a non-master default branch. """ version_pkg_path = _create_test_package(script) # Change the default branch of the remote repo to a name that is # alphabetically after "master". 
script.run( 'git', 'checkout', '-b', 'release', expect_stderr=True, cwd=version_pkg_path, ) _install_version_pkg_only(script, version_pkg_path) branch = _get_editable_branch(script, 'version-pkg') assert branch == 'release' def test_reinstalling_works_with_editable_non_master_branch(script): """ Reinstalling an editable installation should not assume that the "master" branch exists. See https://github.com/pypa/pip/issues/4448. """ version_pkg_path = _create_test_package(script) # Switch the default branch to something other than 'master' script.run('git', 'branch', '-m', 'foobar', cwd=version_pkg_path) version = _install_version_pkg(script, version_pkg_path) assert '0.1' == version _change_test_package_version(script, version_pkg_path) version = _install_version_pkg(script, version_pkg_path) assert 'some different version' == version # TODO(pnasrat) fix all helpers to do right things with paths on windows. @pytest.mark.skipif("sys.platform == 'win32'") def test_check_submodule_addition(script): """ Submodules are pulled in on install and updated on upgrade. """ module_path, submodule_path = ( _create_test_package_with_submodule(script, rel_path='testpkg/static') ) install_result = script.pip( 'install', '-e', 'git+' + module_path + '#egg=version_pkg' ) assert ( script.venv / 'src/version-pkg/testpkg/static/testfile' in install_result.files_created ) _change_test_package_submodule(script, submodule_path) _pull_in_submodule_changes_to_module( script, module_path, rel_path='testpkg/static', ) # expect error because git may write to stderr update_result = script.pip( 'install', '-e', 'git+' + module_path + '#egg=version_pkg', '--upgrade', expect_error=True, ) assert ( script.venv / 'src/version-pkg/testpkg/static/testfile2' in update_result.files_created )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_vcs_svn.py
import pytest from mock import patch from pip._internal.vcs.subversion import Subversion @patch('pip._internal.vcs.call_subprocess') @pytest.mark.network def test_obtain_should_recognize_auth_info_url(call_subprocess_mock, script): svn = Subversion(url='svn+http://username:password@svn.example.com/') svn.obtain(script.scratch_path / 'test') assert call_subprocess_mock.call_args[0][0] == [ svn.name, 'checkout', '-q', '--username', 'username', '--password', 'password', 'http://svn.example.com/', script.scratch_path / 'test', ] @patch('pip._internal.vcs.call_subprocess') @pytest.mark.network def test_export_should_recognize_auth_info_url(call_subprocess_mock, script): svn = Subversion(url='svn+http://username:password@svn.example.com/') svn.export(script.scratch_path / 'test') assert call_subprocess_mock.call_args[0][0] == [ svn.name, 'export', '--username', 'username', '--password', 'password', 'http://svn.example.com/', script.scratch_path / 'test', ]
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_install_wheel.py
import distutils import glob import os import pytest from tests.lib.path import Path def test_install_from_future_wheel_version(script, data): """ Test installing a future wheel """ from tests.lib import TestFailure package = data.packages.join("futurewheel-3.0-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=True) with pytest.raises(TestFailure): result.assert_installed('futurewheel', without_egg_link=True, editable=False) package = data.packages.join("futurewheel-1.9-py2.py3-none-any.whl") result = script.pip( 'install', package, '--no-index', expect_error=False, expect_stderr=True, ) result.assert_installed('futurewheel', without_egg_link=True, editable=False) def test_install_from_broken_wheel(script, data): """ Test that installing a broken wheel fails properly """ from tests.lib import TestFailure package = data.packages.join("brokenwheel-1.0-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=True) with pytest.raises(TestFailure): result.assert_installed('futurewheel', without_egg_link=True, editable=False) def test_basic_install_from_wheel(script, data): """ Test installing from a wheel (that has a script) """ result = script.pip( 'install', 'has.script==1.0', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) dist_info_folder = script.site_packages / 'has.script-1.0.dist-info' assert dist_info_folder in result.files_created, (dist_info_folder, result.files_created, result.stdout) script_file = script.bin / 'script.py' assert script_file in result.files_created def test_basic_install_from_wheel_with_extras(script, data): """ Test installing from a wheel with extras. 
""" result = script.pip( 'install', 'complex-dist[simple]', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) dist_info_folder = script.site_packages / 'complex_dist-0.1.dist-info' assert dist_info_folder in result.files_created, (dist_info_folder, result.files_created, result.stdout) dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info' assert dist_info_folder in result.files_created, (dist_info_folder, result.files_created, result.stdout) def test_basic_install_from_wheel_file(script, data): """ Test installing directly from a wheel file. """ package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=False) dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info' assert dist_info_folder in result.files_created, (dist_info_folder, result.files_created, result.stdout) installer = dist_info_folder / 'INSTALLER' assert installer in result.files_created, (dist_info_folder, result.files_created, result.stdout) with open(script.base_path / installer, 'rb') as installer_file: installer_details = installer_file.read() assert installer_details == b'pip\n' installer_temp = dist_info_folder / 'INSTALLER.pip' assert installer_temp not in result.files_created, (dist_info_folder, result.files_created, result.stdout) # header installs are broke in pypy virtualenvs # https://github.com/pypa/virtualenv/issues/510 @pytest.mark.skipif("hasattr(sys, 'pypy_version_info')") def test_install_from_wheel_with_headers(script, data): """ Test installing from a wheel file with headers """ package = data.packages.join("headers.dist-0.1-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=False) dist_info_folder = script.site_packages / 'headers.dist-0.1.dist-info' assert dist_info_folder in result.files_created, (dist_info_folder, result.files_created, result.stdout) def test_install_wheel_with_target(script, data, with_wheel): """ 
Test installing a wheel using pip install --target """ target_dir = script.scratch_path / 'target' result = script.pip( 'install', 'simple.dist==0.1', '-t', target_dir, '--no-index', '--find-links=' + data.find_links, ) assert Path('scratch') / 'target' / 'simpledist' in result.files_created, ( str(result) ) def test_install_wheel_with_target_and_data_files(script, data, with_wheel): """ Test for issue #4092. It will be checked that a data_files specification in setup.py is handled correctly when a wheel is installed with the --target option. The setup() for the wheel 'prjwithdatafile-1.0-py2.py3-none-any.whl' is as follows :: setup( name='prjwithdatafile', version='1.0', packages=['prjwithdatafile'], data_files=[ (r'packages1', ['prjwithdatafile/README.txt']), (r'packages2', ['prjwithdatafile/README.txt']) ] ) """ target_dir = script.scratch_path / 'prjwithdatafile' package = data.packages.join("prjwithdatafile-1.0-py2.py3-none-any.whl") result = script.pip('install', package, '-t', target_dir, '--no-index', expect_error=False) assert (Path('scratch') / 'prjwithdatafile' / 'packages1' / 'README.txt' in result.files_created), str(result) assert (Path('scratch') / 'prjwithdatafile' / 'packages2' / 'README.txt' in result.files_created), str(result) assert (Path('scratch') / 'prjwithdatafile' / 'lib' / 'python' not in result.files_created), str(result) def test_install_wheel_with_root(script, data): """ Test installing a wheel using pip install --root """ root_dir = script.scratch_path / 'root' result = script.pip( 'install', 'simple.dist==0.1', '--root', root_dir, '--no-index', '--find-links=' + data.find_links, ) assert Path('scratch') / 'root' in result.files_created def test_install_wheel_with_prefix(script, data): """ Test installing a wheel using pip install --prefix """ prefix_dir = script.scratch_path / 'prefix' result = script.pip( 'install', 'simple.dist==0.1', '--prefix', prefix_dir, '--no-index', '--find-links=' + data.find_links, ) lib = 
distutils.sysconfig.get_python_lib(prefix=Path('scratch') / 'prefix') assert lib in result.files_created, str(result) def test_install_from_wheel_installs_deps(script, data): """ Test can install dependencies of wheels """ # 'requires_source' depends on the 'source' project package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl") result = script.pip( 'install', '--no-index', '--find-links', data.find_links, package, ) result.assert_installed('source', editable=False) def test_install_from_wheel_no_deps(script, data): """ Test --no-deps works with wheel installs """ # 'requires_source' depends on the 'source' project package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl") result = script.pip( 'install', '--no-index', '--find-links', data.find_links, '--no-deps', package, ) pkg_folder = script.site_packages / 'source' assert pkg_folder not in result.files_created def test_wheel_record_lines_in_deterministic_order(script, data): to_install = data.packages.join("simplewheel-1.0-py2.py3-none-any.whl") result = script.pip('install', to_install) dist_info_folder = script.site_packages / 'simplewheel-1.0.dist-info' record_path = dist_info_folder / 'RECORD' assert dist_info_folder in result.files_created, str(result) assert record_path in result.files_created, str(result) record_path = result.files_created[record_path].full record_lines = [ p for p in Path(record_path).read_text().split('\n') if p ] assert record_lines == sorted(record_lines) def test_install_user_wheel(script, data, with_wheel): """ Test user install from wheel (that has a script) """ result = script.pip( 'install', 'has.script==1.0', '--user', '--no-index', '--find-links=' + data.find_links, ) egg_info_folder = script.user_site / 'has.script-1.0.dist-info' assert egg_info_folder in result.files_created, str(result) script_file = script.user_bin / 'script.py' assert script_file in result.files_created, str(result) def test_install_from_wheel_gen_entrypoint(script, data): 
""" Test installing scripts (entry points are generated) """ result = script.pip( 'install', 'script.wheel1a==0.1', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) if os.name == 'nt': wrapper_file = script.bin / 't1.exe' else: wrapper_file = script.bin / 't1' assert wrapper_file in result.files_created if os.name != "nt": assert bool(os.access(script.base_path / wrapper_file, os.X_OK)) def test_install_from_wheel_gen_uppercase_entrypoint(script, data): """ Test installing scripts with uppercase letters in entry point names """ result = script.pip( 'install', 'console-scripts-uppercase==1.0', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) if os.name == 'nt': # Case probably doesn't make any difference on NT wrapper_file = script.bin / 'cmdName.exe' else: wrapper_file = script.bin / 'cmdName' assert wrapper_file in result.files_created if os.name != "nt": assert bool(os.access(script.base_path / wrapper_file, os.X_OK)) def test_install_from_wheel_with_legacy(script, data): """ Test installing scripts (legacy scripts are preserved) """ result = script.pip( 'install', 'script.wheel2a==0.1', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) legacy_file1 = script.bin / 'testscript1.bat' legacy_file2 = script.bin / 'testscript2' assert legacy_file1 in result.files_created assert legacy_file2 in result.files_created def test_install_from_wheel_no_setuptools_entrypoint(script, data): """ Test that when we generate scripts, any existing setuptools wrappers in the wheel are skipped. """ result = script.pip( 'install', 'script.wheel1==0.1', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) if os.name == 'nt': wrapper_file = script.bin / 't1.exe' else: wrapper_file = script.bin / 't1' wrapper_helper = script.bin / 't1-script.py' # The wheel has t1.exe and t1-script.py. We will be generating t1 or # t1.exe depending on the platform. 
So we check that the correct wrapper # is present and that the -script.py helper has been skipped. We can't # easily test that the wrapper from the wheel has been skipped / # overwritten without getting very platform-dependent, so omit that. assert wrapper_file in result.files_created assert wrapper_helper not in result.files_created def test_skipping_setuptools_doesnt_skip_legacy(script, data): """ Test installing scripts (legacy scripts are preserved even when we skip setuptools wrappers) """ result = script.pip( 'install', 'script.wheel2==0.1', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) legacy_file1 = script.bin / 'testscript1.bat' legacy_file2 = script.bin / 'testscript2' wrapper_helper = script.bin / 't1-script.py' assert legacy_file1 in result.files_created assert legacy_file2 in result.files_created assert wrapper_helper not in result.files_created def test_install_from_wheel_gui_entrypoint(script, data): """ Test installing scripts (gui entry points are generated) """ result = script.pip( 'install', 'script.wheel3==0.1', '--no-index', '--find-links=' + data.find_links, expect_error=False, ) if os.name == 'nt': wrapper_file = script.bin / 't1.exe' else: wrapper_file = script.bin / 't1' assert wrapper_file in result.files_created def test_wheel_compiles_pyc(script, data): """ Test installing from wheel with --compile on """ script.pip( "install", "--compile", "simple.dist==0.1", "--no-index", "--find-links=" + data.find_links ) # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "simpledist/__pycache__/__init__*.pyc" ) assert any(exists) def test_wheel_no_compiles_pyc(script, data): """ Test installing from wheel with --compile on """ script.pip( "install", "--no-compile", "simple.dist==0.1", "--no-index", "--find-links=" + data.find_links ) # There are many locations 
for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "simpledist/__pycache__/__init__*.pyc" ) assert not any(exists) def test_install_from_wheel_uninstalls_old_version(script, data): # regression test for https://github.com/pypa/pip/issues/1825 package = data.packages.join("simplewheel-1.0-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=True) package = data.packages.join("simplewheel-2.0-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index', expect_error=False) dist_info_folder = script.site_packages / 'simplewheel-2.0.dist-info' assert dist_info_folder in result.files_created dist_info_folder = script.site_packages / 'simplewheel-1.0.dist-info' assert dist_info_folder not in result.files_created def test_wheel_compile_syntax_error(script, data): package = data.packages.join("compilewheel-1.0-py2.py3-none-any.whl") result = script.pip('install', '--compile', package, '--no-index') assert 'yield from' not in result.stdout assert 'SyntaxError: ' not in result.stdout def test_wheel_install_with_no_cache_dir(script, tmpdir, data): """Check wheel installations work, even with no cache. """ package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl") result = script.pip('install', '--no-cache-dir', '--no-index', package) result.assert_installed('simpledist', editable=False)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_list.py
import json import os import pytest def test_basic_list(script, data): """ Test default behavior of list command without format specifier. """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list') assert 'simple 1.0' in result.stdout, str(result) assert 'simple2 3.0' in result.stdout, str(result) def test_verbose_flag(script, data): """ Test the list command with the '-v' option """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list', '-v', '--format=columns') assert 'Package' in result.stdout, str(result) assert 'Version' in result.stdout, str(result) assert 'Location' in result.stdout, str(result) assert 'Installer' in result.stdout, str(result) assert 'simple 1.0' in result.stdout, str(result) assert 'simple2 3.0' in result.stdout, str(result) def test_columns_flag(script, data): """ Test the list command with the '--format=columns' option """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list', '--format=columns') assert 'Package' in result.stdout, str(result) assert 'Version' in result.stdout, str(result) assert 'simple (1.0)' not in result.stdout, str(result) assert 'simple 1.0' in result.stdout, str(result) assert 'simple2 3.0' in result.stdout, str(result) def test_format_priority(script, data): """ Test that latest format has priority over previous ones. 
""" script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list', '--format=columns', '--format=freeze', expect_stderr=True) assert 'simple==1.0' in result.stdout, str(result) assert 'simple2==3.0' in result.stdout, str(result) assert 'simple 1.0' not in result.stdout, str(result) assert 'simple2 3.0' not in result.stdout, str(result) result = script.pip('list', '--format=freeze', '--format=columns') assert 'Package' in result.stdout, str(result) assert 'Version' in result.stdout, str(result) assert 'simple==1.0' not in result.stdout, str(result) assert 'simple2==3.0' not in result.stdout, str(result) assert 'simple 1.0' in result.stdout, str(result) assert 'simple2 3.0' in result.stdout, str(result) def test_local_flag(script, data): """ Test the behavior of --local flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip('list', '--local', '--format=json') assert {"name": "simple", "version": "1.0"} in json.loads(result.stdout) def test_local_columns_flag(script, data): """ Test the behavior of --local --format=columns flags in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip('list', '--local', '--format=columns') assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'simple (1.0)' not in result.stdout assert 'simple 1.0' in result.stdout, str(result) @pytest.mark.network def test_user_flag(script, data): """ Test the behavior of --user flag in the list command """ script.pip('download', 'setuptools', 'wheel', '-d', data.packages) script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') script.pip('install', '-f', data.find_links, '--no-index', '--user', 'simple2==2.0') result = script.pip('list', '--user', '--format=json') assert {"name": "simple", "version": "1.0"} \ not in json.loads(result.stdout) assert {"name": "simple2", "version": 
"2.0"} in json.loads(result.stdout) @pytest.mark.network def test_user_columns_flag(script, data): """ Test the behavior of --user --format=columns flags in the list command """ script.pip('download', 'setuptools', 'wheel', '-d', data.packages) script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') script.pip('install', '-f', data.find_links, '--no-index', '--user', 'simple2==2.0') result = script.pip('list', '--user', '--format=columns') assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'simple2 (2.0)' not in result.stdout assert 'simple2 2.0' in result.stdout, str(result) @pytest.mark.network def test_uptodate_flag(script, data): """ Test the behavior of --uptodate flag in the list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--uptodate', '--format=json', ) assert {"name": "simple", "version": "1.0"} \ not in json.loads(result.stdout) # 3.0 is latest assert {"name": "pip-test-package", "version": "0.1.1"} \ in json.loads(result.stdout) # editables included assert {"name": "simple2", "version": "3.0"} in json.loads(result.stdout) @pytest.mark.network def test_uptodate_columns_flag(script, data): """ Test the behavior of --uptodate --format=columns flag in the list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--uptodate', '--format=columns', ) assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'Location' in result.stdout # editables included assert 'pip-test-package (0.1.1,' not in result.stdout assert 'pip-test-package 0.1.1' in result.stdout, 
str(result) assert 'simple2 3.0' in result.stdout, str(result) @pytest.mark.network def test_outdated_flag(script, data): """ Test the behavior of --outdated flag in the list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', 'simplewheel==1.0', ) script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git' '@0.1#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--outdated', '--format=json', ) assert {"name": "simple", "version": "1.0", "latest_version": "3.0", "latest_filetype": "sdist"} \ in json.loads(result.stdout) assert dict(name="simplewheel", version="1.0", latest_version="2.0", latest_filetype="wheel") \ in json.loads(result.stdout) assert dict(name="pip-test-package", version="0.1", latest_version="0.1.1", latest_filetype="sdist") \ in json.loads(result.stdout) assert "simple2" not in {p["name"] for p in json.loads(result.stdout)} @pytest.mark.network def test_outdated_columns_flag(script, data): """ Test the behavior of --outdated --format=columns flag in the list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', 'simplewheel==1.0', ) script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git' '@0.1#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--outdated', '--format=columns', ) assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'Latest' in result.stdout assert 'Type' in result.stdout assert 'simple (1.0) - Latest: 3.0 [sdist]' not in result.stdout assert 'simplewheel (1.0) - Latest: 2.0 [wheel]' not in result.stdout assert 'simple 1.0 3.0 sdist' in result.stdout, ( str(result) ) assert 'simplewheel 1.0 2.0 wheel' in result.stdout, ( str(result) ) assert 'simple2' not in result.stdout, str(result) # 3.0 is latest @pytest.mark.network def test_editables_flag(script, data): """ Test the behavior of 
--editables flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip('list', '--editable', '--format=json') result2 = script.pip('list', '--editable') assert {"name": "simple", "version": "1.0"} \ not in json.loads(result.stdout) assert os.path.join('src', 'pip-test-package') in result2.stdout @pytest.mark.network def test_exclude_editable_flag(script, data): """ Test the behavior of --editables flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip('list', '--exclude-editable', '--format=json') assert {"name": "simple", "version": "1.0"} in json.loads(result.stdout) assert "pip-test-package" \ not in {p["name"] for p in json.loads(result.stdout)} @pytest.mark.network def test_editables_columns_flag(script, data): """ Test the behavior of --editables flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip('list', '--editable', '--format=columns') assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'Location' in result.stdout assert os.path.join('src', 'pip-test-package') in result.stdout, ( str(result) ) @pytest.mark.network def test_uptodate_editables_flag(script, data): """ test the behavior of --editable --uptodate flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--editable', 
'--uptodate', ) assert 'simple' not in result.stdout assert os.path.join('src', 'pip-test-package') in result.stdout, ( str(result) ) @pytest.mark.network def test_uptodate_editables_columns_flag(script, data): """ test the behavior of --editable --uptodate --format=columns flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--editable', '--uptodate', '--format=columns', ) assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'Location' in result.stdout assert os.path.join('src', 'pip-test-package') in result.stdout, ( str(result) ) @pytest.mark.network def test_outdated_editables_flag(script, data): """ test the behavior of --editable --outdated flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git' '@0.1#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--editable', '--outdated', ) assert 'simple' not in result.stdout assert os.path.join('src', 'pip-test-package') in result.stdout @pytest.mark.network def test_outdated_editables_columns_flag(script, data): """ test the behavior of --editable --outdated flag in the list command """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') result = script.pip( 'install', '-e', 'git+https://github.com/pypa/pip-test-package.git' '@0.1#egg=pip-test-package' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--editable', '--outdated', '--format=columns', ) assert 'Package' in result.stdout assert 'Version' in result.stdout assert 'Location' in result.stdout assert os.path.join('src', 'pip-test-package') in result.stdout, ( str(result) ) def 
test_outdated_not_required_flag(script, data): """ test the behavior of --outdated --not-required flag in the list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==2.0', 'require_simple==1.0' ) result = script.pip( 'list', '-f', data.find_links, '--no-index', '--outdated', '--not-required', '--format=json', ) assert [] == json.loads(result.stdout) def test_outdated_pre(script, data): script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') # Let's build a fake wheelhouse script.scratch_path.join("wheelhouse").mkdir() wheelhouse_path = script.scratch_path / 'wheelhouse' wheelhouse_path.join('simple-1.1-py2.py3-none-any.whl').write('') wheelhouse_path.join('simple-2.0.dev0-py2.py3-none-any.whl').write('') result = script.pip( 'list', '--no-index', '--find-links', wheelhouse_path, '--format=json', ) assert {"name": "simple", "version": "1.0"} in json.loads(result.stdout) result = script.pip( 'list', '--no-index', '--find-links', wheelhouse_path, '--outdated', '--format=json', ) assert {"name": "simple", "version": "1.0", "latest_version": "1.1", "latest_filetype": "wheel"} \ in json.loads(result.stdout) result_pre = script.pip('list', '--no-index', '--find-links', wheelhouse_path, '--outdated', '--pre', '--format=json') assert {"name": "simple", "version": "1.0", "latest_version": "2.0.dev0", "latest_filetype": "wheel"} \ in json.loads(result_pre.stdout) def test_outdated_formats(script, data): """ Test of different outdated formats """ script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0') # Let's build a fake wheelhouse script.scratch_path.join("wheelhouse").mkdir() wheelhouse_path = script.scratch_path / 'wheelhouse' wheelhouse_path.join('simple-1.1-py2.py3-none-any.whl').write('') result = script.pip( 'list', '--no-index', '--find-links', wheelhouse_path, '--format=freeze', ) assert 'simple==1.0' in result.stdout # Check columns result = script.pip( 'list', '--no-index', '--find-links', 
wheelhouse_path, '--outdated', '--format=columns', ) assert 'Package Version Latest Type' in result.stdout assert 'simple 1.0 1.1 wheel' in result.stdout # Check freeze result = script.pip( 'list', '--no-index', '--find-links', wheelhouse_path, '--outdated', '--format=freeze', ) assert 'simple==1.0' in result.stdout # Check json result = script.pip( 'list', '--no-index', '--find-links', wheelhouse_path, '--outdated', '--format=json', ) data = json.loads(result.stdout) assert data == [{'name': 'simple', 'version': '1.0', 'latest_version': '1.1', 'latest_filetype': 'wheel'}] def test_not_required_flag(script, data): script.pip( 'install', '-f', data.find_links, '--no-index', 'TopoRequires4' ) result = script.pip('list', '--not-required', expect_stderr=True) assert 'TopoRequires4 ' in result.stdout, str(result) assert 'TopoRequires ' not in result.stdout assert 'TopoRequires2 ' not in result.stdout assert 'TopoRequires3 ' not in result.stdout def test_list_freeze(script, data): """ Test freeze formatting of list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list', '--format=freeze') assert 'simple==1.0' in result.stdout, str(result) assert 'simple2==3.0' in result.stdout, str(result) def test_list_json(script, data): """ Test json formatting of list command """ script.pip( 'install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0', ) result = script.pip('list', '--format=json') data = json.loads(result.stdout) assert {'name': 'simple', 'version': '1.0'} in data assert {'name': 'simple2', 'version': '3.0'} in data
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_no_color.py
""" Test specific for the --no-color option """ import os import subprocess import pytest def test_no_color(script): """Ensure colour output disabled when --no-color is passed. """ # Using 'script' in this test allows for transparently testing pip's output # since pip is smart enough to disable colour output when piped, which is # not the behaviour we want to be testing here. # # On the other hand, this test is non-portable due to the options passed to # 'script' and well as the mere use of the same. # # This test will stay until someone has the time to rewrite it. command = ( 'script --flush --quiet --return /tmp/pip-test-no-color.txt ' '--command "pip uninstall {} noSuchPackage"' ) def get_run_output(option): cmd = command.format(option) proc = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) proc.communicate() if proc.returncode: pytest.skip("Unable to capture output using script: " + cmd) try: with open("/tmp/pip-test-no-color.txt", "r") as output_file: retval = output_file.read() return retval finally: os.unlink("/tmp/pip-test-no-color.txt") assert "\x1b" in get_run_output(option=""), "Expected color in output" assert "\x1b" not in get_run_output(option="--no-color"), \ "Expected no color in output"
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_pep517.py
from pip._vendor import pytoml from pip._internal.build_env import BuildEnvironment from pip._internal.download import PipSession from pip._internal.index import PackageFinder from pip._internal.req import InstallRequirement from tests.lib import path_to_url def make_project(tmpdir, requires=[], backend=None): project_dir = (tmpdir / 'project').mkdir() buildsys = {'requires': requires} if backend: buildsys['build-backend'] = backend data = pytoml.dumps({'build-system': buildsys}) project_dir.join('pyproject.toml').write(data) return project_dir def test_backend(tmpdir, data): """Check we can call a requirement's backend successfully""" project_dir = make_project(tmpdir, backend="dummy_backend") req = InstallRequirement(None, None, source_dir=project_dir) req.load_pyproject_toml() env = BuildEnvironment() finder = PackageFinder([data.backends], [], session=PipSession()) env.install_requirements(finder, ["dummy_backend"], 'normal', "Installing") conflicting, missing = env.check_requirements(["dummy_backend"]) assert not conflicting and not missing assert hasattr(req.pep517_backend, 'build_wheel') with env: assert req.pep517_backend.build_wheel("dir") == "Backend called" def test_pep517_install(script, tmpdir, data): """Check we can build with a custom backend""" project_dir = make_project( tmpdir, requires=['test_backend'], backend="test_backend" ) result = script.pip( 'install', '--no-index', '-f', data.backends, project_dir ) result.assert_installed('project', editable=False) def test_pep517_install_with_reqs(script, tmpdir, data): """Backend generated requirements are installed in the build env""" project_dir = make_project( tmpdir, requires=['test_backend'], backend="test_backend" ) project_dir.join("backend_reqs.txt").write("simplewheel") result = script.pip( 'install', '--no-index', '-f', data.backends, '-f', data.packages, project_dir ) result.assert_installed('project', editable=False) def test_no_use_pep517_without_setup_py(script, tmpdir, data): """Using 
--no-use-pep517 requires setup.py""" project_dir = make_project( tmpdir, requires=['test_backend'], backend="test_backend" ) result = script.pip( 'install', '--no-index', '--no-use-pep517', '-f', data.backends, project_dir, expect_error=True ) assert 'project does not have a setup.py' in result.stderr def test_conflicting_pep517_backend_requirements(script, tmpdir, data): project_dir = make_project( tmpdir, requires=['test_backend', 'simplewheel==1.0'], backend="test_backend" ) project_dir.join("backend_reqs.txt").write("simplewheel==2.0") result = script.pip( 'install', '--no-index', '-f', data.backends, '-f', data.packages, project_dir, expect_error=True ) assert ( result.returncode != 0 and ('Some build dependencies for %s conflict with the backend ' 'dependencies: simplewheel==1.0 is incompatible with ' 'simplewheel==2.0.' % path_to_url(project_dir)) in result.stderr ), str(result) def test_pep517_backend_requirements_already_satisfied(script, tmpdir, data): project_dir = make_project( tmpdir, requires=['test_backend', 'simplewheel==1.0'], backend="test_backend" ) project_dir.join("backend_reqs.txt").write("simplewheel") result = script.pip( 'install', '--no-index', '-f', data.backends, '-f', data.packages, project_dir, ) assert 'Installing backend dependencies:' not in result.stdout def test_pep517_install_with_no_cache_dir(script, tmpdir, data): """Check builds with a custom backends work, even with no cache. 
""" project_dir = make_project( tmpdir, requires=['test_backend'], backend="test_backend" ) result = script.pip( 'install', '--no-cache-dir', '--no-index', '-f', data.backends, project_dir, ) result.assert_installed('project', editable=False) def make_pyproject_with_setup(tmpdir, build_system=True, set_backend=True): project_dir = (tmpdir / 'project').mkdir() setup_script = ( 'from setuptools import setup\n' ) expect_script_dir_on_path = True if build_system: buildsys = { 'requires': ['setuptools', 'wheel'], } if set_backend: buildsys['build-backend'] = 'setuptools.build_meta' expect_script_dir_on_path = False project_data = pytoml.dumps({'build-system': buildsys}) else: project_data = '' if expect_script_dir_on_path: setup_script += ( 'from pep517_test import __version__\n' ) else: setup_script += ( 'try:\n' ' import pep517_test\n' 'except ImportError:\n' ' pass\n' 'else:\n' ' raise RuntimeError("Source dir incorrectly on sys.path")\n' ) setup_script += ( 'setup(name="pep517_test", version="0.1", packages=["pep517_test"])' ) project_dir.join('pyproject.toml').write(project_data) project_dir.join('setup.py').write(setup_script) package_dir = (project_dir / "pep517_test").mkdir() package_dir.join('__init__.py').write('__version__ = "0.1"') return project_dir, "pep517_test" def test_no_build_system_section(script, tmpdir, data, common_wheels): """Check builds with setup.py, pyproject.toml, but no build-system section. """ project_dir, name = make_pyproject_with_setup(tmpdir, build_system=False) result = script.pip( 'install', '--no-cache-dir', '--no-index', '-f', common_wheels, project_dir, ) result.assert_installed(name, editable=False) def test_no_build_backend_entry(script, tmpdir, data, common_wheels): """Check builds with setup.py, pyproject.toml, but no build-backend entry. 
""" project_dir, name = make_pyproject_with_setup(tmpdir, set_backend=False) result = script.pip( 'install', '--no-cache-dir', '--no-index', '-f', common_wheels, project_dir, ) result.assert_installed(name, editable=False) def test_explicit_setuptools_backend(script, tmpdir, data, common_wheels): """Check builds with setup.py, pyproject.toml, and a build-backend entry. """ project_dir, name = make_pyproject_with_setup(tmpdir) result = script.pip( 'install', '--no-cache-dir', '--no-index', '-f', common_wheels, project_dir, ) result.assert_installed(name, editable=False) def test_pep517_and_build_options(script, tmpdir, data, common_wheels): """Backend generated requirements are installed in the build env""" project_dir, name = make_pyproject_with_setup(tmpdir) result = script.pip( 'wheel', '--wheel-dir', tmpdir, '--build-option', 'foo', '-f', common_wheels, project_dir, expect_error=True ) assert 'Cannot build wheel' in result.stderr assert 'when --build-options is present' in result.stderr
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_requests.py
import pytest @pytest.mark.skipif def test_timeout(script): result = script.pip( "--timeout", "0.01", "install", "-vvv", "INITools", expect_error=True, ) assert ( "Could not fetch URL https://pypi.org/simple/INITools/: " "timed out" in result.stdout ) assert ( "Could not fetch URL https://pypi.org/simple/: " "timed out" in result.stdout )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_search.py
import logging import pytest from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS from pip._internal.commands.search import ( SearchCommand, highest_version, print_results, transform_hits, ) from tests.lib import pyversion if pyversion >= '3': VERBOSE_FALSE = False else: VERBOSE_FALSE = 0 def test_version_compare(): """ Test version comparison. """ assert highest_version(['1.0', '2.0', '0.1']) == '2.0' assert highest_version(['1.0a1', '1.0']) == '1.0' def test_pypi_xml_transformation(): """ Test transformation of data structures (PyPI xmlrpc to custom list). """ pypi_hits = [ { 'name': 'foo', 'summary': 'foo summary', 'version': '1.0', }, { 'name': 'foo', 'summary': 'foo summary v2', 'version': '2.0', }, { '_pypi_ordering': 50, 'name': 'bar', 'summary': 'bar summary', 'version': '1.0', }, ] expected = [ { 'versions': ['1.0', '2.0'], 'name': 'foo', 'summary': 'foo summary v2', }, { 'versions': ['1.0'], 'name': 'bar', 'summary': 'bar summary', }, ] assert transform_hits(pypi_hits) == expected @pytest.mark.network def test_basic_search(script): """ End to end test of search command. """ output = script.pip('search', 'pip') assert ( 'The PyPA recommended tool for installing ' 'Python packages.' in output.stdout ) @pytest.mark.network @pytest.mark.skip( reason=("Warehouse search behavior is different and no longer returns " "multiple results. See " "https://github.com/pypa/warehouse/issues/3717 for more " "information."), ) def test_multiple_search(script): """ Test searching for multiple packages at once. """ output = script.pip('search', 'pip', 'INITools') assert ( 'The PyPA recommended tool for installing ' 'Python packages.' in output.stdout ) assert 'Tools for parsing and using INI-style files' in output.stdout def test_search_missing_argument(script): """ Test missing required argument for search """ result = script.pip('search', expect_error=True) assert 'ERROR: Missing required argument (search query).' 
in result.stderr @pytest.mark.network def test_run_method_should_return_success_when_find_packages(): """ Test SearchCommand.run for found package """ command = SearchCommand() cmdline = "--index=https://pypi.org/pypi pip" options, args = command.parse_args(cmdline.split()) status = command.run(options, args) assert status == SUCCESS @pytest.mark.network def test_run_method_should_return_no_matches_found_when_does_not_find_pkgs(): """ Test SearchCommand.run for no matches """ command = SearchCommand() cmdline = "--index=https://pypi.org/pypi nonexistentpackage" options, args = command.parse_args(cmdline.split()) status = command.run(options, args) assert status == NO_MATCHES_FOUND @pytest.mark.network def test_search_should_exit_status_code_zero_when_find_packages(script): """ Test search exit status code for package found """ result = script.pip('search', 'pip') assert result.returncode == SUCCESS @pytest.mark.network def test_search_exit_status_code_when_finds_no_package(script): """ Test search exit status code for no matches """ result = script.pip('search', 'nonexistentpackage', expect_error=True) assert result.returncode == NO_MATCHES_FOUND, result.returncode def test_search_print_results_should_contain_latest_versions(caplog): """ Test that printed search results contain the latest package versions """ hits = [ { 'name': 'testlib1', 'summary': 'Test library 1.', 'versions': ['1.0.5', '1.0.3'] }, { 'name': 'testlib2', 'summary': 'Test library 1.', 'versions': ['2.0.1', '2.0.3'] } ] with caplog.at_level(logging.INFO): print_results(hits) log_messages = sorted([r.getMessage() for r in caplog.records]) assert log_messages[0].startswith('testlib1 (1.0.5)') assert log_messages[1].startswith('testlib2 (2.0.3)')
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_show.py
import os import re import pytest from pip import __version__ from pip._internal.commands.show import search_packages_info def test_basic_show(script): """ Test end to end test for show command. """ result = script.pip('show', 'pip') lines = result.stdout.splitlines() assert len(lines) == 10 assert 'Name: pip' in lines assert 'Version: %s' % __version__ in lines assert any(line.startswith('Location: ') for line in lines) assert 'Requires: ' in lines def test_show_with_files_not_found(script, data): """ Test for show command with installed files listing enabled and installed-files.txt not found. """ editable = data.packages.join('SetupPyUTF8') script.pip('install', '-e', editable) result = script.pip('show', '-f', 'SetupPyUTF8') lines = result.stdout.splitlines() assert len(lines) == 12 assert 'Name: SetupPyUTF8' in lines assert 'Version: 0.0.0' in lines assert any(line.startswith('Location: ') for line in lines) assert 'Requires: ' in lines assert 'Files:' in lines assert 'Cannot locate installed-files.txt' in lines def test_show_with_files_from_wheel(script, data): """ Test that a wheel's files can be listed """ wheel_file = data.packages.join('simple.dist-0.1-py2.py3-none-any.whl') script.pip('install', '--no-index', wheel_file) result = script.pip('show', '-f', 'simple.dist') lines = result.stdout.splitlines() assert 'Name: simple.dist' in lines assert 'Cannot locate installed-files.txt' not in lines[6], lines[6] assert re.search(r"Files:\n( .+\n)+", result.stdout) @pytest.mark.network def test_show_with_all_files(script): """ Test listing all files in the show command. """ script.pip('install', 'initools==0.2') result = script.pip('show', '--files', 'initools') lines = result.stdout.splitlines() assert 'Cannot locate installed-files.txt' not in lines[6], lines[6] assert re.search(r"Files:\n( .+\n)+", result.stdout) def test_missing_argument(script): """ Test show command with no arguments. 
""" result = script.pip('show', expect_error=True) assert 'ERROR: Please provide a package name or names.' in result.stderr def test_find_package_not_found(): """ Test trying to get info about a nonexistent package. """ result = search_packages_info(['abcd3']) assert len(list(result)) == 0 def test_search_any_case(): """ Search for a package in any case. """ result = list(search_packages_info(['PIP'])) assert len(result) == 1 assert 'pip' == result[0]['name'] def test_more_than_one_package(): """ Search for more than one package. """ result = list(search_packages_info(['Pip', 'pytest', 'Virtualenv'])) assert len(result) == 3 def test_show_verbose_with_classifiers(script): """ Test that classifiers can be listed """ result = script.pip('show', 'pip', '--verbose') lines = result.stdout.splitlines() assert 'Name: pip' in lines assert re.search(r"Classifiers:\n( .+\n)+", result.stdout) assert "Intended Audience :: Developers" in result.stdout def test_show_verbose_installer(script, data): """ Test that the installer is shown (this currently needs a wheel install) """ wheel_file = data.packages.join('simple.dist-0.1-py2.py3-none-any.whl') script.pip('install', '--no-index', wheel_file) result = script.pip('show', '--verbose', 'simple.dist') lines = result.stdout.splitlines() assert 'Name: simple.dist' in lines assert 'Installer: pip' in lines def test_show_verbose(script): """ Test end to end test for verbose show command. 
""" result = script.pip('show', '--verbose', 'pip') lines = result.stdout.splitlines() assert any(line.startswith('Metadata-Version: ') for line in lines) assert any(line.startswith('Installer: ') for line in lines) assert 'Entry-points:' in lines assert 'Classifiers:' in lines def test_all_fields(script): """ Test that all the fields are present """ result = script.pip('show', 'pip') lines = result.stdout.splitlines() expected = {'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'License', 'Location', 'Requires', 'Required-by'} actual = {re.sub(':.*$', '', line) for line in lines} assert actual == expected def test_pip_show_is_short(script): """ Test that pip show stays short """ result = script.pip('show', 'pip') lines = result.stdout.splitlines() assert len(lines) <= 10 def test_pip_show_divider(script, data): """ Expect a divider between packages """ script.pip('install', 'pip-test-package', '--no-index', '-f', data.packages) result = script.pip('show', 'pip', 'pip-test-package') lines = result.stdout.splitlines() assert "---" in lines def test_package_name_is_canonicalized(script, data): script.pip('install', 'pip-test-package', '--no-index', '-f', data.packages) dash_show_result = script.pip('show', 'pip-test-package') underscore_upper_show_result = script.pip('show', 'pip-test_Package') assert underscore_upper_show_result.returncode == 0 assert underscore_upper_show_result.stdout == dash_show_result.stdout def test_show_required_by_packages(script, data): """ Test that installed packages that depend on this package are shown """ editable_path = os.path.join(data.src, 'requires_simple') script.pip( 'install', '--no-index', '-f', data.find_links, editable_path ) result = script.pip('show', 'simple') lines = result.stdout.splitlines() assert 'Name: simple' in lines assert 'Required-by: requires-simple' in lines
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_uninstall.py
from __future__ import with_statement import json import logging import os import sys import textwrap from os.path import join, normpath from tempfile import mkdtemp import pretend import pytest from pip._internal.req.constructors import install_req_from_line from pip._internal.utils.misc import rmtree from tests.lib import assert_all_changes, create_test_package_with_setup from tests.lib.local_repos import local_checkout, local_repo @pytest.mark.network def test_basic_uninstall(script): """ Test basic install and uninstall. """ result = script.pip('install', 'INITools==0.2') assert join(script.site_packages, 'initools') in result.files_created, ( sorted(result.files_created.keys()) ) # the import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import initools") result2 = script.pip('uninstall', 'INITools', '-y') assert_all_changes(result, result2, [script.venv / 'build', 'cache']) def test_basic_uninstall_distutils(script): """ Test basic install and uninstall. """ script.scratch_path.join("distutils_install").mkdir() pkg_path = script.scratch_path / 'distutils_install' pkg_path.join("setup.py").write(textwrap.dedent(""" from distutils.core import setup setup( name='distutils-install', version='0.1', ) """)) result = script.run('python', pkg_path / 'setup.py', 'install') result = script.pip('list', '--format=json') assert {"name": "distutils-install", "version": "0.1"} \ in json.loads(result.stdout) result = script.pip('uninstall', 'distutils_install', '-y', expect_stderr=True, expect_error=True) assert ( "Cannot uninstall 'distutils-install'. It is a distutils installed " "project and thus we cannot accurately determine which files belong " "to it which would lead to only a partial uninstall." ) in result.stderr @pytest.mark.network def test_basic_uninstall_with_scripts(script): """ Uninstall an easy_installed package with scripts. 
""" result = script.easy_install('PyLogo', expect_stderr=True) easy_install_pth = script.site_packages / 'easy-install.pth' pylogo = sys.platform == 'win32' and 'pylogo' or 'PyLogo' assert(pylogo in result.files_updated[easy_install_pth].bytes) result2 = script.pip('uninstall', 'pylogo', '-y') assert_all_changes( result, result2, [script.venv / 'build', 'cache', easy_install_pth], ) @pytest.mark.network def test_uninstall_easy_install_after_import(script): """ Uninstall an easy_installed package after it's been imported """ result = script.easy_install('--always-unzip', 'INITools==0.2', expect_stderr=True) # the import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import initools") result2 = script.pip('uninstall', 'INITools', '-y') assert_all_changes( result, result2, [ script.venv / 'build', 'cache', script.site_packages / 'easy-install.pth', ] ) @pytest.mark.network def test_uninstall_trailing_newline(script): """ Uninstall behaves appropriately if easy-install.pth lacks a trailing newline """ script.easy_install('INITools==0.2', expect_stderr=True) script.easy_install('PyLogo', expect_stderr=True) easy_install_pth = script.site_packages_path / 'easy-install.pth' # trim trailing newline from easy-install.pth with open(easy_install_pth) as f: pth_before = f.read() with open(easy_install_pth, 'w') as f: f.write(pth_before.rstrip()) # uninstall initools script.pip('uninstall', 'INITools', '-y') with open(easy_install_pth) as f: pth_after = f.read() # verify that only initools is removed before_without_initools = [ line for line in pth_before.splitlines() if 'initools' not in line.lower() ] lines_after = pth_after.splitlines() assert lines_after == before_without_initools @pytest.mark.network def test_basic_uninstall_namespace_package(script): """ Uninstall a distribution with a namespace package without clobbering the namespace and everything in it. 
""" result = script.pip('install', 'pd.requires==0.0.3', expect_error=True) assert join(script.site_packages, 'pd') in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'pd.find', '-y', expect_error=True) assert join(script.site_packages, 'pd') not in result2.files_deleted, ( sorted(result2.files_deleted.keys()) ) assert join(script.site_packages, 'pd', 'find') in result2.files_deleted, ( sorted(result2.files_deleted.keys()) ) def test_uninstall_overlapping_package(script, data): """ Uninstalling a distribution that adds modules to a pre-existing package should only remove those added modules, not the rest of the existing package. See: GitHub issue #355 (pip uninstall removes things it didn't install) """ parent_pkg = data.packages.join("parent-0.1.tar.gz") child_pkg = data.packages.join("child-0.1.tar.gz") result1 = script.pip('install', parent_pkg, expect_error=False) assert join(script.site_packages, 'parent') in result1.files_created, ( sorted(result1.files_created.keys()) ) result2 = script.pip('install', child_pkg, expect_error=False) assert join(script.site_packages, 'child') in result2.files_created, ( sorted(result2.files_created.keys()) ) assert normpath( join(script.site_packages, 'parent/plugins/child_plugin.py') ) in result2.files_created, sorted(result2.files_created.keys()) # The import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import parent.plugins.child_plugin, child") result3 = script.pip('uninstall', '-y', 'child', expect_error=False) assert join(script.site_packages, 'child') in result3.files_deleted, ( sorted(result3.files_created.keys()) ) assert normpath( join(script.site_packages, 'parent/plugins/child_plugin.py') ) in result3.files_deleted, sorted(result3.files_deleted.keys()) assert join(script.site_packages, 'parent') not in result3.files_deleted, ( sorted(result3.files_deleted.keys()) ) # Additional check: uninstalling 'child' 
should return things to the # previous state, without unintended side effects. assert_all_changes(result2, result3, []) @pytest.mark.parametrize("console_scripts", ["test_ = distutils_install", "test_:test_ = distutils_install"]) def test_uninstall_entry_point(script, console_scripts): """ Test uninstall package with two or more entry points in the same section, whose name contain a colon. """ pkg_name = 'ep_install' pkg_path = create_test_package_with_setup( script, name=pkg_name, version='0.1', entry_points={"console_scripts": [console_scripts, ], "pip_test.ep": ["ep:name1 = distutils_install", "ep:name2 = distutils_install"] } ) script_name = script.bin_path.join(console_scripts.split('=')[0].strip()) if sys.platform == 'win32': script_name += '.exe' result = script.pip('install', pkg_path) assert script_name.exists result = script.pip('list', '--format=json') assert {"name": "ep-install", "version": "0.1"} \ in json.loads(result.stdout) script.pip('uninstall', 'ep_install', '-y') assert not script_name.exists result2 = script.pip('list', '--format=json') assert {"name": "ep-install", "version": "0.1"} \ not in json.loads(result2.stdout) def test_uninstall_gui_scripts(script): """ Make sure that uninstall removes gui scripts """ pkg_name = "gui_pkg" pkg_path = create_test_package_with_setup( script, name=pkg_name, version='0.1', entry_points={"gui_scripts": ["test_ = distutils_install", ], } ) script_name = script.bin_path.join('test_') if sys.platform == 'win32': script_name += '.exe' script.pip('install', pkg_path) assert script_name.exists script.pip('uninstall', pkg_name, '-y') assert not script_name.exists @pytest.mark.network def test_uninstall_console_scripts(script): """ Test uninstalling a package with more files (console_script entry points, extra directories). 
""" args = ['install'] args.append('discover') result = script.pip(*args, **{"expect_error": True}) assert script.bin / 'discover' + script.exe in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'discover', '-y', expect_error=True) assert_all_changes(result, result2, [script.venv / 'build', 'cache']) @pytest.mark.network def test_uninstall_easy_installed_console_scripts(script): """ Test uninstalling package with console_scripts that is easy_installed. """ result = script.easy_install('discover', expect_error=True) assert script.bin / 'discover' + script.exe in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'discover', '-y') assert_all_changes( result, result2, [ script.venv / 'build', 'cache', script.site_packages / 'easy-install.pth', ] ) @pytest.mark.network def test_uninstall_editable_from_svn(script, tmpdir): """ Test uninstalling an editable installation from svn. """ result = script.pip( 'install', '-e', '%s#egg=initools' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), ) result.assert_installed('INITools') result2 = script.pip('uninstall', '-y', 'initools') assert (script.venv / 'src' / 'initools' in result2.files_after) assert_all_changes( result, result2, [ script.venv / 'src', script.venv / 'build', script.site_packages / 'easy-install.pth' ], ) @pytest.mark.network def test_uninstall_editable_with_source_outside_venv(script, tmpdir): """ Test uninstalling editable install from existing source outside the venv. 
""" cache_dir = tmpdir.join("cache") try: temp = mkdtemp() tmpdir = join(temp, 'pip-test-package') _test_uninstall_editable_with_source_outside_venv( script, tmpdir, cache_dir, ) finally: rmtree(temp) def _test_uninstall_editable_with_source_outside_venv( script, tmpdir, cache_dir): result = script.run( 'git', 'clone', local_repo( 'git+git://github.com/pypa/pip-test-package', cache_dir, ), tmpdir, expect_stderr=True, ) result2 = script.pip('install', '-e', tmpdir) assert join( script.site_packages, 'pip-test-package.egg-link' ) in result2.files_created, list(result2.files_created.keys()) result3 = script.pip('uninstall', '-y', 'pip-test-package', expect_error=True) assert_all_changes( result, result3, [script.venv / 'build', script.site_packages / 'easy-install.pth'], ) @pytest.mark.network @pytest.mark.svn def test_uninstall_from_reqs_file(script, tmpdir): """ Test uninstall from a requirements file. """ script.scratch_path.join("test-req.txt").write( textwrap.dedent(""" -e %s#egg=initools # and something else to test out: PyLogo<0.4 """) % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ) ) result = script.pip('install', '-r', 'test-req.txt') script.scratch_path.join("test-req.txt").write( textwrap.dedent(""" # -f, -i, and --extra-index-url should all be ignored by uninstall -f http://www.example.com -i http://www.example.com --extra-index-url http://www.example.com -e %s#egg=initools # and something else to test out: PyLogo<0.4 """) % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ) ) result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y') assert_all_changes( result, result2, [ script.venv / 'build', script.venv / 'src', script.scratch / 'test-req.txt', script.site_packages / 'easy-install.pth', ], ) def test_uninstallpathset_no_paths(caplog): """ Test UninstallPathSet logs notification when there are no paths to uninstall """ from pip._internal.req.req_uninstall import 
UninstallPathSet from pkg_resources import get_distribution caplog.set_level(logging.INFO) test_dist = get_distribution('pip') uninstall_set = UninstallPathSet(test_dist) uninstall_set.remove() # with no files added to set assert ( "Can't uninstall 'pip'. No files were found to uninstall." in caplog.text ) def test_uninstall_non_local_distutils(caplog, monkeypatch, tmpdir): einfo = tmpdir.join("thing-1.0.egg-info") with open(einfo, "wb"): pass dist = pretend.stub( key="thing", project_name="thing", egg_info=einfo, location=einfo, _provider=pretend.stub(), ) get_dist = pretend.call_recorder(lambda x: dist) monkeypatch.setattr("pip._vendor.pkg_resources.get_distribution", get_dist) req = install_req_from_line("thing") req.uninstall() assert os.path.exists(einfo) def test_uninstall_wheel(script, data): """ Test uninstalling a wheel """ package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index') dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info' assert dist_info_folder in result.files_created result2 = script.pip('uninstall', 'simple.dist', '-y') assert_all_changes(result, result2, []) def test_uninstall_setuptools_develop_install(script, data): """Try uninstall after setup.py develop followed of setup.py install""" pkg_path = data.packages.join("FSPkg") script.run('python', 'setup.py', 'develop', expect_stderr=True, cwd=pkg_path) script.run('python', 'setup.py', 'install', expect_stderr=True, cwd=pkg_path) list_result = script.pip('list', '--format=json') assert {"name": os.path.normcase("FSPkg"), "version": "0.1.dev0"} \ in json.loads(list_result.stdout), str(list_result) # Uninstall both develop and install uninstall = script.pip('uninstall', 'FSPkg', '-y') assert any(filename.endswith('.egg') for filename in uninstall.files_deleted.keys()) uninstall2 = script.pip('uninstall', 'FSPkg', '-y') assert join( script.site_packages, 'FSPkg.egg-link' ) in uninstall2.files_deleted, 
list(uninstall2.files_deleted.keys()) list_result2 = script.pip('list', '--format=json') assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)} def test_uninstall_editable_and_pip_install(script, data): """Try uninstall after pip install -e after pip install""" # SETUPTOOLS_SYS_PATH_TECHNIQUE=raw removes the assumption that `-e` # installs are always higher priority than regular installs. # This becomes the default behavior in setuptools 25. script.environ['SETUPTOOLS_SYS_PATH_TECHNIQUE'] = 'raw' pkg_path = data.packages.join("FSPkg") script.pip('install', '-e', '.', expect_stderr=True, cwd=pkg_path) # ensure both are installed with --ignore-installed: script.pip('install', '--ignore-installed', '.', expect_stderr=True, cwd=pkg_path) list_result = script.pip('list', '--format=json') assert {"name": "FSPkg", "version": "0.1.dev0"} \ in json.loads(list_result.stdout) # Uninstall both develop and install uninstall = script.pip('uninstall', 'FSPkg', '-y') assert not any(filename.endswith('.egg-link') for filename in uninstall.files_deleted.keys()) uninstall2 = script.pip('uninstall', 'FSPkg', '-y') assert join( script.site_packages, 'FSPkg.egg-link' ) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys()) list_result2 = script.pip('list', '--format=json') assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)} def test_uninstall_ignores_missing_packages(script, data): """Uninstall of a non existent package prints a warning and exits cleanly """ result = script.pip( 'uninstall', '-y', 'non-existent-pkg', expect_stderr=True, ) assert "Skipping non-existent-pkg as it is not installed." in result.stderr assert result.returncode == 0, "Expected clean exit" def test_uninstall_ignores_missing_packages_and_uninstalls_rest(script, data): script.pip_install_local('simple') result = script.pip( 'uninstall', '-y', 'non-existent-pkg', 'simple', expect_stderr=True, ) assert "Skipping non-existent-pkg as it is not installed." 
in result.stderr assert "Successfully uninstalled simple" in result.stdout assert result.returncode == 0, "Expected clean exit"
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_uninstall_user.py
""" tests specific to uninstalling --user installs """ from os.path import isdir, isfile, normcase import pytest from tests.functional.test_install_user import _patch_dist_in_site_packages from tests.lib import assert_all_changes, pyversion class Tests_UninstallUserSite: @pytest.mark.network def test_uninstall_from_usersite(self, script): """ Test uninstall from usersite """ result1 = script.pip('install', '--user', 'INITools==0.3') result2 = script.pip('uninstall', '-y', 'INITools') assert_all_changes(result1, result2, [script.venv / 'build', 'cache']) def test_uninstall_from_usersite_with_dist_in_global_site( self, virtualenv, script): """ Test uninstall from usersite (with same dist in global site) """ _patch_dist_in_site_packages(virtualenv) script.pip_install_local('pip-test-package==0.1', '--no-binary=:all:') result2 = script.pip_install_local( '--user', 'pip-test-package==0.1.1', '--no-binary=:all:') result3 = script.pip('uninstall', '-vy', 'pip-test-package') # uninstall console is mentioning user scripts, but not global scripts assert normcase(script.user_bin_path) in result3.stdout, str(result3) assert normcase(script.bin_path) not in result3.stdout, str(result3) # uninstall worked assert_all_changes(result2, result3, [script.venv / 'build', 'cache']) # site still has 0.2 (can't look in result1; have to check) egg_info_folder = ( script.base_path / script.site_packages / 'pip_test_package-0.1-py%s.egg-info' % pyversion ) assert isdir(egg_info_folder) def test_uninstall_editable_from_usersite(self, script, data): """ Test uninstall editable local user install """ script.user_site_path.makedirs() # install to_install = data.packages.join("FSPkg") result1 = script.pip( 'install', '--user', '-e', to_install, expect_error=False, ) egg_link = script.user_site / 'FSPkg.egg-link' assert egg_link in result1.files_created, str(result1.stdout) # uninstall result2 = script.pip('uninstall', '-y', 'FSPkg') assert not isfile(script.base_path / egg_link) 
assert_all_changes( result1, result2, [ script.venv / 'build', 'cache', script.user_site / 'easy-install.pth', ] )
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_vcs_bazaar.py
""" Contains functional tests of the Bazaar class. """ import os import pytest from pip._internal.vcs.bazaar import Bazaar from tests.lib import ( _test_path_to_file_url, _vcs_add, create_file, is_bzr_installed, need_bzr, ) @pytest.mark.skipif( 'TRAVIS' not in os.environ, reason='Bazaar is only required under Travis') def test_ensure_bzr_available(): """Make sure that bzr is available when running in Travis.""" assert is_bzr_installed() @need_bzr def test_export(script, tmpdir): """Test that a Bazaar branch can be exported.""" source_dir = tmpdir / 'test-source' source_dir.mkdir() create_file(source_dir / 'test_file', 'something') _vcs_add(script, str(source_dir), vcs='bazaar') bzr = Bazaar('bzr+' + _test_path_to_file_url(source_dir)) export_dir = str(tmpdir / 'export') bzr.export(export_dir) assert os.listdir(export_dir) == ['test_file'] @need_bzr def test_export_rev(script, tmpdir): """Test that a Bazaar branch can be exported, specifying a rev.""" source_dir = tmpdir / 'test-source' source_dir.mkdir() # Create a single file that is changed by two revisions. create_file(source_dir / 'test_file', 'something initial') _vcs_add(script, str(source_dir), vcs='bazaar') create_file(source_dir / 'test_file', 'something new') script.run( 'bzr', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-m', 'change test file', cwd=source_dir, ) bzr = Bazaar('bzr+' + _test_path_to_file_url(source_dir) + '@1') export_dir = tmpdir / 'export' bzr.export(str(export_dir)) with open(export_dir / 'test_file', 'r') as f: assert f.read() == 'something initial'
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_vcs_git.py
""" Contains functional tests of the Git class. """ import os import pytest from pip._internal.vcs.git import Git, RemoteNotFoundError from tests.lib import _create_test_package, _git_commit, _test_path_to_file_url def get_head_sha(script, dest): """Return the HEAD sha.""" result = script.run('git', 'rev-parse', 'HEAD', cwd=dest) sha = result.stdout.strip() return sha def checkout_ref(script, repo_dir, ref): script.run('git', 'checkout', ref, cwd=repo_dir, expect_stderr=True) def checkout_new_branch(script, repo_dir, branch): script.run( 'git', 'checkout', '-b', branch, cwd=repo_dir, expect_stderr=True, ) def do_commit(script, dest): _git_commit(script, dest, message='test commit', args=['--allow-empty']) return get_head_sha(script, dest) def add_commits(script, dest, count): """Return a list of the commit hashes from oldest to newest.""" shas = [] for index in range(count): sha = do_commit(script, dest) shas.append(sha) return shas def check_rev(repo_dir, rev, expected): git = Git() assert git.get_revision_sha(repo_dir, rev) == expected def test_git_dir_ignored(tmpdir): """ Test that a GIT_DIR environment variable is ignored. """ repo_path = tmpdir / 'test-repo' repo_path.mkdir() repo_dir = str(repo_path) env = {'GIT_DIR': 'foo'} # If GIT_DIR is not ignored, then os.listdir() will return ['foo']. Git.run_command(['init', repo_dir], cwd=repo_dir, extra_environ=env) assert os.listdir(repo_dir) == ['.git'] def test_git_work_tree_ignored(tmpdir): """ Test that a GIT_WORK_TREE environment variable is ignored. """ repo_path = tmpdir / 'test-repo' repo_path.mkdir() repo_dir = str(repo_path) Git.run_command(['init', repo_dir], cwd=repo_dir) # Choose a directory relative to the cwd that does not exist. # If GIT_WORK_TREE is not ignored, then the command will error out # with: "fatal: This operation must be run in a work tree". 
env = {'GIT_WORK_TREE': 'foo'} Git.run_command(['status', repo_dir], extra_environ=env, cwd=repo_dir) def test_get_remote_url(script, tmpdir): source_dir = tmpdir / 'source' source_dir.mkdir() source_url = _test_path_to_file_url(source_dir) source_dir = str(source_dir) script.run('git', 'init', cwd=source_dir) do_commit(script, source_dir) repo_dir = str(tmpdir / 'repo') script.run('git', 'clone', source_url, repo_dir, expect_stderr=True) remote_url = Git.get_remote_url(repo_dir) assert remote_url == source_url def test_get_remote_url__no_remote(script, tmpdir): """ Test a repo with no remote. """ repo_dir = tmpdir / 'temp-repo' repo_dir.mkdir() repo_dir = str(repo_dir) script.run('git', 'init', cwd=repo_dir) with pytest.raises(RemoteNotFoundError): Git.get_remote_url(repo_dir) def test_get_current_branch(script): repo_dir = str(script.scratch_path) script.run('git', 'init', cwd=repo_dir) sha = do_commit(script, repo_dir) git = Git() assert git.get_current_branch(repo_dir) == 'master' # Switch to a branch with the same SHA as "master" but whose name # is alphabetically after. checkout_new_branch(script, repo_dir, 'release') assert git.get_current_branch(repo_dir) == 'release' # Also test the detached HEAD case. checkout_ref(script, repo_dir, sha) assert git.get_current_branch(repo_dir) is None def test_get_current_branch__branch_and_tag_same_name(script, tmpdir): """ Check calling get_current_branch() from a branch or tag when the branch and tag have the same name. """ repo_dir = str(tmpdir) script.run('git', 'init', cwd=repo_dir) do_commit(script, repo_dir) checkout_new_branch(script, repo_dir, 'dev') # Create a tag with the same name as the branch. script.run('git', 'tag', 'dev', cwd=repo_dir) git = Git() assert git.get_current_branch(repo_dir) == 'dev' # Now try with the tag checked out. 
checkout_ref(script, repo_dir, 'refs/tags/dev') assert git.get_current_branch(repo_dir) is None def test_get_revision_sha(script): repo_dir = str(script.scratch_path) script.run('git', 'init', cwd=repo_dir) shas = add_commits(script, repo_dir, count=3) tag_sha = shas[0] origin_sha = shas[1] head_sha = shas[2] assert head_sha == shas[-1] origin_ref = 'refs/remotes/origin/origin-branch' generic_ref = 'refs/generic-ref' script.run( 'git', 'branch', 'local-branch', head_sha, cwd=repo_dir ) script.run('git', 'tag', 'v1.0', tag_sha, cwd=repo_dir) script.run('git', 'update-ref', origin_ref, origin_sha, cwd=repo_dir) script.run( 'git', 'update-ref', 'refs/remotes/upstream/upstream-branch', head_sha, cwd=repo_dir ) script.run('git', 'update-ref', generic_ref, head_sha, cwd=repo_dir) # Test two tags pointing to the same sha. script.run('git', 'tag', 'v2.0', tag_sha, cwd=repo_dir) # Test tags sharing the same suffix as another tag, both before and # after the suffix alphabetically. script.run('git', 'tag', 'aaa/v1.0', head_sha, cwd=repo_dir) script.run('git', 'tag', 'zzz/v1.0', head_sha, cwd=repo_dir) check_rev(repo_dir, 'v1.0', (tag_sha, False)) check_rev(repo_dir, 'v2.0', (tag_sha, False)) check_rev(repo_dir, 'origin-branch', (origin_sha, True)) ignored_names = [ # Local branches should be ignored. 'local-branch', # Non-origin remote branches should be ignored. 'upstream-branch', # Generic refs should be ignored. 'generic-ref', # Fully spelled-out refs should be ignored. origin_ref, generic_ref, # Test passing a valid commit hash. tag_sha, # Test passing a non-existent name. 'does-not-exist', ] for name in ignored_names: check_rev(repo_dir, name, (None, False)) def test_is_commit_id_equal(script): """ Test Git.is_commit_id_equal(). 
""" version_pkg_path = _create_test_package(script) script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path) commit = script.run( 'git', 'rev-parse', 'HEAD', cwd=version_pkg_path ).stdout.strip() git = Git() assert git.is_commit_id_equal(version_pkg_path, commit) assert not git.is_commit_id_equal(version_pkg_path, commit[:7]) assert not git.is_commit_id_equal(version_pkg_path, 'branch0.1') assert not git.is_commit_id_equal(version_pkg_path, 'abc123') # Also check passing a None value. assert not git.is_commit_id_equal(version_pkg_path, None)
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_warning.py
import textwrap def test_environ(script, tmpdir): """$PYTHONWARNINGS was added in python2.7""" demo = tmpdir.join('warnings_demo.py') demo.write(textwrap.dedent(''' from logging import basicConfig from pip._internal.utils import deprecation deprecation.install_warning_logger() basicConfig() deprecation.deprecated("deprecated!", replacement=None, gone_in=None) ''')) result = script.run('python', demo, expect_stderr=True) expected = 'WARNING:pip._internal.deprecations:DEPRECATION: deprecated!\n' assert result.stderr == expected script.environ['PYTHONWARNINGS'] = 'ignore' result = script.run('python', demo) assert result.stderr == ''
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_wheel.py
"""'pip wheel' tests"""
import os
from os.path import exists

import pytest

from pip._internal.cli.status_codes import ERROR, PREVIOUS_BUILD_DIR_ERROR
from pip._internal.locations import write_delete_marker_file
from tests.lib import pyversion


@pytest.fixture(autouse=True)
def auto_with_wheel(with_wheel):
    # Ensure every test in this module runs with the 'wheel' package
    # available in the test environment.
    pass


def add_files_to_dist_directory(folder):
    # Seed a dist/ directory with a non-wheel file to check that 'pip wheel'
    # is not confused by pre-existing build artifacts.
    (folder / 'dist').makedirs()
    (folder / 'dist' / 'a_name-0.0.1.tar.gz').write("hello")
    # Not adding a wheel file since that confuses setuptools' backend.
    # (folder / 'dist' / 'a_name-0.0.1-py2.py3-none-any.whl').write("hello")


def test_wheel_exit_status_code_when_no_requirements(script):
    """
    Test wheel exit status code when no requirements specified
    """
    result = script.pip('wheel', expect_error=True)
    assert "You must give at least one requirement to wheel" in result.stderr
    assert result.returncode == ERROR


def test_wheel_exit_status_code_when_blank_requirements_file(script):
    """
    Test wheel exit status code when blank requirements file specified
    """
    script.scratch_path.join("blank.txt").write("\n")
    script.pip('wheel', '-r', 'blank.txt')


def test_pip_wheel_success(script, data):
    """
    Test 'pip wheel' success.
    """
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        'simple==3.0',
    )
    wheel_file_name = 'simple-3.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
    assert "Successfully built simple" in result.stdout, result.stdout


def test_basic_pip_wheel_downloads_wheels(script, data):
    """
    Test 'pip wheel' downloads wheels
    """
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links, 'simple.dist',
    )
    wheel_file_name = 'simple.dist-0.1-py2.py3-none-any.whl'
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
    assert "Saved" in result.stdout, result.stdout


def test_pip_wheel_builds_when_no_binary_set(script, data):
    data.packages.join('simple-3.0-py2.py3-none-any.whl').touch()
    # Check that the wheel package is ignored
    res = script.pip(
        'wheel', '--no-index', '--no-binary', ':all:',
        '-f', data.find_links,
        'simple==3.0')
    assert "Building wheel for simple" in str(res), str(res)


def test_pip_wheel_builds_editable_deps(script, data):
    """
    Test 'pip wheel' finds and builds dependencies of editables
    """
    editable_path = os.path.join(data.src, 'requires_simple')
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        '-e', editable_path
    )
    wheel_file_name = 'simple-1.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout


def test_pip_wheel_builds_editable(script, data):
    """
    Test 'pip wheel' builds an editable package
    """
    editable_path = os.path.join(data.src, 'simplewheel-1.0')
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        '-e', editable_path
    )
    wheel_file_name = 'simplewheel-1.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout


def test_pip_wheel_fail(script, data):
    """
    Test 'pip wheel' failure.
    """
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        'wheelbroken==0.1',
        expect_error=True,
    )
    wheel_file_name = 'wheelbroken-0.1-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path not in result.files_created, (
        wheel_file_path,
        result.files_created,
    )
    assert "FakeError" in result.stdout, result.stdout
    assert "Failed to build wheelbroken" in result.stdout, result.stdout
    assert result.returncode != 0


def test_no_clean_option_blocks_cleaning_after_wheel(script, data):
    """
    Test --no-clean option blocks cleaning after wheel build
    """
    build = script.venv_path / 'build'
    result = script.pip(
        'wheel', '--no-clean', '--no-index', '--build', build,
        '--find-links=%s' % data.find_links,
        'simple',
        expect_temp=True,
    )
    # The build directory must survive the (otherwise automatic) cleanup.
    build = build / 'simple'
    assert exists(build), "build/simple should still exist %s" % str(result)


def test_pip_wheel_source_deps(script, data):
    """
    Test 'pip wheel' finds and builds source archive dependencies
    of wheels
    """
    # 'requires_source' is a wheel that depends on the 'source' project
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        'requires_source',
    )
    wheel_file_name = 'source-1.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
    assert "Successfully built source" in result.stdout, result.stdout


def test_pip_wheel_fail_cause_of_previous_build_dir(script, data):
    """
    Test when 'pip wheel' tries to install a package that has a previous build
    directory
    """
    # Given that I have a previous build dir of the `simple` package
    build = script.venv_path / 'build' / 'simple'
    os.makedirs(build)
    write_delete_marker_file(script.venv_path / 'build' / 'simple')
    build.join('setup.py').write('#')

    # When I call pip trying to install things again
    result = script.pip(
        'wheel', '--no-index',
        '--find-links=%s' % data.find_links,
        '--build', script.venv_path / 'build',
        'simple==3.0',
        expect_error=True, expect_temp=True,
    )

    # Then I see that the error code is the right one
    assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, result


def test_wheel_package_with_latin1_setup(script, data):
    """Create a wheel from a package with latin-1 encoded setup.py."""
    # NOTE(review): the fixture directory is "SetupPyLatin1" but the project
    # name declared inside its setup.py is apparently "SetupPyUTF8" — the
    # asserted message below relies on that.
    pkg_to_wheel = data.packages.join("SetupPyLatin1")
    result = script.pip('wheel', pkg_to_wheel)
    assert 'Successfully built SetupPyUTF8' in result.stdout


def test_pip_wheel_with_pep518_build_reqs(script, data, common_wheels):
    result = script.pip('wheel', '--no-index', '-f', data.find_links,
                        '-f', common_wheels, 'pep518==3.0',)
    wheel_file_name = 'pep518-3.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
    assert "Successfully built pep518" in result.stdout, result.stdout
    assert "Installing build dependencies" in result.stdout, result.stdout


def test_pip_wheel_with_pep518_build_reqs_no_isolation(script, data):
    script.pip_install_local('simplewheel==2.0')
    result = script.pip(
        'wheel', '--no-index', '-f', data.find_links,
        '--no-build-isolation', 'pep518==3.0',
    )
    wheel_file_name = 'pep518-3.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
    assert "Successfully built pep518" in result.stdout, result.stdout
    # With --no-build-isolation no build environment is set up.
    assert "Installing build dependencies" not in result.stdout, result.stdout


def test_pip_wheel_with_user_set_in_config(script, data, common_wheels):
    # "user = true" in the [install] config section must not break 'wheel'.
    config_file = script.scratch_path / 'pip.conf'
    script.environ['PIP_CONFIG_FILE'] = str(config_file)
    config_file.write("[install]\nuser = true")
    result = script.pip(
        'wheel', data.src / 'withpyproject',
        '--no-index', '-f', common_wheels
    )
    assert "Successfully built withpyproject" in result.stdout, result.stdout


@pytest.mark.network
def test_pep517_wheels_are_not_confused_with_other_files(script, tmpdir, data):
    """Check correct wheels are copied. (#6196)
    """
    pkg_to_wheel = data.src / 'withpyproject'
    add_files_to_dist_directory(pkg_to_wheel)

    result = script.pip('wheel', pkg_to_wheel, '-w', script.scratch_path)
    assert "Installing build dependencies" in result.stdout, result.stdout

    wheel_file_name = 'withpyproject-0.0.1-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout


def test_legacy_wheels_are_not_confused_with_other_files(script, tmpdir, data):
    """Check correct wheels are copied. (#6196)
    """
    pkg_to_wheel = data.src / 'simplewheel-1.0'
    add_files_to_dist_directory(pkg_to_wheel)

    result = script.pip('wheel', pkg_to_wheel, '-w', script.scratch_path)
    assert "Installing build dependencies" not in result.stdout, result.stdout

    wheel_file_name = 'simplewheel-1.0-py%s-none-any.whl' % pyversion[0]
    wheel_file_path = script.scratch / wheel_file_name
    assert wheel_file_path in result.files_created, result.stdout
[]
[]
[]
archives/1346520853_-.zip
tests/functional/test_yaml.py
"""Tests for the resolver """ import os import re import pytest from tests.lib import DATA_DIR, create_basic_wheel_for_package, path_to_url from tests.lib.yaml_helpers import generate_yaml_tests, id_func _conflict_finder_re = re.compile( # Conflicting Requirements: \ # A 1.0.0 requires B == 2.0.0, C 1.0.0 requires B == 1.0.0. r""" (?P<package>[\w\-_]+?) [ ] (?P<version>\S+?) [ ]requires[ ] (?P<selector>.+?) (?=,|\.$) """, re.X ) def _convert_to_dict(string): def stripping_split(my_str, splitwith, count=None): if count is None: return [x.strip() for x in my_str.strip().split(splitwith)] else: return [x.strip() for x in my_str.strip().split(splitwith, count)] parts = stripping_split(string, ";") retval = {} retval["depends"] = [] retval["extras"] = {} retval["name"], retval["version"] = stripping_split(parts[0], " ") for part in parts[1:]: verb, args_str = stripping_split(part, " ", 1) assert verb in ["depends"], "Unknown verb {!r}".format(verb) retval[verb] = stripping_split(args_str, ",") return retval def handle_install_request(script, requirement): assert isinstance(requirement, str), ( "Need install requirement to be a string only" ) result = script.pip( "install", "--no-index", "--find-links", path_to_url(script.scratch_path), requirement ) retval = {} if result.returncode == 0: # Check which packages got installed retval["install"] = [] for path in result.files_created: if path.endswith(".dist-info"): name, version = ( os.path.basename(path)[:-len(".dist-info")] ).rsplit("-", 1) # TODO: information about extras. 
retval["install"].append(" ".join((name, version))) retval["install"].sort() # TODO: Support checking uninstallations # retval["uninstall"] = [] elif "conflicting" in result.stderr.lower(): retval["conflicting"] = [] message = result.stderr.rsplit("\n", 1)[-1] # XXX: There might be a better way than parsing the message for match in re.finditer(message, _conflict_finder_re): di = match.groupdict() retval["conflicting"].append( { "required_by": "{} {}".format(di["name"], di["version"]), "selector": di["selector"] } ) return retval @pytest.mark.yaml @pytest.mark.parametrize( "case", generate_yaml_tests(DATA_DIR.folder / "yaml"), ids=id_func ) def test_yaml_based(script, case): available = case.get("available", []) requests = case.get("request", []) transaction = case.get("transaction", []) assert len(requests) == len(transaction), ( "Expected requests and transaction counts to be same" ) # Create a custom index of all the packages that are supposed to be # available # XXX: This doesn't work because this isn't making an index of files. for package in available: if isinstance(package, str): package = _convert_to_dict(package) assert isinstance(package, dict), "Needs to be a dictionary" create_basic_wheel_for_package(script, **package) available_actions = { "install": handle_install_request } # use scratch path for index for request, expected in zip(requests, transaction): # The name of the key is what action has to be taken assert len(request.keys()) == 1, "Expected only one action" # Get the only key action = list(request.keys())[0] assert action in available_actions.keys(), ( "Unsupported action {!r}".format(action) ) # Perform the requested action effect = available_actions[action](script, request[action]) assert effect == expected, "Fixture did not succeed."
[]
[]
[]
archives/1346520853_-.zip
tests/lib/__init__.py
from __future__ import absolute_import from contextlib import contextmanager import os import sys import re import textwrap import site import shutil import subprocess import pytest from scripttest import FoundDir, TestFileEnvironment from tests.lib.path import Path, curdir DATA_DIR = Path(__file__).folder.folder.join("data").abspath SRC_DIR = Path(__file__).abspath.folder.folder.folder pyversion = sys.version[:3] pyversion_tuple = sys.version_info def assert_paths_equal(actual, expected): os.path.normpath(actual) == os.path.normpath(expected) def path_to_url(path): """ Convert a path to URI. The path will be made absolute and will not have quoted path parts. (adapted from pip.util) """ path = os.path.normpath(os.path.abspath(path)) drive, path = os.path.splitdrive(path) filepath = path.split(os.path.sep) url = '/'.join(filepath) if drive: # Note: match urllib.request.pathname2url's # behavior: uppercase the drive letter. return 'file:///' + drive.upper() + url return 'file://' + url def _test_path_to_file_url(path): """ Convert a test Path to a "file://" URL. Args: path: a tests.lib.path.Path object. """ return 'file://' + path.abspath.replace('\\', '/') def create_file(path, contents=None): """Create a file on the path, with the given contents """ from pip._internal.utils.misc import ensure_dir ensure_dir(os.path.dirname(path)) with open(path, "w") as f: if contents is not None: f.write(contents) else: f.write("\n") class TestData(object): """ Represents a bundle of pre-created test data. This copies a pristine set of test data into a root location that is designed to be test specific. The reason for this is when running the tests concurrently errors can be generated because the related tooling uses the directory as a work space. This leads to two concurrent processes trampling over each other. This class gets around that by copying all data into a directory and operating on the copied data. 
""" def __init__(self, root, source=None): self.source = source or DATA_DIR self.root = Path(root).abspath @classmethod def copy(cls, root): obj = cls(root) obj.reset() return obj def reset(self): self.root.rmtree() self.source.copytree(self.root) @property def packages(self): return self.root.join("packages") @property def packages2(self): return self.root.join("packages2") @property def packages3(self): return self.root.join("packages3") @property def src(self): return self.root.join("src") @property def indexes(self): return self.root.join("indexes") @property def reqfiles(self): return self.root.join("reqfiles") @property def completion_paths(self): return self.root.join("completion_paths") @property def find_links(self): return path_to_url(self.packages) @property def find_links2(self): return path_to_url(self.packages2) @property def find_links3(self): return path_to_url(self.packages3) @property def backends(self): return path_to_url(self.root.join("backends")) def index_url(self, index="simple"): return path_to_url(self.root.join("indexes", index)) class TestFailure(AssertionError): """ An "assertion" failed during testing. 
""" pass class TestPipResult(object): def __init__(self, impl, verbose=False): self._impl = impl if verbose: print(self.stdout) if self.stderr: print('======= stderr ========') print(self.stderr) print('=======================') def __getattr__(self, attr): return getattr(self._impl, attr) if sys.platform == 'win32': @property def stdout(self): return self._impl.stdout.replace('\r\n', '\n') @property def stderr(self): return self._impl.stderr.replace('\r\n', '\n') def __str__(self): return str(self._impl).replace('\r\n', '\n') else: # Python doesn't automatically forward __str__ through __getattr__ def __str__(self): return str(self._impl) def assert_installed(self, pkg_name, editable=True, with_files=[], without_files=[], without_egg_link=False, use_user_site=False, sub_dir=False): e = self.test_env if editable: pkg_dir = e.venv / 'src' / pkg_name.lower() # If package was installed in a sub directory if sub_dir: pkg_dir = pkg_dir / sub_dir else: without_egg_link = True pkg_dir = e.site_packages / pkg_name if use_user_site: egg_link_path = e.user_site / pkg_name + '.egg-link' else: egg_link_path = e.site_packages / pkg_name + '.egg-link' if without_egg_link: if egg_link_path in self.files_created: raise TestFailure( 'unexpected egg link file created: %r\n%s' % (egg_link_path, self) ) else: if egg_link_path not in self.files_created: raise TestFailure( 'expected egg link file missing: %r\n%s' % (egg_link_path, self) ) egg_link_file = self.files_created[egg_link_path] egg_link_contents = egg_link_file.bytes.replace(os.linesep, '\n') # FIXME: I don't understand why there's a trailing . 
here if not (egg_link_contents.endswith('\n.') and egg_link_contents[:-2].endswith(pkg_dir)): raise TestFailure(textwrap.dedent(u'''\ Incorrect egg_link file %r Expected ending: %r ------- Actual contents ------- %s -------------------------------''' % ( egg_link_file, pkg_dir + '\n.', repr(egg_link_contents)) )) if use_user_site: pth_file = e.user_site / 'easy-install.pth' else: pth_file = e.site_packages / 'easy-install.pth' if (pth_file in self.files_updated) == without_egg_link: raise TestFailure('%r unexpectedly %supdated by install' % ( pth_file, (not without_egg_link and 'not ' or ''))) if (pkg_dir in self.files_created) == (curdir in without_files): raise TestFailure(textwrap.dedent('''\ expected package directory %r %sto be created actually created: %s ''') % ( pkg_dir, (curdir in without_files and 'not ' or ''), sorted(self.files_created.keys()))) for f in with_files: if not (pkg_dir / f).normpath in self.files_created: raise TestFailure( 'Package directory %r missing expected content %r' % (pkg_dir, f) ) for f in without_files: if (pkg_dir / f).normpath in self.files_created: raise TestFailure( 'Package directory %r has unexpected content %f' % (pkg_dir, f) ) class PipTestEnvironment(TestFileEnvironment): """ A specialized TestFileEnvironment for testing pip """ # # Attribute naming convention # --------------------------- # # Instances of this class have many attributes representing paths # in the filesystem. To keep things straight, absolute paths have # a name of the form xxxx_path and relative paths have a name that # does not end in '_path'. 
exe = sys.platform == 'win32' and '.exe' or '' verbose = False def __init__(self, base_path, *args, **kwargs): # Make our base_path a test.lib.path.Path object base_path = Path(base_path) # Store paths related to the virtual environment venv = kwargs.pop("virtualenv") self.venv_path = venv.location self.lib_path = venv.lib self.site_packages_path = venv.site self.bin_path = venv.bin self.user_base_path = self.venv_path.join("user") self.user_site_path = self.venv_path.join( "user", site.USER_SITE[len(site.USER_BASE) + 1:], ) if sys.platform == 'win32': if sys.version_info >= (3, 5): scripts_base = self.user_site_path.join('..').normpath else: scripts_base = self.user_base_path self.user_bin_path = scripts_base.join('Scripts') else: self.user_bin_path = self.user_base_path.join( self.bin_path - self.venv_path ) # Create a Directory to use as a scratch pad self.scratch_path = base_path.join("scratch").mkdir() # Set our default working directory kwargs.setdefault("cwd", self.scratch_path) # Setup our environment environ = kwargs.get("environ") if environ is None: environ = os.environ.copy() environ["PATH"] = Path.pathsep.join( [self.bin_path] + [environ.get("PATH", [])], ) environ["PYTHONUSERBASE"] = self.user_base_path # Writing bytecode can mess up updated file detection environ["PYTHONDONTWRITEBYTECODE"] = "1" # Make sure we get UTF-8 on output, even on Windows... 
environ["PYTHONIOENCODING"] = "UTF-8" kwargs["environ"] = environ # Whether all pip invocations should expect stderr # (useful for Python version deprecation) self.pip_expect_stderr = kwargs.pop('pip_expect_stderr', None) # Call the TestFileEnvironment __init__ super(PipTestEnvironment, self).__init__(base_path, *args, **kwargs) # Expand our absolute path directories into relative for name in ["base", "venv", "bin", "lib", "site_packages", "user_base", "user_site", "user_bin", "scratch"]: real_name = "%s_path" % name setattr(self, name, getattr(self, real_name) - self.base_path) # Make sure temp_path is a Path object self.temp_path = Path(self.temp_path) # Ensure the tmp dir exists, things break horribly if it doesn't self.temp_path.mkdir() # create easy-install.pth in user_site, so we always have it updated # instead of created self.user_site_path.makedirs() self.user_site_path.join("easy-install.pth").touch() def _ignore_file(self, fn): if fn.endswith('__pycache__') or fn.endswith(".pyc"): result = True else: result = super(PipTestEnvironment, self)._ignore_file(fn) return result def _find_traverse(self, path, result): # Ignore symlinked directories to avoid duplicates in `run()` # results because of venv `lib64 -> lib/` symlink on Linux. full = os.path.join(self.base_path, path) if os.path.isdir(full) and os.path.islink(full): if not self.temp_path or path != 'tmp': result[path] = FoundDir(self.base_path, path) else: super(PipTestEnvironment, self)._find_traverse(path, result) def run(self, *args, **kw): if self.verbose: print('>> running %s %s' % (args, kw)) cwd = kw.pop('cwd', None) run_from = kw.pop('run_from', None) assert not cwd or not run_from, "Don't use run_from; it's going away" cwd = cwd or run_from or self.cwd if sys.platform == 'win32': # Partial fix for ScriptTest.run using `shell=True` on Windows. 
args = [str(a).replace('^', '^^').replace('&', '^&') for a in args] return TestPipResult( super(PipTestEnvironment, self).run(cwd=cwd, *args, **kw), verbose=self.verbose, ) def pip(self, *args, **kwargs): if self.pip_expect_stderr: kwargs['expect_stderr'] = True # On old versions of Python, urllib3/requests will raise a warning # about the lack of an SSLContext. Expect it when running commands # that will touch the outside world. if (pyversion_tuple < (2, 7, 9) and args and args[0] in ('search', 'install', 'download')): kwargs['expect_stderr'] = True if kwargs.pop('use_module', True): exe = 'python' args = ('-m', 'pip') + args else: exe = 'pip' return self.run(exe, *args, **kwargs) def pip_install_local(self, *args, **kwargs): return self.pip( "install", "--no-index", "--find-links", path_to_url(os.path.join(DATA_DIR, "packages")), *args, **kwargs ) def easy_install(self, *args, **kwargs): args = ('-m', 'easy_install') + args return self.run('python', *args, **kwargs) # FIXME ScriptTest does something similar, but only within a single # ProcResult; this generalizes it so states can be compared across # multiple commands. Maybe should be rolled into ScriptTest? def diff_states(start, end, ignore=None): """ Differences two "filesystem states" as represented by dictionaries of FoundFile and FoundDir objects. Returns a dictionary with following keys: ``deleted`` Dictionary of files/directories found only in the start state. ``created`` Dictionary of files/directories found only in the end state. ``updated`` Dictionary of files whose size has changed (FIXME not entirely reliable, but comparing contents is not possible because FoundFile.bytes is lazy, and comparing mtime doesn't help if we want to know if a file has been returned to its earlier state). Ignores mtime and other file attributes; only presence/absence and size are considered. 
""" ignore = ignore or [] def prefix_match(path, prefix): if path == prefix: return True prefix = prefix.rstrip(os.path.sep) + os.path.sep return path.startswith(prefix) start_keys = {k for k in start.keys() if not any([prefix_match(k, i) for i in ignore])} end_keys = {k for k in end.keys() if not any([prefix_match(k, i) for i in ignore])} deleted = {k: start[k] for k in start_keys.difference(end_keys)} created = {k: end[k] for k in end_keys.difference(start_keys)} updated = {} for k in start_keys.intersection(end_keys): if (start[k].size != end[k].size): updated[k] = end[k] return dict(deleted=deleted, created=created, updated=updated) def assert_all_changes(start_state, end_state, expected_changes): """ Fails if anything changed that isn't listed in the expected_changes. start_state is either a dict mapping paths to scripttest.[FoundFile|FoundDir] objects or a TestPipResult whose files_before we'll test. end_state is either a similar dict or a TestPipResult whose files_after we'll test. Note: listing a directory means anything below that directory can be expected to have changed. """ __tracebackhide__ = True start_files = start_state end_files = end_state if isinstance(start_state, TestPipResult): start_files = start_state.files_before if isinstance(end_state, TestPipResult): end_files = end_state.files_after diff = diff_states(start_files, end_files, ignore=expected_changes) if list(diff.values()) != [{}, {}, {}]: raise TestFailure('Unexpected changes:\n' + '\n'.join( [k + ': ' + ', '.join(v.keys()) for k, v in diff.items()])) # Don't throw away this potentially useful information return diff def _create_main_file(dir_path, name=None, output=None): """ Create a module with a main() function that prints the given output. 
""" if name is None: name = 'version_pkg' if output is None: output = '0.1' text = textwrap.dedent("""\ def main(): print({!r}) """.format(output)) filename = '{}.py'.format(name) dir_path.join(filename).write(text) def _git_commit(env_or_script, repo_dir, message=None, args=None, expect_stderr=False): """ Run git-commit. Args: env_or_script: pytest's `script` or `env` argument. repo_dir: a path to a Git repository. message: an optional commit message. args: optional additional options to pass to git-commit. """ if message is None: message = 'test commit' if args is None: args = [] new_args = [ 'git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', ] new_args.extend(args) new_args.extend(['-m', message]) env_or_script.run(*new_args, cwd=repo_dir, expect_stderr=expect_stderr) def _vcs_add(script, version_pkg_path, vcs='git'): if vcs == 'git': script.run('git', 'init', cwd=version_pkg_path) script.run('git', 'add', '.', cwd=version_pkg_path) _git_commit(script, version_pkg_path, message='initial version') elif vcs == 'hg': script.run('hg', 'init', cwd=version_pkg_path) script.run('hg', 'add', '.', cwd=version_pkg_path) script.run( 'hg', 'commit', '-q', '--user', 'pip <pypa-dev@googlegroups.com>', '-m', 'initial version', cwd=version_pkg_path, ) elif vcs == 'svn': repo_url = _create_svn_repo(script, version_pkg_path) script.run( 'svn', 'checkout', repo_url, 'pip-test-package', cwd=script.scratch_path ) checkout_path = script.scratch_path / 'pip-test-package' # svn internally stores windows drives as uppercase; we'll match that. 
checkout_path = checkout_path.replace('c:', 'C:') version_pkg_path = checkout_path elif vcs == 'bazaar': script.run('bzr', 'init', cwd=version_pkg_path) script.run('bzr', 'add', '.', cwd=version_pkg_path) script.run( 'bzr', 'whoami', 'pip <pypa-dev@googlegroups.com>', cwd=version_pkg_path) script.run( 'bzr', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-m', 'initial version', cwd=version_pkg_path, ) else: raise ValueError('Unknown vcs: %r' % vcs) return version_pkg_path def _create_test_package_with_subdirectory(script, subdirectory): script.scratch_path.join("version_pkg").mkdir() version_pkg_path = script.scratch_path / 'version_pkg' _create_main_file(version_pkg_path, name="version_pkg", output="0.1") version_pkg_path.join("setup.py").write( textwrap.dedent(""" from setuptools import setup, find_packages setup(name='version_pkg', version='0.1', packages=find_packages(), py_modules=['version_pkg'], entry_points=dict(console_scripts=['version_pkg=version_pkg:main'])) """)) subdirectory_path = version_pkg_path.join(subdirectory) subdirectory_path.mkdir() _create_main_file(subdirectory_path, name="version_subpkg", output="0.1") subdirectory_path.join('setup.py').write( textwrap.dedent(""" from setuptools import setup, find_packages setup(name='version_subpkg', version='0.1', packages=find_packages(), py_modules=['version_subpkg'], entry_points=dict(console_scripts=['version_pkg=version_subpkg:main'])) """)) script.run('git', 'init', cwd=version_pkg_path) script.run('git', 'add', '.', cwd=version_pkg_path) _git_commit(script, version_pkg_path, message='initial version') return version_pkg_path def _create_test_package_with_srcdir(script, name='version_pkg', vcs='git'): script.scratch_path.join(name).mkdir() version_pkg_path = script.scratch_path / name subdir_path = version_pkg_path.join('subdir') subdir_path.mkdir() src_path = subdir_path.join('src') src_path.mkdir() pkg_path = src_path.join('pkg') pkg_path.mkdir() 
pkg_path.join('__init__.py').write('') subdir_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup, find_packages setup( name='{name}', version='0.1', packages=find_packages(), package_dir={{'': 'src'}}, ) """.format(name=name))) return _vcs_add(script, version_pkg_path, vcs) def _create_test_package(script, name='version_pkg', vcs='git'): script.scratch_path.join(name).mkdir() version_pkg_path = script.scratch_path / name _create_main_file(version_pkg_path, name=name, output='0.1') version_pkg_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup, find_packages setup( name='{name}', version='0.1', packages=find_packages(), py_modules=['{name}'], entry_points=dict(console_scripts=['{name}={name}:main']) ) """.format(name=name))) return _vcs_add(script, version_pkg_path, vcs) def _create_svn_repo(script, version_pkg_path): repo_url = path_to_url( script.scratch_path / 'pip-test-package-repo' / 'trunk') script.run( 'svnadmin', 'create', 'pip-test-package-repo', cwd=script.scratch_path ) script.run( 'svn', 'import', version_pkg_path, repo_url, '-m', 'Initial import of pip-test-package', cwd=script.scratch_path ) return repo_url def _change_test_package_version(script, version_pkg_path): _create_main_file( version_pkg_path, name='version_pkg', output='some different version' ) # Pass -a to stage the change to the main file. _git_commit( script, version_pkg_path, message='messed version', args=['-a'], expect_stderr=True, ) def assert_raises_regexp(exception, reg, run, *args, **kwargs): """Like assertRaisesRegexp in unittest""" __tracebackhide__ = True try: run(*args, **kwargs) assert False, "%s should have been thrown" % exception except exception: e = sys.exc_info()[1] p = re.compile(reg) assert p.search(str(e)), str(e) @contextmanager def requirements_file(contents, tmpdir): """Return a Path to a requirements file of given contents. As long as the context manager is open, the requirements file will exist. 
:param tmpdir: A Path to the folder in which to create the file """ path = tmpdir / 'reqs.txt' path.write(contents) yield path path.remove() def create_test_package_with_setup(script, **setup_kwargs): assert 'name' in setup_kwargs, setup_kwargs pkg_path = script.scratch_path / setup_kwargs['name'] pkg_path.mkdir() pkg_path.join("setup.py").write(textwrap.dedent(""" from setuptools import setup kwargs = %r setup(**kwargs) """) % setup_kwargs) return pkg_path def create_basic_wheel_for_package(script, name, version, depends=None, extras=None): if depends is None: depends = [] if extras is None: extras = {} files = { "{name}/__init__.py": """ __version__ = {version} def hello(): return "Hello From {name}" """, "{dist_info}/DESCRIPTION": """ UNKNOWN """, "{dist_info}/WHEEL": """ Wheel-Version: 1.0 Generator: pip-test-suite Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any """, "{dist_info}/METADATA": """ Metadata-Version: 2.0 Name: {name} Version: {version} Summary: UNKNOWN Home-page: UNKNOWN Author: UNKNOWN Author-email: UNKNOWN License: UNKNOWN Platform: UNKNOWN {requires_dist} UNKNOWN """, "{dist_info}/top_level.txt": """ {name} """, # Have an empty RECORD because we don't want to be checking hashes. 
"{dist_info}/RECORD": "" } # Some useful shorthands archive_name = "{name}-{version}-py2.py3-none-any.whl".format( name=name, version=version ) dist_info = "{name}-{version}.dist-info".format( name=name, version=version ) requires_dist = "\n".join([ "Requires-Dist: {}".format(pkg) for pkg in depends ] + [ "Provides-Extra: {}".format(pkg) for pkg in extras.keys() ] + [ "Requires-Dist: {}; extra == \"{}\"".format(pkg, extra) for extra in extras for pkg in extras[extra] ]) # Replace key-values with formatted values for key, value in list(files.items()): del files[key] key = key.format(name=name, dist_info=dist_info) files[key] = textwrap.dedent(value).format( name=name, version=version, requires_dist=requires_dist ).strip() for fname in files: path = script.temp_path / fname path.folder.mkdir() path.write(files[fname]) retval = script.scratch_path / archive_name generated = shutil.make_archive(retval, 'zip', script.temp_path) shutil.move(generated, retval) script.temp_path.rmtree() script.temp_path.mkdir() return retval def need_executable(name, check_cmd): def wrapper(fn): try: subprocess.check_output(check_cmd) except OSError: return pytest.mark.skip(reason='%s is not available' % name)(fn) return fn return wrapper def is_bzr_installed(): try: subprocess.check_output(('bzr', 'version', '--short')) except OSError: return False return True def need_bzr(fn): return pytest.mark.bzr(need_executable( 'Bazaar', ('bzr', 'version', '--short') )(fn)) def need_mercurial(fn): return pytest.mark.mercurial(need_executable( 'Mercurial', ('hg', 'version') )(fn))
[]
[]
[]
archives/1346520853_-.zip
tests/lib/configuration_helpers.py
"""Helpers for tests that check configuration """ import contextlib import functools import os import tempfile import textwrap import pip._internal.configuration from pip._internal.utils.misc import ensure_dir # This is so that tests don't need to import pip._internal.configuration. kinds = pip._internal.configuration.kinds def reset_os_environ(old_environ): """ Reset os.environ while preserving the same underlying mapping. """ # Preserving the same mapping is preferable to assigning a new mapping # because the latter has interfered with test isolation by, for example, # preventing time.tzset() from working in subsequent tests after # changing os.environ['TZ'] in those tests. os.environ.clear() os.environ.update(old_environ) class ConfigurationMixin(object): def setup(self): self.configuration = pip._internal.configuration.Configuration( isolated=False, ) self._files_to_clear = [] self._old_environ = os.environ.copy() def teardown(self): for fname in self._files_to_clear: fname.stop() reset_os_environ(self._old_environ) def patch_configuration(self, variant, di): old = self.configuration._load_config_files @functools.wraps(old) def overridden(): # Manual Overload self.configuration._config[variant].update(di) self.configuration._parsers[variant].append((None, None)) return old() self.configuration._load_config_files = overridden @contextlib.contextmanager def tmpfile(self, contents): # Create a temporary file fd, path = tempfile.mkstemp( prefix="pip_", suffix="_config.ini", text=True ) os.close(fd) contents = textwrap.dedent(contents).lstrip() ensure_dir(os.path.dirname(path)) with open(path, "w") as f: f.write(contents) yield path os.remove(path) @staticmethod def get_file_contents(path): with open(path) as f: return f.read()
[]
[]
[]
archives/1346520853_-.zip
tests/lib/git_submodule_helpers.py
from __future__ import absolute_import import textwrap from tests.lib import _create_main_file, _git_commit def _create_test_package_submodule(env): env.scratch_path.join("version_pkg_submodule").mkdir() submodule_path = env.scratch_path / 'version_pkg_submodule' env.run('touch', 'testfile', cwd=submodule_path) env.run('git', 'init', cwd=submodule_path) env.run('git', 'add', '.', cwd=submodule_path) _git_commit(env, submodule_path, message='initial version / submodule') return submodule_path def _change_test_package_submodule(env, submodule_path): submodule_path.join("testfile").write("this is a changed file") submodule_path.join("testfile2").write("this is an added file") env.run('git', 'add', '.', cwd=submodule_path) _git_commit(env, submodule_path, message='submodule change') def _pull_in_submodule_changes_to_module(env, module_path, rel_path): """ Args: rel_path: the location of the submodule relative to the superproject. """ submodule_path = module_path / rel_path env.run('git', 'pull', '-q', 'origin', 'master', cwd=submodule_path) # Pass -a to stage the submodule changes that were just pulled in. _git_commit(env, module_path, message='submodule change', args=['-a']) def _create_test_package_with_submodule(env, rel_path): """ Args: rel_path: the location of the submodule relative to the superproject. 
""" env.scratch_path.join("version_pkg").mkdir() version_pkg_path = env.scratch_path / 'version_pkg' version_pkg_path.join("testpkg").mkdir() pkg_path = version_pkg_path / 'testpkg' pkg_path.join("__init__.py").write("# hello there") _create_main_file(pkg_path, name="version_pkg", output="0.1") version_pkg_path.join("setup.py").write(textwrap.dedent('''\ from setuptools import setup, find_packages setup(name='version_pkg', version='0.1', packages=find_packages(), ) ''')) env.run('git', 'init', cwd=version_pkg_path, expect_error=True) env.run('git', 'add', '.', cwd=version_pkg_path, expect_error=True) _git_commit(env, version_pkg_path, message='initial version') submodule_path = _create_test_package_submodule(env) env.run( 'git', 'submodule', 'add', submodule_path, rel_path, cwd=version_pkg_path, expect_error=True, ) _git_commit(env, version_pkg_path, message='initial version w submodule') return version_pkg_path, submodule_path
[]
[]
[]
archives/1346520853_-.zip
tests/lib/local_repos.py
from __future__ import absolute_import import os import subprocess from pip._vendor.six.moves.urllib import request as urllib_request from pip._internal.vcs import bazaar, git, mercurial, subversion from tests.lib import path_to_url def _create_initools_repository(directory): subprocess.check_call('svnadmin create INITools'.split(), cwd=directory) def _dump_initools_repository(directory): filename, _ = urllib_request.urlretrieve( 'http://bitbucket.org/hltbra/pip-initools-dump/raw/8b55c908a320/' 'INITools_modified.dump' ) initools_folder = os.path.join(directory, 'INITools') devnull = open(os.devnull, 'w') dump = open(filename) subprocess.check_call( ['svnadmin', 'load', initools_folder], stdin=dump, stdout=devnull, ) dump.close() devnull.close() os.remove(filename) def _create_svn_repository_for_initools(directory): if not os.path.exists(os.path.join(directory, 'INITools')): _create_initools_repository(directory) _dump_initools_repository(directory) def _get_vcs_and_checkout_url(remote_repository, directory): vcs_classes = {'svn': subversion.Subversion, 'git': git.Git, 'bzr': bazaar.Bazaar, 'hg': mercurial.Mercurial} default_vcs = 'svn' if '+' not in remote_repository: remote_repository = '%s+%s' % (default_vcs, remote_repository) vcs, repository_path = remote_repository.split('+', 1) vcs_class = vcs_classes[vcs] branch = '' if vcs == 'svn': branch = os.path.basename(remote_repository) # remove the slash repository_name = os.path.basename( remote_repository[:-len(branch) - 1] ) else: repository_name = os.path.basename(remote_repository) destination_path = os.path.join(directory, repository_name) if not os.path.exists(destination_path): vcs_class(remote_repository).obtain(destination_path) return '%s+%s' % ( vcs, path_to_url('/'.join([directory, repository_name, branch])), ) def local_checkout(remote_repo, directory): if not os.path.exists(directory): os.mkdir(directory) # os.makedirs(directory) if remote_repo.startswith('svn'): 
_create_svn_repository_for_initools(directory) return _get_vcs_and_checkout_url(remote_repo, directory) def local_repo(remote_repo, directory): return local_checkout(remote_repo, directory).split('+', 1)[1]
[]
[]
[]
archives/1346520853_-.zip
tests/lib/options_helpers.py
"""Provides helper classes for testing option handling in pip """ import os from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.commands import commands_dict from tests.lib.configuration_helpers import reset_os_environ class FakeCommand(Command): name = 'fake' summary = name def main(self, args): index_opts = cmdoptions.make_option_group( cmdoptions.index_group, self.parser, ) self.parser.add_option_group(index_opts) return self.parse_args(args) class AddFakeCommandMixin(object): def setup(self): self.environ_before = os.environ.copy() commands_dict[FakeCommand.name] = FakeCommand def teardown(self): reset_os_environ(self.environ_before) commands_dict.pop(FakeCommand.name)
[]
[]
[]
archives/1346520853_-.zip
tests/lib/path.py
# flake8: noqa # -*- coding: utf-8 -*- # Author: Aziz Köksal from __future__ import absolute_import import glob import os import shutil import sys from pip._vendor import six try: from os import supports_fd except ImportError: supports_fd = set() _base = six.text_type if os.path.supports_unicode_filenames else str class Path(_base): """ Models a path in an object oriented way. """ # File system path separator: '/' or '\'. sep = os.sep # Separator in the PATH environment variable. pathsep = os.pathsep def __new__(cls, *paths): if len(paths): return _base.__new__(cls, os.path.join(*paths)) return _base.__new__(cls) def __div__(self, path): """ Joins this path with another path. >>> path_obj / 'bc.d' >>> path_obj / path_obj2 """ return Path(self, path) __truediv__ = __div__ def __rdiv__(self, path): """ Joins this path with another path. >>> "/home/a" / path_obj """ return Path(path, self) __rtruediv__ = __rdiv__ def __idiv__(self, path): """ Like __div__ but also assigns to the variable. >>> path_obj /= 'bc.d' """ return Path(self, path) __itruediv__ = __idiv__ def __floordiv__(self, paths): """ Returns a list of paths prefixed with 'self'. >>> '/home/a' // [bc.d, ef.g] [/home/a/bc.d, /home/a/ef.g] """ return [Path(self, path) for path in paths] def __sub__(self, path): """ Makes this path relative to another path. >>> path_obj - '/home/a' >>> path_obj - path_obj2 """ return Path(os.path.relpath(self, path)) def __rsub__(self, path): """ Returns path relative to this path. 
>>> "/home/a" - path_obj """ return Path(os.path.relpath(path, self)) def __add__(self, path): """ >>> Path('/home/a') + 'bc.d' '/home/abc.d' """ return Path(_base(self) + path) def __radd__(self, path): """ >>> '/home/a' + Path('bc.d') '/home/abc.d' """ return Path(path + _base(self)) def __repr__(self): return u"Path(%s)" % _base.__repr__(self) def __hash__(self): return _base.__hash__(self) @property def name(self): """ '/home/a/bc.d' -> 'bc.d' """ return os.path.basename(self) @property def namebase(self): """ '/home/a/bc.d' -> 'bc' """ return self.noext.name @property def noext(self): """ '/home/a/bc.d' -> '/home/a/bc' """ return Path(os.path.splitext(self)[0]) @property def ext(self): """ '/home/a/bc.d' -> '.d' """ return Path(os.path.splitext(self)[1]) @property def abspath(self): """ './a/bc.d' -> '/home/a/bc.d' """ return Path(os.path.abspath(self)) @property def realpath(self): """ Resolves symbolic links. """ return Path(os.path.realpath(self)) @property def normpath(self): """ '/home/x/.././a//bc.d' -> '/home/a/bc.d' """ return Path(os.path.normpath(self)) @property def normcase(self): """ Deals with case-insensitive filesystems """ return Path(os.path.normcase(self)) @property def folder(self): """ Returns the folder of this path. '/home/a/bc.d' -> '/home/a' '/home/a/' -> '/home/a' '/home/a' -> '/home' """ return Path(os.path.dirname(self)) @property def exists(self): """ Returns True if the path exists. """ return os.path.exists(self) @property def atime(self): """ Returns last accessed time. """ return os.path.getatime(self) @property def mtime(self): """ Returns last modified time. """ return os.path.getmtime(self) @property def ctime(self): """ Returns last changed time. """ return os.path.getctime(self) @classmethod def supports_unicode(self): """ Returns True if the system can handle Unicode file names. """ return os.path.supports_unicode_filenames() def walk(self, **kwargs): """ Returns a generator that walks through a directory tree. 
""" return os.walk(self, **kwargs) def mkdir(self, mode=0x1FF): # 0o777 """ Creates a directory, if it doesn't exist already. """ if not self.exists: os.mkdir(self, mode) return self def makedirs(self, mode=0x1FF): # 0o777 """ Like mkdir(), but also creates parent directories. """ if not self.exists: os.makedirs(self, mode) return self def remove(self): """ Removes a file. """ return os.remove(self) rm = remove # Alias. def rmdir(self): """ Removes a directory. """ return os.rmdir(self) def rmtree(self, noerrors=True): """ Removes a directory tree. Ignores errors by default. """ return shutil.rmtree(self, ignore_errors=noerrors) def copy(self, to): return shutil.copy(self, to) def copytree(self, to): """ Copies a directory tree to another path. """ return shutil.copytree(self, to, symlinks=True) def move(self, to): """ Moves a file or directory to another path. """ return shutil.move(self, to) def rename(self, to): """ Renames a file or directory. May throw an OSError. """ return os.rename(self, to) def renames(self, to): return os.renames(self, to) def glob(self, pattern): return (Path(i) for i in glob.iglob(self.join(pattern))) def join(self, *parts): return Path(self, *parts) def read_text(self): with open(self, "r") as fp: return fp.read() def write(self, content): with open(self, "w") as fp: fp.write(content) def touch(self, times=None): with open(self, "a") as fp: os.utime(fp.fileno() if os.utime in supports_fd else self, times) curdir = Path(os.path.curdir)
[]
[]
[]
archives/1346520853_-.zip
tests/lib/scripttest.py
from __future__ import absolute_import from . import PipTestEnvironment # noqa
[]
[]
[]
archives/1346520853_-.zip
tests/lib/test_lib.py
"""Test the test support.""" from __future__ import absolute_import import filecmp import re from os.path import isdir, join from tests.lib import SRC_DIR def test_tmp_dir_exists_in_env(script): """ Test that $TMPDIR == env.temp_path and path exists and env.assert_no_temp() passes (in fast env) """ # need these tests to ensure the assert_no_temp feature of scripttest is # working script.assert_no_temp() # this fails if env.tmp_path doesn't exist assert script.environ['TMPDIR'] == script.temp_path assert isdir(script.temp_path) def test_correct_pip_version(script): """ Check we are running proper version of pip in run_pip. """ # output is like: # pip PIPVERSION from PIPDIRECTORY (python PYVERSION) result = script.pip('--version') # compare the directory tree of the invoked pip with that of this source # distribution pip_folder_outputed = re.match( r'pip \d+(\.[\d]+)+(\.?(b|rc|dev|pre|post)\d+)? from (.*) ' r'\(python \d(.[\d])+\)$', result.stdout ).group(4) pip_folder = join(SRC_DIR, 'src', 'pip') diffs = filecmp.dircmp(pip_folder, pip_folder_outputed) # If any non-matching .py files exist, we have a problem: run_pip # is picking up some other version! N.B. if this project acquires # primary resources other than .py files, this code will need # maintenance mismatch_py = [ x for x in diffs.left_only + diffs.right_only + diffs.diff_files if x.endswith('.py') ] assert not mismatch_py, ( 'mismatched source files in %r and %r: %r' % (pip_folder, pip_folder_outputed, mismatch_py) ) def test_as_import(script): """ test that pip.__init__.py does not shadow the command submodule with a dictionary """ import pip._internal.commands.install as inst assert inst is not None
[]
[]
[]
archives/1346520853_-.zip
tests/lib/venv.py
from __future__ import absolute_import

import compileall
import sys
import textwrap

import six
import virtualenv as _virtualenv

from .path import Path

if six.PY3:
    import venv as _venv


class VirtualEnvironment(object):
    """
    An abstraction around virtual environments, currently it only uses
    virtualenv but in the future it could use pyvenv.
    """

    def __init__(self, location, template=None, venv_type=None):
        # Either clone an existing environment (template) or pick a
        # creation backend (venv_type) -- never both.
        assert template is None or venv_type is None
        assert venv_type in (None, 'virtualenv', 'venv')
        self.location = Path(location)
        self._venv_type = venv_type or template._venv_type or 'virtualenv'
        self._user_site_packages = False
        self._template = template
        self._sitecustomize = None
        self._update_paths()
        self._create()

    def _update_paths(self):
        # Derive bin/, lib/ and site-packages paths for this environment.
        home, lib, inc, bin = _virtualenv.path_locations(self.location)
        self.bin = Path(bin)
        self.site = Path(lib) / 'site-packages'
        # Workaround for https://github.com/pypa/virtualenv/issues/306
        if hasattr(sys, "pypy_version_info"):
            version_fmt = '{0}' if six.PY3 else '{0}.{1}'
            version_dir = version_fmt.format(*sys.version_info)
            self.lib = Path(home, 'lib-python', version_dir)
        else:
            self.lib = Path(lib)

    def __repr__(self):
        return "<VirtualEnvironment {}>".format(self.location)

    def _create(self, clear=False):
        # Build (or rebuild, when clear=True) the environment on disk.
        if clear:
            self.location.rmtree()
        if self._template:
            # On Windows, calling `_virtualenv.path_locations(target)`
            # will have created the `target` directory...
            if sys.platform == 'win32' and self.location.exists:
                self.location.rmdir()
            # Clone virtual environment from template.
            self._template.location.copytree(self.location)
            self._sitecustomize = self._template.sitecustomize
            self._user_site_packages = self._template.user_site_packages
        else:
            # Create a new virtual environment.
            if self._venv_type == 'virtualenv':
                _virtualenv.create_environment(
                    self.location,
                    no_pip=True,
                    no_wheel=True,
                    no_setuptools=True,
                )
                self._fix_virtualenv_site_module()
            elif self._venv_type == 'venv':
                builder = _venv.EnvBuilder()
                context = builder.ensure_directories(self.location)
                builder.create_configuration(context)
                builder.setup_python(context)
                self.site.makedirs()
        # Going through the property setters re-applies the recorded
        # customization state to the freshly created environment.
        self.sitecustomize = self._sitecustomize
        self.user_site_packages = self._user_site_packages

    def _fix_virtualenv_site_module(self):
        # Patch `site.py` so user site work as expected.
        site_py = self.lib / 'site.py'
        with open(site_py) as fp:
            site_contents = fp.read()
        for pattern, replace in (
            (
                # Ensure enabling user site does not result in adding
                # the real site-packages' directory to `sys.path`.
                (
                    '\ndef virtual_addsitepackages(known_paths):\n'
                ),
                (
                    '\ndef virtual_addsitepackages(known_paths):\n'
                    '    return known_paths\n'
                ),
            ),
            (
                # Fix sites ordering: user site must be added before system.
                (
                    '\n    paths_in_sys = addsitepackages(paths_in_sys)'
                    '\n    paths_in_sys = addusersitepackages(paths_in_sys)\n'
                ),
                (
                    '\n    paths_in_sys = addusersitepackages(paths_in_sys)'
                    '\n    paths_in_sys = addsitepackages(paths_in_sys)\n'
                ),
            ),
        ):
            # The assert guards against a virtualenv release changing the
            # site.py text we are patching.
            assert pattern in site_contents
            site_contents = site_contents.replace(pattern, replace)
        with open(site_py, 'w') as fp:
            fp.write(site_contents)
        # Make sure bytecode is up-to-date too.
        assert compileall.compile_file(str(site_py), quiet=1, force=True)

    def _customize_site(self):
        # (Re)write site-packages/sitecustomize.py from the recorded state.
        contents = ''
        if self._venv_type == 'venv':
            # Enable user site (before system).
            contents += textwrap.dedent(
                '''
                import os, site, sys

                if not os.environ.get('PYTHONNOUSERSITE', False):
                    site.ENABLE_USER_SITE = True

                    # First, drop system-sites related paths.
                    original_sys_path = sys.path[:]
                    known_paths = set()
                    for path in site.getsitepackages():
                        site.addsitedir(path, known_paths=known_paths)
                    system_paths = sys.path[len(original_sys_path):]
                    for path in system_paths:
                        if path in original_sys_path:
                            original_sys_path.remove(path)
                    sys.path = original_sys_path

                    # Second, add user-site.
                    site.addsitedir(site.getusersitepackages())

                    # Third, add back system-sites related paths.
                    for path in site.getsitepackages():
                        site.addsitedir(path)
                ''').strip()
        if self._sitecustomize is not None:
            contents += '\n' + self._sitecustomize
        sitecustomize = self.site / "sitecustomize.py"
        sitecustomize.write(contents)
        # Make sure bytecode is up-to-date too.
        assert compileall.compile_file(str(sitecustomize), quiet=1,
                                       force=True)

    def clear(self):
        # Recreate the environment from scratch.
        self._create(clear=True)

    def move(self, location):
        # Relocate on disk and refresh the derived paths.
        self.location.move(location)
        self.location = Path(location)
        self._update_paths()

    @property
    def sitecustomize(self):
        return self._sitecustomize

    @sitecustomize.setter
    def sitecustomize(self, value):
        # Setting the snippet immediately rewrites sitecustomize.py.
        self._sitecustomize = value
        self._customize_site()

    @property
    def user_site_packages(self):
        return self._user_site_packages

    @user_site_packages.setter
    def user_site_packages(self, value):
        self._user_site_packages = value
        if self._venv_type == 'virtualenv':
            # virtualenv toggles user site via a marker file: its absence
            # enables the user site.
            marker = self.lib / "no-global-site-packages.txt"
            if self._user_site_packages:
                marker.rm()
            else:
                marker.touch()
        elif self._venv_type == 'venv':
            self._customize_site()
[]
[]
[]
archives/1346520853_-.zip
tests/lib/yaml_helpers.py
""" """ import pytest import yaml def generate_yaml_tests(directory): for yml_file in directory.glob("*/*.yml"): data = yaml.safe_load(yml_file.read_text()) assert "cases" in data, "A fixture needs cases to be used in testing" # Strip the parts of the directory to only get a name without # extension and resolver directory base_name = str(yml_file)[len(str(directory)) + 1:-4] base = data.get("base", {}) cases = data["cases"] for i, case_template in enumerate(cases): case = base.copy() case.update(case_template) case[":name:"] = base_name if len(cases) > 1: case[":name:"] += "-" + str(i) if case.pop("skip", False): case = pytest.param(case, marks=pytest.mark.xfail) yield case def id_func(param): """Give a nice parameter name to the generated function parameters """ if isinstance(param, dict) and ":name:" in param: return param[":name:"] retval = str(param) if len(retval) > 25: retval = retval[:20] + "..." + retval[-2:] return retval
[]
[]
[]
archives/1346520853_-.zip
tests/scripts/test_all_pip.py
"""Smoke-test `pip install` (with an easy_install fallback check) for every
project on PyPI, recording successes and failures under an output directory.

Usage: test_all_pip.py <output-dir>
"""
import os
import re
import subprocess
import sys
from os.path import abspath, dirname

from pip._vendor.six.moves.urllib import request as urllib_request

from pip._internal.utils.misc import rmtree

src_folder = dirname(dirname(abspath(__file__)))

if sys.platform == 'win32':
    bin_dir = 'Scripts'
else:
    bin_dir = 'bin'


def all_projects():
    """Return every project name linked from the PyPI simple index."""
    # BUG FIX: urlopen().read() returns bytes; on Python 3 applying a str
    # regex to bytes raises TypeError, so decode first.
    data = urllib_request.urlopen('http://pypi.org/simple/').read()
    data = data.decode('utf-8')
    projects = [m.group(1) for m in re.finditer(r'<a.*?>(.+)</a>', data)]
    return projects


def main(args=None):
    """Build/refresh the pending list, then test packages until it is empty.

    ``args`` defaults to ``sys.argv[1:]``; the single positional argument is
    the output directory.
    """
    if args is None:
        args = sys.argv[1:]
    if not args:
        print('Usage: test_all_pip.py <output-dir>')
        sys.exit(1)
    output = os.path.abspath(args[0])
    if not os.path.exists(output):
        print('Creating %s' % output)
        os.makedirs(output)
    pending_fn = os.path.join(output, 'pending.txt')
    if not os.path.exists(pending_fn):
        print('Downloading pending list')
        projects = all_projects()
        print('Found %s projects' % len(projects))
        with open(pending_fn, 'w') as f:
            for name in projects:
                f.write(name + '\n')
    print('Starting testing...')
    while os.stat(pending_fn).st_size:
        _test_packages(output, pending_fn)
    print('Finished all pending!')


def _test_packages(output, pending_fn):
    """Install the last pending package in a fresh virtualenv and record
    the result in success.txt / failure.txt / easy-failure.txt.
    """
    package = get_last_item(pending_fn)
    print('Testing package %s' % package)
    dest_dir = os.path.join(output, package)
    print('Creating virtualenv in %s' % dest_dir)
    create_venv(dest_dir)
    print('Uninstalling actual pip')
    # BUG FIX: subprocess.check_call raises on a nonzero exit status, which
    # made every `assert not code` / `if code:` failure branch below dead
    # code (the easy_install fallback could never run). subprocess.call
    # returns the exit status so those branches work as intended.
    code = subprocess.call([
        os.path.join(dest_dir, bin_dir, 'pip'), 'uninstall', '-y', 'pip',
    ])
    assert not code, 'pip uninstallation failed'
    print('Installing development pip')
    code = subprocess.call(
        [
            os.path.join(dest_dir, bin_dir, 'python'),
            'setup.py',
            'install'
        ],
        cwd=src_folder,
    )
    assert not code, 'pip installation failed'
    print('Trying installation of %s' % dest_dir)
    code = subprocess.call([
        os.path.join(dest_dir, bin_dir, 'pip'), 'install', package,
    ])
    if code:
        print('Installation of %s failed' % package)
        print('Now checking easy_install...')
        create_venv(dest_dir)
        code = subprocess.call([
            os.path.join(dest_dir, bin_dir, 'easy_install'),
            package,
        ])
        if code:
            print('easy_install also failed')
            add_package(os.path.join(output, 'easy-failure.txt'), package)
        else:
            print('easy_install succeeded')
            add_package(os.path.join(output, 'failure.txt'), package)
        pop_last_item(pending_fn, package)
    else:
        print('Installation of %s succeeded' % package)
        add_package(os.path.join(output, 'success.txt'), package)
        pop_last_item(pending_fn, package)
        rmtree(dest_dir)


def create_venv(dest_dir):
    """Create a fresh virtualenv at ``dest_dir``, removing any existing one."""
    if os.path.exists(dest_dir):
        rmtree(dest_dir)
    print('Creating virtualenv in %s' % dest_dir)
    code = subprocess.call([
        'virtualenv', '--no-site-packages', dest_dir,
    ])
    assert not code, "virtualenv failed"


def get_last_item(fn):
    """Return the last line of ``fn``, stripped."""
    with open(fn, 'r') as f:
        lines = f.readlines()
    return lines[-1].strip()


def pop_last_item(fn, line=None):
    """Remove the last line of ``fn``; if ``line`` is given, assert it
    matches the removed line.
    """
    with open(fn, 'r') as f:
        lines = f.readlines()
    if line:
        assert lines[-1].strip() == line.strip()
    lines.pop()
    with open(fn, 'w') as f:
        f.writelines(lines)


def add_package(filename, package):
    """Append ``package`` on its own line to ``filename``."""
    with open(filename, 'a') as f:
        f.write(package + '\n')


if __name__ == '__main__':
    main()
[]
[]
[]
archives/1346520853_-.zip
tests/unit/__init__.py
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_appdirs.py
"""Unit tests for pip._internal.utils.appdirs directory helpers."""
import ntpath
import os
import posixpath
import sys

import pretend

from pip._internal.utils import appdirs


class TestUserCacheDir:
    """Tests for appdirs.user_cache_dir on each platform."""

    def test_user_cache_dir_win(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert (appdirs.user_cache_dir("pip") ==
                "C:\\Users\\test\\AppData\\Local\\pip\\Cache")
        assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]

    def test_user_cache_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")

        assert appdirs.user_cache_dir("pip") == "/home/test/Library/Caches/pip"

    def test_user_cache_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_cache_dir("pip") == "/home/test/.cache/pip"

    def test_user_cache_dir_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_CACHE_HOME", "/home/test/.other-cache")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_cache_dir("pip") == "/home/test/.other-cache/pip"

    def test_user_cache_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_cache_dir("pip") == "/.cache/pip"

    def test_user_cache_dir_unicode(self, monkeypatch):
        if sys.platform != 'win32':
            return

        def my_get_win_folder(csidl_name):
            return u"\u00DF\u00E4\u03B1\u20AC"

        monkeypatch.setattr(appdirs, "_get_win_folder", my_get_win_folder)

        # Do not use the isinstance expression directly in the
        # assert statement, as the Unicode characters in the result
        # cause pytest to fail with an internal error on Python 2.7
        result_is_str = isinstance(appdirs.user_cache_dir('test'), str)
        assert result_is_str, "user_cache_dir did not return a str"

        # Test against regression #3463
        from pip._internal.cli.main_parser import create_main_parser
        create_main_parser().print_help()  # This should not crash


class TestSiteConfigDirs:
    """Tests for appdirs.site_config_dirs on each platform."""

    def test_site_config_dirs_win(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\ProgramData"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert appdirs.site_config_dirs("pip") == ["C:\\ProgramData\\pip"]
        assert _get_win_folder.calls == [pretend.call("CSIDL_COMMON_APPDATA")]

    def test_site_config_dirs_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")

        assert appdirs.site_config_dirs("pip") == \
            ["/Library/Application Support/pip"]

    def test_site_config_dirs_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CONFIG_DIRS", raising=False)
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.site_config_dirs("pip") == [
            '/etc/xdg/pip',
            '/etc'
        ]

    def test_site_config_dirs_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setattr(os, "pathsep", ':')
        monkeypatch.setenv("XDG_CONFIG_DIRS", "/spam:/etc:/etc/xdg")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.site_config_dirs("pip") == [
            '/spam/pip',
            '/etc/pip',
            '/etc/xdg/pip',
            '/etc'
        ]


class TestUserDataDir:
    """Tests for appdirs.user_data_dir on each platform."""

    def test_user_data_dir_win_no_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert (appdirs.user_data_dir("pip") ==
                "C:\\Users\\test\\AppData\\Local\\pip")
        assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]

    def test_user_data_dir_win_yes_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Roaming"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert (
            appdirs.user_data_dir("pip", roaming=True) ==
            "C:\\Users\\test\\AppData\\Roaming\\pip"
        )
        assert _get_win_folder.calls == [pretend.call("CSIDL_APPDATA")]

    def test_user_data_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")

        if os.path.isdir('/home/test/Library/Application Support/'):
            assert (appdirs.user_data_dir("pip") ==
                    "/home/test/Library/Application Support/pip")
        else:
            assert (appdirs.user_data_dir("pip") ==
                    "/home/test/.config/pip")

    def test_user_data_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_DATA_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_data_dir("pip") == "/home/test/.local/share/pip"

    def test_user_data_dir_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_DATA_HOME", "/home/test/.other-share")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_data_dir("pip") == "/home/test/.other-share/pip"

    def test_user_data_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_DATA_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_data_dir("pip") == "/.local/share/pip"


class TestUserConfigDir:
    """Tests for appdirs.user_config_dir on each platform."""

    def test_user_config_dir_win_no_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert (
            appdirs.user_config_dir("pip", roaming=False) ==
            "C:\\Users\\test\\AppData\\Local\\pip"
        )
        assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]

    def test_user_config_dir_win_yes_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Roaming"

        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)

        assert (appdirs.user_config_dir("pip") ==
                "C:\\Users\\test\\AppData\\Roaming\\pip")
        assert _get_win_folder.calls == [pretend.call("CSIDL_APPDATA")]

    def test_user_config_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")

        # BUG FIX: this test previously asserted on appdirs.user_data_dir
        # (a copy-paste of TestUserDataDir.test_user_data_dir_osx) and so
        # never exercised user_config_dir at all. NOTE(review): assumes
        # user_config_dir on darwin mirrors user_data_dir's expectations --
        # confirm against the appdirs implementation.
        if os.path.isdir('/home/test/Library/Application Support/'):
            assert (appdirs.user_config_dir("pip") ==
                    "/home/test/Library/Application Support/pip")
        else:
            assert (appdirs.user_config_dir("pip") ==
                    "/home/test/.config/pip")

    def test_user_config_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_config_dir("pip") == "/home/test/.config/pip"

    def test_user_config_dir_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_CONFIG_HOME", "/home/test/.other-config")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_config_dir("pip") == "/home/test/.other-config/pip"

    def test_user_config_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")

        assert appdirs.user_config_dir("pip") == "/.config/pip"
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_base_command.py
"""Tests for pip._internal.cli.base_command.Command logging behaviour."""
import logging
import os
import time

from pip._internal.cli.base_command import Command
from pip._internal.utils.logging import BrokenStdoutLoggingError


class FakeCommand(Command):
    """Minimal Command whose run() delegates to an injected callable."""
    name = 'fake'
    summary = name

    def __init__(self, run_func=None, error=False):
        if error:
            def run_func():
                raise SystemExit(1)

        self.run_func = run_func
        super(FakeCommand, self).__init__()

    def main(self, args):
        # Keep tests hermetic: never hit the network for a version check.
        args.append("--disable-pip-version-check")
        return super(FakeCommand, self).main(args)

    def run(self, options, args):
        logging.getLogger("pip.tests").info("fake")
        if self.run_func:
            return self.run_func()


class FakeCommandWithUnicode(FakeCommand):
    """FakeCommand variant that logs both bytes and unicode messages."""
    name = 'fake_unicode'
    summary = name

    def run(self, options, args):
        logging.getLogger("pip.tests").info(b"bytes here \xE9")
        logging.getLogger("pip.tests").info(
            b"unicode here \xC3\xA9".decode("utf-8")
        )


class TestCommand(object):

    def call_main(self, capsys, args):
        """
        Call command.main(), and return the command's stderr.
        """
        def raise_broken_stdout():
            raise BrokenStdoutLoggingError()

        cmd = FakeCommand(run_func=raise_broken_stdout)
        status = cmd.main(args)
        assert status == 1
        stderr = capsys.readouterr().err

        return stderr

    def test_raise_broken_stdout(self, capsys):
        """
        Test raising BrokenStdoutLoggingError.
        """
        stderr = self.call_main(capsys, [])

        assert stderr.rstrip() == 'ERROR: Pipe to stdout was broken'

    def test_raise_broken_stdout__debug_logging(self, capsys):
        """
        Test raising BrokenStdoutLoggingError with debug logging enabled.
        """
        stderr = self.call_main(capsys, ['-v'])

        assert 'ERROR: Pipe to stdout was broken' in stderr
        assert 'Traceback (most recent call last):' in stderr


class Test_base_command_logging(object):
    """
    Test `pip.base_command.Command` setting up logging consumers based on
    options
    """

    def setup(self):
        # Freeze time and force UTC so the timestamps logged by --log /
        # --log-file are deterministic.
        self.old_time = time.time
        time.time = lambda: 1547704837.4
        self.old_tz = os.environ.get('TZ')
        os.environ['TZ'] = 'UTC'
        # time.tzset() is not implemented on some platforms (notably,
        # Windows).
        if hasattr(time, 'tzset'):
            time.tzset()

    def teardown(self):
        # BUG FIX: compare against None (not truthiness), so an originally
        # empty TZ value is restored rather than deleted; also use
        # hasattr() for consistency with setup().
        if self.old_tz is not None:
            os.environ['TZ'] = self.old_tz
        else:
            del os.environ['TZ']
        if hasattr(time, 'tzset'):
            time.tzset()
        time.time = self.old_time

    def test_log_command_success(self, tmpdir):
        """
        Test the --log option logs when command succeeds
        """
        cmd = FakeCommand()
        log_path = tmpdir.join('log')
        cmd.main(['fake', '--log', log_path])
        with open(log_path) as f:
            assert f.read().rstrip() == '2019-01-17T06:00:37 fake'

    def test_log_command_error(self, tmpdir):
        """
        Test the --log option logs when command fails
        """
        cmd = FakeCommand(error=True)
        log_path = tmpdir.join('log')
        cmd.main(['fake', '--log', log_path])
        with open(log_path) as f:
            assert f.read().startswith('2019-01-17T06:00:37 fake')

    def test_log_file_command_error(self, tmpdir):
        """
        Test the --log-file option logs (when there's an error).
        """
        cmd = FakeCommand(error=True)
        log_file_path = tmpdir.join('log_file')
        cmd.main(['fake', '--log-file', log_file_path])
        with open(log_file_path) as f:
            assert f.read().startswith('2019-01-17T06:00:37 fake')

    def test_unicode_messages(self, tmpdir):
        """
        Tests that logging bytestrings and unicode objects don't break
        logging
        """
        cmd = FakeCommandWithUnicode()
        log_path = tmpdir.join('log')
        cmd.main(['fake_unicode', '--log', log_path])
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_build_env.py
from textwrap import dedent

import pytest

from pip._internal.build_env import BuildEnvironment
from pip._internal.download import PipSession
from pip._internal.index import PackageFinder
from tests.lib import create_basic_wheel_for_package


def indent(text, prefix):
    # Prefix every non-empty line of `text` with `prefix`.
    return '\n'.join((prefix if line else '') + line
                     for line in text.split('\n'))


def run_with_build_env(script, setup_script_contents,
                       test_script_contents=None):
    # Generate and run a script that sets up a BuildEnvironment, executes
    # `setup_script_contents` inside a try block, and (when a test script
    # is given) runs it inside the activated build environment.
    # NOTE(review): the embedded scripts below are runtime string literals
    # whose exact indentation was reconstructed; it matters to dedent().
    build_env_script = script.scratch_path / 'build_env.py'
    build_env_script.write(
        dedent(
            '''
            from __future__ import print_function
            import subprocess
            import sys

            from pip._internal.build_env import BuildEnvironment
            from pip._internal.download import PipSession
            from pip._internal.index import PackageFinder

            finder = PackageFinder([%r], [], session=PipSession())
            build_env = BuildEnvironment()

            try:
            ''' % str(script.scratch_path)) +
        indent(dedent(setup_script_contents), '    ') +
        dedent(
            '''
                if len(sys.argv) > 1:
                    with build_env:
                        subprocess.check_call((sys.executable, sys.argv[1]))
            finally:
                build_env.cleanup()
            ''')
    )
    args = ['python', build_env_script]
    if test_script_contents is not None:
        test_script = script.scratch_path / 'test.py'
        test_script.write(dedent(test_script_contents))
        args.append(test_script)
    return script.run(*args)


def test_build_env_allow_empty_requirements_install():
    # Installing an empty requirement list needs neither finder nor message.
    build_env = BuildEnvironment()
    for prefix in ('normal', 'overlay'):
        build_env.install_requirements(None, [], prefix, None)


def test_build_env_allow_only_one_install(script):
    # A second install into the same prefix must raise AssertionError.
    create_basic_wheel_for_package(script, 'foo', '1.0')
    create_basic_wheel_for_package(script, 'bar', '1.0')
    finder = PackageFinder([script.scratch_path], [], session=PipSession())
    build_env = BuildEnvironment()
    for prefix in ('normal', 'overlay'):
        build_env.install_requirements(finder, ['foo'], prefix,
                                       'installing foo in %s' % prefix)
        with pytest.raises(AssertionError):
            build_env.install_requirements(finder, ['bar'], prefix,
                                           'installing bar in %s' % prefix)
        with pytest.raises(AssertionError):
            build_env.install_requirements(finder, [], prefix,
                                           'installing in %s' % prefix)


def test_build_env_requirements_check(script):
    # check_requirements returns (conflicting, missing) pairs; exercise it
    # with nothing installed, one prefix, and both prefixes populated.
    create_basic_wheel_for_package(script, 'foo', '2.0')
    create_basic_wheel_for_package(script, 'bar', '1.0')
    create_basic_wheel_for_package(script, 'bar', '3.0')
    create_basic_wheel_for_package(script, 'other', '0.5')
    script.pip_install_local('-f', script.scratch_path,
                             'foo', 'bar', 'other')

    run_with_build_env(
        script,
        '''
        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'foo', 'bar', 'other'}), repr(r)

        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == (set(), {'foo>1.0', 'bar==3.0'}), repr(r)

        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == (set(), {'foo>3.0', 'bar>=2.5'}), repr(r)
        ''')

    run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
                                       'installing foo in normal')

        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'other'}), repr(r)

        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == (set(), set()), repr(r)

        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == ({('foo==2.0', 'foo>3.0')}, set()), repr(r)
        ''')

    run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
                                       'installing foo in normal')
        build_env.install_requirements(finder, ['bar==1.0'], 'overlay',
                                       'installing foo in overlay')

        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'other'}), repr(r)

        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == ({('bar==1.0', 'bar==3.0')}, set()), repr(r)

        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == ({('bar==1.0', 'bar>=2.5'), ('foo==2.0', 'foo>3.0')}, \
set()), repr(r)
        ''')


def test_build_env_overlay_prefix_has_priority(script):
    # The overlay prefix must shadow the normal prefix on sys.path.
    create_basic_wheel_for_package(script, 'pkg', '2.0')
    create_basic_wheel_for_package(script, 'pkg', '4.3')
    result = run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['pkg==2.0'], 'overlay',
                                       'installing pkg==2.0 in overlay')
        build_env.install_requirements(finder, ['pkg==4.3'], 'normal',
                                       'installing pkg==4.3 in normal')
        ''',
        '''
        from __future__ import print_function

        print(__import__('pkg').__version__)
        ''')
    assert result.stdout.strip() == '2.0', str(result)


def test_build_env_isolation(script):
    # Install `pkg` through every discovery channel (site-packages, user
    # site, .pth file, PYTHONPATH) and verify none of them is visible
    # inside the activated build environment.

    # Create dummy `pkg` wheel.
    pkg_whl = create_basic_wheel_for_package(script, 'pkg', '1.0')

    # Install it to site packages.
    script.pip_install_local(pkg_whl)

    # And a copy in the user site.
    script.pip_install_local('--ignore-installed', '--user', pkg_whl)

    # And to another directory available through a .pth file.
    target = script.scratch_path / 'pth_install'
    script.pip_install_local('-t', target, pkg_whl)
    (script.site_packages_path / 'build_requires.pth').write(
        str(target) + '\n'
    )

    # And finally to yet another directory available through PYTHONPATH.
    target = script.scratch_path / 'pypath_install'
    script.pip_install_local('-t', target, pkg_whl)
    script.environ["PYTHONPATH"] = target

    run_with_build_env(
        script, '',
        r'''
        from __future__ import print_function
        from distutils.sysconfig import get_python_lib
        import sys

        try:
            import pkg
        except ImportError:
            pass
        else:
            print('imported `pkg` from `%s`' % pkg.__file__, file=sys.stderr)
            print('system sites:\n  ' + '\n  '.join(sorted({
                get_python_lib(plat_specific=0),
                get_python_lib(plat_specific=1),
            })), file=sys.stderr)
            print('sys.path:\n  ' + '\n  '.join(sys.path), file=sys.stderr)
            sys.exit(1)
        ''')
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_cache.py
from pip._internal.cache import WheelCache
from pip._internal.utils.compat import expanduser


class TestWheelCache:
    """Sanity checks for WheelCache's cache_dir handling."""

    def test_expands_path(self):
        # A "~"-prefixed cache directory is expanded to the user's home.
        cache = WheelCache("~/.foo/", None)
        assert cache.cache_dir == expanduser("~/.foo/")

    def test_falsey_path_none(self):
        # A falsey cache directory disables caching entirely.
        cache = WheelCache(False, None)
        assert cache.cache_dir is None
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_check.py
"""Unit Tests for pip's dependency checking logic """ import mock from pip._internal.operations import check class TestInstalledDistributionsCall(object): def test_passes_correct_default_kwargs(self, monkeypatch): my_mock = mock.MagicMock(return_value=[]) monkeypatch.setattr(check, "get_installed_distributions", my_mock) check.create_package_set_from_installed() my_mock.assert_called_with(local_only=False, skip=()) def test_passes_any_given_kwargs(self, monkeypatch): my_mock = mock.MagicMock(return_value=[]) monkeypatch.setattr(check, "get_installed_distributions", my_mock) obj = object() check.create_package_set_from_installed(hi=obj) my_mock.assert_called_with(hi=obj)
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_command_install.py
from mock import Mock, call, patch from pip._internal.commands.install import build_wheels class TestWheelCache: def check_build_wheels( self, pep517_requirements, legacy_requirements, session, ): """ Return: (mock_calls, return_value). """ def build(reqs, **kwargs): # Fail the first requirement. return [reqs[0]] builder = Mock() builder.build.side_effect = build build_failures = build_wheels( builder=builder, pep517_requirements=pep517_requirements, legacy_requirements=legacy_requirements, # A session value isn't needed. session='<session>', ) return (builder.build.mock_calls, build_failures) @patch('pip._internal.commands.install.is_wheel_installed') def test_build_wheels__wheel_installed(self, is_wheel_installed): is_wheel_installed.return_value = True mock_calls, build_failures = self.check_build_wheels( pep517_requirements=['a', 'b'], legacy_requirements=['c', 'd'], session='<session>', ) # Legacy requirements were built. assert mock_calls == [ call(['a', 'b'], autobuilding=True, session='<session>'), call(['c', 'd'], autobuilding=True, session='<session>'), ] # Legacy build failures are not included in the return value. assert build_failures == ['a'] @patch('pip._internal.commands.install.is_wheel_installed') def test_build_wheels__wheel_not_installed(self, is_wheel_installed): is_wheel_installed.return_value = False mock_calls, build_failures = self.check_build_wheels( pep517_requirements=['a', 'b'], legacy_requirements=['c', 'd'], session='<session>', ) # Legacy requirements were not built. assert mock_calls == [ call(['a', 'b'], autobuilding=True, session='<session>'), ] assert build_failures == ['a']
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_compat.py
import locale import os import pytest import pip._internal.utils.compat as pip_compat from pip._internal.utils.compat import ( console_to_str, expanduser, get_path_uid, native_str, ) def test_get_path_uid(): path = os.getcwd() assert get_path_uid(path) == os.stat(path).st_uid @pytest.mark.skipif("not hasattr(os, 'O_NOFOLLOW')") def test_get_path_uid_without_NOFOLLOW(monkeypatch): monkeypatch.delattr("os.O_NOFOLLOW") path = os.getcwd() assert get_path_uid(path) == os.stat(path).st_uid # Skip unconditionally on Windows, as symlinks need admin privs there @pytest.mark.skipif("sys.platform == 'win32'") @pytest.mark.skipif("not hasattr(os, 'symlink')") def test_get_path_uid_symlink(tmpdir): f = tmpdir.mkdir("symlink").join("somefile") f.write("content") fs = f + '_link' os.symlink(f, fs) with pytest.raises(OSError): get_path_uid(fs) @pytest.mark.skipif("not hasattr(os, 'O_NOFOLLOW')") @pytest.mark.skipif("not hasattr(os, 'symlink')") def test_get_path_uid_symlink_without_NOFOLLOW(tmpdir, monkeypatch): monkeypatch.delattr("os.O_NOFOLLOW") f = tmpdir.mkdir("symlink").join("somefile") f.write("content") fs = f + '_link' os.symlink(f, fs) with pytest.raises(OSError): get_path_uid(fs) def test_console_to_str(monkeypatch): some_bytes = b"a\xE9\xC3\xE9b" encodings = ('ascii', 'utf-8', 'iso-8859-1', 'iso-8859-5', 'koi8_r', 'cp850') for e in encodings: monkeypatch.setattr(locale, 'getpreferredencoding', lambda: e) result = console_to_str(some_bytes) assert result.startswith("a") assert result.endswith("b") def test_console_to_str_warning(monkeypatch): some_bytes = b"a\xE9b" def check_warning(msg, *args, **kwargs): assert msg.startswith( "Subprocess output does not appear to be encoded as") monkeypatch.setattr(locale, 'getpreferredencoding', lambda: 'utf-8') monkeypatch.setattr(pip_compat.logger, 'warning', check_warning) console_to_str(some_bytes) def test_to_native_str_type(): some_bytes = b"test\xE9 et approuv\xC3\xE9" some_unicode = b"test\xE9 et 
approuv\xE9".decode('iso-8859-15') assert isinstance(native_str(some_bytes, True), str) assert isinstance(native_str(some_unicode, True), str) @pytest.mark.parametrize("home,path,expanded", [ ("/Users/test", "~", "/Users/test"), ("/Users/test", "~/.cache", "/Users/test/.cache"), # Verify that we are not affected by https://bugs.python.org/issue14768 ("/", "~", "/"), ("/", "~/.cache", "/.cache"), ]) def test_expanduser(home, path, expanded, monkeypatch): monkeypatch.setenv("HOME", home) assert expanduser(path) == expanded
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_configuration.py
"""Tests for all things related to the configuration """ import os import pytest from mock import MagicMock from pip._internal.exceptions import ConfigurationError from pip._internal.locations import ( global_config_files, new_config_file, site_config_file, ) from tests.lib.configuration_helpers import ConfigurationMixin, kinds class TestConfigurationLoading(ConfigurationMixin): def test_global_loading(self): self.patch_configuration(kinds.GLOBAL, {"test.hello": "1"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "1" def test_user_loading(self): self.patch_configuration(kinds.USER, {"test.hello": "2"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "2" def test_site_loading(self): self.patch_configuration(kinds.SITE, {"test.hello": "3"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "3" def test_environment_config_loading(self): contents = """ [test] hello = 4 """ with self.tmpfile(contents) as config_file: os.environ["PIP_CONFIG_FILE"] = config_file self.configuration.load() assert self.configuration.get_value("test.hello") == "4", \ self.configuration._config def test_environment_var_loading(self): os.environ["PIP_HELLO"] = "5" self.configuration.load() assert self.configuration.get_value(":env:.hello") == "5" @pytest.mark.skipif("sys.platform == 'win32'") def test_environment_var_does_not_load_lowercase(self): os.environ["pip_hello"] = "5" self.configuration.load() with pytest.raises(ConfigurationError): self.configuration.get_value(":env:.hello") def test_environment_var_does_not_load_version(self): os.environ["PIP_VERSION"] = "True" self.configuration.load() with pytest.raises(ConfigurationError): self.configuration.get_value(":env:.version") def test_environment_config_errors_if_malformed(self): contents = """ test] hello = 4 """ with self.tmpfile(contents) as config_file: os.environ["PIP_CONFIG_FILE"] = config_file with pytest.raises(ConfigurationError) as err: 
self.configuration.load() assert "section header" in str(err.value) # error kind assert "1" in str(err.value) # line number assert ( # file name config_file in str(err.value) or repr(config_file) in str(err.value) ) class TestConfigurationPrecedence(ConfigurationMixin): # Tests for methods to that determine the order of precedence of # configuration options def test_env_overides_site(self): self.patch_configuration(kinds.SITE, {"test.hello": "1"}) self.patch_configuration(kinds.ENV, {"test.hello": "0"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "0" def test_env_overides_user(self): self.patch_configuration(kinds.USER, {"test.hello": "2"}) self.patch_configuration(kinds.ENV, {"test.hello": "0"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "0" def test_env_overides_global(self): self.patch_configuration(kinds.GLOBAL, {"test.hello": "3"}) self.patch_configuration(kinds.ENV, {"test.hello": "0"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "0" def test_site_overides_user(self): self.patch_configuration(kinds.USER, {"test.hello": "2"}) self.patch_configuration(kinds.SITE, {"test.hello": "1"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "1" def test_site_overides_global(self): self.patch_configuration(kinds.GLOBAL, {"test.hello": "3"}) self.patch_configuration(kinds.SITE, {"test.hello": "1"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "1" def test_user_overides_global(self): self.patch_configuration(kinds.GLOBAL, {"test.hello": "3"}) self.patch_configuration(kinds.USER, {"test.hello": "2"}) self.configuration.load() assert self.configuration.get_value("test.hello") == "2" def test_env_not_overriden_by_environment_var(self): self.patch_configuration(kinds.ENV, {"test.hello": "1"}) os.environ["PIP_HELLO"] = "5" self.configuration.load() assert self.configuration.get_value("test.hello") == "1" assert 
self.configuration.get_value(":env:.hello") == "5" def test_site_not_overriden_by_environment_var(self): self.patch_configuration(kinds.SITE, {"test.hello": "2"}) os.environ["PIP_HELLO"] = "5" self.configuration.load() assert self.configuration.get_value("test.hello") == "2" assert self.configuration.get_value(":env:.hello") == "5" def test_user_not_overriden_by_environment_var(self): self.patch_configuration(kinds.USER, {"test.hello": "3"}) os.environ["PIP_HELLO"] = "5" self.configuration.load() assert self.configuration.get_value("test.hello") == "3" assert self.configuration.get_value(":env:.hello") == "5" def test_global_not_overriden_by_environment_var(self): self.patch_configuration(kinds.GLOBAL, {"test.hello": "4"}) os.environ["PIP_HELLO"] = "5" self.configuration.load() assert self.configuration.get_value("test.hello") == "4" assert self.configuration.get_value(":env:.hello") == "5" class TestConfigurationModification(ConfigurationMixin): # Tests for methods to that modify the state of a Configuration def test_no_specific_given_modification(self): self.configuration.load() try: self.configuration.set_value("test.hello", "10") except ConfigurationError: pass else: assert False, "Should have raised an error." 
def test_site_modification(self): self.configuration.load_only = kinds.SITE self.configuration.load() # Mock out the method mymock = MagicMock(spec=self.configuration._mark_as_modified) self.configuration._mark_as_modified = mymock self.configuration.set_value("test.hello", "10") # get the path to site config file assert mymock.call_count == 1 assert mymock.call_args[0][0] == site_config_file def test_user_modification(self): # get the path to local config file self.configuration.load_only = kinds.USER self.configuration.load() # Mock out the method mymock = MagicMock(spec=self.configuration._mark_as_modified) self.configuration._mark_as_modified = mymock self.configuration.set_value("test.hello", "10") # get the path to user config file assert mymock.call_count == 1 assert mymock.call_args[0][0] == new_config_file def test_global_modification(self): # get the path to local config file self.configuration.load_only = kinds.GLOBAL self.configuration.load() # Mock out the method mymock = MagicMock(spec=self.configuration._mark_as_modified) self.configuration._mark_as_modified = mymock self.configuration.set_value("test.hello", "10") # get the path to user config file assert mymock.call_count == 1 assert mymock.call_args[0][0] == global_config_files[-1]
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_download.py
import hashlib import os import sys from io import BytesIO from shutil import copy, rmtree from tempfile import mkdtemp import pytest from mock import Mock, patch from pip._vendor.six.moves.urllib import request as urllib_request import pip from pip._internal.download import ( CI_ENVIRONMENT_VARIABLES, PipSession, SafeFileCache, path_to_url, unpack_file_url, unpack_http_url, url_to_path, ) from pip._internal.exceptions import HashMismatch from pip._internal.models.link import Link from pip._internal.utils.hashes import Hashes from tests.lib import create_file def test_unpack_http_url_with_urllib_response_without_content_type(data): """ It should download and unpack files even if no Content-Type header exists """ _real_session = PipSession() def _fake_session_get(*args, **kwargs): resp = _real_session.get(*args, **kwargs) del resp.headers["Content-Type"] return resp session = Mock() session.get = _fake_session_get uri = path_to_url(data.packages.join("simple-1.0.tar.gz")) link = Link(uri) temp_dir = mkdtemp() try: unpack_http_url( link, temp_dir, download_dir=None, session=session, ) assert set(os.listdir(temp_dir)) == { 'PKG-INFO', 'setup.cfg', 'setup.py', 'simple', 'simple.egg-info' } finally: rmtree(temp_dir) def get_user_agent(): return PipSession().headers["User-Agent"] def test_user_agent(): user_agent = get_user_agent() assert user_agent.startswith("pip/%s" % pip.__version__) @pytest.mark.parametrize('name, expected_like_ci', [ ('BUILD_BUILDID', True), ('BUILD_ID', True), ('CI', True), # Test a prefix substring of one of the variable names we use. ('BUILD', False), ]) def test_user_agent__ci(monkeypatch, name, expected_like_ci): # Delete the variable names we use to check for CI to prevent the # detection from always returning True in case the tests are being run # under actual CI. It is okay to depend on CI_ENVIRONMENT_VARIABLES # here (part of the code under test) because this setup step can only # prevent false test failures. 
It can't cause a false test passage. for ci_name in CI_ENVIRONMENT_VARIABLES: monkeypatch.delenv(ci_name, raising=False) # Confirm the baseline before setting the environment variable. user_agent = get_user_agent() assert '"ci":null' in user_agent assert '"ci":true' not in user_agent monkeypatch.setenv(name, 'true') user_agent = get_user_agent() assert ('"ci":true' in user_agent) == expected_like_ci assert ('"ci":null' in user_agent) == (not expected_like_ci) class FakeStream(object): def __init__(self, contents): self._io = BytesIO(contents) def read(self, size, decode_content=None): return self._io.read(size) def stream(self, size, decode_content=None): yield self._io.read(size) class MockResponse(object): def __init__(self, contents): self.raw = FakeStream(contents) def raise_for_status(self): pass @patch('pip._internal.download.unpack_file') def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file): """ If already-downloaded file has bad checksum, re-download. """ base_url = 'http://www.example.com/somepackage.tgz' contents = b'downloaded' download_hash = hashlib.new('sha1', contents) link = Link(base_url + '#sha1=' + download_hash.hexdigest()) session = Mock() session.get = Mock() response = session.get.return_value = MockResponse(contents) response.headers = {'content-type': 'application/x-tar'} response.url = base_url download_dir = mkdtemp() try: downloaded_file = os.path.join(download_dir, 'somepackage.tgz') create_file(downloaded_file, 'some contents') unpack_http_url( link, 'location', download_dir=download_dir, session=session, hashes=Hashes({'sha1': [download_hash.hexdigest()]}) ) # despite existence of downloaded file with bad hash, downloaded again session.get.assert_called_once_with( 'http://www.example.com/somepackage.tgz', headers={"Accept-Encoding": "identity"}, stream=True, ) # cached file is replaced with newly downloaded file with open(downloaded_file) as fh: assert fh.read() == 'downloaded' finally: rmtree(download_dir) 
@pytest.mark.skipif("sys.platform == 'win32'") def test_path_to_url_unix(): assert path_to_url('/tmp/file') == 'file:///tmp/file' path = os.path.join(os.getcwd(), 'file') assert path_to_url('file') == 'file://' + urllib_request.pathname2url(path) @pytest.mark.skipif("sys.platform != 'win32'") def test_path_to_url_win(): assert path_to_url('c:/tmp/file') == 'file:///C:/tmp/file' assert path_to_url('c:\\tmp\\file') == 'file:///C:/tmp/file' assert path_to_url(r'\\unc\as\path') == 'file://unc/as/path' path = os.path.join(os.getcwd(), 'file') assert path_to_url('file') == 'file:' + urllib_request.pathname2url(path) @pytest.mark.parametrize("url,win_expected,non_win_expected", [ ('file:tmp', 'tmp', 'tmp'), ('file:c:/path/to/file', r'C:\path\to\file', 'c:/path/to/file'), ('file:/path/to/file', r'\path\to\file', '/path/to/file'), ('file://localhost/tmp/file', r'\tmp\file', '/tmp/file'), ('file://localhost/c:/tmp/file', r'C:\tmp\file', '/c:/tmp/file'), ('file://somehost/tmp/file', r'\\somehost\tmp\file', None), ('file:///tmp/file', r'\tmp\file', '/tmp/file'), ('file:///c:/tmp/file', r'C:\tmp\file', '/c:/tmp/file'), ]) def test_url_to_path(url, win_expected, non_win_expected): if sys.platform == 'win32': expected_path = win_expected else: expected_path = non_win_expected if expected_path is None: with pytest.raises(ValueError): url_to_path(url) else: assert url_to_path(url) == expected_path @pytest.mark.skipif("sys.platform != 'win32'") def test_url_to_path_path_to_url_symmetry_win(): path = r'C:\tmp\file' assert url_to_path(path_to_url(path)) == path unc_path = r'\\unc\share\path' assert url_to_path(path_to_url(unc_path)) == unc_path class Test_unpack_file_url(object): def prep(self, tmpdir, data): self.build_dir = tmpdir.join('build') self.download_dir = tmpdir.join('download') os.mkdir(self.build_dir) os.mkdir(self.download_dir) self.dist_file = "simple-1.0.tar.gz" self.dist_file2 = "simple-2.0.tar.gz" self.dist_path = data.packages.join(self.dist_file) self.dist_path2 = 
data.packages.join(self.dist_file2) self.dist_url = Link(path_to_url(self.dist_path)) self.dist_url2 = Link(path_to_url(self.dist_path2)) def test_unpack_file_url_no_download(self, tmpdir, data): self.prep(tmpdir, data) unpack_file_url(self.dist_url, self.build_dir) assert os.path.isdir(os.path.join(self.build_dir, 'simple')) assert not os.path.isfile( os.path.join(self.download_dir, self.dist_file)) def test_unpack_file_url_and_download(self, tmpdir, data): self.prep(tmpdir, data) unpack_file_url(self.dist_url, self.build_dir, download_dir=self.download_dir) assert os.path.isdir(os.path.join(self.build_dir, 'simple')) assert os.path.isfile(os.path.join(self.download_dir, self.dist_file)) def test_unpack_file_url_download_already_exists(self, tmpdir, data, monkeypatch): self.prep(tmpdir, data) # add in previous download (copy simple-2.0 as simple-1.0) # so we can tell it didn't get overwritten dest_file = os.path.join(self.download_dir, self.dist_file) copy(self.dist_path2, dest_file) with open(self.dist_path2, 'rb') as f: dist_path2_md5 = hashlib.md5(f.read()).hexdigest() unpack_file_url(self.dist_url, self.build_dir, download_dir=self.download_dir) # our hash should be the same, i.e. 
not overwritten by simple-1.0 hash with open(dest_file, 'rb') as f: assert dist_path2_md5 == hashlib.md5(f.read()).hexdigest() def test_unpack_file_url_bad_hash(self, tmpdir, data, monkeypatch): """ Test when the file url hash fragment is wrong """ self.prep(tmpdir, data) self.dist_url.url = "%s#md5=bogus" % self.dist_url.url with pytest.raises(HashMismatch): unpack_file_url(self.dist_url, self.build_dir, hashes=Hashes({'md5': ['bogus']})) def test_unpack_file_url_download_bad_hash(self, tmpdir, data, monkeypatch): """ Test when existing download has different hash from the file url fragment """ self.prep(tmpdir, data) # add in previous download (copy simple-2.0 as simple-1.0 so it's wrong # hash) dest_file = os.path.join(self.download_dir, self.dist_file) copy(self.dist_path2, dest_file) with open(self.dist_path, 'rb') as f: dist_path_md5 = hashlib.md5(f.read()).hexdigest() with open(dest_file, 'rb') as f: dist_path2_md5 = hashlib.md5(f.read()).hexdigest() assert dist_path_md5 != dist_path2_md5 self.dist_url.url = "%s#md5=%s" % ( self.dist_url.url, dist_path_md5 ) unpack_file_url(self.dist_url, self.build_dir, download_dir=self.download_dir, hashes=Hashes({'md5': [dist_path_md5]})) # confirm hash is for simple1-1.0 # the previous bad download has been removed with open(dest_file, 'rb') as f: assert hashlib.md5(f.read()).hexdigest() == dist_path_md5 def test_unpack_file_url_thats_a_dir(self, tmpdir, data): self.prep(tmpdir, data) dist_path = data.packages.join("FSPkg") dist_url = Link(path_to_url(dist_path)) unpack_file_url(dist_url, self.build_dir, download_dir=self.download_dir) assert os.path.isdir(os.path.join(self.build_dir, 'fspkg')) class TestSafeFileCache: """ The no_perms test are useless on Windows since SafeFileCache uses pip._internal.utils.filesystem.check_path_owner which is based on os.geteuid which is absent on Windows. 
""" def test_cache_roundtrip(self, tmpdir): cache_dir = tmpdir.join("test-cache") cache_dir.makedirs() cache = SafeFileCache(cache_dir) assert cache.get("test key") is None cache.set("test key", b"a test string") assert cache.get("test key") == b"a test string" cache.delete("test key") assert cache.get("test key") is None @pytest.mark.skipif("sys.platform == 'win32'") def test_safe_get_no_perms(self, tmpdir, monkeypatch): cache_dir = tmpdir.join("unreadable-cache") cache_dir.makedirs() os.chmod(cache_dir, 000) monkeypatch.setattr(os.path, "exists", lambda x: True) cache = SafeFileCache(cache_dir) cache.get("foo") @pytest.mark.skipif("sys.platform == 'win32'") def test_safe_set_no_perms(self, tmpdir): cache_dir = tmpdir.join("unreadable-cache") cache_dir.makedirs() os.chmod(cache_dir, 000) cache = SafeFileCache(cache_dir) cache.set("foo", b"bar") @pytest.mark.skipif("sys.platform == 'win32'") def test_safe_delete_no_perms(self, tmpdir): cache_dir = tmpdir.join("unreadable-cache") cache_dir.makedirs() os.chmod(cache_dir, 000) cache = SafeFileCache(cache_dir) cache.delete("foo") class TestPipSession: def test_cache_defaults_off(self): session = PipSession() assert not hasattr(session.adapters["http://"], "cache") assert not hasattr(session.adapters["https://"], "cache") def test_cache_is_enabled(self, tmpdir): session = PipSession(cache=tmpdir.join("test-cache")) assert hasattr(session.adapters["https://"], "cache") assert (session.adapters["https://"].cache.directory == tmpdir.join("test-cache")) def test_http_cache_is_not_enabled(self, tmpdir): session = PipSession(cache=tmpdir.join("test-cache")) assert not hasattr(session.adapters["http://"], "cache") def test_insecure_host_cache_is_not_enabled(self, tmpdir): session = PipSession( cache=tmpdir.join("test-cache"), insecure_hosts=["example.com"], ) assert not hasattr(session.adapters["https://example.com/"], "cache")
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_finder.py
import logging import sys import pytest from mock import Mock, patch from pkg_resources import Distribution, parse_version import pip._internal.pep425tags import pip._internal.wheel from pip._internal.download import PipSession from pip._internal.exceptions import ( BestVersionAlreadyInstalled, DistributionNotFound, ) from pip._internal.index import ( InstallationCandidate, Link, PackageFinder, Search, ) from pip._internal.req.constructors import install_req_from_line def test_no_mpkg(data): """Finder skips zipfiles with "macosx10" in the name.""" finder = PackageFinder([data.find_links], [], session=PipSession()) req = install_req_from_line("pkgwithmpkg") found = finder.find_requirement(req, False) assert found.url.endswith("pkgwithmpkg-1.0.tar.gz"), found def test_no_partial_name_match(data): """Finder requires the full project name to match, not just beginning.""" finder = PackageFinder([data.find_links], [], session=PipSession()) req = install_req_from_line("gmpy") found = finder.find_requirement(req, False) assert found.url.endswith("gmpy-1.15.tar.gz"), found def test_tilde(): """Finder can accept a path with ~ in it and will normalize it.""" session = PipSession() with patch('pip._internal.index.os.path.exists', return_value=True): finder = PackageFinder(['~/python-pkgs'], [], session=session) req = install_req_from_line("gmpy") with pytest.raises(DistributionNotFound): finder.find_requirement(req, False) def test_duplicates_sort_ok(data): """Finder successfully finds one of a set of duplicates in different locations""" finder = PackageFinder( [data.find_links, data.find_links2], [], session=PipSession(), ) req = install_req_from_line("duplicate") found = finder.find_requirement(req, False) assert found.url.endswith("duplicate-1.0.tar.gz"), found def test_finder_detects_latest_find_links(data): """Test PackageFinder detects latest using find-links""" req = install_req_from_line('simple', None) finder = PackageFinder([data.find_links], [], 
session=PipSession()) link = finder.find_requirement(req, False) assert link.url.endswith("simple-3.0.tar.gz") def test_incorrect_case_file_index(data): """Test PackageFinder detects latest using wrong case""" req = install_req_from_line('dinner', None) finder = PackageFinder([], [data.find_links3], session=PipSession()) link = finder.find_requirement(req, False) assert link.url.endswith("Dinner-2.0.tar.gz") @pytest.mark.network def test_finder_detects_latest_already_satisfied_find_links(data): """Test PackageFinder detects latest already satisfied using find-links""" req = install_req_from_line('simple', None) # the latest simple in local pkgs is 3.0 latest_version = "3.0" satisfied_by = Mock( location="/path", parsed_version=parse_version(latest_version), version=latest_version ) req.satisfied_by = satisfied_by finder = PackageFinder([data.find_links], [], session=PipSession()) with pytest.raises(BestVersionAlreadyInstalled): finder.find_requirement(req, True) @pytest.mark.network def test_finder_detects_latest_already_satisfied_pypi_links(): """Test PackageFinder detects latest already satisfied using pypi links""" req = install_req_from_line('initools', None) # the latest initools on PyPI is 0.3.1 latest_version = "0.3.1" satisfied_by = Mock( location="/path", parsed_version=parse_version(latest_version), version=latest_version, ) req.satisfied_by = satisfied_by finder = PackageFinder( [], ["http://pypi.org/simple/"], session=PipSession(), ) with pytest.raises(BestVersionAlreadyInstalled): finder.find_requirement(req, True) class TestWheel: def test_skip_invalid_wheel_link(self, caplog, data): """ Test if PackageFinder skips invalid wheel filenames """ caplog.set_level(logging.DEBUG) req = install_req_from_line("invalid") # data.find_links contains "invalid.whl", which is an invalid wheel finder = PackageFinder( [data.find_links], [], session=PipSession(), ) with pytest.raises(DistributionNotFound): finder.find_requirement(req, True) assert ( "invalid.whl; 
invalid wheel filename" in caplog.text ) def test_not_find_wheel_not_supported(self, data, monkeypatch): """ Test not finding an unsupported wheel. """ monkeypatch.setattr( pip._internal.pep425tags, "get_supported", lambda **kw: [("py1", "none", "any")], ) req = install_req_from_line("simple.dist") finder = PackageFinder( [data.find_links], [], session=PipSession(), ) finder.valid_tags = pip._internal.pep425tags.get_supported() with pytest.raises(DistributionNotFound): finder.find_requirement(req, True) def test_find_wheel_supported(self, data, monkeypatch): """ Test finding supported wheel. """ monkeypatch.setattr( pip._internal.pep425tags, "get_supported", lambda **kw: [('py2', 'none', 'any')], ) req = install_req_from_line("simple.dist") finder = PackageFinder( [data.find_links], [], session=PipSession(), ) found = finder.find_requirement(req, True) assert ( found.url.endswith("simple.dist-0.1-py2.py3-none-any.whl") ), found def test_wheel_over_sdist_priority(self, data): """ Test wheels have priority over sdists. `test_link_sorting` also covers this at lower level """ req = install_req_from_line("priority") finder = PackageFinder( [data.find_links], [], session=PipSession(), ) found = finder.find_requirement(req, True) assert found.url.endswith("priority-1.0-py2.py3-none-any.whl"), found def test_existing_over_wheel_priority(self, data): """ Test existing install has priority over wheels. 
`test_link_sorting` also covers this at a lower level """ req = install_req_from_line('priority', None) latest_version = "1.0" satisfied_by = Mock( location="/path", parsed_version=parse_version(latest_version), version=latest_version, ) req.satisfied_by = satisfied_by finder = PackageFinder( [data.find_links], [], session=PipSession(), ) with pytest.raises(BestVersionAlreadyInstalled): finder.find_requirement(req, True) def test_link_sorting(self): """ Test link sorting """ links = [ InstallationCandidate("simple", "2.0", Link('simple-2.0.tar.gz')), InstallationCandidate( "simple", "1.0", Link('simple-1.0-pyT-none-TEST.whl'), ), InstallationCandidate( "simple", '1.0', Link('simple-1.0-pyT-TEST-any.whl'), ), InstallationCandidate( "simple", '1.0', Link('simple-1.0-pyT-none-any.whl'), ), InstallationCandidate( "simple", '1.0', Link('simple-1.0.tar.gz'), ), ] finder = PackageFinder([], [], session=PipSession()) finder.valid_tags = [ ('pyT', 'none', 'TEST'), ('pyT', 'TEST', 'any'), ('pyT', 'none', 'any'), ] results = sorted(links, key=finder._candidate_sort_key, reverse=True) results2 = sorted(reversed(links), key=finder._candidate_sort_key, reverse=True) assert links == results == results2, results2 def test_link_sorting_wheels_with_build_tags(self): """Verify build tags affect sorting.""" links = [ InstallationCandidate( "simplewheel", "2.0", Link("simplewheel-2.0-1-py2.py3-none-any.whl"), ), InstallationCandidate( "simplewheel", "2.0", Link("simplewheel-2.0-py2.py3-none-any.whl"), ), InstallationCandidate( "simplewheel", "1.0", Link("simplewheel-1.0-py2.py3-none-any.whl"), ), ] finder = PackageFinder([], [], session=PipSession()) results = sorted(links, key=finder._candidate_sort_key, reverse=True) results2 = sorted(reversed(links), key=finder._candidate_sort_key, reverse=True) assert links == results == results2, results2 def test_finder_priority_file_over_page(data): """Test PackageFinder prefers file links over equivalent page links""" req = 
install_req_from_line('gmpy==1.15', None) finder = PackageFinder( [data.find_links], ["http://pypi.org/simple/"], session=PipSession(), ) all_versions = finder.find_all_candidates(req.name) # 1 file InstallationCandidate followed by all https ones assert all_versions[0].location.scheme == 'file' assert all(version.location.scheme == 'https' for version in all_versions[1:]), all_versions link = finder.find_requirement(req, False) assert link.url.startswith("file://") def test_finder_priority_nonegg_over_eggfragments(): """Test PackageFinder prefers non-egg links over "#egg=" links""" req = install_req_from_line('bar==1.0', None) links = ['http://foo/bar.py#egg=bar-1.0', 'http://foo/bar-1.0.tar.gz'] finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): all_versions = finder.find_all_candidates(req.name) assert all_versions[0].location.url.endswith('tar.gz') assert all_versions[1].location.url.endswith('#egg=bar-1.0') link = finder.find_requirement(req, False) assert link.url.endswith('tar.gz') links.reverse() finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): all_versions = finder.find_all_candidates(req.name) assert all_versions[0].location.url.endswith('tar.gz') assert all_versions[1].location.url.endswith('#egg=bar-1.0') link = finder.find_requirement(req, False) assert link.url.endswith('tar.gz') def test_finder_only_installs_stable_releases(data): """ Test PackageFinder only accepts stable versioned releases by default. 
""" req = install_req_from_line("bar", None) # using a local index (that has pre & dev releases) finder = PackageFinder([], [data.index_url("pre")], session=PipSession()) link = finder.find_requirement(req, False) assert link.url.endswith("bar-1.0.tar.gz"), link.url # using find-links links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"] finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-1.0.tar.gz" links.reverse() finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-1.0.tar.gz" def test_finder_only_installs_data_require(data): """ Test whether the PackageFinder understand data-python-requires This can optionally be exposed by a simple-repository to tell which distribution are compatible with which version of Python by adding a data-python-require to the anchor links. See pep 503 for more information. """ # using a local index (that has pre & dev releases) finder = PackageFinder([], [data.index_url("datarequire")], session=PipSession()) links = finder.find_all_candidates("fakepackage") expected = ['1.0.0', '9.9.9'] if (2, 7) < sys.version_info < (3,): expected.append('2.7.0') elif sys.version_info > (3, 3): expected.append('3.3.0') assert {str(v.version) for v in links} == set(expected) def test_finder_installs_pre_releases(data): """ Test PackageFinder finds pre-releases if asked to. 
""" req = install_req_from_line("bar", None) # using a local index (that has pre & dev releases) finder = PackageFinder( [], [data.index_url("pre")], allow_all_prereleases=True, session=PipSession(), ) link = finder.find_requirement(req, False) assert link.url.endswith("bar-2.0b1.tar.gz"), link.url # using find-links links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"] finder = PackageFinder( links, [], allow_all_prereleases=True, session=PipSession(), ) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-2.0b1.tar.gz" links.reverse() finder = PackageFinder( links, [], allow_all_prereleases=True, session=PipSession(), ) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-2.0b1.tar.gz" def test_finder_installs_dev_releases(data): """ Test PackageFinder finds dev releases if asked to. """ req = install_req_from_line("bar", None) # using a local index (that has dev releases) finder = PackageFinder( [], [data.index_url("dev")], allow_all_prereleases=True, session=PipSession(), ) link = finder.find_requirement(req, False) assert link.url.endswith("bar-2.0.dev1.tar.gz"), link.url def test_finder_installs_pre_releases_with_version_spec(): """ Test PackageFinder only accepts stable versioned releases by default. 
""" req = install_req_from_line("bar>=0.0.dev0", None) links = ["https://foo/bar-1.0.tar.gz", "https://foo/bar-2.0b1.tar.gz"] finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-2.0b1.tar.gz" links.reverse() finder = PackageFinder(links, [], session=PipSession()) with patch.object(finder, "_get_pages", lambda x, y: []): link = finder.find_requirement(req, False) assert link.url == "https://foo/bar-2.0b1.tar.gz" class TestLinkPackageVersions(object): # patch this for travis which has distribute in its base env for now @patch( 'pip._internal.wheel.pkg_resources.get_distribution', lambda x: Distribution(project_name='setuptools', version='0.9') ) def setup(self): self.version = '1.0' self.search_name = 'pytest' self.canonical_name = 'pytest' self.finder = PackageFinder( [], [], session=PipSession(), ) @pytest.mark.parametrize( 'url', [ 'http:/yo/pytest-1.0.tar.gz', 'http:/yo/pytest-1.0-py2.py3-none-any.whl', ], ) def test_link_package_versions_match(self, url): """Test that 'pytest' archives match for 'pytest'""" link = Link(url) search = Search( supplied=self.search_name, canonical=self.canonical_name, formats=['source', 'binary'], ) result = self.finder._link_package_versions(link, search) expected = InstallationCandidate(self.search_name, self.version, link) assert result == expected, result @pytest.mark.parametrize( 'url', [ # TODO: Uncomment this test case when #1217 is fixed. 
# 'http:/yo/pytest-xdist-1.0.tar.gz', 'http:/yo/pytest2-1.0.tar.gz', 'http:/yo/pytest_xdist-1.0-py2.py3-none-any.whl', ], ) def est_link_package_versions_substring_fails(self, url): """Test that 'pytest<something> archives won't match for 'pytest'.""" link = Link(url) search = Search( supplied=self.search_name, canonical=self.canonical_name, formats=['source', 'binary'], ) result = self.finder._link_package_versions(link, search) assert result is None, result def test_get_index_urls_locations(): """Check that the canonical name is on all indexes""" finder = PackageFinder( [], ['file://index1/', 'file://index2'], session=PipSession()) locations = finder._get_index_urls_locations( install_req_from_line('Complex_Name').name) assert locations == ['file://index1/complex-name/', 'file://index2/complex-name/'] def test_find_all_candidates_nothing(): """Find nothing without anything""" finder = PackageFinder([], [], session=PipSession()) assert not finder.find_all_candidates('pip') def test_find_all_candidates_find_links(data): finder = PackageFinder( [data.find_links], [], session=PipSession()) versions = finder.find_all_candidates('simple') assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0'] def test_find_all_candidates_index(data): finder = PackageFinder( [], [data.index_url('simple')], session=PipSession()) versions = finder.find_all_candidates('simple') assert [str(v.version) for v in versions] == ['1.0'] def test_find_all_candidates_find_links_and_index(data): finder = PackageFinder( [data.find_links], [data.index_url('simple')], session=PipSession()) versions = finder.find_all_candidates('simple') # first the find-links versions then the page versions assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0', '1.0']
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_format_control.py
import pytest from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.models.format_control import FormatControl class SimpleCommand(Command): name = 'fake' summary = name def __init__(self): super(SimpleCommand, self).__init__() self.cmd_opts.add_option(cmdoptions.no_binary()) self.cmd_opts.add_option(cmdoptions.only_binary()) def run(self, options, args): self.options = options def test_no_binary_overrides(): cmd = SimpleCommand() cmd.main(['fake', '--only-binary=:all:', '--no-binary=fred']) format_control = FormatControl({'fred'}, {':all:'}) assert cmd.options.format_control == format_control def test_only_binary_overrides(): cmd = SimpleCommand() cmd.main(['fake', '--no-binary=:all:', '--only-binary=fred']) format_control = FormatControl({':all:'}, {'fred'}) assert cmd.options.format_control == format_control def test_none_resets(): cmd = SimpleCommand() cmd.main(['fake', '--no-binary=:all:', '--no-binary=:none:']) format_control = FormatControl(set(), set()) assert cmd.options.format_control == format_control def test_none_preserves_other_side(): cmd = SimpleCommand() cmd.main( ['fake', '--no-binary=:all:', '--only-binary=fred', '--no-binary=:none:']) format_control = FormatControl(set(), {'fred'}) assert cmd.options.format_control == format_control def test_comma_separated_values(): cmd = SimpleCommand() cmd.main(['fake', '--no-binary=1,2,3']) format_control = FormatControl({'1', '2', '3'}, set()) assert cmd.options.format_control == format_control @pytest.mark.parametrize( "no_binary,only_binary,argument,expected", [ ({"fred"}, set(), "fred", frozenset(["source"])), ({"fred"}, {":all:"}, "fred", frozenset(["source"])), (set(), {"fred"}, "fred", frozenset(["binary"])), ({":all:"}, {"fred"}, "fred", frozenset(["binary"])) ] ) def test_fmt_ctl_matches(no_binary, only_binary, argument, expected): fmt = FormatControl(no_binary, only_binary) assert fmt.get_allowed_formats(argument) == expected
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_index.py
import logging import os.path import pytest from mock import Mock from pip._vendor import html5lib, requests from pip._internal.download import PipSession from pip._internal.index import ( Link, PackageFinder, _determine_base_url, _egg_info_matches, _find_name_version_sep, _get_html_page, ) def test_sort_locations_file_expand_dir(data): """ Test that a file:// dir gets listdir run with expand_dir """ finder = PackageFinder([data.find_links], [], session=PipSession()) files, urls = finder._sort_locations([data.find_links], expand_dir=True) assert files and not urls, ( "files and not urls should have been found at find-links url: %s" % data.find_links ) def test_sort_locations_file_not_find_link(data): """ Test that a file:// url dir that's not a find-link, doesn't get a listdir run """ finder = PackageFinder([], [], session=PipSession()) files, urls = finder._sort_locations([data.index_url("empty_with_pkg")]) assert urls and not files, "urls, but not files should have been found" def test_sort_locations_non_existing_path(): """ Test that a non-existing path is ignored. 
""" finder = PackageFinder([], [], session=PipSession()) files, urls = finder._sort_locations( [os.path.join('this', 'doesnt', 'exist')]) assert not urls and not files, "nothing should have been found" class TestLink(object): def test_splitext(self): assert ('wheel', '.whl') == Link('http://yo/wheel.whl').splitext() @pytest.mark.parametrize( ("url", "expected"), [ ("http://yo/wheel.whl", "wheel.whl"), ("http://yo/wheel", "wheel"), ( "http://yo/myproject-1.0%2Bfoobar.0-py2.py3-none-any.whl", "myproject-1.0+foobar.0-py2.py3-none-any.whl", ), ], ) def test_filename(self, url, expected): assert Link(url).filename == expected def test_no_ext(self): assert '' == Link('http://yo/wheel').ext def test_ext(self): assert '.whl' == Link('http://yo/wheel.whl').ext def test_ext_fragment(self): assert '.whl' == Link('http://yo/wheel.whl#frag').ext def test_ext_query(self): assert '.whl' == Link('http://yo/wheel.whl?a=b').ext def test_is_wheel(self): assert Link('http://yo/wheel.whl').is_wheel def test_is_wheel_false(self): assert not Link('http://yo/not_a_wheel').is_wheel def test_fragments(self): url = 'git+https://example.com/package#egg=eggname' assert 'eggname' == Link(url).egg_fragment assert None is Link(url).subdirectory_fragment url = 'git+https://example.com/package#egg=eggname&subdirectory=subdir' assert 'eggname' == Link(url).egg_fragment assert 'subdir' == Link(url).subdirectory_fragment url = 'git+https://example.com/package#subdirectory=subdir&egg=eggname' assert 'eggname' == Link(url).egg_fragment assert 'subdir' == Link(url).subdirectory_fragment @pytest.mark.parametrize( ("html", "url", "expected"), [ (b"<html></html>", "https://example.com/", "https://example.com/"), ( b"<html><head>" b"<base href=\"https://foo.example.com/\">" b"</head></html>", "https://example.com/", "https://foo.example.com/", ), ( b"<html><head>" b"<base><base href=\"https://foo.example.com/\">" b"</head></html>", "https://example.com/", "https://foo.example.com/", ), ], ) def 
test_determine_base_url(html, url, expected): document = html5lib.parse( html, transport_encoding=None, namespaceHTMLElements=False, ) assert _determine_base_url(document, url) == expected class MockLogger(object): def __init__(self): self.called = False def warning(self, *args, **kwargs): self.called = True @pytest.mark.parametrize( ("location", "trusted", "expected"), [ ("http://pypi.org/something", [], True), ("https://pypi.org/something", [], False), ("git+http://pypi.org/something", [], True), ("git+https://pypi.org/something", [], False), ("git+ssh://git@pypi.org/something", [], False), ("http://localhost", [], False), ("http://127.0.0.1", [], False), ("http://example.com/something/", [], True), ("http://example.com/something/", ["example.com"], False), ("http://eXample.com/something/", ["example.cOm"], False), ], ) def test_secure_origin(location, trusted, expected): finder = PackageFinder([], [], session=[], trusted_hosts=trusted) logger = MockLogger() finder._validate_secure_origin(logger, location) assert logger.called == expected def test_get_formatted_locations_basic_auth(): """ Test that basic authentication credentials defined in URL is not included in formatted output. """ index_urls = [ 'https://pypi.org/simple', 'https://user:pass@repo.domain.com', ] finder = PackageFinder([], index_urls, session=[]) result = finder.get_formatted_locations() assert 'user' in result assert '****' in result assert 'pass' not in result @pytest.mark.parametrize( ("egg_info", "canonical_name", "expected"), [ # Trivial. ("pip-18.0", "pip", 3), ("zope-interface-4.5.0", "zope-interface", 14), # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870) ("Jinja2-2.10", "jinja2", 6), ("zope.interface-4.5.0", "zope-interface", 14), ("zope_interface-4.5.0", "zope-interface", 14), # Should be smart enough to parse ambiguous names from the provided # package name. 
("foo-2-2", "foo", 3), ("foo-2-2", "foo-2", 5), # Should be able to detect collapsed characters in the egg info. ("foo--bar-1.0", "foo-bar", 8), ("foo-_bar-1.0", "foo-bar", 8), # The package name must not ends with a dash (PEP 508), so the first # dash would be the separator, not the second. ("zope.interface--4.5.0", "zope-interface", 14), ("zope.interface--", "zope-interface", 14), # The version part is missing, but the split function does not care. ("zope.interface-", "zope-interface", 14), ], ) def test_find_name_version_sep(egg_info, canonical_name, expected): index = _find_name_version_sep(egg_info, canonical_name) assert index == expected @pytest.mark.parametrize( ("egg_info", "canonical_name"), [ # A dash must follow the package name. ("zope.interface4.5.0", "zope-interface"), ("zope.interface.4.5.0", "zope-interface"), ("zope.interface.-4.5.0", "zope-interface"), ("zope.interface", "zope-interface"), ], ) def test_find_name_version_sep_failure(egg_info, canonical_name): with pytest.raises(ValueError) as ctx: _find_name_version_sep(egg_info, canonical_name) message = "{} does not match {}".format(egg_info, canonical_name) assert str(ctx.value) == message @pytest.mark.parametrize( ("egg_info", "canonical_name", "expected"), [ # Trivial. ("pip-18.0", "pip", "18.0"), ("zope-interface-4.5.0", "zope-interface", "4.5.0"), # Canonicalized name match non-canonicalized egg info. (pypa/pip#5870) ("Jinja2-2.10", "jinja2", "2.10"), ("zope.interface-4.5.0", "zope-interface", "4.5.0"), ("zope_interface-4.5.0", "zope-interface", "4.5.0"), # Should be smart enough to parse ambiguous names from the provided # package name. ("foo-2-2", "foo", "2-2"), ("foo-2-2", "foo-2", "2"), ("zope.interface--4.5.0", "zope-interface", "-4.5.0"), ("zope.interface--", "zope-interface", "-"), # Should be able to detect collapsed characters in the egg info. ("foo--bar-1.0", "foo-bar", "1.0"), ("foo-_bar-1.0", "foo-bar", "1.0"), # Invalid. 
("the-package-name-8.19", "does-not-match", None), ("zope.interface.-4.5.0", "zope.interface", None), ("zope.interface-", "zope-interface", None), ("zope.interface4.5.0", "zope-interface", None), ("zope.interface.4.5.0", "zope-interface", None), ("zope.interface.-4.5.0", "zope-interface", None), ("zope.interface", "zope-interface", None), ], ) def test_egg_info_matches(egg_info, canonical_name, expected): version = _egg_info_matches(egg_info, canonical_name) assert version == expected def test_request_http_error(caplog): caplog.set_level(logging.DEBUG) link = Link('http://localhost') session = Mock(PipSession) session.get.return_value = resp = Mock() resp.raise_for_status.side_effect = requests.HTTPError('Http error') assert _get_html_page(link, session=session) is None assert ( 'Could not fetch URL http://localhost: Http error - skipping' in caplog.text ) def test_request_retries(caplog): caplog.set_level(logging.DEBUG) link = Link('http://localhost') session = Mock(PipSession) session.get.side_effect = requests.exceptions.RetryError('Retry error') assert _get_html_page(link, session=session) is None assert ( 'Could not fetch URL http://localhost: Retry error - skipping' in caplog.text )
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_index_html_page.py
import logging import mock import pytest from pip._vendor.six.moves.urllib import request as urllib_request from pip._internal.download import PipSession from pip._internal.index import ( Link, _get_html_page, _get_html_response, _NotHTML, _NotHTTP, ) @pytest.mark.parametrize( "url", [ "ftp://python.org/python-3.7.1.zip", "file:///opt/data/pip-18.0.tar.gz", ], ) def test_get_html_response_archive_to_naive_scheme(url): """ `_get_html_response()` should error on an archive-like URL if the scheme does not allow "poking" without getting data. """ with pytest.raises(_NotHTTP): _get_html_response(url, session=mock.Mock(PipSession)) @pytest.mark.parametrize( "url, content_type", [ ("http://python.org/python-3.7.1.zip", "application/zip"), ("https://pypi.org/pip-18.0.tar.gz", "application/gzip"), ], ) def test_get_html_response_archive_to_http_scheme(url, content_type): """ `_get_html_response()` should send a HEAD request on an archive-like URL if the scheme supports it, and raise `_NotHTML` if the response isn't HTML. """ session = mock.Mock(PipSession) session.head.return_value = mock.Mock(**{ "request.method": "HEAD", "headers": {"Content-Type": content_type}, }) with pytest.raises(_NotHTML) as ctx: _get_html_response(url, session=session) session.assert_has_calls([ mock.call.head(url, allow_redirects=True), ]) assert ctx.value.args == (content_type, "HEAD") @pytest.mark.parametrize( "url", [ "http://python.org/python-3.7.1.zip", "https://pypi.org/pip-18.0.tar.gz", ], ) def test_get_html_response_archive_to_http_scheme_is_html(url): """ `_get_html_response()` should work with archive-like URLs if the HEAD request is responded with text/html. 
""" session = mock.Mock(PipSession) session.head.return_value = mock.Mock(**{ "request.method": "HEAD", "headers": {"Content-Type": "text/html"}, }) session.get.return_value = mock.Mock(headers={"Content-Type": "text/html"}) resp = _get_html_response(url, session=session) assert resp is not None assert session.mock_calls == [ mock.call.head(url, allow_redirects=True), mock.call.head().raise_for_status(), mock.call.get(url, headers={ "Accept": "text/html", "Cache-Control": "max-age=0", }), mock.call.get().raise_for_status(), ] @pytest.mark.parametrize( "url", [ "https://pypi.org/simple/pip", "https://pypi.org/simple/pip/", "https://python.org/sitemap.xml", ], ) def test_get_html_response_no_head(url): """ `_get_html_response()` shouldn't send a HEAD request if the URL does not look like an archive, only the GET request that retrieves data. """ session = mock.Mock(PipSession) # Mock the headers dict to ensure it is accessed. session.get.return_value = mock.Mock(headers=mock.Mock(**{ "get.return_value": "text/html", })) resp = _get_html_response(url, session=session) assert resp is not None assert session.head.call_count == 0 assert session.get.mock_calls == [ mock.call(url, headers={ "Accept": "text/html", "Cache-Control": "max-age=0", }), mock.call().raise_for_status(), mock.call().headers.get("Content-Type", ""), ] def test_get_html_response_dont_log_clear_text_password(caplog): """ `_get_html_response()` should redact the password from the index URL in its DEBUG log message. """ session = mock.Mock(PipSession) # Mock the headers dict to ensure it is accessed. 
session.get.return_value = mock.Mock(headers=mock.Mock(**{ "get.return_value": "text/html", })) caplog.set_level(logging.DEBUG) resp = _get_html_response( "https://user:my_password@example.com/simple/", session=session ) assert resp is not None assert len(caplog.records) == 1 record = caplog.records[0] assert record.levelname == 'DEBUG' assert record.message.splitlines() == [ "Getting page https://user:****@example.com/simple/", ] @pytest.mark.parametrize( "url, vcs_scheme", [ ("svn+http://pypi.org/something", "svn"), ("git+https://github.com/pypa/pip.git", "git"), ], ) def test_get_html_page_invalid_scheme(caplog, url, vcs_scheme): """`_get_html_page()` should error if an invalid scheme is given. Only file:, http:, https:, and ftp: are allowed. """ with caplog.at_level(logging.DEBUG): page = _get_html_page(Link(url), session=mock.Mock(PipSession)) assert page is None assert caplog.record_tuples == [ ( "pip._internal.index", logging.DEBUG, "Cannot look at {} URL {}".format(vcs_scheme, url), ), ] def test_get_html_page_directory_append_index(tmpdir): """`_get_html_page()` should append "index.html" to a directory URL. """ dirpath = tmpdir.mkdir("something") dir_url = "file:///{}".format( urllib_request.pathname2url(dirpath).lstrip("/"), ) session = mock.Mock(PipSession) with mock.patch("pip._internal.index._get_html_response") as mock_func: _get_html_page(Link(dir_url), session=session) assert mock_func.mock_calls == [ mock.call( "{}/index.html".format(dir_url.rstrip("/")), session=session, ), ]
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_locations.py
""" locations.py tests """ import getpass import os import shutil import sys import tempfile import pytest from mock import Mock from pip._internal.locations import distutils_scheme if sys.platform == 'win32': pwd = Mock() else: import pwd class TestLocations: def setup(self): self.tempdir = tempfile.mkdtemp() self.st_uid = 9999 self.username = "example" self.patch() def teardown(self): self.revert_patch() shutil.rmtree(self.tempdir, ignore_errors=True) def patch(self): """ first store and then patch python methods pythons """ self.tempfile_gettempdir = tempfile.gettempdir self.old_os_fstat = os.fstat if sys.platform != 'win32': # os.geteuid and pwd.getpwuid are not implemented on windows self.old_os_geteuid = os.geteuid self.old_pwd_getpwuid = pwd.getpwuid self.old_getpass_getuser = getpass.getuser # now patch tempfile.gettempdir = lambda: self.tempdir getpass.getuser = lambda: self.username os.geteuid = lambda: self.st_uid os.fstat = lambda fd: self.get_mock_fstat(fd) if sys.platform != 'win32': pwd.getpwuid = lambda uid: self.get_mock_getpwuid(uid) def revert_patch(self): """ revert the patches to python methods """ tempfile.gettempdir = self.tempfile_gettempdir getpass.getuser = self.old_getpass_getuser if sys.platform != 'win32': # os.geteuid and pwd.getpwuid are not implemented on windows os.geteuid = self.old_os_geteuid pwd.getpwuid = self.old_pwd_getpwuid os.fstat = self.old_os_fstat def get_mock_fstat(self, fd): """ returns a basic mock fstat call result. Currently only the st_uid attribute has been set. """ result = Mock() result.st_uid = self.st_uid return result def get_mock_getpwuid(self, uid): """ returns a basic mock pwd.getpwuid call result. Currently only the pw_name attribute has been set. 
""" result = Mock() result.pw_name = self.username return result class TestDisutilsScheme: def test_root_modifies_appropriately(self, monkeypatch): # This deals with nt/posix path differences # root is c:\somewhere\else or /somewhere/else root = os.path.normcase(os.path.abspath( os.path.join(os.path.sep, 'somewhere', 'else'))) norm_scheme = distutils_scheme("example") root_scheme = distutils_scheme("example", root=root) for key, value in norm_scheme.items(): drive, path = os.path.splitdrive(os.path.abspath(value)) expected = os.path.join(root, path[1:]) assert os.path.abspath(root_scheme[key]) == expected @pytest.mark.incompatible_with_venv def test_distutils_config_file_read(self, tmpdir, monkeypatch): # This deals with nt/posix path differences install_scripts = os.path.normcase(os.path.abspath( os.path.join(os.path.sep, 'somewhere', 'else'))) f = tmpdir.mkdir("config").join("setup.cfg") f.write("[install]\ninstall-scripts=" + install_scripts) from distutils.dist import Distribution # patch the function that returns what config files are present monkeypatch.setattr( Distribution, 'find_config_files', lambda self: [f], ) scheme = distutils_scheme('example') assert scheme['scripts'] == install_scripts @pytest.mark.incompatible_with_venv # when we request install-lib, we should install everything (.py & # .so) into that path; i.e. 
ensure platlib & purelib are set to # this path def test_install_lib_takes_precedence(self, tmpdir, monkeypatch): # This deals with nt/posix path differences install_lib = os.path.normcase(os.path.abspath( os.path.join(os.path.sep, 'somewhere', 'else'))) f = tmpdir.mkdir("config").join("setup.cfg") f.write("[install]\ninstall-lib=" + install_lib) from distutils.dist import Distribution # patch the function that returns what config files are present monkeypatch.setattr( Distribution, 'find_config_files', lambda self: [f], ) scheme = distutils_scheme('example') assert scheme['platlib'] == install_lib + os.path.sep assert scheme['purelib'] == install_lib + os.path.sep
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_logging.py
import errno import logging import os import time import pytest from mock import patch from pip._vendor.six import PY2 from pip._internal.utils.logging import ( BrokenStdoutLoggingError, ColorizedStreamHandler, IndentingFormatter, ) from pip._internal.utils.misc import captured_stderr, captured_stdout logger = logging.getLogger(__name__) # This is a Python 2/3 compatibility helper. def _make_broken_pipe_error(): """ Return an exception object representing a broken pipe. """ if PY2: # This is one way a broken pipe error can show up in Python 2 # (a non-Windows example in this case). return IOError(errno.EPIPE, 'Broken pipe') return BrokenPipeError() # noqa: F821 class TestIndentingFormatter(object): """ Test `pip._internal.utils.logging.IndentingFormatter`. """ def setup(self): self.old_tz = os.environ.get('TZ') os.environ['TZ'] = 'UTC' # time.tzset() is not implemented on some platforms (notably, Windows). if hasattr(time, 'tzset'): time.tzset() def teardown(self): if self.old_tz: os.environ['TZ'] = self.old_tz else: del os.environ['TZ'] if 'tzset' in dir(time): time.tzset() def make_record(self, msg, level_name): level_number = getattr(logging, level_name) attrs = dict( msg=msg, created=1547704837.4, levelname=level_name, levelno=level_number, ) record = logging.makeLogRecord(attrs) return record @pytest.mark.parametrize('level_name, expected', [ ('DEBUG', 'hello\nworld'), ('INFO', 'hello\nworld'), ('WARNING', 'WARNING: hello\nworld'), ('ERROR', 'ERROR: hello\nworld'), ('CRITICAL', 'ERROR: hello\nworld'), ]) def test_format(self, level_name, expected): """ Args: level_name: a logging level name (e.g. "WARNING"). 
""" record = self.make_record('hello\nworld', level_name=level_name) f = IndentingFormatter(fmt="%(message)s") assert f.format(record) == expected @pytest.mark.parametrize('level_name, expected', [ ('INFO', '2019-01-17T06:00:37 hello\n2019-01-17T06:00:37 world'), ('WARNING', '2019-01-17T06:00:37 WARNING: hello\n2019-01-17T06:00:37 world'), ]) def test_format_with_timestamp(self, level_name, expected): record = self.make_record('hello\nworld', level_name=level_name) f = IndentingFormatter(fmt="%(message)s", add_timestamp=True) assert f.format(record) == expected @pytest.mark.parametrize('level_name, expected', [ ('WARNING', 'DEPRECATION: hello\nworld'), ('ERROR', 'DEPRECATION: hello\nworld'), ('CRITICAL', 'DEPRECATION: hello\nworld'), ]) def test_format_deprecated(self, level_name, expected): """ Test that logged deprecation warnings coming from deprecated() don't get another prefix. """ record = self.make_record( 'DEPRECATION: hello\nworld', level_name=level_name, ) f = IndentingFormatter(fmt="%(message)s") assert f.format(record) == expected class TestColorizedStreamHandler(object): def _make_log_record(self): attrs = { 'msg': 'my error', } record = logging.makeLogRecord(attrs) return record def test_broken_pipe_in_stderr_flush(self): """ Test sys.stderr.flush() raising BrokenPipeError. This error should _not_ trigger an error in the logging framework. """ record = self._make_log_record() with captured_stderr() as stderr: handler = ColorizedStreamHandler(stream=stderr) with patch('sys.stderr.flush') as mock_flush: mock_flush.side_effect = _make_broken_pipe_error() # The emit() call raises no exception. handler.emit(record) err_text = stderr.getvalue() assert err_text.startswith('my error') # Check that the logging framework tried to log the exception. 
if PY2: assert 'IOError: [Errno 32] Broken pipe' in err_text assert 'Logged from file' in err_text else: assert 'Logging error' in err_text assert 'BrokenPipeError' in err_text assert "Message: 'my error'" in err_text def test_broken_pipe_in_stdout_write(self): """ Test sys.stdout.write() raising BrokenPipeError. This error _should_ trigger an error in the logging framework. """ record = self._make_log_record() with captured_stdout() as stdout: handler = ColorizedStreamHandler(stream=stdout) with patch('sys.stdout.write') as mock_write: mock_write.side_effect = _make_broken_pipe_error() with pytest.raises(BrokenStdoutLoggingError): handler.emit(record) def test_broken_pipe_in_stdout_flush(self): """ Test sys.stdout.flush() raising BrokenPipeError. This error _should_ trigger an error in the logging framework. """ record = self._make_log_record() with captured_stdout() as stdout: handler = ColorizedStreamHandler(stream=stdout) with patch('sys.stdout.flush') as mock_flush: mock_flush.side_effect = _make_broken_pipe_error() with pytest.raises(BrokenStdoutLoggingError): handler.emit(record) output = stdout.getvalue() # Sanity check that the log record was written, since flush() happens # after write(). assert output.startswith('my error')
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_models.py
"""Tests for various classes in pip._internal.models """ from pip._vendor.packaging.version import parse as parse_version from pip._internal.models import candidate, index class TestPackageIndex(object): """Tests for pip._internal.models.index.PackageIndex """ def test_gives_right_urls(self): url = "https://mypypi.internal/path/" file_storage_domain = "files.mypypi.internal" pack_index = index.PackageIndex(url, file_storage_domain) assert pack_index.url == url assert pack_index.file_storage_domain == file_storage_domain assert pack_index.netloc == "mypypi.internal" assert pack_index.simple_url == url + "simple" assert pack_index.pypi_url == url + "pypi" def test_PyPI_urls_are_correct(self): pack_index = index.PyPI assert pack_index.netloc == "pypi.org" assert pack_index.url == "https://pypi.org/" assert pack_index.simple_url == "https://pypi.org/simple" assert pack_index.pypi_url == "https://pypi.org/pypi" assert pack_index.file_storage_domain == "files.pythonhosted.org" def test_TestPyPI_urls_are_correct(self): pack_index = index.TestPyPI assert pack_index.netloc == "test.pypi.org" assert pack_index.url == "https://test.pypi.org/" assert pack_index.simple_url == "https://test.pypi.org/simple" assert pack_index.pypi_url == "https://test.pypi.org/pypi" assert pack_index.file_storage_domain == "test-files.pythonhosted.org" class TestInstallationCandidate(object): def test_sets_correct_variables(self): obj = candidate.InstallationCandidate( "A", "1.0.0", "https://somewhere.com/path/A-1.0.0.tar.gz" ) assert obj.project == "A" assert obj.version == parse_version("1.0.0") assert obj.location == "https://somewhere.com/path/A-1.0.0.tar.gz" # NOTE: This isn't checking the ordering logic; only the data provided to # it is correct. def test_sets_the_right_key(self): obj = candidate.InstallationCandidate( "A", "1.0.0", "https://somewhere.com/path/A-1.0.0.tar.gz" ) assert obj._compare_key == (obj.project, obj.version, obj.location)
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_options.py
import os from contextlib import contextmanager import pytest import pip._internal.configuration from pip._internal import main from pip._internal.commands import ConfigurationCommand, DownloadCommand from pip._internal.exceptions import PipError from tests.lib.options_helpers import AddFakeCommandMixin @contextmanager def temp_environment_variable(name, value): not_set = object() original = os.environ[name] if name in os.environ else not_set os.environ[name] = value try: yield finally: # Return the environment variable to its original state. if original is not_set: if name in os.environ: del os.environ[name] else: os.environ[name] = original @contextmanager def assert_option_error(capsys, expected): """ Assert that a SystemExit occurred because of a parsing error. Args: expected: an expected substring of stderr. """ with pytest.raises(SystemExit) as excinfo: yield assert excinfo.value.code == 2 stderr = capsys.readouterr().err assert expected in stderr def assert_is_default_cache_dir(value): # This path looks different on different platforms, but the path always # has the substring "pip". assert 'pip' in value class TestOptionPrecedence(AddFakeCommandMixin): """ Tests for confirming our option precedence: cli -> environment -> subcommand config -> global config -> option defaults """ def get_config_section(self, section): config = { 'global': [('timeout', '-3')], 'fake': [('timeout', '-2')], } return config[section] def get_config_section_global(self, section): config = { 'global': [('timeout', '-3')], 'fake': [], } return config[section] def test_env_override_default_int(self): """ Test that environment variable overrides an int option default. """ os.environ['PIP_TIMEOUT'] = '-1' options, args = main(['fake']) assert options.timeout == -1 def test_env_override_default_append(self): """ Test that environment variable overrides an append option default. 
""" os.environ['PIP_FIND_LINKS'] = 'F1' options, args = main(['fake']) assert options.find_links == ['F1'] os.environ['PIP_FIND_LINKS'] = 'F1 F2' options, args = main(['fake']) assert options.find_links == ['F1', 'F2'] def test_env_override_default_choice(self): """ Test that environment variable overrides a choice option default. """ os.environ['PIP_EXISTS_ACTION'] = 'w' options, args = main(['fake']) assert options.exists_action == ['w'] os.environ['PIP_EXISTS_ACTION'] = 's w' options, args = main(['fake']) assert options.exists_action == ['s', 'w'] def test_env_alias_override_default(self): """ When an option has multiple long forms, test that the technique of using the env variable, "PIP_<long form>" works for all cases. (e.g. PIP_LOG_FILE and PIP_LOCAL_LOG should all work) """ os.environ['PIP_LOG_FILE'] = 'override.log' options, args = main(['fake']) assert options.log == 'override.log' os.environ['PIP_LOCAL_LOG'] = 'override.log' options, args = main(['fake']) assert options.log == 'override.log' def test_cli_override_environment(self): """ Test the cli overrides and environment variable """ os.environ['PIP_TIMEOUT'] = '-1' options, args = main(['fake', '--timeout', '-2']) assert options.timeout == -2 @pytest.mark.parametrize('pip_no_cache_dir', [ # Enabling --no-cache-dir means no cache directory. '1', 'true', 'on', 'yes', # For historical / backwards compatibility reasons, we also disable # the cache directory if provided a value that translates to 0. '0', 'false', 'off', 'no', ]) def test_cache_dir__PIP_NO_CACHE_DIR(self, pip_no_cache_dir): """ Test setting the PIP_NO_CACHE_DIR environment variable without passing any command-line flags. 
""" os.environ['PIP_NO_CACHE_DIR'] = pip_no_cache_dir options, args = main(['fake']) assert options.cache_dir is False @pytest.mark.parametrize('pip_no_cache_dir', ['yes', 'no']) def test_cache_dir__PIP_NO_CACHE_DIR__with_cache_dir( self, pip_no_cache_dir ): """ Test setting PIP_NO_CACHE_DIR while also passing an explicit --cache-dir value. """ os.environ['PIP_NO_CACHE_DIR'] = pip_no_cache_dir options, args = main(['--cache-dir', '/cache/dir', 'fake']) # The command-line flag takes precedence. assert options.cache_dir == '/cache/dir' @pytest.mark.parametrize('pip_no_cache_dir', ['yes', 'no']) def test_cache_dir__PIP_NO_CACHE_DIR__with_no_cache_dir( self, pip_no_cache_dir ): """ Test setting PIP_NO_CACHE_DIR while also passing --no-cache-dir. """ os.environ['PIP_NO_CACHE_DIR'] = pip_no_cache_dir options, args = main(['--no-cache-dir', 'fake']) # The command-line flag should take precedence (which has the same # value in this case). assert options.cache_dir is False def test_cache_dir__PIP_NO_CACHE_DIR_invalid__with_no_cache_dir( self, capsys, ): """ Test setting PIP_NO_CACHE_DIR to an invalid value while also passing --no-cache-dir. """ os.environ['PIP_NO_CACHE_DIR'] = 'maybe' expected_err = "--no-cache-dir error: invalid truth value 'maybe'" with assert_option_error(capsys, expected=expected_err): main(['--no-cache-dir', 'fake']) class TestUsePEP517Options(object): """ Test options related to using --use-pep517. """ def parse_args(self, args): # We use DownloadCommand since that is one of the few Command # classes with the use_pep517 options. command = DownloadCommand() options, args = command.parse_args(args) return options def test_no_option(self): """ Test passing no option. """ options = self.parse_args([]) assert options.use_pep517 is None def test_use_pep517(self): """ Test passing --use-pep517. """ options = self.parse_args(['--use-pep517']) assert options.use_pep517 is True def test_no_use_pep517(self): """ Test passing --no-use-pep517. 
""" options = self.parse_args(['--no-use-pep517']) assert options.use_pep517 is False def test_PIP_USE_PEP517_true(self): """ Test setting PIP_USE_PEP517 to "true". """ with temp_environment_variable('PIP_USE_PEP517', 'true'): options = self.parse_args([]) # This is an int rather than a boolean because strtobool() in pip's # configuration code returns an int. assert options.use_pep517 == 1 def test_PIP_USE_PEP517_false(self): """ Test setting PIP_USE_PEP517 to "false". """ with temp_environment_variable('PIP_USE_PEP517', 'false'): options = self.parse_args([]) # This is an int rather than a boolean because strtobool() in pip's # configuration code returns an int. assert options.use_pep517 == 0 def test_use_pep517_and_PIP_USE_PEP517_false(self): """ Test passing --use-pep517 and setting PIP_USE_PEP517 to "false". """ with temp_environment_variable('PIP_USE_PEP517', 'false'): options = self.parse_args(['--use-pep517']) assert options.use_pep517 is True def test_no_use_pep517_and_PIP_USE_PEP517_true(self): """ Test passing --no-use-pep517 and setting PIP_USE_PEP517 to "true". """ with temp_environment_variable('PIP_USE_PEP517', 'true'): options = self.parse_args(['--no-use-pep517']) assert options.use_pep517 is False def test_PIP_NO_USE_PEP517(self, capsys): """ Test setting PIP_NO_USE_PEP517, which isn't allowed. 
""" expected_err = ( '--no-use-pep517 error: A value was passed for --no-use-pep517,\n' ) with temp_environment_variable('PIP_NO_USE_PEP517', 'true'): with assert_option_error(capsys, expected=expected_err): self.parse_args([]) class TestOptionsInterspersed(AddFakeCommandMixin): def test_general_option_after_subcommand(self): options, args = main(['fake', '--timeout', '-1']) assert options.timeout == -1 def test_option_after_subcommand_arg(self): options, args = main(['fake', 'arg', '--timeout', '-1']) assert options.timeout == -1 def test_additive_before_after_subcommand(self): options, args = main(['-v', 'fake', '-v']) assert options.verbose == 2 def test_subcommand_option_before_subcommand_fails(self): with pytest.raises(SystemExit): main(['--find-links', 'F1', 'fake']) class TestGeneralOptions(AddFakeCommandMixin): # the reason to specifically test general options is due to the # extra processing they receive, and the number of bugs we've had def test_cache_dir__default(self): options, args = main(['fake']) # With no options the default cache dir should be used. 
assert_is_default_cache_dir(options.cache_dir) def test_cache_dir__provided(self): options, args = main(['--cache-dir', '/cache/dir', 'fake']) assert options.cache_dir == '/cache/dir' def test_no_cache_dir__provided(self): options, args = main(['--no-cache-dir', 'fake']) assert options.cache_dir is False def test_require_virtualenv(self): options1, args1 = main(['--require-virtualenv', 'fake']) options2, args2 = main(['fake', '--require-virtualenv']) assert options1.require_venv assert options2.require_venv def test_verbose(self): options1, args1 = main(['--verbose', 'fake']) options2, args2 = main(['fake', '--verbose']) assert options1.verbose == options2.verbose == 1 def test_quiet(self): options1, args1 = main(['--quiet', 'fake']) options2, args2 = main(['fake', '--quiet']) assert options1.quiet == options2.quiet == 1 options3, args3 = main(['--quiet', '--quiet', 'fake']) options4, args4 = main(['fake', '--quiet', '--quiet']) assert options3.quiet == options4.quiet == 2 options5, args5 = main(['--quiet', '--quiet', '--quiet', 'fake']) options6, args6 = main(['fake', '--quiet', '--quiet', '--quiet']) assert options5.quiet == options6.quiet == 3 def test_log(self): options1, args1 = main(['--log', 'path', 'fake']) options2, args2 = main(['fake', '--log', 'path']) assert options1.log == options2.log == 'path' def test_local_log(self): options1, args1 = main(['--local-log', 'path', 'fake']) options2, args2 = main(['fake', '--local-log', 'path']) assert options1.log == options2.log == 'path' def test_no_input(self): options1, args1 = main(['--no-input', 'fake']) options2, args2 = main(['fake', '--no-input']) assert options1.no_input assert options2.no_input def test_proxy(self): options1, args1 = main(['--proxy', 'path', 'fake']) options2, args2 = main(['fake', '--proxy', 'path']) assert options1.proxy == options2.proxy == 'path' def test_retries(self): options1, args1 = main(['--retries', '-1', 'fake']) options2, args2 = main(['fake', '--retries', '-1']) assert 
options1.retries == options2.retries == -1 def test_timeout(self): options1, args1 = main(['--timeout', '-1', 'fake']) options2, args2 = main(['fake', '--timeout', '-1']) assert options1.timeout == options2.timeout == -1 def test_skip_requirements_regex(self): options1, args1 = main(['--skip-requirements-regex', 'path', 'fake']) options2, args2 = main(['fake', '--skip-requirements-regex', 'path']) assert options1.skip_requirements_regex == 'path' assert options2.skip_requirements_regex == 'path' def test_exists_action(self): options1, args1 = main(['--exists-action', 'w', 'fake']) options2, args2 = main(['fake', '--exists-action', 'w']) assert options1.exists_action == options2.exists_action == ['w'] def test_cert(self): options1, args1 = main(['--cert', 'path', 'fake']) options2, args2 = main(['fake', '--cert', 'path']) assert options1.cert == options2.cert == 'path' def test_client_cert(self): options1, args1 = main(['--client-cert', 'path', 'fake']) options2, args2 = main(['fake', '--client-cert', 'path']) assert options1.client_cert == options2.client_cert == 'path' class TestOptionsConfigFiles(object): def test_venv_config_file_found(self, monkeypatch): # strict limit on the global_config_files list monkeypatch.setattr( pip._internal.configuration, 'global_config_files', ['/a/place'] ) cp = pip._internal.configuration.Configuration(isolated=False) files = [] for _, val in cp._iter_config_files(): files.extend(val) assert len(files) == 4 @pytest.mark.parametrize( "args, expect", ( ([], None), (["--global"], "global"), (["--site"], "site"), (["--user"], "user"), (["--global", "--user"], PipError), (["--global", "--site"], PipError), (["--global", "--site", "--user"], PipError), ) ) def test_config_file_options(self, monkeypatch, args, expect): cmd = ConfigurationCommand() # Replace a handler with a no-op to avoid side effects monkeypatch.setattr(cmd, "get_name", lambda *a: None) options, args = cmd.parser.parse_args(args + ["get", "name"]) if expect is PipError: 
with pytest.raises(PipError): cmd._determine_file(options, need_value=False) else: assert expect == cmd._determine_file(options, need_value=False) def test_config_file_venv_option(self, monkeypatch): cmd = ConfigurationCommand() # Replace a handler with a no-op to avoid side effects monkeypatch.setattr(cmd, "get_name", lambda *a: None) collected_warnings = [] def _warn(message, *a, **kw): collected_warnings.append(message) monkeypatch.setattr("warnings.warn", _warn) options, args = cmd.parser.parse_args(["--venv", "get", "name"]) assert "site" == cmd._determine_file(options, need_value=False) assert collected_warnings assert "--site" in collected_warnings[0] # No warning or error if both "--venv" and "--site" are specified collected_warnings[:] = [] options, args = cmd.parser.parse_args(["--venv", "--site", "get", "name"]) assert "site" == cmd._determine_file(options, need_value=False) assert not collected_warnings
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_pep425tags.py
import sys import pytest from mock import patch from pip._internal import pep425tags class TestPEP425Tags(object): def mock_get_config_var(self, **kwd): """ Patch sysconfig.get_config_var for arbitrary keys. """ import pip._internal.pep425tags get_config_var = pip._internal.pep425tags.sysconfig.get_config_var def _mock_get_config_var(var): if var in kwd: return kwd[var] return get_config_var(var) return _mock_get_config_var def abi_tag_unicode(self, flags, config_vars): """ Used to test ABI tags, verify correct use of the `u` flag """ import pip._internal.pep425tags config_vars.update({'SOABI': None}) base = pip._internal.pep425tags.get_abbr_impl() + \ pip._internal.pep425tags.get_impl_ver() if sys.version_info < (3, 3): config_vars.update({'Py_UNICODE_SIZE': 2}) mock_gcf = self.mock_get_config_var(**config_vars) with patch('pip._internal.pep425tags.sysconfig.get_config_var', mock_gcf): abi_tag = pip._internal.pep425tags.get_abi_tag() assert abi_tag == base + flags config_vars.update({'Py_UNICODE_SIZE': 4}) mock_gcf = self.mock_get_config_var(**config_vars) with patch('pip._internal.pep425tags.sysconfig.get_config_var', mock_gcf): abi_tag = pip._internal.pep425tags.get_abi_tag() assert abi_tag == base + flags + 'u' else: # On Python >= 3.3, UCS-4 is essentially permanently enabled, and # Py_UNICODE_SIZE is None. SOABI on these builds does not include # the 'u' so manual SOABI detection should not do so either. config_vars.update({'Py_UNICODE_SIZE': None}) mock_gcf = self.mock_get_config_var(**config_vars) with patch('pip._internal.pep425tags.sysconfig.get_config_var', mock_gcf): abi_tag = pip._internal.pep425tags.get_abi_tag() assert abi_tag == base + flags def test_broken_sysconfig(self): """ Test that pep425tags still works when sysconfig is broken. Can be a problem on Python 2.7 Issue #1074. 
""" import pip._internal.pep425tags def raises_ioerror(var): raise IOError("I have the wrong path!") with patch('pip._internal.pep425tags.sysconfig.get_config_var', raises_ioerror): assert len(pip._internal.pep425tags.get_supported()) def test_no_hyphen_tag(self): """ Test that no tag contains a hyphen. """ import pip._internal.pep425tags mock_gcf = self.mock_get_config_var(SOABI='cpython-35m-darwin') with patch('pip._internal.pep425tags.sysconfig.get_config_var', mock_gcf): supported = pip._internal.pep425tags.get_supported() for (py, abi, plat) in supported: assert '-' not in py assert '-' not in abi assert '-' not in plat def test_manual_abi_noflags(self): """ Test that no flags are set on a non-PyDebug, non-Pymalloc ABI tag. """ self.abi_tag_unicode('', {'Py_DEBUG': False, 'WITH_PYMALLOC': False}) def test_manual_abi_d_flag(self): """ Test that the `d` flag is set on a PyDebug, non-Pymalloc ABI tag. """ self.abi_tag_unicode('d', {'Py_DEBUG': True, 'WITH_PYMALLOC': False}) def test_manual_abi_m_flag(self): """ Test that the `m` flag is set on a non-PyDebug, Pymalloc ABI tag. """ self.abi_tag_unicode('m', {'Py_DEBUG': False, 'WITH_PYMALLOC': True}) def test_manual_abi_dm_flags(self): """ Test that the `dm` flags are set on a PyDebug, Pymalloc ABI tag. """ self.abi_tag_unicode('dm', {'Py_DEBUG': True, 'WITH_PYMALLOC': True}) @pytest.mark.parametrize('is_manylinux_compatible', [ pep425tags.is_manylinux1_compatible, pep425tags.is_manylinux2010_compatible, ]) class TestManylinuxTags(object): """ Tests common to all manylinux tags (e.g. manylinux1, manylinux2010, ...) 
""" @patch('pip._internal.pep425tags.get_platform', lambda: 'linux_x86_64') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: True) def test_manylinux_compatible_on_linux_x86_64(self, is_manylinux_compatible): """ Test that manylinuxes are enabled on linux_x86_64 """ assert is_manylinux_compatible() @patch('pip._internal.pep425tags.get_platform', lambda: 'linux_i686') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: True) def test_manylinux1_compatible_on_linux_i686(self, is_manylinux_compatible): """ Test that manylinux1 is enabled on linux_i686 """ assert is_manylinux_compatible() @patch('pip._internal.pep425tags.get_platform', lambda: 'linux_x86_64') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: False) def test_manylinux1_2(self, is_manylinux_compatible): """ Test that manylinux1 is disabled with incompatible glibc """ assert not is_manylinux_compatible() @patch('pip._internal.pep425tags.get_platform', lambda: 'arm6vl') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: True) def test_manylinux1_3(self, is_manylinux_compatible): """ Test that manylinux1 is disabled on arm6vl """ assert not is_manylinux_compatible() class TestManylinux1Tags(object): @patch('pip._internal.pep425tags.is_manylinux2010_compatible', lambda: False) @patch('pip._internal.pep425tags.get_platform', lambda: 'linux_x86_64') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: True) @patch('sys.platform', 'linux2') def test_manylinux1_tag_is_first(self): """ Test that the more specific tag manylinux1 comes first. 
""" groups = {} for pyimpl, abi, arch in pep425tags.get_supported(): groups.setdefault((pyimpl, abi), []).append(arch) for arches in groups.values(): if arches == ['any']: continue # Expect the most specific arch first: if len(arches) == 3: assert arches == ['manylinux1_x86_64', 'linux_x86_64', 'any'] else: assert arches == ['manylinux1_x86_64', 'linux_x86_64'] class TestManylinux2010Tags(object): @patch('pip._internal.pep425tags.get_platform', lambda: 'linux_x86_64') @patch('pip._internal.utils.glibc.have_compatible_glibc', lambda major, minor: True) @patch('sys.platform', 'linux2') def test_manylinux2010_tag_is_first(self): """ Test that the more specific tag manylinux2010 comes first. """ groups = {} for pyimpl, abi, arch in pep425tags.get_supported(): groups.setdefault((pyimpl, abi), []).append(arch) for arches in groups.values(): if arches == ['any']: continue # Expect the most specific arch first: if len(arches) == 4: assert arches == ['manylinux2010_x86_64', 'manylinux1_x86_64', 'linux_x86_64', 'any'] else: assert arches == ['manylinux2010_x86_64', 'manylinux1_x86_64', 'linux_x86_64'] @pytest.mark.parametrize("manylinux2010,manylinux1", [ ("manylinux2010_x86_64", "manylinux1_x86_64"), ("manylinux2010_i686", "manylinux1_i686"), ]) def test_manylinux2010_implies_manylinux1(self, manylinux2010, manylinux1): """ Specifying manylinux2010 implies manylinux1. """ groups = {} supported = pep425tags.get_supported(platform=manylinux2010) for pyimpl, abi, arch in supported: groups.setdefault((pyimpl, abi), []).append(arch) for arches in groups.values(): if arches == ['any']: continue assert arches[:2] == [manylinux2010, manylinux1]
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_pep517.py
import pytest from pip._internal.exceptions import InstallationError from pip._internal.req import InstallRequirement @pytest.mark.parametrize(('source', 'expected'), [ ("pep517_setup_and_pyproject", True), ("pep517_setup_only", False), ("pep517_pyproject_only", True), ]) def test_use_pep517(data, source, expected): """ Test that we choose correctly between PEP 517 and legacy code paths """ src = data.src.join(source) req = InstallRequirement(None, None, source_dir=src) req.load_pyproject_toml() assert req.use_pep517 is expected @pytest.mark.parametrize(('source', 'msg'), [ ("pep517_setup_and_pyproject", "specifies a build backend"), ("pep517_pyproject_only", "does not have a setup.py"), ]) def test_disabling_pep517_invalid(data, source, msg): """ Test that we fail if we try to disable PEP 517 when it's not acceptable """ src = data.src.join(source) req = InstallRequirement(None, None, source_dir=src) # Simulate --no-use-pep517 req.use_pep517 = False with pytest.raises(InstallationError) as e: req.load_pyproject_toml() err_msg = e.value.args[0] assert "Disabling PEP 517 processing is invalid" in err_msg assert msg in err_msg
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_req.py
import os import shutil import sys import tempfile import pytest from mock import patch from pip._vendor import pkg_resources from pip._vendor.packaging.markers import Marker from pip._vendor.packaging.requirements import Requirement from pip._internal.commands.install import InstallCommand from pip._internal.download import PipSession, path_to_url from pip._internal.exceptions import ( HashErrors, InstallationError, InvalidWheelFilename, PreviousBuildDirError, ) from pip._internal.index import PackageFinder from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req import InstallRequirement, RequirementSet from pip._internal.req.constructors import ( install_req_from_editable, install_req_from_line, parse_editable, ) from pip._internal.req.req_file import process_line from pip._internal.req.req_tracker import RequirementTracker from pip._internal.resolve import Resolver from tests.lib import DATA_DIR, assert_raises_regexp, requirements_file def get_processed_req_from_line(line, fname='file', lineno=1): req = list(process_line(line, fname, lineno))[0] req.is_direct = True return req class TestRequirementSet(object): """RequirementSet tests""" def setup(self): self.tempdir = tempfile.mkdtemp() def teardown(self): shutil.rmtree(self.tempdir, ignore_errors=True) def _basic_resolver(self, finder): preparer = RequirementPreparer( build_dir=os.path.join(self.tempdir, 'build'), src_dir=os.path.join(self.tempdir, 'src'), download_dir=None, wheel_download_dir=None, progress_bar="on", build_isolation=True, req_tracker=RequirementTracker(), ) return Resolver( preparer=preparer, wheel_cache=None, session=PipSession(), finder=finder, use_user_site=False, upgrade_strategy="to-satisfy-only", ignore_dependencies=False, ignore_installed=False, ignore_requires_python=False, force_reinstall=False, isolated=False, ) def test_no_reuse_existing_build_dir(self, data): """Test prepare_files raise exception with previous build dir""" build_dir = 
os.path.join(self.tempdir, 'build', 'simple') os.makedirs(build_dir) with open(os.path.join(build_dir, "setup.py"), 'w'): pass reqset = RequirementSet() req = install_req_from_line('simple') req.is_direct = True reqset.add_requirement(req) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) assert_raises_regexp( PreviousBuildDirError, r"pip can't proceed with [\s\S]*%s[\s\S]*%s" % (req, build_dir.replace('\\', '\\\\')), resolver.resolve, reqset, ) # TODO: Update test when Python 2.7 or Python 3.4 is dropped. def test_environment_marker_extras(self, data): """ Test that the environment marker extras are used with non-wheel installs. """ reqset = RequirementSet() req = install_req_from_editable( data.packages.join("LocalEnvironMarker") ) req.is_direct = True reqset.add_requirement(req) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) resolver.resolve(reqset) # This is hacky but does test both case in py2 and py3 if sys.version_info[:2] in ((2, 7), (3, 4)): assert reqset.has_requirement('simple') else: assert not reqset.has_requirement('simple') @pytest.mark.network def test_missing_hash_checking(self): """Make sure prepare_files() raises an error when a requirement has no hash in implicit hash-checking mode. """ reqset = RequirementSet() # No flags here. 
This tests that detection of later flags nonetheless # requires earlier packages to have hashes: reqset.add_requirement(get_processed_req_from_line( 'blessings==1.0', lineno=1 )) # This flag activates --require-hashes mode: reqset.add_requirement(get_processed_req_from_line( 'tracefront==0.1 --hash=sha256:somehash', lineno=2, )) # This hash should be accepted because it came from the reqs file, not # from the internet: reqset.add_requirement(get_processed_req_from_line( 'https://files.pythonhosted.org/packages/source/m/more-itertools/' 'more-itertools-1.0.tar.gz#md5=b21850c3cfa7efbb70fd662ab5413bdd', lineno=3, )) # The error text should list this as a URL and not `peep==3.1.1`: reqset.add_requirement(get_processed_req_from_line( 'https://files.pythonhosted.org/' 'packages/source/p/peep/peep-3.1.1.tar.gz', lineno=4, )) finder = PackageFinder( [], ['https://pypi.org/simple/'], session=PipSession(), ) resolver = self._basic_resolver(finder) assert_raises_regexp( HashErrors, r'Hashes are required in --require-hashes mode, but they are ' r'missing .*\n' r' https://files\.pythonhosted\.org/packages/source/p/peep/peep' r'-3\.1\.1\.tar\.gz --hash=sha256:[0-9a-f]+\n' r' blessings==1.0 --hash=sha256:[0-9a-f]+\n' r'THESE PACKAGES DO NOT MATCH THE HASHES.*\n' r' tracefront==0.1 .*:\n' r' Expected sha256 somehash\n' r' Got [0-9a-f]+$', resolver.resolve, reqset ) def test_missing_hash_with_require_hashes(self, data): """Setting --require-hashes explicitly should raise errors if hashes are missing. 
""" reqset = RequirementSet(require_hashes=True) reqset.add_requirement(get_processed_req_from_line( 'simple==1.0', lineno=1 )) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) assert_raises_regexp( HashErrors, r'Hashes are required in --require-hashes mode, but they are ' r'missing .*\n' r' simple==1.0 --hash=sha256:393043e672415891885c9a2a0929b1af95' r'fb866d6ca016b42d2e6ce53619b653$', resolver.resolve, reqset ) def test_missing_hash_with_require_hashes_in_reqs_file(self, data, tmpdir): """--require-hashes in a requirements file should make its way to the RequirementSet. """ req_set = RequirementSet(require_hashes=False) session = PipSession() finder = PackageFinder([data.find_links], [], session=session) command = InstallCommand() with requirements_file('--require-hashes', tmpdir) as reqs_file: options, args = command.parse_args(['-r', reqs_file]) command.populate_requirement_set( req_set, args, options, finder, session, command.name, wheel_cache=None, ) assert req_set.require_hashes def test_unsupported_hashes(self, data): """VCS and dir links should raise errors when --require-hashes is on. In addition, complaints about the type of requirement (VCS or dir) should trump the presence or absence of a hash. 
""" reqset = RequirementSet(require_hashes=True) reqset.add_requirement(get_processed_req_from_line( 'git+git://github.com/pypa/pip-test-package --hash=sha256:123', lineno=1, )) dir_path = data.packages.join('FSPkg') reqset.add_requirement(get_processed_req_from_line( 'file://%s' % (dir_path,), lineno=2, )) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) sep = os.path.sep if sep == '\\': sep = '\\\\' # This needs to be escaped for the regex assert_raises_regexp( HashErrors, r"Can't verify hashes for these requirements because we don't " r"have a way to hash version control repositories:\n" r" git\+git://github\.com/pypa/pip-test-package \(from -r file " r"\(line 1\)\)\n" r"Can't verify hashes for these file:// requirements because they " r"point to directories:\n" r" file://.*{sep}data{sep}packages{sep}FSPkg " r"\(from -r file \(line 2\)\)".format(sep=sep), resolver.resolve, reqset) def test_unpinned_hash_checking(self, data): """Make sure prepare_files() raises an error when a requirement is not version-pinned in hash-checking mode. """ reqset = RequirementSet() # Test that there must be exactly 1 specifier: reqset.add_requirement(get_processed_req_from_line( 'simple --hash=sha256:a90427ae31f5d1d0d7ec06ee97d9fcf2d0fc9a786985' '250c1c83fd68df5911dd', lineno=1, )) # Test that the operator must be ==: reqset.add_requirement(get_processed_req_from_line( 'simple2>1.0 --hash=sha256:3ad45e1e9aa48b4462af0' '123f6a7e44a9115db1ef945d4d92c123dfe21815a06', lineno=2, )) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) assert_raises_regexp( HashErrors, # Make sure all failing requirements are listed: r'versions pinned with ==. 
These do not:\n' r' simple .* \(from -r file \(line 1\)\)\n' r' simple2>1.0 .* \(from -r file \(line 2\)\)', resolver.resolve, reqset) def test_hash_mismatch(self, data): """A hash mismatch should raise an error.""" file_url = path_to_url( (data.packages / 'simple-1.0.tar.gz').abspath) reqset = RequirementSet(require_hashes=True) reqset.add_requirement(get_processed_req_from_line( '%s --hash=sha256:badbad' % file_url, lineno=1, )) finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) assert_raises_regexp( HashErrors, r'THESE PACKAGES DO NOT MATCH THE HASHES.*\n' r' file:///.*/data/packages/simple-1\.0\.tar\.gz .*:\n' r' Expected sha256 badbad\n' r' Got 393043e672415891885c9a2a0929b1af95fb866d' r'6ca016b42d2e6ce53619b653$', resolver.resolve, reqset) def test_unhashed_deps_on_require_hashes(self, data): """Make sure unhashed, unpinned, or otherwise unrepeatable dependencies get complained about when --require-hashes is on.""" reqset = RequirementSet() finder = PackageFinder([data.find_links], [], session=PipSession()) resolver = self._basic_resolver(finder) reqset.add_requirement(get_processed_req_from_line( 'TopoRequires2==0.0.1 ' # requires TopoRequires '--hash=sha256:eaf9a01242c9f2f42cf2bd82a6a848cd' 'e3591d14f7896bdbefcf48543720c970', lineno=1 )) assert_raises_regexp( HashErrors, r'In --require-hashes mode, all requirements must have their ' r'versions pinned.*\n' r' TopoRequires from .*$', resolver.resolve, reqset) def test_hashed_deps_on_require_hashes(self): """Make sure hashed dependencies get installed when --require-hashes is on. (We actually just check that no "not all dependencies are hashed!" error gets raised while preparing; there is no reason to expect installation to then fail, as the code paths are the same as ever.) 
""" reqset = RequirementSet() reqset.add_requirement(get_processed_req_from_line( 'TopoRequires2==0.0.1 ' # requires TopoRequires '--hash=sha256:eaf9a01242c9f2f42cf2bd82a6a848cd' 'e3591d14f7896bdbefcf48543720c970', lineno=1 )) reqset.add_requirement(get_processed_req_from_line( 'TopoRequires==0.0.1 ' '--hash=sha256:d6dd1e22e60df512fdcf3640ced3039b3b02a56ab2cee81ebcb' '3d0a6d4e8bfa6', lineno=2 )) class TestInstallRequirement(object): def setup(self): self.tempdir = tempfile.mkdtemp() def teardown(self): shutil.rmtree(self.tempdir, ignore_errors=True) def test_url_with_query(self): """InstallRequirement should strip the fragment, but not the query.""" url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz' fragment = '#egg=bar' req = install_req_from_line(url + fragment) assert req.link.url == url + fragment, req.link def test_unsupported_wheel_link_requirement_raises(self): reqset = RequirementSet() req = install_req_from_line( 'https://whatever.com/peppercorn-0.4-py2.py3-bogus-any.whl', ) assert req.link is not None assert req.link.is_wheel assert req.link.scheme == "https" with pytest.raises(InstallationError): reqset.add_requirement(req) def test_unsupported_wheel_local_file_requirement_raises(self, data): reqset = RequirementSet() req = install_req_from_line( data.packages.join('simple.dist-0.1-py1-none-invalid.whl'), ) assert req.link is not None assert req.link.is_wheel assert req.link.scheme == "file" with pytest.raises(InstallationError): reqset.add_requirement(req) def test_installed_version_not_installed(self): req = install_req_from_line('simple-0.1-py2.py3-none-any.whl') assert req.installed_version is None def test_str(self): req = install_req_from_line('simple==0.1') assert str(req) == 'simple==0.1' def test_repr(self): req = install_req_from_line('simple==0.1') assert repr(req) == ( '<InstallRequirement object: simple==0.1 editable=False>' ) def test_invalid_wheel_requirement_raises(self): with pytest.raises(InvalidWheelFilename): 
install_req_from_line('invalid.whl') def test_wheel_requirement_sets_req_attribute(self): req = install_req_from_line('simple-0.1-py2.py3-none-any.whl') assert isinstance(req.req, Requirement) assert str(req.req) == 'simple==0.1' def test_url_preserved_line_req(self): """Confirm the url is preserved in a non-editable requirement""" url = 'git+http://foo.com@ref#egg=foo' req = install_req_from_line(url) assert req.link.url == url def test_url_preserved_editable_req(self): """Confirm the url is preserved in a editable requirement""" url = 'git+http://foo.com@ref#egg=foo' req = install_req_from_editable(url) assert req.link.url == url @pytest.mark.parametrize('path', ( '/path/to/foo.egg-info'.replace('/', os.path.sep), # Tests issue fixed by https://github.com/pypa/pip/pull/2530 '/path/to/foo.egg-info/'.replace('/', os.path.sep), )) def test_get_dist(self, path): req = install_req_from_line('foo') req._egg_info_path = path dist = req.get_dist() assert isinstance(dist, pkg_resources.Distribution) assert dist.project_name == 'foo' assert dist.location == '/path/to'.replace('/', os.path.sep) def test_markers(self): for line in ( # recommended syntax 'mock3; python_version >= "3"', # with more spaces 'mock3 ; python_version >= "3" ', # without spaces 'mock3;python_version >= "3"', ): req = install_req_from_line(line) assert req.req.name == 'mock3' assert str(req.req.specifier) == '' assert str(req.markers) == 'python_version >= "3"' def test_markers_semicolon(self): # check that the markers can contain a semicolon req = install_req_from_line('semicolon; os_name == "a; b"') assert req.req.name == 'semicolon' assert str(req.req.specifier) == '' assert str(req.markers) == 'os_name == "a; b"' def test_markers_url(self): # test "URL; markers" syntax url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz' line = '%s; python_version >= "3"' % url req = install_req_from_line(line) assert req.link.url == url, req.url assert str(req.markers) == 'python_version >= "3"' # without 
space, markers are part of the URL url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz' line = '%s;python_version >= "3"' % url req = install_req_from_line(line) assert req.link.url == line, req.url assert req.markers is None def test_markers_match_from_line(self): # match for markers in ( 'python_version >= "1.0"', 'sys_platform == %r' % sys.platform, ): line = 'name; ' + markers req = install_req_from_line(line) assert str(req.markers) == str(Marker(markers)) assert req.match_markers() # don't match for markers in ( 'python_version >= "5.0"', 'sys_platform != %r' % sys.platform, ): line = 'name; ' + markers req = install_req_from_line(line) assert str(req.markers) == str(Marker(markers)) assert not req.match_markers() def test_markers_match(self): # match for markers in ( 'python_version >= "1.0"', 'sys_platform == %r' % sys.platform, ): line = 'name; ' + markers req = install_req_from_line(line, comes_from='') assert str(req.markers) == str(Marker(markers)) assert req.match_markers() # don't match for markers in ( 'python_version >= "5.0"', 'sys_platform != %r' % sys.platform, ): line = 'name; ' + markers req = install_req_from_line(line, comes_from='') assert str(req.markers) == str(Marker(markers)) assert not req.match_markers() def test_extras_for_line_path_requirement(self): line = 'SomeProject[ex1,ex2]' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_line(line, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {'ex1', 'ex2'} def test_extras_for_line_url_requirement(self): line = 'git+https://url#egg=SomeProject[ex1,ex2]' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_line(line, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {'ex1', 'ex2'} def test_extras_for_editable_path_requirement(self): url = '.[ex1,ex2]' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_editable(url, 
comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {'ex1', 'ex2'} def test_extras_for_editable_url_requirement(self): url = 'git+https://url#egg=SomeProject[ex1,ex2]' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_editable(url, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {'ex1', 'ex2'} def test_unexisting_path(self): with pytest.raises(InstallationError) as e: install_req_from_line( os.path.join('this', 'path', 'does', 'not', 'exist')) err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "It looks like a path." in err_msg def test_single_equal_sign(self): with pytest.raises(InstallationError) as e: install_req_from_line('toto=42') err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "= is not a valid operator. Did you mean == ?" in err_msg def test_unidentifiable_name(self): test_name = '-' with pytest.raises(InstallationError) as e: install_req_from_line(test_name) err_msg = e.value.args[0] assert ("Invalid requirement: '%s'\n" % test_name) == err_msg def test_requirement_file(self): req_file_path = os.path.join(self.tempdir, 'test.txt') with open(req_file_path, 'w') as req_file: req_file.write('pip\nsetuptools') with pytest.raises(InstallationError) as e: install_req_from_line(req_file_path) err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "It looks like a path. It does exist." in err_msg assert "appears to be a requirements file." 
in err_msg assert "If that is the case, use the '-r' flag to install" in err_msg @patch('pip._internal.req.req_install.os.path.abspath') @patch('pip._internal.req.req_install.os.path.exists') @patch('pip._internal.req.req_install.os.path.isdir') def test_parse_editable_local( isdir_mock, exists_mock, abspath_mock): exists_mock.return_value = isdir_mock.return_value = True # mocks needed to support path operations on windows tests abspath_mock.return_value = "/some/path" assert parse_editable('.') == (None, 'file:///some/path', None) abspath_mock.return_value = "/some/path/foo" assert parse_editable('foo') == ( None, 'file:///some/path/foo', None, ) def test_parse_editable_explicit_vcs(): assert parse_editable('svn+https://foo#egg=foo') == ( 'foo', 'svn+https://foo#egg=foo', None, ) def test_parse_editable_vcs_extras(): assert parse_editable('svn+https://foo#egg=foo[extras]') == ( 'foo[extras]', 'svn+https://foo#egg=foo[extras]', None, ) @patch('pip._internal.req.req_install.os.path.abspath') @patch('pip._internal.req.req_install.os.path.exists') @patch('pip._internal.req.req_install.os.path.isdir') def test_parse_editable_local_extras( isdir_mock, exists_mock, abspath_mock): exists_mock.return_value = isdir_mock.return_value = True abspath_mock.return_value = "/some/path" assert parse_editable('.[extras]') == ( None, 'file://' + "/some/path", {'extras'}, ) abspath_mock.return_value = "/some/path/foo" assert parse_editable('foo[bar,baz]') == ( None, 'file:///some/path/foo', {'bar', 'baz'}, ) def test_exclusive_environment_markers(): """Make sure RequirementSet accepts several excluding env markers""" eq26 = install_req_from_line( "Django>=1.6.10,<1.7 ; python_version == '2.6'") eq26.is_direct = True ne26 = install_req_from_line( "Django>=1.6.10,<1.8 ; python_version != '2.6'") ne26.is_direct = True req_set = RequirementSet() req_set.add_requirement(eq26) req_set.add_requirement(ne26) assert req_set.has_requirement('Django') def test_mismatched_versions(caplog, 
tmpdir): original_source = os.path.join(DATA_DIR, 'src', 'simplewheel-1.0') source_dir = os.path.join(tmpdir, 'simplewheel') shutil.copytree(original_source, source_dir) req = InstallRequirement(req=Requirement('simplewheel==2.0'), comes_from=None, source_dir=source_dir) req.prepare_metadata() req.assert_source_matches_version() assert caplog.records[-1].message == ( 'Requested simplewheel==2.0, ' 'but installing version 1.0' )
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_req_file.py
import os import subprocess import textwrap import pytest from mock import Mock, patch from pretend import stub import pip._internal.index from pip._internal.download import PipSession from pip._internal.exceptions import ( InstallationError, RequirementsFileParseError, ) from pip._internal.index import PackageFinder from pip._internal.models.format_control import FormatControl from pip._internal.req.constructors import ( install_req_from_editable, install_req_from_line, ) from pip._internal.req.req_file import ( break_args_options, ignore_comments, join_lines, parse_requirements, preprocess, process_line, skip_regex, ) from tests.lib import requirements_file @pytest.fixture def session(): return PipSession() @pytest.fixture def finder(session): return PackageFinder([], [], session=session) @pytest.fixture def options(session): return stub( isolated_mode=False, index_url='default_url', skip_requirements_regex=False, format_control=FormatControl(set(), set())) class TestPreprocess(object): """tests for `preprocess`""" def test_comments_and_joins_case1(self): content = textwrap.dedent("""\ req1 \\ # comment \\ req2 """) result = preprocess(content, None) assert list(result) == [(1, 'req1'), (3, 'req2')] def test_comments_and_joins_case2(self): content = textwrap.dedent("""\ req1\\ # comment """) result = preprocess(content, None) assert list(result) == [(1, 'req1')] def test_comments_and_joins_case3(self): content = textwrap.dedent("""\ req1 \\ # comment req2 """) result = preprocess(content, None) assert list(result) == [(1, 'req1'), (3, 'req2')] def test_skip_regex_after_joining_case1(self, options): content = textwrap.dedent("""\ patt\\ ern line2 """) options.skip_requirements_regex = 'pattern' result = preprocess(content, options) assert list(result) == [(3, 'line2')] def test_skip_regex_after_joining_case2(self, options): content = textwrap.dedent("""\ pattern \\ line2 line3 """) options.skip_requirements_regex = 'pattern' result = preprocess(content, options) 
assert list(result) == [(3, 'line3')] class TestIgnoreComments(object): """tests for `ignore_comment`""" def test_ignore_line(self): lines = [(1, ''), (2, 'req1'), (3, 'req2')] result = ignore_comments(lines) assert list(result) == [(2, 'req1'), (3, 'req2')] def test_ignore_comment(self): lines = [(1, 'req1'), (2, '# comment'), (3, 'req2')] result = ignore_comments(lines) assert list(result) == [(1, 'req1'), (3, 'req2')] def test_strip_comment(self): lines = [(1, 'req1'), (2, 'req # comment'), (3, 'req2')] result = ignore_comments(lines) assert list(result) == [(1, 'req1'), (2, 'req'), (3, 'req2')] class TestJoinLines(object): """tests for `join_lines`""" def test_join_lines(self): lines = enumerate([ 'line 1', 'line 2:1 \\', 'line 2:2', 'line 3:1 \\', 'line 3:2 \\', 'line 3:3', 'line 4' ], start=1) expect = [ (1, 'line 1'), (2, 'line 2:1 line 2:2'), (4, 'line 3:1 line 3:2 line 3:3'), (7, 'line 4'), ] assert expect == list(join_lines(lines)) def test_last_line_with_escape(self): lines = enumerate([ 'line 1', 'line 2 \\', ], start=1) expect = [ (1, 'line 1'), (2, 'line 2 '), ] assert expect == list(join_lines(lines)) class TestSkipRegex(object): """tests for `skip_reqex``""" def test_skip_regex_pattern_match(self): options = stub(skip_requirements_regex='.*Bad.*') line = '--extra-index-url Bad' assert [] == list(skip_regex(enumerate([line]), options)) def test_skip_regex_pattern_not_match(self): options = stub(skip_requirements_regex='.*Bad.*') line = '--extra-index-url Good' assert [(0, line)] == list(skip_regex(enumerate([line]), options)) def test_skip_regex_no_options(self): options = None line = '--extra-index-url Good' assert [(0, line)] == list(skip_regex(enumerate([line]), options)) def test_skip_regex_no_skip_option(self): options = stub(skip_requirements_regex=None) line = '--extra-index-url Good' assert [(0, line)] == list(skip_regex(enumerate([line]), options)) class TestProcessLine(object): """tests for `process_line`""" def test_parser_error(self): 
with pytest.raises(RequirementsFileParseError): list(process_line("--bogus", "file", 1)) def test_parser_offending_line(self): line = 'pkg==1.0.0 --hash=somehash' with pytest.raises(RequirementsFileParseError) as err: list(process_line(line, 'file', 1)) assert line in str(err.value) def test_parser_non_offending_line(self): try: list(process_line('pkg==1.0.0 --hash=sha256:somehash', 'file', 1)) except RequirementsFileParseError: pytest.fail('Reported offending line where it should not.') def test_only_one_req_per_line(self): # pkg_resources raises the ValueError with pytest.raises(InstallationError): list(process_line("req1 req2", "file", 1)) def test_yield_line_requirement(self): line = 'SomeProject' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_line(line, comes_from=comes_from) assert repr(list(process_line(line, filename, 1))[0]) == repr(req) def test_yield_line_constraint(self): line = 'SomeProject' filename = 'filename' comes_from = '-c %s (line %s)' % (filename, 1) req = install_req_from_line( line, comes_from=comes_from, constraint=True) found_req = list(process_line(line, filename, 1, constraint=True))[0] assert repr(found_req) == repr(req) assert found_req.constraint is True def test_yield_line_requirement_with_spaces_in_specifier(self): line = 'SomeProject >= 2' filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_line(line, comes_from=comes_from) assert repr(list(process_line(line, filename, 1))[0]) == repr(req) assert str(req.req.specifier) == '>=2' def test_yield_editable_requirement(self): url = 'git+https://url#egg=SomeProject' line = '-e %s' % url filename = 'filename' comes_from = '-r %s (line %s)' % (filename, 1) req = install_req_from_editable(url, comes_from=comes_from) assert repr(list(process_line(line, filename, 1))[0]) == repr(req) def test_yield_editable_constraint(self): url = 'git+https://url#egg=SomeProject' line = '-e %s' % url filename = 'filename' 
comes_from = '-c %s (line %s)' % (filename, 1) req = install_req_from_editable( url, comes_from=comes_from, constraint=True) found_req = list(process_line(line, filename, 1, constraint=True))[0] assert repr(found_req) == repr(req) assert found_req.constraint is True def test_nested_requirements_file(self, monkeypatch): line = '-r another_file' req = install_req_from_line('SomeProject') import pip._internal.req.req_file def stub_parse_requirements(req_url, finder, comes_from, options, session, wheel_cache, constraint): return [(req, constraint)] parse_requirements_stub = stub(call=stub_parse_requirements) monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', parse_requirements_stub.call) assert list(process_line(line, 'filename', 1)) == [(req, False)] def test_nested_constraints_file(self, monkeypatch): line = '-c another_file' req = install_req_from_line('SomeProject') import pip._internal.req.req_file def stub_parse_requirements(req_url, finder, comes_from, options, session, wheel_cache, constraint): return [(req, constraint)] parse_requirements_stub = stub(call=stub_parse_requirements) monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', parse_requirements_stub.call) assert list(process_line(line, 'filename', 1)) == [(req, True)] def test_options_on_a_requirement_line(self): line = 'SomeProject --install-option=yo1 --install-option yo2 '\ '--global-option="yo3" --global-option "yo4"' filename = 'filename' req = list(process_line(line, filename, 1))[0] assert req.options == { 'global_options': ['yo3', 'yo4'], 'install_options': ['yo1', 'yo2']} def test_hash_options(self): """Test the --hash option: mostly its value storage. Make sure it reads and preserve multiple hashes. 
""" line = ('SomeProject --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b1' '61e5c1fa7425e73043362938b9824 ' '--hash=sha384:59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c' '3553bcdb9c666fa90125a3c79f90397bdf5f6a13de828684f ' '--hash=sha256:486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8' 'e5a6c65260e9cb8a7') filename = 'filename' req = list(process_line(line, filename, 1))[0] assert req.options == {'hashes': { 'sha256': ['2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e730433' '62938b9824', '486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8e5a6c65' '260e9cb8a7'], 'sha384': ['59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c3553bcd' 'b9c666fa90125a3c79f90397bdf5f6a13de828684f']}} def test_set_isolated(self, options): line = 'SomeProject' filename = 'filename' options.isolated_mode = True result = process_line(line, filename, 1, options=options) assert list(result)[0].isolated def test_set_finder_no_index(self, finder): list(process_line("--no-index", "file", 1, finder=finder)) assert finder.index_urls == [] def test_set_finder_index_url(self, finder): list(process_line("--index-url=url", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_set_finder_find_links(self, finder): list(process_line("--find-links=url", "file", 1, finder=finder)) assert finder.find_links == ['url'] def test_set_finder_extra_index_urls(self, finder): list(process_line("--extra-index-url=url", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_set_finder_trusted_host(self, finder): list(process_line("--trusted-host=url", "file", 1, finder=finder)) assert finder.secure_origins == [('*', 'url', '*')] def test_noop_always_unzip(self, finder): # noop, but confirm it can be set list(process_line("--always-unzip", "file", 1, finder=finder)) def test_set_finder_allow_all_prereleases(self, finder): list(process_line("--pre", "file", 1, finder=finder)) assert finder.allow_all_prereleases def test_relative_local_find_links(self, finder, monkeypatch): """ Test a 
relative find_links path is joined with the req file directory """ # Make sure the test also passes on windows req_file = os.path.normcase(os.path.abspath( os.path.normpath('/path/req_file.txt'))) nested_link = os.path.normcase(os.path.abspath( os.path.normpath('/path/rel_path'))) exists_ = os.path.exists def exists(path): if path == nested_link: return True else: exists_(path) monkeypatch.setattr(os.path, 'exists', exists) list(process_line("--find-links=rel_path", req_file, 1, finder=finder)) assert finder.find_links == [nested_link] def test_relative_http_nested_req_files(self, finder, monkeypatch): """ Test a relative nested req file path is joined with the req file url """ req_file = 'http://me.com/me/req_file.txt' def parse(*args, **kwargs): return iter([]) mock_parse = Mock() mock_parse.side_effect = parse monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', mock_parse) list(process_line("-r reqs.txt", req_file, 1, finder=finder)) call = mock_parse.mock_calls[0] assert call[1][0] == 'http://me.com/me/reqs.txt' def test_relative_local_nested_req_files(self, finder, monkeypatch): """ Test a relative nested req file path is joined with the req file dir """ req_file = os.path.normpath('/path/req_file.txt') def parse(*args, **kwargs): return iter([]) mock_parse = Mock() mock_parse.side_effect = parse monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', mock_parse) list(process_line("-r reqs.txt", req_file, 1, finder=finder)) call = mock_parse.mock_calls[0] assert call[1][0] == os.path.normpath('/path/reqs.txt') def test_absolute_local_nested_req_files(self, finder, monkeypatch): """ Test an absolute nested req file path """ req_file = '/path/req_file.txt' def parse(*args, **kwargs): return iter([]) mock_parse = Mock() mock_parse.side_effect = parse monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', mock_parse) list(process_line("-r /other/reqs.txt", req_file, 1, finder=finder)) call = mock_parse.mock_calls[0] 
assert call[1][0] == '/other/reqs.txt' def test_absolute_http_nested_req_file_in_local(self, finder, monkeypatch): """ Test a nested req file url in a local req file """ req_file = '/path/req_file.txt' def parse(*args, **kwargs): return iter([]) mock_parse = Mock() mock_parse.side_effect = parse monkeypatch.setattr(pip._internal.req.req_file, 'parse_requirements', mock_parse) list(process_line("-r http://me.com/me/reqs.txt", req_file, 1, finder=finder)) call = mock_parse.mock_calls[0] assert call[1][0] == 'http://me.com/me/reqs.txt' class TestBreakOptionsArgs(object): def test_no_args(self): assert ('', '--option') == break_args_options('--option') def test_no_options(self): assert ('arg arg', '') == break_args_options('arg arg') def test_args_short_options(self): result = break_args_options('arg arg -s') assert ('arg arg', '-s') == result def test_args_long_options(self): result = break_args_options('arg arg --long') assert ('arg arg', '--long') == result class TestOptionVariants(object): # this suite is really just testing optparse, but added it anyway def test_variant1(self, finder): list(process_line("-i url", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_variant2(self, finder): list(process_line("-i 'url'", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_variant3(self, finder): list(process_line("--index-url=url", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_variant4(self, finder): list(process_line("--index-url url", "file", 1, finder=finder)) assert finder.index_urls == ['url'] def test_variant5(self, finder): list(process_line("--index-url='url'", "file", 1, finder=finder)) assert finder.index_urls == ['url'] class TestParseRequirements(object): """tests for `parse_requirements`""" @pytest.mark.network def test_remote_reqs_parse(self): """ Test parsing a simple remote requirements file """ # this requirements file just contains a comment previously this has # failed in py3: 
https://github.com/pypa/pip/issues/760 for req in parse_requirements( 'https://raw.githubusercontent.com/pypa/' 'pip-test-package/master/' 'tests/req_just_comment.txt', session=PipSession()): pass def test_multiple_appending_options(self, tmpdir, finder, options): with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("--extra-index-url url1 \n") fp.write("--extra-index-url url2 ") list(parse_requirements(tmpdir.join("req1.txt"), finder=finder, session=PipSession(), options=options)) assert finder.index_urls == ['url1', 'url2'] def test_skip_regex(self, tmpdir, finder, options): options.skip_requirements_regex = '.*Bad.*' with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("--extra-index-url Bad \n") fp.write("--extra-index-url Good ") list(parse_requirements(tmpdir.join("req1.txt"), finder=finder, options=options, session=PipSession())) assert finder.index_urls == ['Good'] def test_expand_existing_env_variables(self, tmpdir, finder): template = ( 'https://%s:x-oauth-basic@github.com/user/%s/archive/master.zip' ) env_vars = ( ('GITHUB_TOKEN', 'notarealtoken'), ('DO_12_FACTOR', 'awwyeah'), ) with open(tmpdir.join('req1.txt'), 'w') as fp: fp.write(template % tuple(['${%s}' % k for k, _ in env_vars])) with patch('pip._internal.req.req_file.os.getenv') as getenv: getenv.side_effect = lambda n: dict(env_vars)[n] reqs = list(parse_requirements( tmpdir.join('req1.txt'), finder=finder, session=PipSession() )) assert len(reqs) == 1, \ 'parsing requirement file with env variable failed' expected_url = template % tuple([v for _, v in env_vars]) assert reqs[0].link.url == expected_url, \ 'variable expansion in req file failed' def test_expand_missing_env_variables(self, tmpdir, finder): req_url = ( 'https://${NON_EXISTENT_VARIABLE}:$WRONG_FORMAT@' '%WINDOWS_FORMAT%github.com/user/repo/archive/master.zip' ) with open(tmpdir.join('req1.txt'), 'w') as fp: fp.write(req_url) with patch('pip._internal.req.req_file.os.getenv') as getenv: getenv.return_value = '' reqs = 
list(parse_requirements( tmpdir.join('req1.txt'), finder=finder, session=PipSession() )) assert len(reqs) == 1, \ 'parsing requirement file with env variable failed' assert reqs[0].link.url == req_url, \ 'ignoring invalid env variable in req file failed' def test_join_lines(self, tmpdir, finder): with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("--extra-index-url url1 \\\n--extra-index-url url2") list(parse_requirements(tmpdir.join("req1.txt"), finder=finder, session=PipSession())) assert finder.index_urls == ['url1', 'url2'] def test_req_file_parse_no_only_binary(self, data, finder): list(parse_requirements( data.reqfiles.join("supported_options2.txt"), finder, session=PipSession())) expected = FormatControl({'fred'}, {'wilma'}) assert finder.format_control == expected def test_req_file_parse_comment_start_of_line(self, tmpdir, finder): """ Test parsing comments in a requirements file """ with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("# Comment ") reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder, session=PipSession())) assert not reqs def test_req_file_parse_comment_end_of_line_with_url(self, tmpdir, finder): """ Test parsing comments in a requirements file """ with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("https://example.com/foo.tar.gz # Comment ") reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder, session=PipSession())) assert len(reqs) == 1 assert reqs[0].link.url == "https://example.com/foo.tar.gz" def test_req_file_parse_egginfo_end_of_line_with_url(self, tmpdir, finder): """ Test parsing comments in a requirements file """ with open(tmpdir.join("req1.txt"), "w") as fp: fp.write("https://example.com/foo.tar.gz#egg=wat") reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder, session=PipSession())) assert len(reqs) == 1 assert reqs[0].name == "wat" def test_req_file_no_finder(self, tmpdir): """ Test parsing a requirements file without a finder """ with open(tmpdir.join("req.txt"), "w") as fp: 
fp.write(""" --find-links https://example.com/ --index-url https://example.com/ --extra-index-url https://two.example.com/ --no-use-wheel --no-index """) parse_requirements(tmpdir.join("req.txt"), session=PipSession()) def test_install_requirements_with_options(self, tmpdir, finder, session, options): global_option = '--dry-run' install_option = '--prefix=/opt' content = ''' --only-binary :all: INITools==2.0 --global-option="{global_option}" \ --install-option "{install_option}" '''.format(global_option=global_option, install_option=install_option) with requirements_file(content, tmpdir) as reqs_file: req = next(parse_requirements(reqs_file.abspath, finder=finder, options=options, session=session)) req.source_dir = os.curdir with patch.object(subprocess, 'Popen') as popen: popen.return_value.stdout.readline.return_value = b"" try: req.install([]) except Exception: pass last_call = popen.call_args_list[-1] args = last_call[0][0] assert ( 0 < args.index(global_option) < args.index('install') < args.index(install_option) ) assert options.format_control.no_binary == {':all:'} assert options.format_control.only_binary == set()
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_req_install.py
import os import tempfile import pytest from pip._internal.req.constructors import install_req_from_line from pip._internal.req.req_install import InstallRequirement class TestInstallRequirementBuildDirectory(object): # no need to test symlinks on Windows @pytest.mark.skipif("sys.platform == 'win32'") def test_tmp_build_directory(self): # when req is None, we can produce a temporary directory # Make sure we're handling it correctly with real path. requirement = InstallRequirement(None, None) tmp_dir = tempfile.mkdtemp('-build', 'pip-') tmp_build_dir = requirement.build_location(tmp_dir) assert ( os.path.dirname(tmp_build_dir) == os.path.realpath(os.path.dirname(tmp_dir)) ) # are we on a system where /tmp is a symlink if os.path.realpath(tmp_dir) != os.path.abspath(tmp_dir): assert os.path.dirname(tmp_build_dir) != os.path.dirname(tmp_dir) else: assert os.path.dirname(tmp_build_dir) == os.path.dirname(tmp_dir) os.rmdir(tmp_dir) assert not os.path.exists(tmp_dir) def test_forward_slash_results_in_a_link(self, tmpdir): install_dir = tmpdir / "foo" / "bar" # Just create a file for letting the logic work setup_py_path = install_dir / "setup.py" os.makedirs(str(install_dir)) with open(setup_py_path, 'w') as f: f.write('') requirement = install_req_from_line( str(install_dir).replace(os.sep, os.altsep or os.sep) ) assert requirement.link is not None
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_req_uninstall.py
import os import pytest from mock import Mock import pip._internal.req.req_uninstall from pip._internal.req.req_uninstall import ( StashedUninstallPathSet, UninstallPathSet, compact, compress_for_output_listing, compress_for_rename, uninstallation_paths, ) from tests.lib import create_file # Pretend all files are local, so UninstallPathSet accepts files in the tmpdir, # outside the virtualenv def mock_is_local(path): return True def test_uninstallation_paths(): class dist(object): def get_metadata_lines(self, record): return ['file.py,,', 'file.pyc,,', 'file.so,,', 'nopyc.py'] location = '' d = dist() paths = list(uninstallation_paths(d)) expected = ['file.py', 'file.pyc', 'file.pyo', 'file.so', 'nopyc.py', 'nopyc.pyc', 'nopyc.pyo'] assert paths == expected # Avoid an easy 'unique generator' bug paths2 = list(uninstallation_paths(d)) assert paths2 == paths def test_compressed_listing(tmpdir): def in_tmpdir(paths): li = [] for path in paths: li.append( str(os.path.join(tmpdir, path.replace("/", os.path.sep))) ) return li sample = in_tmpdir([ "lib/mypkg.dist-info/METADATA", "lib/mypkg.dist-info/PKG-INFO", "lib/mypkg/would_be_removed.txt", "lib/mypkg/would_be_skipped.skip.txt", "lib/mypkg/__init__.py", "lib/mypkg/my_awesome_code.py", "lib/mypkg/__pycache__/my_awesome_code-magic.pyc", "lib/mypkg/support/support_file.py", "lib/mypkg/support/more_support.py", "lib/mypkg/support/would_be_skipped.skip.py", "lib/mypkg/support/__pycache__/support_file-magic.pyc", "lib/random_other_place/file_without_a_dot_pyc", "bin/mybin", ]) # Create the required files for fname in sample: create_file(fname, "random blub") # Remove the files to be skipped from the paths sample = [path for path in sample if ".skip." 
not in path] expected_remove = in_tmpdir([ "bin/mybin", "lib/mypkg.dist-info/*", "lib/mypkg/*", "lib/random_other_place/file_without_a_dot_pyc", ]) expected_skip = in_tmpdir([ "lib/mypkg/would_be_skipped.skip.txt", "lib/mypkg/support/would_be_skipped.skip.py", ]) expected_rename = in_tmpdir([ "bin/", "lib/mypkg.dist-info/", "lib/mypkg/would_be_removed.txt", "lib/mypkg/__init__.py", "lib/mypkg/my_awesome_code.py", "lib/mypkg/__pycache__/", "lib/mypkg/support/support_file.py", "lib/mypkg/support/more_support.py", "lib/mypkg/support/__pycache__/", "lib/random_other_place/", ]) will_remove, will_skip = compress_for_output_listing(sample) will_rename = compress_for_rename(sample) assert sorted(expected_skip) == sorted(compact(will_skip)) assert sorted(expected_remove) == sorted(compact(will_remove)) assert sorted(expected_rename) == sorted(compact(will_rename)) class TestUninstallPathSet(object): def test_add(self, tmpdir, monkeypatch): monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local', mock_is_local) # Fix case for windows tests file_extant = os.path.normcase(os.path.join(tmpdir, 'foo')) file_nonexistent = os.path.normcase( os.path.join(tmpdir, 'nonexistent')) with open(file_extant, 'w'): pass ups = UninstallPathSet(dist=Mock()) assert ups.paths == set() ups.add(file_extant) assert ups.paths == {file_extant} ups.add(file_nonexistent) assert ups.paths == {file_extant} @pytest.mark.skipif("sys.platform == 'win32'") def test_add_symlink(self, tmpdir, monkeypatch): monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local', mock_is_local) f = os.path.join(tmpdir, 'foo') with open(f, 'w'): pass foo_link = os.path.join(tmpdir, 'foo_link') os.symlink(f, foo_link) ups = UninstallPathSet(dist=Mock()) ups.add(foo_link) assert ups.paths == {foo_link} def test_compact_shorter_path(self, monkeypatch): monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local', lambda p: True) monkeypatch.setattr('os.path.exists', lambda p: True) # This deals with nt/posix 
path differences short_path = os.path.normcase(os.path.abspath( os.path.join(os.path.sep, 'path'))) ups = UninstallPathSet(dist=Mock()) ups.add(short_path) ups.add(os.path.join(short_path, 'longer')) assert compact(ups.paths) == {short_path} @pytest.mark.skipif("sys.platform == 'win32'") def test_detect_symlink_dirs(self, monkeypatch, tmpdir): monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local', lambda p: True) # construct 2 paths: # tmpdir/dir/file # tmpdir/dirlink/file (where dirlink is a link to dir) d = tmpdir.join('dir') d.mkdir() dlink = tmpdir.join('dirlink') os.symlink(d, dlink) d.join('file').touch() path1 = str(d.join('file')) path2 = str(dlink.join('file')) ups = UninstallPathSet(dist=Mock()) ups.add(path1) ups.add(path2) assert ups.paths == {path1} class TestStashedUninstallPathSet(object): WALK_RESULT = [ ("A", ["B", "C"], ["a.py"]), ("A/B", ["D"], ["b.py"]), ("A/B/D", [], ["c.py"]), ("A/C", [], ["d.py", "e.py"]), ("A/E", ["F"], ["f.py"]), ("A/E/F", [], []), ("A/G", ["H"], ["g.py"]), ("A/G/H", [], ["h.py"]), ] @classmethod def mock_walk(cls, root): for dirname, subdirs, files in cls.WALK_RESULT: dirname = os.path.sep.join(dirname.split("/")) if dirname.startswith(root): yield dirname[len(root) + 1:], subdirs, files def test_compress_for_rename(self, monkeypatch): paths = [os.path.sep.join(p.split("/")) for p in [ "A/B/b.py", "A/B/D/c.py", "A/C/d.py", "A/E/f.py", "A/G/g.py", ]] expected_paths = [os.path.sep.join(p.split("/")) for p in [ "A/B/", # selected everything below A/B "A/C/d.py", # did not select everything below A/C "A/E/", # only empty folders remain under A/E "A/G/g.py", # non-empty folder remains under A/G ]] monkeypatch.setattr('os.walk', self.mock_walk) actual_paths = compress_for_rename(paths) assert set(expected_paths) == set(actual_paths) @classmethod def make_stash(cls, tmpdir, paths): for dirname, subdirs, files in cls.WALK_RESULT: root = os.path.join(tmpdir, *dirname.split("/")) if not os.path.exists(root): os.mkdir(root) 
for d in subdirs: os.mkdir(os.path.join(root, d)) for f in files: with open(os.path.join(root, f), "wb"): pass pathset = StashedUninstallPathSet() paths = [os.path.join(tmpdir, *p.split('/')) for p in paths] stashed_paths = [(p, pathset.stash(p)) for p in paths] return pathset, stashed_paths def test_stash(self, tmpdir): pathset, stashed_paths = self.make_stash(tmpdir, [ "A/B/", "A/C/d.py", "A/E/", "A/G/g.py", ]) for old_path, new_path in stashed_paths: assert not os.path.exists(old_path) assert os.path.exists(new_path) assert stashed_paths == pathset._moves def test_commit(self, tmpdir): pathset, stashed_paths = self.make_stash(tmpdir, [ "A/B/", "A/C/d.py", "A/E/", "A/G/g.py", ]) pathset.commit() for old_path, new_path in stashed_paths: assert not os.path.exists(old_path) assert not os.path.exists(new_path) def test_rollback(self, tmpdir): pathset, stashed_paths = self.make_stash(tmpdir, [ "A/B/", "A/C/d.py", "A/E/", "A/G/g.py", ]) pathset.rollback() for old_path, new_path in stashed_paths: assert os.path.exists(old_path) assert not os.path.exists(new_path)
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_unit_outdated.py
import datetime import os import sys from contextlib import contextmanager import freezegun import pretend import pytest from pip._vendor import lockfile, pkg_resources from pip._internal.index import InstallationCandidate from pip._internal.utils import outdated class MockPackageFinder(object): BASE_URL = 'https://pypi.org/simple/pip-{0}.tar.gz' PIP_PROJECT_NAME = 'pip' INSTALLATION_CANDIDATES = [ InstallationCandidate(PIP_PROJECT_NAME, '6.9.0', BASE_URL.format('6.9.0')), InstallationCandidate(PIP_PROJECT_NAME, '3.3.1', BASE_URL.format('3.3.1')), InstallationCandidate(PIP_PROJECT_NAME, '1.0', BASE_URL.format('1.0')), ] def __init__(self, *args, **kwargs): pass def find_all_candidates(self, project_name): return self.INSTALLATION_CANDIDATES class MockDistribution(object): def __init__(self, installer): self.installer = installer def has_metadata(self, name): return name == 'INSTALLER' def get_metadata_lines(self, name): if self.has_metadata(name): yield self.installer else: raise NotImplementedError('nope') def _options(): ''' Some default options that we pass to outdated.pip_version_check ''' return pretend.stub( find_links=False, extra_index_urls=[], index_url='default_url', pre=False, trusted_hosts=False, cache_dir='', ) @pytest.mark.parametrize( [ 'stored_time', 'installed_ver', 'new_ver', 'installer', 'check_if_upgrade_required', 'check_warn_logs', ], [ # Test we return None when installed version is None ('1970-01-01T10:00:00Z', None, '1.0', 'pip', False, False), # Need an upgrade - upgrade warning should print ('1970-01-01T10:00:00Z', '1.0', '6.9.0', 'pip', True, True), # Upgrade available, pip installed via rpm - warning should not print ('1970-01-01T10:00:00Z', '1.0', '6.9.0', 'rpm', True, False), # No upgrade - upgrade warning should not print ('1970-01-9T10:00:00Z', '6.9.0', '6.9.0', 'pip', False, False), ] ) def test_pip_version_check(monkeypatch, stored_time, installed_ver, new_ver, installer, check_if_upgrade_required, check_warn_logs): 
monkeypatch.setattr(outdated, 'get_installed_version', lambda name: installed_ver) monkeypatch.setattr(outdated, 'PackageFinder', MockPackageFinder) monkeypatch.setattr(outdated.logger, 'warning', pretend.call_recorder(lambda *a, **kw: None)) monkeypatch.setattr(outdated.logger, 'debug', pretend.call_recorder(lambda s, exc_info=None: None)) monkeypatch.setattr(pkg_resources, 'get_distribution', lambda name: MockDistribution(installer)) fake_state = pretend.stub( state={"last_check": stored_time, 'pypi_version': installed_ver}, save=pretend.call_recorder(lambda v, t: None), ) monkeypatch.setattr( outdated, 'SelfCheckState', lambda **kw: fake_state ) with freezegun.freeze_time( "1970-01-09 10:00:00", ignore=[ "six.moves", "pip._vendor.six.moves", "pip._vendor.requests.packages.urllib3.packages.six.moves", ] ): latest_pypi_version = outdated.pip_version_check(None, _options()) # See we return None if not installed_version if not installed_ver: assert not latest_pypi_version # See that we saved the correct version elif check_if_upgrade_required: assert fake_state.save.calls == [ pretend.call(new_ver, datetime.datetime(1970, 1, 9, 10, 00, 00)), ] else: # Make sure no Exceptions assert not outdated.logger.debug.calls # See that save was not called assert fake_state.save.calls == [] # Ensure we warn the user or not if check_warn_logs: assert len(outdated.logger.warning.calls) == 1 else: assert len(outdated.logger.warning.calls) == 0 def test_self_check_state(monkeypatch, tmpdir): CONTENT = '''{"pip_prefix": {"last_check": "1970-01-02T11:00:00Z", "pypi_version": "1.0"}}''' fake_file = pretend.stub( read=pretend.call_recorder(lambda: CONTENT), write=pretend.call_recorder(lambda s: None), ) @pretend.call_recorder @contextmanager def fake_open(filename, mode='r'): yield fake_file monkeypatch.setattr(outdated, 'open', fake_open, raising=False) @pretend.call_recorder @contextmanager def fake_lock(filename): yield monkeypatch.setattr(outdated, "check_path_owner", lambda p: True) 
monkeypatch.setattr(lockfile, 'LockFile', fake_lock) monkeypatch.setattr(os.path, "exists", lambda p: True) cache_dir = tmpdir / 'cache_dir' monkeypatch.setattr(sys, 'prefix', tmpdir / 'pip_prefix') state = outdated.SelfCheckState(cache_dir=cache_dir) state.save('2.0', datetime.datetime.utcnow()) expected_path = cache_dir / 'selfcheck.json' assert fake_lock.calls == [pretend.call(expected_path)] assert fake_open.calls == [ pretend.call(expected_path), pretend.call(expected_path), pretend.call(expected_path, 'w'), ] # json.dumps will call this a number of times assert len(fake_file.write.calls) def test_self_check_state_no_cache_dir(): state = outdated.SelfCheckState(cache_dir=False) assert state.state == {} assert state.statefile_path is None
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_utils.py
# -*- coding: utf-8 -*- """ util tests """ import codecs import itertools import logging import os import shutil import stat import sys import tempfile import time import warnings from io import BytesIO import pytest from mock import Mock, patch from pip._internal.exceptions import ( HashMismatch, HashMissing, InstallationError, UnsupportedPythonVersion, ) from pip._internal.utils.encoding import BOMS, auto_decode from pip._internal.utils.glibc import check_glibc_version from pip._internal.utils.hashes import Hashes, MissingHashes from pip._internal.utils.misc import ( call_subprocess, egg_link_path, ensure_dir, format_command_args, get_installed_distributions, get_prog, normalize_path, redact_netloc, redact_password_from_url, remove_auth_from_url, rmtree, split_auth_from_netloc, untar_file, unzip_file, ) from pip._internal.utils.packaging import check_dist_requires_python from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory from pip._internal.utils.ui import SpinnerInterface class Tests_EgglinkPath: "util.egg_link_path() tests" def setup(self): project = 'foo' self.mock_dist = Mock(project_name=project) self.site_packages = 'SITE_PACKAGES' self.user_site = 'USER_SITE' self.user_site_egglink = os.path.join( self.user_site, '%s.egg-link' % project ) self.site_packages_egglink = os.path.join( self.site_packages, '%s.egg-link' % project, ) # patches from pip._internal.utils import misc as utils self.old_site_packages = utils.site_packages self.mock_site_packages = utils.site_packages = 'SITE_PACKAGES' self.old_running_under_virtualenv = utils.running_under_virtualenv self.mock_running_under_virtualenv = utils.running_under_virtualenv = \ Mock() self.old_virtualenv_no_global = utils.virtualenv_no_global self.mock_virtualenv_no_global = utils.virtualenv_no_global = Mock() self.old_user_site = utils.user_site self.mock_user_site = utils.user_site = self.user_site from os import path self.old_isfile = path.isfile self.mock_isfile = path.isfile = 
Mock() def teardown(self): from pip._internal.utils import misc as utils utils.site_packages = self.old_site_packages utils.running_under_virtualenv = self.old_running_under_virtualenv utils.virtualenv_no_global = self.old_virtualenv_no_global utils.user_site = self.old_user_site from os import path path.isfile = self.old_isfile def eggLinkInUserSite(self, egglink): return egglink == self.user_site_egglink def eggLinkInSitePackages(self, egglink): return egglink == self.site_packages_egglink # ####################### # # # egglink in usersite # # # ####################### # def test_egglink_in_usersite_notvenv(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = False self.mock_isfile.side_effect = self.eggLinkInUserSite assert egg_link_path(self.mock_dist) == self.user_site_egglink def test_egglink_in_usersite_venv_noglobal(self): self.mock_virtualenv_no_global.return_value = True self.mock_running_under_virtualenv.return_value = True self.mock_isfile.side_effect = self.eggLinkInUserSite assert egg_link_path(self.mock_dist) is None def test_egglink_in_usersite_venv_global(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = True self.mock_isfile.side_effect = self.eggLinkInUserSite assert egg_link_path(self.mock_dist) == self.user_site_egglink # ####################### # # # egglink in sitepkgs # # # ####################### # def test_egglink_in_sitepkgs_notvenv(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = False self.mock_isfile.side_effect = self.eggLinkInSitePackages assert egg_link_path(self.mock_dist) == self.site_packages_egglink def test_egglink_in_sitepkgs_venv_noglobal(self): self.mock_virtualenv_no_global.return_value = True self.mock_running_under_virtualenv.return_value = True self.mock_isfile.side_effect = self.eggLinkInSitePackages assert egg_link_path(self.mock_dist) == 
self.site_packages_egglink def test_egglink_in_sitepkgs_venv_global(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = True self.mock_isfile.side_effect = self.eggLinkInSitePackages assert egg_link_path(self.mock_dist) == self.site_packages_egglink # ################################## # # # egglink in usersite & sitepkgs # # # ################################## # def test_egglink_in_both_notvenv(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = False self.mock_isfile.return_value = True assert egg_link_path(self.mock_dist) == self.user_site_egglink def test_egglink_in_both_venv_noglobal(self): self.mock_virtualenv_no_global.return_value = True self.mock_running_under_virtualenv.return_value = True self.mock_isfile.return_value = True assert egg_link_path(self.mock_dist) == self.site_packages_egglink def test_egglink_in_both_venv_global(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = True self.mock_isfile.return_value = True assert egg_link_path(self.mock_dist) == self.site_packages_egglink # ############## # # # no egglink # # # ############## # def test_noegglink_in_sitepkgs_notvenv(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = False self.mock_isfile.return_value = False assert egg_link_path(self.mock_dist) is None def test_noegglink_in_sitepkgs_venv_noglobal(self): self.mock_virtualenv_no_global.return_value = True self.mock_running_under_virtualenv.return_value = True self.mock_isfile.return_value = False assert egg_link_path(self.mock_dist) is None def test_noegglink_in_sitepkgs_venv_global(self): self.mock_virtualenv_no_global.return_value = False self.mock_running_under_virtualenv.return_value = True self.mock_isfile.return_value = False assert egg_link_path(self.mock_dist) is None 
@patch('pip._internal.utils.misc.dist_in_usersite') @patch('pip._internal.utils.misc.dist_is_local') @patch('pip._internal.utils.misc.dist_is_editable') class Tests_get_installed_distributions: """test util.get_installed_distributions""" workingset = [ Mock(test_name="global"), Mock(test_name="editable"), Mock(test_name="normal"), Mock(test_name="user"), ] workingset_stdlib = [ Mock(test_name='normal', key='argparse'), Mock(test_name='normal', key='wsgiref') ] workingset_freeze = [ Mock(test_name='normal', key='pip'), Mock(test_name='normal', key='setuptools'), Mock(test_name='normal', key='distribute') ] def dist_is_editable(self, dist): return dist.test_name == "editable" def dist_is_local(self, dist): return dist.test_name != "global" and dist.test_name != 'user' def dist_in_usersite(self, dist): return dist.test_name == "user" @patch('pip._vendor.pkg_resources.working_set', workingset) def test_editables_only(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions(editables_only=True) assert len(dists) == 1, dists assert dists[0].test_name == "editable" @patch('pip._vendor.pkg_resources.working_set', workingset) def test_exclude_editables(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions(include_editables=False) assert len(dists) == 1 assert dists[0].test_name == "normal" @patch('pip._vendor.pkg_resources.working_set', workingset) def test_include_globals(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = 
self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions(local_only=False) assert len(dists) == 4 @patch('pip._vendor.pkg_resources.working_set', workingset) def test_user_only(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions(local_only=False, user_only=True) assert len(dists) == 1 assert dists[0].test_name == "user" @patch('pip._vendor.pkg_resources.working_set', workingset_stdlib) def test_gte_py27_excludes(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions() assert len(dists) == 0 @patch('pip._vendor.pkg_resources.working_set', workingset_freeze) def test_freeze_excludes(self, mock_dist_is_editable, mock_dist_is_local, mock_dist_in_usersite): mock_dist_is_editable.side_effect = self.dist_is_editable mock_dist_is_local.side_effect = self.dist_is_local mock_dist_in_usersite.side_effect = self.dist_in_usersite dists = get_installed_distributions( skip=('setuptools', 'pip', 'distribute')) assert len(dists) == 0 class TestUnpackArchives(object): """ test_tar.tgz/test_tar.zip have content as follows engineered to confirm 3 things: 1) confirm that reg files, dirs, and symlinks get unpacked 2) permissions are not preserved (and go by the 022 umask) 3) reg files with *any* execute perms, get chmod +x file.txt 600 regular file symlink.txt 777 symlink to file.txt script_owner.sh 700 script where owner can execute script_group.sh 610 script where group can execute script_world.sh 601 script where world can execute dir 744 directory dir/dirfile 622 regular file 4) the file 
contents are extracted correctly (though the content of each file isn't currently unique) """ def setup(self): self.tempdir = tempfile.mkdtemp() self.old_mask = os.umask(0o022) self.symlink_expected_mode = None def teardown(self): os.umask(self.old_mask) shutil.rmtree(self.tempdir, ignore_errors=True) def mode(self, path): return stat.S_IMODE(os.stat(path).st_mode) def confirm_files(self): # expectations based on 022 umask set above and the unpack logic that # sets execute permissions, not preservation for fname, expected_mode, test, expected_contents in [ ('file.txt', 0o644, os.path.isfile, b'file\n'), # We don't test the "symlink.txt" contents for now. ('symlink.txt', 0o644, os.path.isfile, None), ('script_owner.sh', 0o755, os.path.isfile, b'file\n'), ('script_group.sh', 0o755, os.path.isfile, b'file\n'), ('script_world.sh', 0o755, os.path.isfile, b'file\n'), ('dir', 0o755, os.path.isdir, None), (os.path.join('dir', 'dirfile'), 0o644, os.path.isfile, b''), ]: path = os.path.join(self.tempdir, fname) if path.endswith('symlink.txt') and sys.platform == 'win32': # no symlinks created on windows continue assert test(path), path if expected_contents is not None: with open(path, mode='rb') as f: contents = f.read() assert contents == expected_contents, 'fname: {}'.format(fname) if sys.platform == 'win32': # the permissions tests below don't apply in windows # due to os.chmod being a noop continue mode = self.mode(path) assert mode == expected_mode, ( "mode: %s, expected mode: %s" % (mode, expected_mode) ) def test_unpack_tgz(self, data): """ Test unpacking a *.tgz, and setting execute permissions """ test_file = data.packages.join("test_tar.tgz") untar_file(test_file, self.tempdir) self.confirm_files() # Check the timestamp of an extracted file file_txt_path = os.path.join(self.tempdir, 'file.txt') mtime = time.gmtime(os.stat(file_txt_path).st_mtime) assert mtime[0:6] == (2013, 8, 16, 5, 13, 37), mtime def test_unpack_zip(self, data): """ Test unpacking a *.zip, and 
setting execute permissions """ test_file = data.packages.join("test_zip.zip") unzip_file(test_file, self.tempdir) self.confirm_files() class Failer: def __init__(self, duration=1): self.succeed_after = time.time() + duration def call(self, *args, **kw): """Fail with OSError self.max_fails times""" if time.time() < self.succeed_after: raise OSError("Failed") def test_rmtree_retries(tmpdir, monkeypatch): """ Test pip._internal.utils.rmtree will retry failures """ monkeypatch.setattr(shutil, 'rmtree', Failer(duration=1).call) rmtree('foo') def test_rmtree_retries_for_3sec(tmpdir, monkeypatch): """ Test pip._internal.utils.rmtree will retry failures for no more than 3 sec """ monkeypatch.setattr(shutil, 'rmtree', Failer(duration=5).call) with pytest.raises(OSError): rmtree('foo') class Test_normalize_path(object): # Technically, symlinks are possible on Windows, but you need a special # permission bit to create them, and Python 2 doesn't support it anyway, so # it's easiest just to skip this test on Windows altogether. @pytest.mark.skipif("sys.platform == 'win32'") def test_resolve_symlinks(self, tmpdir): print(type(tmpdir)) print(dir(tmpdir)) orig_working_dir = os.getcwd() os.chdir(tmpdir) try: d = os.path.join('foo', 'bar') f = os.path.join(d, 'file1') os.makedirs(d) with open(f, 'w'): # Create the file pass os.symlink(d, 'dir_link') os.symlink(f, 'file_link') assert normalize_path( 'dir_link/file1', resolve_symlinks=True ) == os.path.join(tmpdir, f) assert normalize_path( 'dir_link/file1', resolve_symlinks=False ) == os.path.join(tmpdir, 'dir_link', 'file1') assert normalize_path( 'file_link', resolve_symlinks=True ) == os.path.join(tmpdir, f) assert normalize_path( 'file_link', resolve_symlinks=False ) == os.path.join(tmpdir, 'file_link') finally: os.chdir(orig_working_dir) class TestHashes(object): """Tests for pip._internal.utils.hashes""" def test_success(self, tmpdir): """Make sure no error is raised when at least one hash matches. 
Test check_against_path because it calls everything else. """ file = tmpdir / 'to_hash' file.write('hello') hashes = Hashes({ 'sha256': ['2cf24dba5fb0a30e26e83b2ac5b9e29e' '1b161e5c1fa7425e73043362938b9824'], 'sha224': ['wrongwrong'], 'md5': ['5d41402abc4b2a76b9719d911017c592']}) hashes.check_against_path(file) def test_failure(self): """Hashes should raise HashMismatch when no hashes match.""" hashes = Hashes({'sha256': ['wrongwrong']}) with pytest.raises(HashMismatch): hashes.check_against_file(BytesIO(b'hello')) def test_missing_hashes(self): """MissingHashes should raise HashMissing when any check is done.""" with pytest.raises(HashMissing): MissingHashes().check_against_file(BytesIO(b'hello')) def test_unknown_hash(self): """Hashes should raise InstallationError when it encounters an unknown hash.""" hashes = Hashes({'badbad': ['dummy']}) with pytest.raises(InstallationError): hashes.check_against_file(BytesIO(b'hello')) def test_non_zero(self): """Test that truthiness tests tell whether any known-good hashes exist.""" assert Hashes({'sha256': 'dummy'}) assert not Hashes() assert not Hashes({}) class TestEncoding(object): """Tests for pip._internal.utils.encoding""" def test_auto_decode_utf_16_le(self): data = ( b'\xff\xfeD\x00j\x00a\x00n\x00g\x00o\x00=\x00' b'=\x001\x00.\x004\x00.\x002\x00' ) assert data.startswith(codecs.BOM_UTF16_LE) assert auto_decode(data) == "Django==1.4.2" def test_auto_decode_utf_16_be(self): data = ( b'\xfe\xff\x00D\x00j\x00a\x00n\x00g\x00o\x00=' b'\x00=\x001\x00.\x004\x00.\x002' ) assert data.startswith(codecs.BOM_UTF16_BE) assert auto_decode(data) == "Django==1.4.2" def test_auto_decode_no_bom(self): assert auto_decode(b'foobar') == u'foobar' def test_auto_decode_pep263_headers(self): latin1_req = u'# coding=latin1\n# Pas trop de café' assert auto_decode(latin1_req.encode('latin1')) == latin1_req def test_auto_decode_no_preferred_encoding(self): om, em = Mock(), Mock() om.return_value = 'ascii' em.return_value = None data = u'data' 
with patch('sys.getdefaultencoding', om): with patch('locale.getpreferredencoding', em): ret = auto_decode(data.encode(sys.getdefaultencoding())) assert ret == data @pytest.mark.parametrize('encoding', [encoding for bom, encoding in BOMS]) def test_all_encodings_are_valid(self, encoding): # we really only care that there is no LookupError assert ''.encode(encoding).decode(encoding) == '' class TestTempDirectory(object): # No need to test symlinked directories on Windows @pytest.mark.skipif("sys.platform == 'win32'") def test_symlinked_path(self): with TempDirectory() as tmp_dir: assert os.path.exists(tmp_dir.path) alt_tmp_dir = tempfile.mkdtemp(prefix="pip-test-") assert ( os.path.dirname(tmp_dir.path) == os.path.dirname(os.path.realpath(alt_tmp_dir)) ) # are we on a system where /tmp is a symlink if os.path.realpath(alt_tmp_dir) != os.path.abspath(alt_tmp_dir): assert ( os.path.dirname(tmp_dir.path) != os.path.dirname(alt_tmp_dir) ) else: assert ( os.path.dirname(tmp_dir.path) == os.path.dirname(alt_tmp_dir) ) os.rmdir(tmp_dir.path) assert not os.path.exists(tmp_dir.path) def test_deletes_readonly_files(self): def create_file(*args): fpath = os.path.join(*args) ensure_dir(os.path.dirname(fpath)) with open(fpath, "w") as f: f.write("Holla!") def readonly_file(*args): fpath = os.path.join(*args) os.chmod(fpath, stat.S_IREAD) with TempDirectory() as tmp_dir: create_file(tmp_dir.path, "normal-file") create_file(tmp_dir.path, "readonly-file") readonly_file(tmp_dir.path, "readonly-file") create_file(tmp_dir.path, "subfolder", "normal-file") create_file(tmp_dir.path, "subfolder", "readonly-file") readonly_file(tmp_dir.path, "subfolder", "readonly-file") assert tmp_dir.path is None def test_create_and_cleanup_work(self): tmp_dir = TempDirectory() assert tmp_dir.path is None tmp_dir.create() created_path = tmp_dir.path assert tmp_dir.path is not None assert os.path.exists(created_path) tmp_dir.cleanup() assert tmp_dir.path is None assert not os.path.exists(created_path) 
@pytest.mark.parametrize("name", [ "ABC", "ABC.dist-info", "_+-", "_package", "A......B", "AB", "A", "2", ]) def test_adjacent_directory_names(self, name): def names(): return AdjacentTempDirectory._generate_names(name) chars = AdjacentTempDirectory.LEADING_CHARS # Ensure many names are unique # (For long *name*, this sequence can be extremely long. # However, since we're only ever going to take the first # result that works, provided there are many of those # and that shorter names result in totally unique sets, # it's okay to skip part of the test.) some_names = list(itertools.islice(names(), 1000)) # We should always get at least 1000 names assert len(some_names) == 1000 # Ensure original name does not appear early in the set assert name not in some_names if len(name) > 2: # Names should be at least 90% unique (given the infinite # range of inputs, and the possibility that generated names # may already exist on disk anyway, this is a much cheaper # criteria to enforce than complete uniqueness). 
assert len(some_names) > 0.9 * len(set(some_names)) # Ensure the first few names are the same length as the original same_len = list(itertools.takewhile( lambda x: len(x) == len(name), some_names )) assert len(same_len) > 10 # Check the first group are correct expected_names = ['~' + name[1:]] expected_names.extend('~' + c + name[2:] for c in chars) for x, y in zip(some_names, expected_names): assert x == y else: # All names are going to be longer than our original assert min(len(x) for x in some_names) > 1 # All names are going to be unique assert len(some_names) == len(set(some_names)) if len(name) == 2: # All but the first name are going to end with our original assert all(x.endswith(name) for x in some_names[1:]) else: # All names are going to end with our original assert all(x.endswith(name) for x in some_names) @pytest.mark.parametrize("name", [ "A", "ABC", "ABC.dist-info", "_+-", "_package", ]) def test_adjacent_directory_exists(self, name, tmpdir): block_name, expect_name = itertools.islice( AdjacentTempDirectory._generate_names(name), 2) original = os.path.join(tmpdir, name) blocker = os.path.join(tmpdir, block_name) ensure_dir(original) ensure_dir(blocker) with AdjacentTempDirectory(original) as atmp_dir: assert expect_name == os.path.split(atmp_dir.path)[1] def test_adjacent_directory_permission_error(self, monkeypatch): name = "ABC" def raising_mkdir(*args, **kwargs): raise OSError("Unknown OSError") with TempDirectory() as tmp_dir: original = os.path.join(tmp_dir.path, name) ensure_dir(original) monkeypatch.setattr("os.mkdir", raising_mkdir) with pytest.raises(OSError): with AdjacentTempDirectory(original): pass class TestGlibc(object): def test_manylinux_check_glibc_version(self): """ Test that the check_glibc_version function is robust against weird glibc version strings. 
""" for two_twenty in ["2.20", # used by "linaro glibc", see gh-3588 "2.20-2014.11", # weird possibilities that I just made up "2.20+dev", "2.20-custom", "2.20.1", ]: assert check_glibc_version(two_twenty, 2, 15) assert check_glibc_version(two_twenty, 2, 20) assert not check_glibc_version(two_twenty, 2, 21) assert not check_glibc_version(two_twenty, 3, 15) assert not check_glibc_version(two_twenty, 1, 15) # For strings that we just can't parse at all, we should warn and # return false for bad_string in ["asdf", "", "foo.bar"]: with warnings.catch_warnings(record=True) as ws: warnings.filterwarnings("always") assert not check_glibc_version(bad_string, 2, 5) for w in ws: if "Expected glibc version with" in str(w.message): break else: # Didn't find the warning we were expecting assert False class TestCheckRequiresPython(object): @pytest.mark.parametrize( ("metadata", "should_raise"), [ ("Name: test\n", False), ("Name: test\nRequires-Python:", False), ("Name: test\nRequires-Python: invalid_spec", False), ("Name: test\nRequires-Python: <=1", True), ], ) def test_check_requires(self, metadata, should_raise): fake_dist = Mock( has_metadata=lambda _: True, get_metadata=lambda _: metadata) if should_raise: with pytest.raises(UnsupportedPythonVersion): check_dist_requires_python(fake_dist) else: check_dist_requires_python(fake_dist) class TestGetProg(object): @pytest.mark.parametrize( ("argv", "executable", "expected"), [ ('/usr/bin/pip', '', 'pip'), ('-c', '/usr/bin/python', '/usr/bin/python -m pip'), ('__main__.py', '/usr/bin/python', '/usr/bin/python -m pip'), ('/usr/bin/pip3', '', 'pip3'), ] ) def test_get_prog(self, monkeypatch, argv, executable, expected): monkeypatch.setattr('pip._internal.utils.misc.sys.argv', [argv]) monkeypatch.setattr( 'pip._internal.utils.misc.sys.executable', executable ) assert get_prog() == expected @pytest.mark.parametrize('args, expected', [ (['pip', 'list'], 'pip list'), (['foo', 'space space', 'new\nline', 'double"quote', "single'quote"], 
"""foo 'space space' 'new\nline' 'double"quote' 'single'"'"'quote'"""), ]) def test_format_command_args(args, expected): actual = format_command_args(args) assert actual == expected class FakeSpinner(SpinnerInterface): def __init__(self): self.spin_count = 0 self.final_status = None def spin(self): self.spin_count += 1 def finish(self, final_status): self.final_status = final_status class TestCallSubprocess(object): """ Test call_subprocess(). """ def check_result( self, capfd, caplog, log_level, spinner, result, expected, expected_spinner, ): """ Check the result of calling call_subprocess(). :param log_level: the logging level that caplog was set to. :param spinner: the FakeSpinner object passed to call_subprocess() to be checked. :param result: the call_subprocess() return value to be checked. :param expected: a 3-tuple (expected_proc, expected_out, expected_records), where 1) `expected_proc` is the expected return value of call_subprocess() as a list of lines, or None if the return value is expected to be None; 2) `expected_out` is the expected stdout captured from the subprocess call, as a list of lines; and 3) `expected_records` is the expected value of caplog.record_tuples. :param expected_spinner: a 2-tuple of the spinner's expected (spin_count, final_status). """ expected_proc, expected_out, expected_records = expected if expected_proc is None: assert result is expected_proc else: assert result.splitlines() == expected_proc captured = capfd.readouterr() stdout, stderr = captured.out, captured.err assert stdout.splitlines() == expected_out assert stderr == '' records = caplog.record_tuples if len(records) != len(expected_records): raise RuntimeError('{} != {}'.format(records, expected_records)) for record, expected_record in zip(records, expected_records): # Check the logger_name and log level parts exactly. assert record[:2] == expected_record[:2] # For the message portion, check only a substring. 
Also, we # can't use startswith() since the order of stdout and stderr # isn't guaranteed in cases where stderr is also present. # For example, we observed the stderr lines coming before stdout # in CI for PyPy 2.7 even though stdout happens first # chronologically. assert expected_record[2] in record[2] assert (spinner.spin_count, spinner.final_status) == expected_spinner def prepare_call(self, caplog, log_level, command=None): if command is None: command = 'print("Hello"); print("world")' caplog.set_level(log_level) spinner = FakeSpinner() args = [sys.executable, '-c', command] return (args, spinner) def test_debug_logging(self, capfd, caplog): """ Test DEBUG logging (and without passing show_stdout=True). """ log_level = logging.DEBUG args, spinner = self.prepare_call(caplog, log_level) result = call_subprocess(args, spinner=spinner) expected = (['Hello', 'world'], [], [ ('pip._internal.utils.misc', 10, 'Running command '), ('pip._internal.utils.misc', 10, 'Hello'), ('pip._internal.utils.misc', 10, 'world'), ]) # The spinner shouldn't spin in this case since the subprocess # output is already being logged to the console. self.check_result( capfd, caplog, log_level, spinner, result, expected, expected_spinner=(0, 'done'), ) def test_info_logging(self, capfd, caplog): """ Test INFO logging (and without passing show_stdout=True). """ log_level = logging.INFO args, spinner = self.prepare_call(caplog, log_level) result = call_subprocess(args, spinner=spinner) expected = (['Hello', 'world'], [], []) # The spinner should spin twice in this case since the subprocess # output isn't being written to the console. self.check_result( capfd, caplog, log_level, spinner, result, expected, expected_spinner=(2, 'done'), ) def test_info_logging__subprocess_error(self, capfd, caplog): """ Test INFO logging of a subprocess with an error (and without passing show_stdout=True). 
""" log_level = logging.INFO command = 'print("Hello"); print("world"); exit("fail")' args, spinner = self.prepare_call(caplog, log_level, command=command) with pytest.raises(InstallationError): call_subprocess(args, spinner=spinner) result = None expected = (None, [], [ ('pip._internal.utils.misc', 20, 'Complete output from command '), # The "failed" portion is later on in this "Hello" string. ('pip._internal.utils.misc', 20, 'Hello'), ]) # The spinner should spin three times in this case since the # subprocess output isn't being written to the console. self.check_result( capfd, caplog, log_level, spinner, result, expected, expected_spinner=(3, 'error'), ) # Do some further checking on the captured log records to confirm # that the subprocess output was logged. last_record = caplog.record_tuples[-1] last_message = last_record[2] lines = last_message.splitlines() # We have to sort before comparing the lines because we can't # guarantee the order in which stdout and stderr will appear. # For example, we observed the stderr lines coming before stdout # in CI for PyPy 2.7 even though stdout happens first chronologically. assert sorted(lines) == [ '----------------------------------------', 'Hello', 'fail', 'world', ], 'lines: {}'.format(lines) # Show the full output on failure. def test_info_logging_with_show_stdout_true(self, capfd, caplog): """ Test INFO logging with show_stdout=True. """ log_level = logging.INFO args, spinner = self.prepare_call(caplog, log_level) result = call_subprocess(args, spinner=spinner, show_stdout=True) expected = (None, ['Hello', 'world'], []) # The spinner shouldn't spin in this case since the subprocess # output is already being written to the console. 
self.check_result( capfd, caplog, log_level, spinner, result, expected, expected_spinner=(0, None), ) @pytest.mark.parametrize(( 'exit_status', 'show_stdout', 'extra_ok_returncodes', 'log_level', 'expected'), [ (0, False, None, logging.INFO, (None, 'done', 2)), # Test some cases that should result in show_spinner false. (0, False, None, logging.DEBUG, (None, 'done', 0)), # Test show_stdout=True. (0, True, None, logging.DEBUG, (None, None, 0)), (0, True, None, logging.INFO, (None, None, 0)), (0, True, None, logging.WARNING, (None, None, 0)), # Test a non-zero exit status. (3, False, None, logging.INFO, (InstallationError, 'error', 2)), # Test a non-zero exit status also in extra_ok_returncodes. (3, False, (3, ), logging.INFO, (None, 'done', 2)), ]) def test_spinner_finish( self, exit_status, show_stdout, extra_ok_returncodes, log_level, caplog, expected, ): """ Test that the spinner finishes correctly. """ expected_exc_type = expected[0] expected_final_status = expected[1] expected_spin_count = expected[2] command = ( 'print("Hello"); print("world"); exit({})'.format(exit_status) ) args, spinner = self.prepare_call(caplog, log_level, command=command) try: call_subprocess( args, show_stdout=show_stdout, extra_ok_returncodes=extra_ok_returncodes, spinner=spinner, ) except Exception as exc: exc_type = type(exc) else: exc_type = None assert exc_type == expected_exc_type assert spinner.final_status == expected_final_status assert spinner.spin_count == expected_spin_count def test_closes_stdin(self): with pytest.raises(InstallationError): call_subprocess( [sys.executable, '-c', 'input()'], show_stdout=True, ) @pytest.mark.parametrize('netloc, expected', [ # Test a basic case. ('example.com', ('example.com', (None, None))), # Test with username and no password. ('user@example.com', ('example.com', ('user', None))), # Test with username and password. ('user:pass@example.com', ('example.com', ('user', 'pass'))), # Test with username and empty password. 
('user:@example.com', ('example.com', ('user', ''))), # Test the password containing an @ symbol. ('user:pass@word@example.com', ('example.com', ('user', 'pass@word'))), # Test the password containing a : symbol. ('user:pass:word@example.com', ('example.com', ('user', 'pass:word'))), # Test URL-encoded reserved characters. ('user%3Aname:%23%40%5E@example.com', ('example.com', ('user:name', '#@^'))), ]) def test_split_auth_from_netloc(netloc, expected): actual = split_auth_from_netloc(netloc) assert actual == expected @pytest.mark.parametrize('netloc, expected', [ # Test a basic case. ('example.com', 'example.com'), # Test with username and no password. ('user@example.com', 'user@example.com'), # Test with username and password. ('user:pass@example.com', 'user:****@example.com'), # Test with username and empty password. ('user:@example.com', 'user:****@example.com'), # Test the password containing an @ symbol. ('user:pass@word@example.com', 'user:****@example.com'), # Test the password containing a : symbol. ('user:pass:word@example.com', 'user:****@example.com'), # Test URL-encoded reserved characters. 
('user%3Aname:%23%40%5E@example.com', 'user%3Aname:****@example.com'), ]) def test_redact_netloc(netloc, expected): actual = redact_netloc(netloc) assert actual == expected @pytest.mark.parametrize('auth_url, expected_url', [ ('https://user:pass@domain.tld/project/tags/v0.2', 'https://domain.tld/project/tags/v0.2'), ('https://domain.tld/project/tags/v0.2', 'https://domain.tld/project/tags/v0.2',), ('https://user:pass@domain.tld/svn/project/trunk@8181', 'https://domain.tld/svn/project/trunk@8181'), ('https://domain.tld/project/trunk@8181', 'https://domain.tld/project/trunk@8181',), ('git+https://pypi.org/something', 'git+https://pypi.org/something'), ('git+https://user:pass@pypi.org/something', 'git+https://pypi.org/something'), ('git+ssh://git@pypi.org/something', 'git+ssh://pypi.org/something'), ]) def test_remove_auth_from_url(auth_url, expected_url): url = remove_auth_from_url(auth_url) assert url == expected_url @pytest.mark.parametrize('auth_url, expected_url', [ ('https://user@example.com/abc', 'https://user@example.com/abc'), ('https://user:password@example.com', 'https://user:****@example.com'), ('https://user:@example.com', 'https://user:****@example.com'), ('https://example.com', 'https://example.com'), # Test URL-encoded reserved characters. ('https://user%3Aname:%23%40%5E@example.com', 'https://user%3Aname:****@example.com'), ]) def test_redact_password_from_url(auth_url, expected_url): url = redact_password_from_url(auth_url) assert url == expected_url
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_vcs.py
import pytest from mock import patch from pip._vendor.packaging.version import parse as parse_version from pip._internal.vcs import ( RevOptions, VersionControl, make_vcs_requirement_url, ) from pip._internal.vcs.bazaar import Bazaar from pip._internal.vcs.git import Git, looks_like_hash from pip._internal.vcs.mercurial import Mercurial from pip._internal.vcs.subversion import Subversion from tests.lib import pyversion if pyversion >= '3': VERBOSE_FALSE = False else: VERBOSE_FALSE = 0 @pytest.mark.parametrize('args, expected', [ # Test without subdir. (('git+https://example.com/pkg', 'dev', 'myproj'), 'git+https://example.com/pkg@dev#egg=myproj'), # Test with subdir. (('git+https://example.com/pkg', 'dev', 'myproj', 'sub/dir'), 'git+https://example.com/pkg@dev#egg=myproj&subdirectory=sub/dir'), # Test with None subdir. (('git+https://example.com/pkg', 'dev', 'myproj', None), 'git+https://example.com/pkg@dev#egg=myproj'), # Test an unescaped project name. (('git+https://example.com/pkg', 'dev', 'zope-interface'), 'git+https://example.com/pkg@dev#egg=zope_interface'), ]) def test_make_vcs_requirement_url(args, expected): actual = make_vcs_requirement_url(*args) assert actual == expected def test_rev_options_repr(): rev_options = RevOptions(Git, 'develop') assert repr(rev_options) == "<RevOptions git: rev='develop'>" @pytest.mark.parametrize(('vc_class', 'expected1', 'expected2', 'kwargs'), [ # First check VCS-specific RevOptions behavior. (Bazaar, [], ['-r', '123'], {}), (Git, ['HEAD'], ['123'], {}), (Mercurial, [], ['123'], {}), (Subversion, [], ['-r', '123'], {}), # Test extra_args. For this, test using a single VersionControl class. (Git, ['HEAD', 'opt1', 'opt2'], ['123', 'opt1', 'opt2'], dict(extra_args=['opt1', 'opt2'])), ]) def test_rev_options_to_args(vc_class, expected1, expected2, kwargs): """ Test RevOptions.to_args(). 
""" assert RevOptions(vc_class, **kwargs).to_args() == expected1 assert RevOptions(vc_class, '123', **kwargs).to_args() == expected2 def test_rev_options_to_display(): """ Test RevOptions.to_display(). """ # The choice of VersionControl class doesn't matter here since # the implementation is the same for all of them. rev_options = RevOptions(Git) assert rev_options.to_display() == '' rev_options = RevOptions(Git, 'master') assert rev_options.to_display() == ' (to revision master)' def test_rev_options_make_new(): """ Test RevOptions.make_new(). """ # The choice of VersionControl class doesn't matter here since # the implementation is the same for all of them. rev_options = RevOptions(Git, 'master', extra_args=['foo', 'bar']) new_options = rev_options.make_new('develop') assert new_options is not rev_options assert new_options.extra_args == ['foo', 'bar'] assert new_options.rev == 'develop' assert new_options.vc_class is Git def test_looks_like_hash(): assert looks_like_hash(40 * 'a') assert looks_like_hash(40 * 'A') # Test a string containing all valid characters. assert looks_like_hash(18 * 'a' + '0123456789abcdefABCDEF') assert not looks_like_hash(40 * 'g') assert not looks_like_hash(39 * 'a') @pytest.mark.parametrize('vcs_cls, remote_url, expected', [ # Git is one of the subclasses using the base class implementation. (Git, 'git://example.com/MyProject', False), (Git, 'http://example.com/MyProject', True), # Subversion is the only subclass overriding the base class implementation. 
(Subversion, 'svn://example.com/MyProject', True), ]) def test_should_add_vcs_url_prefix(vcs_cls, remote_url, expected): actual = vcs_cls.should_add_vcs_url_prefix(remote_url) assert actual == expected @patch('pip._internal.vcs.git.Git.get_revision') @patch('pip._internal.vcs.git.Git.get_remote_url') @pytest.mark.network def test_git_get_src_requirements(mock_get_remote_url, mock_get_revision): git_url = 'https://github.com/pypa/pip-test-package' sha = '5547fa909e83df8bd743d3978d6667497983a4b7' mock_get_remote_url.return_value = git_url mock_get_revision.return_value = sha ret = Git.get_src_requirement('.', 'pip-test-package') assert ret == ( 'git+https://github.com/pypa/pip-test-package' '@5547fa909e83df8bd743d3978d6667497983a4b7#egg=pip_test_package' ) @patch('pip._internal.vcs.git.Git.get_revision_sha') def test_git_resolve_revision_rev_exists(get_sha_mock): get_sha_mock.return_value = ('123456', False) url = 'git+https://git.example.com' rev_options = Git.make_rev_options('develop') git = Git() new_options = git.resolve_revision('.', url, rev_options) assert new_options.rev == '123456' @patch('pip._internal.vcs.git.Git.get_revision_sha') def test_git_resolve_revision_rev_not_found(get_sha_mock): get_sha_mock.return_value = (None, False) url = 'git+https://git.example.com' rev_options = Git.make_rev_options('develop') git = Git() new_options = git.resolve_revision('.', url, rev_options) assert new_options.rev == 'develop' @patch('pip._internal.vcs.git.Git.get_revision_sha') def test_git_resolve_revision_not_found_warning(get_sha_mock, caplog): get_sha_mock.return_value = (None, False) url = 'git+https://git.example.com' sha = 40 * 'a' rev_options = Git.make_rev_options(sha) git = Git() new_options = git.resolve_revision('.', url, rev_options) assert new_options.rev == sha rev_options = Git.make_rev_options(sha[:6]) new_options = git.resolve_revision('.', url, rev_options) assert new_options.rev == 'aaaaaa' # Check that a warning got logged only for the 
abbreviated hash. messages = [r.getMessage() for r in caplog.records] messages = [msg for msg in messages if msg.startswith('Did not find ')] assert messages == [ "Did not find branch or tag 'aaaaaa', assuming revision or ref." ] @pytest.mark.parametrize('rev_name,result', ( ('5547fa909e83df8bd743d3978d6667497983a4b7', True), ('5547fa909', False), ('5678', False), ('abc123', False), ('foo', False), (None, False), )) @patch('pip._internal.vcs.git.Git.get_revision') def test_git_is_commit_id_equal(mock_get_revision, rev_name, result): """ Test Git.is_commit_id_equal(). """ mock_get_revision.return_value = '5547fa909e83df8bd743d3978d6667497983a4b7' assert Git().is_commit_id_equal('/path', rev_name) is result # The non-SVN backends all use the same get_netloc_and_auth(), so only test # Git as a representative. @pytest.mark.parametrize('args, expected', [ # Test a basic case. (('example.com', 'https'), ('example.com', (None, None))), # Test with username and password. (('user:pass@example.com', 'https'), ('user:pass@example.com', (None, None))), ]) def test_git__get_netloc_and_auth(args, expected): """ Test VersionControl.get_netloc_and_auth(). """ netloc, scheme = args actual = Git.get_netloc_and_auth(netloc, scheme) assert actual == expected @pytest.mark.parametrize('args, expected', [ # Test https. (('example.com', 'https'), ('example.com', (None, None))), # Test https with username and no password. (('user@example.com', 'https'), ('example.com', ('user', None))), # Test https with username and password. (('user:pass@example.com', 'https'), ('example.com', ('user', 'pass'))), # Test https with URL-encoded reserved characters. (('user%3Aname:%23%40%5E@example.com', 'https'), ('example.com', ('user:name', '#@^'))), # Test ssh with username and password. (('user:pass@example.com', 'ssh'), ('user:pass@example.com', (None, None))), ]) def test_subversion__get_netloc_and_auth(args, expected): """ Test Subversion.get_netloc_and_auth(). 
""" netloc, scheme = args actual = Subversion.get_netloc_and_auth(netloc, scheme) assert actual == expected def test_git__get_url_rev__idempotent(): """ Check that Git.get_url_rev_and_auth() is idempotent for what the code calls "stub URLs" (i.e. URLs that don't contain "://"). Also check that it doesn't change self.url. """ url = 'git+git@git.example.com:MyProject#egg=MyProject' result1 = Git.get_url_rev_and_auth(url) result2 = Git.get_url_rev_and_auth(url) expected = ('git@git.example.com:MyProject', None, (None, None)) assert result1 == expected assert result2 == expected @pytest.mark.parametrize('url, expected', [ ('svn+https://svn.example.com/MyProject', ('https://svn.example.com/MyProject', None, (None, None))), # Test a "+" in the path portion. ('svn+https://svn.example.com/My+Project', ('https://svn.example.com/My+Project', None, (None, None))), ]) def test_version_control__get_url_rev_and_auth(url, expected): """ Test the basic case of VersionControl.get_url_rev_and_auth(). """ actual = VersionControl.get_url_rev_and_auth(url) assert actual == expected @pytest.mark.parametrize('url', [ 'https://svn.example.com/MyProject', # Test a URL containing a "+" (but not in the scheme). 'https://svn.example.com/My+Project', ]) def test_version_control__get_url_rev_and_auth__missing_plus(url): """ Test passing a URL to VersionControl.get_url_rev_and_auth() with a "+" missing from the scheme. """ with pytest.raises(ValueError) as excinfo: VersionControl.get_url_rev_and_auth(url) assert 'malformed VCS url' in str(excinfo.value) @pytest.mark.parametrize('url, expected', [ # Test http. ('bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject', 'http://bzr.myproject.org/MyProject/trunk/'), # Test https. ('bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject', 'https://bzr.myproject.org/MyProject/trunk/'), # Test ftp. ('bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject', 'ftp://bzr.myproject.org/MyProject/trunk/'), # Test sftp. 
('bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject', 'sftp://bzr.myproject.org/MyProject/trunk/'), # Test launchpad. ('bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject', 'lp:MyLaunchpadProject'), # Test ssh (special handling). ('bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject', 'bzr+ssh://bzr.myproject.org/MyProject/trunk/'), ]) def test_bazaar__get_url_rev_and_auth(url, expected): """ Test Bazaar.get_url_rev_and_auth(). """ actual = Bazaar.get_url_rev_and_auth(url) assert actual == (expected, None, (None, None)) @pytest.mark.parametrize('url, expected', [ # Test an https URL. ('svn+https://svn.example.com/MyProject#egg=MyProject', ('https://svn.example.com/MyProject', None, (None, None))), # Test an https URL with a username and password. ('svn+https://user:pass@svn.example.com/MyProject#egg=MyProject', ('https://svn.example.com/MyProject', None, ('user', 'pass'))), # Test an ssh URL. ('svn+ssh://svn.example.com/MyProject#egg=MyProject', ('svn+ssh://svn.example.com/MyProject', None, (None, None))), # Test an ssh URL with a username. ('svn+ssh://user@svn.example.com/MyProject#egg=MyProject', ('svn+ssh://user@svn.example.com/MyProject', None, (None, None))), ]) def test_subversion__get_url_rev_and_auth(url, expected): """ Test Subversion.get_url_rev_and_auth(). """ actual = Subversion.get_url_rev_and_auth(url) assert actual == expected # The non-SVN backends all use the same make_rev_args(), so only test # Git as a representative. @pytest.mark.parametrize('username, password, expected', [ (None, None, []), ('user', None, []), ('user', 'pass', []), ]) def test_git__make_rev_args(username, password, expected): """ Test VersionControl.make_rev_args(). 
""" actual = Git.make_rev_args(username, password) assert actual == expected @pytest.mark.parametrize('username, password, expected', [ (None, None, []), ('user', None, ['--username', 'user']), ('user', 'pass', ['--username', 'user', '--password', 'pass']), ]) def test_subversion__make_rev_args(username, password, expected): """ Test Subversion.make_rev_args(). """ actual = Subversion.make_rev_args(username, password) assert actual == expected def test_subversion__get_url_rev_options(): """ Test Subversion.get_url_rev_options(). """ url = 'svn+https://user:pass@svn.example.com/MyProject@v1.0#egg=MyProject' url, rev_options = Subversion().get_url_rev_options(url) assert url == 'https://svn.example.com/MyProject' assert rev_options.rev == 'v1.0' assert rev_options.extra_args == ( ['--username', 'user', '--password', 'pass'] ) def test_get_git_version(): git_version = Git().get_git_version() assert git_version >= parse_version('1.0.0')
[]
[]
[]
archives/1346520853_-.zip
tests/unit/test_wheel.py
"""Tests for wheel binary packages and .dist-info.""" import csv import logging import os import textwrap import pytest from mock import Mock, patch from pip._vendor.packaging.requirements import Requirement from pip._internal import pep425tags, wheel from pip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel from pip._internal.index import FormatControl from pip._internal.models.link import Link from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.compat import WINDOWS from pip._internal.utils.misc import unpack_file from tests.lib import DATA_DIR, assert_paths_equal @pytest.mark.parametrize( "s, expected", [ # Trivial. ("pip-18.0", True), # Ambiguous. ("foo-2-2", True), ("im-valid", True), # Invalid. ("invalid", False), ("im_invalid", False), ], ) def test_contains_egg_info(s, expected): result = wheel._contains_egg_info(s) assert result == expected def make_test_install_req(base_name=None): """ Return an InstallRequirement object for testing purposes. """ if base_name is None: base_name = 'pendulum-2.0.4' req = Requirement('pendulum') link_url = ( 'https://files.pythonhosted.org/packages/aa/{base_name}.tar.gz' '#sha256=cf535d36c063575d4752af36df928882b2e0e31541b4482c97d637527' '85f9fcb' ).format(base_name=base_name) link = Link( url=link_url, comes_from='https://pypi.org/simple/pendulum/', requires_python='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', ) req = InstallRequirement( req=req, comes_from=None, constraint=False, editable=False, link=link, source_dir='/tmp/pip-install-9py5m2z1/pendulum', ) return req @pytest.mark.parametrize( "base_name, autobuilding, cache_available, expected", [ ('pendulum-2.0.4', False, False, False), # The following cases test autobuilding=True. # Test _contains_egg_info() returning True. ('pendulum-2.0.4', True, True, False), ('pendulum-2.0.4', True, False, True), # Test _contains_egg_info() returning False. 
('pendulum', True, True, True), ('pendulum', True, False, True), ], ) def test_should_use_ephemeral_cache__issue_6197( base_name, autobuilding, cache_available, expected, ): """ Regression test for: https://github.com/pypa/pip/issues/6197 """ req = make_test_install_req(base_name=base_name) assert not req.is_wheel assert req.link.is_artifact format_control = FormatControl() ephem_cache = wheel.should_use_ephemeral_cache( req, format_control=format_control, autobuilding=autobuilding, cache_available=cache_available, ) assert ephem_cache is expected def test_format_command_result__INFO(caplog): caplog.set_level(logging.INFO) actual = wheel.format_command_result( # Include an argument with a space to test argument quoting. command_args=['arg1', 'second arg'], command_output='output line 1\noutput line 2\n', ) assert actual.splitlines() == [ "Command arguments: arg1 'second arg'", 'Command output: [use --verbose to show]', ] @pytest.mark.parametrize('command_output', [ # Test trailing newline. 'output line 1\noutput line 2\n', # Test no trailing newline. 
'output line 1\noutput line 2', ]) def test_format_command_result__DEBUG(caplog, command_output): caplog.set_level(logging.DEBUG) actual = wheel.format_command_result( command_args=['arg1', 'arg2'], command_output=command_output, ) assert actual.splitlines() == [ "Command arguments: arg1 arg2", 'Command output:', 'output line 1', 'output line 2', '----------------------------------------', ] @pytest.mark.parametrize('log_level', ['DEBUG', 'INFO']) def test_format_command_result__empty_output(caplog, log_level): caplog.set_level(log_level) actual = wheel.format_command_result( command_args=['arg1', 'arg2'], command_output='', ) assert actual.splitlines() == [ "Command arguments: arg1 arg2", 'Command output: None', ] def call_get_legacy_build_wheel_path(caplog, names): req = make_test_install_req() wheel_path = wheel.get_legacy_build_wheel_path( names=names, temp_dir='/tmp/abcd', req=req, command_args=['arg1', 'arg2'], command_output='output line 1\noutput line 2\n', ) return wheel_path def test_get_legacy_build_wheel_path(caplog): actual = call_get_legacy_build_wheel_path(caplog, names=['name']) assert_paths_equal(actual, '/tmp/abcd/name') assert not caplog.records def test_get_legacy_build_wheel_path__no_names(caplog): actual = call_get_legacy_build_wheel_path(caplog, names=[]) assert actual is None assert len(caplog.records) == 1 record = caplog.records[0] assert record.levelname == 'WARNING' assert record.message.splitlines() == [ "Legacy build of wheel for 'pendulum' created no files.", "Command arguments: arg1 arg2", 'Command output: [use --verbose to show]', ] def test_get_legacy_build_wheel_path__multiple_names(caplog): # Deliberately pass the names in non-sorted order. 
actual = call_get_legacy_build_wheel_path( caplog, names=['name2', 'name1'], ) assert_paths_equal(actual, '/tmp/abcd/name1') assert len(caplog.records) == 1 record = caplog.records[0] assert record.levelname == 'WARNING' assert record.message.splitlines() == [ "Legacy build of wheel for 'pendulum' created more than one file.", "Filenames (choosing first): ['name1', 'name2']", "Command arguments: arg1 arg2", 'Command output: [use --verbose to show]', ] @pytest.mark.parametrize("console_scripts", ["pip = pip._internal.main:pip", "pip:pip = pip._internal.main:pip"]) def test_get_entrypoints(tmpdir, console_scripts): entry_points = tmpdir.join("entry_points.txt") with open(str(entry_points), "w") as fp: fp.write(""" [console_scripts] {} [section] common:one = module:func common:two = module:other_func """.format(console_scripts)) assert wheel.get_entrypoints(str(entry_points)) == ( dict([console_scripts.split(' = ')]), {}, ) @pytest.mark.parametrize("outrows, expected", [ ([ ('', '', 'a'), ('', '', ''), ], [ ('', '', ''), ('', '', 'a'), ]), ([ # Include an int to check avoiding the following error: # > TypeError: '<' not supported between instances of 'str' and 'int' ('', '', 1), ('', '', ''), ], [ ('', '', ''), ('', '', 1), ]), ]) def test_sorted_outrows(outrows, expected): actual = wheel.sorted_outrows(outrows) assert actual == expected def call_get_csv_rows_for_installed(tmpdir, text): path = tmpdir.join('temp.txt') path.write(text) # Test that an installed file appearing in RECORD has its filename # updated in the new RECORD file. 
installed = {'a': 'z'} changed = set() generated = [] lib_dir = '/lib/dir' with wheel.open_for_csv(path, 'r') as f: reader = csv.reader(f) outrows = wheel.get_csv_rows_for_installed( reader, installed=installed, changed=changed, generated=generated, lib_dir=lib_dir, ) return outrows def test_get_csv_rows_for_installed(tmpdir, caplog): text = textwrap.dedent("""\ a,b,c d,e,f """) outrows = call_get_csv_rows_for_installed(tmpdir, text) expected = [ ('z', 'b', 'c'), ('d', 'e', 'f'), ] assert outrows == expected # Check there were no warnings. assert len(caplog.records) == 0 def test_get_csv_rows_for_installed__long_lines(tmpdir, caplog): text = textwrap.dedent("""\ a,b,c,d e,f,g h,i,j,k """) outrows = call_get_csv_rows_for_installed(tmpdir, text) expected = [ ('z', 'b', 'c', 'd'), ('e', 'f', 'g'), ('h', 'i', 'j', 'k'), ] assert outrows == expected messages = [rec.message for rec in caplog.records] expected = [ "RECORD line has more than three elements: ['a', 'b', 'c', 'd']", "RECORD line has more than three elements: ['h', 'i', 'j', 'k']" ] assert messages == expected def test_wheel_version(tmpdir, data): future_wheel = 'futurewheel-1.9-py2.py3-none-any.whl' broken_wheel = 'brokenwheel-1.0-py2.py3-none-any.whl' future_version = (1, 9) unpack_file(data.packages.join(future_wheel), tmpdir + 'future', None, None) unpack_file(data.packages.join(broken_wheel), tmpdir + 'broken', None, None) assert wheel.wheel_version(tmpdir + 'future') == future_version assert not wheel.wheel_version(tmpdir + 'broken') def test_python_tag(): wheelnames = [ 'simplewheel-1.0-py2.py3-none-any.whl', 'simplewheel-1.0-py27-none-any.whl', 'simplewheel-2.0-1-py2.py3-none-any.whl', ] newnames = [ 'simplewheel-1.0-py37-none-any.whl', 'simplewheel-1.0-py37-none-any.whl', 'simplewheel-2.0-1-py37-none-any.whl', ] for name, new in zip(wheelnames, newnames): assert wheel.replace_python_tag(name, 'py37') == new def test_check_compatibility(): name = 'test' vc = wheel.VERSION_COMPATIBLE # Major version is 
higher - should be incompatible higher_v = (vc[0] + 1, vc[1]) # test raises with correct error with pytest.raises(UnsupportedWheel) as e: wheel.check_compatibility(higher_v, name) assert 'is not compatible' in str(e) # Should only log.warning - minor version is greater higher_v = (vc[0], vc[1] + 1) wheel.check_compatibility(higher_v, name) # These should work fine wheel.check_compatibility(wheel.VERSION_COMPATIBLE, name) # E.g if wheel to install is 1.0 and we support up to 1.2 lower_v = (vc[0], max(0, vc[1] - 1)) wheel.check_compatibility(lower_v, name) class TestWheelFile(object): def test_std_wheel_pattern(self): w = wheel.Wheel('simple-1.1.1-py2-none-any.whl') assert w.name == 'simple' assert w.version == '1.1.1' assert w.pyversions == ['py2'] assert w.abis == ['none'] assert w.plats == ['any'] def test_wheel_pattern_multi_values(self): w = wheel.Wheel('simple-1.1-py2.py3-abi1.abi2-any.whl') assert w.name == 'simple' assert w.version == '1.1' assert w.pyversions == ['py2', 'py3'] assert w.abis == ['abi1', 'abi2'] assert w.plats == ['any'] def test_wheel_with_build_tag(self): # pip doesn't do anything with build tags, but theoretically, we might # see one, in this case the build tag = '4' w = wheel.Wheel('simple-1.1-4-py2-none-any.whl') assert w.name == 'simple' assert w.version == '1.1' assert w.pyversions == ['py2'] assert w.abis == ['none'] assert w.plats == ['any'] def test_single_digit_version(self): w = wheel.Wheel('simple-1-py2-none-any.whl') assert w.version == '1' def test_non_pep440_version(self): w = wheel.Wheel('simple-_invalid_-py2-none-any.whl') assert w.version == '-invalid-' def test_missing_version_raises(self): with pytest.raises(InvalidWheelFilename): wheel.Wheel('Cython-cp27-none-linux_x86_64.whl') def test_invalid_filename_raises(self): with pytest.raises(InvalidWheelFilename): wheel.Wheel('invalid.whl') def test_supported_single_version(self): """ Test single-version wheel is known to be supported """ w = 
wheel.Wheel('simple-0.1-py2-none-any.whl') assert w.supported(tags=[('py2', 'none', 'any')]) def test_supported_multi_version(self): """ Test multi-version wheel is known to be supported """ w = wheel.Wheel('simple-0.1-py2.py3-none-any.whl') assert w.supported(tags=[('py3', 'none', 'any')]) def test_not_supported_version(self): """ Test unsupported wheel is known to be unsupported """ w = wheel.Wheel('simple-0.1-py2-none-any.whl') assert not w.supported(tags=[('py1', 'none', 'any')]) @patch('sys.platform', 'darwin') @patch('pip._internal.pep425tags.get_abbr_impl', lambda: 'cp') @patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_9_intel') def test_supported_osx_version(self): """ Wheels built for macOS 10.6 are supported on 10.9 """ tags = pep425tags.get_supported(['27'], False) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_6_intel.whl') assert w.supported(tags=tags) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_9_intel.whl') assert w.supported(tags=tags) @patch('sys.platform', 'darwin') @patch('pip._internal.pep425tags.get_abbr_impl', lambda: 'cp') @patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_6_intel') def test_not_supported_osx_version(self): """ Wheels built for macOS 10.9 are not supported on 10.6 """ tags = pep425tags.get_supported(['27'], False) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_9_intel.whl') assert not w.supported(tags=tags) @patch('sys.platform', 'darwin') @patch('pip._internal.pep425tags.get_abbr_impl', lambda: 'cp') def test_supported_multiarch_darwin(self): """ Multi-arch wheels (intel) are supported on components (i386, x86_64) """ with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_universal'): universal = pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_intel'): intel = pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_x86_64'): x64 = 
pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_i386'): i386 = pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_ppc'): ppc = pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_ppc64'): ppc64 = pep425tags.get_supported(['27'], False) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_5_intel.whl') assert w.supported(tags=intel) assert w.supported(tags=x64) assert w.supported(tags=i386) assert not w.supported(tags=universal) assert not w.supported(tags=ppc) assert not w.supported(tags=ppc64) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_5_universal.whl') assert w.supported(tags=universal) assert w.supported(tags=intel) assert w.supported(tags=x64) assert w.supported(tags=i386) assert w.supported(tags=ppc) assert w.supported(tags=ppc64) @patch('sys.platform', 'darwin') @patch('pip._internal.pep425tags.get_abbr_impl', lambda: 'cp') def test_not_supported_multiarch_darwin(self): """ Single-arch wheels (x86_64) are not supported on multi-arch (intel) """ with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_universal'): universal = pep425tags.get_supported(['27'], False) with patch('pip._internal.pep425tags.get_platform', lambda: 'macosx_10_5_intel'): intel = pep425tags.get_supported(['27'], False) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_5_i386.whl') assert not w.supported(tags=intel) assert not w.supported(tags=universal) w = wheel.Wheel('simple-0.1-cp27-none-macosx_10_5_x86_64.whl') assert not w.supported(tags=intel) assert not w.supported(tags=universal) def test_support_index_min(self): """ Test results from `support_index_min` """ tags = [ ('py2', 'none', 'TEST'), ('py2', 'TEST', 'any'), ('py2', 'none', 'any'), ] w = wheel.Wheel('simple-0.1-py2-none-any.whl') assert w.support_index_min(tags=tags) == 2 w = wheel.Wheel('simple-0.1-py2-none-TEST.whl') 
assert w.support_index_min(tags=tags) == 0 def test_support_index_min_none(self): """ Test `support_index_min` returns None, when wheel not supported """ w = wheel.Wheel('simple-0.1-py2-none-any.whl') assert w.support_index_min(tags=[]) is None def test_unpack_wheel_no_flatten(self): from pip._internal.utils import misc as utils from tempfile import mkdtemp from shutil import rmtree filepath = os.path.join(DATA_DIR, 'packages', 'meta-1.0-py2.py3-none-any.whl') try: tmpdir = mkdtemp() utils.unpack_file(filepath, tmpdir, 'application/zip', None) assert os.path.isdir(os.path.join(tmpdir, 'meta-1.0.dist-info')) finally: rmtree(tmpdir) pass def test_purelib_platlib(self, data): """ Test the "wheel is purelib/platlib" code. """ packages = [ ("pure_wheel", data.packages.join("pure_wheel-1.7"), True), ("plat_wheel", data.packages.join("plat_wheel-1.7"), False), ("pure_wheel", data.packages.join( "pure_wheel-_invalidversion_"), True), ("plat_wheel", data.packages.join( "plat_wheel-_invalidversion_"), False), ] for name, path, expected in packages: assert wheel.root_is_purelib(name, path) == expected def test_version_underscore_conversion(self): """ Test that we convert '_' to '-' for versions parsed out of wheel filenames """ w = wheel.Wheel('simple-0.1_1-py2-none-any.whl') assert w.version == '0.1-1' class TestMoveWheelFiles(object): """ Tests for moving files from wheel src to scheme paths """ def prep(self, data, tmpdir): self.name = 'sample' self.wheelpath = data.packages.join( 'sample-1.2.0-py2.py3-none-any.whl') self.req = Requirement('sample') self.src = os.path.join(tmpdir, 'src') self.dest = os.path.join(tmpdir, 'dest') unpack_file(self.wheelpath, self.src, None, None) self.scheme = { 'scripts': os.path.join(self.dest, 'bin'), 'purelib': os.path.join(self.dest, 'lib'), 'data': os.path.join(self.dest, 'data'), } self.src_dist_info = os.path.join( self.src, 'sample-1.2.0.dist-info') self.dest_dist_info = os.path.join( self.scheme['purelib'], 'sample-1.2.0.dist-info') 
def assert_installed(self): # lib assert os.path.isdir( os.path.join(self.scheme['purelib'], 'sample')) # dist-info metadata = os.path.join(self.dest_dist_info, 'METADATA') assert os.path.isfile(metadata) # data files data_file = os.path.join(self.scheme['data'], 'my_data', 'data_file') assert os.path.isfile(data_file) # package data pkg_data = os.path.join( self.scheme['purelib'], 'sample', 'package_data.dat') assert os.path.isfile(pkg_data) def test_std_install(self, data, tmpdir): self.prep(data, tmpdir) wheel.move_wheel_files( self.name, self.req, self.src, scheme=self.scheme) self.assert_installed() def test_install_prefix(self, data, tmpdir): prefix = os.path.join(os.path.sep, 'some', 'path') self.prep(data, tmpdir) wheel.move_wheel_files( self.name, self.req, self.src, root=tmpdir, prefix=prefix, ) bin_dir = 'Scripts' if WINDOWS else 'bin' assert os.path.exists(os.path.join(tmpdir, 'some', 'path', bin_dir)) assert os.path.exists(os.path.join(tmpdir, 'some', 'path', 'my_data')) def test_dist_info_contains_empty_dir(self, data, tmpdir): """ Test that empty dirs are not installed """ # e.g. 
https://github.com/pypa/pip/issues/1632#issuecomment-38027275 self.prep(data, tmpdir) src_empty_dir = os.path.join( self.src_dist_info, 'empty_dir', 'empty_dir') os.makedirs(src_empty_dir) assert os.path.isdir(src_empty_dir) wheel.move_wheel_files( self.name, self.req, self.src, scheme=self.scheme) self.assert_installed() assert not os.path.isdir( os.path.join(self.dest_dist_info, 'empty_dir')) class TestWheelBuilder(object): def test_skip_building_wheels(self, caplog): with patch('pip._internal.wheel.WheelBuilder._build_one') \ as mock_build_one: wheel_req = Mock(is_wheel=True, editable=False, constraint=False) wb = wheel.WheelBuilder( finder=Mock(), preparer=Mock(), wheel_cache=None, ) with caplog.at_level(logging.INFO): wb.build([wheel_req], session=Mock()) assert "due to already being wheel" in caplog.text assert mock_build_one.mock_calls == [] class TestMessageAboutScriptsNotOnPATH(object): def _template(self, paths, scripts): with patch.dict('os.environ', {'PATH': os.pathsep.join(paths)}): return wheel.message_about_scripts_not_on_PATH(scripts) def test_no_script(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=[] ) assert retval is None def test_single_script__single_dir_not_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/c/d/foo'] ) assert retval is not None assert "--no-warn-script-location" in retval assert "foo is installed in '/c/d'" in retval def test_two_script__single_dir_not_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/c/d/foo', '/c/d/baz'] ) assert retval is not None assert "--no-warn-script-location" in retval assert "baz and foo are installed in '/c/d'" in retval def test_multi_script__multi_dir_not_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/c/d/foo', '/c/d/bar', '/c/d/baz', '/a/b/c/spam'] ) assert retval is not None assert "--no-warn-script-location" in retval assert "bar, baz and foo are installed in '/c/d'" in retval assert 
"spam is installed in '/a/b/c'" in retval def test_multi_script_all__multi_dir_not_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=[ '/c/d/foo', '/c/d/bar', '/c/d/baz', '/a/b/c/spam', '/a/b/c/eggs' ] ) assert retval is not None assert "--no-warn-script-location" in retval assert "bar, baz and foo are installed in '/c/d'" in retval assert "eggs and spam are installed in '/a/b/c'" in retval def test_two_script__single_dir_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/a/b/foo', '/a/b/baz'] ) assert retval is None def test_multi_script__multi_dir_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/a/b/foo', '/a/b/bar', '/a/b/baz', '/c/d/bin/spam'] ) assert retval is None def test_multi_script__single_dir_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/a/b/foo', '/a/b/bar', '/a/b/baz'] ) assert retval is None def test_single_script__single_dir_on_PATH(self): retval = self._template( paths=['/a/b', '/c/d/bin'], scripts=['/a/b/foo'] ) assert retval is None def test_PATH_check_case_insensitive_on_windows(self): retval = self._template( paths=['C:\\A\\b'], scripts=['c:\\a\\b\\c', 'C:/A/b/d'] ) if WINDOWS: assert retval is None else: assert retval is not None def test_trailing_ossep_removal(self): retval = self._template( paths=[os.path.join('a', 'b', '')], scripts=[os.path.join('a', 'b', 'c')] ) assert retval is None def test_missing_PATH_env_treated_as_empty_PATH_env(self): scripts = ['a/b/foo'] env = os.environ.copy() del env['PATH'] with patch.dict('os.environ', env, clear=True): retval_missing = wheel.message_about_scripts_not_on_PATH(scripts) with patch.dict('os.environ', {'PATH': ''}): retval_empty = wheel.message_about_scripts_not_on_PATH(scripts) assert retval_missing == retval_empty
[]
[]
[]
archives/1346520853_-.zip
tools/tox_pip.py
import os import shutil import subprocess import sys from glob import glob VIRTUAL_ENV = os.environ['VIRTUAL_ENV'] TOX_PIP_DIR = os.path.join(VIRTUAL_ENV, 'pip') def pip(args): # First things first, get a recent (stable) version of pip. if not os.path.exists(TOX_PIP_DIR): subprocess.check_call([sys.executable, '-m', 'pip', '--disable-pip-version-check', 'install', '-t', TOX_PIP_DIR, 'pip']) shutil.rmtree(glob(os.path.join(TOX_PIP_DIR, 'pip-*.dist-info'))[0]) # And use that version. pypath = os.environ.get('PYTHONPATH') pypath = pypath.split(os.pathsep) if pypath is not None else [] pypath.insert(0, TOX_PIP_DIR) os.environ['PYTHONPATH'] = os.pathsep.join(pypath) subprocess.check_call([sys.executable, '-m', 'pip'] + args) if __name__ == '__main__': pip(sys.argv[1:])
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/__init__.py
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/lib/__init__.py
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/lib/counts.py
import time from collections import OrderedDict, defaultdict from datetime import datetime, timedelta import logging from typing import Any, Callable, Dict, List, \ Optional, Tuple, Type, Union from django.conf import settings from django.db import connection, models from django.db.models import F from analytics.models import Anomaly, BaseCount, \ FillState, InstallationCount, RealmCount, StreamCount, \ UserCount, installation_epoch, last_successful_fill from zerver.lib.logging_util import log_to_file from zerver.lib.timestamp import ceiling_to_day, \ ceiling_to_hour, floor_to_hour, verify_UTC from zerver.models import Message, Realm, \ Stream, UserActivityInterval, UserProfile, models ## Logging setup ## logger = logging.getLogger('zulip.management') log_to_file(logger, settings.ANALYTICS_LOG_PATH) # You can't subtract timedelta.max from a datetime, so use this instead TIMEDELTA_MAX = timedelta(days=365*1000) ## Class definitions ## class CountStat: HOUR = 'hour' DAY = 'day' FREQUENCIES = frozenset([HOUR, DAY]) def __init__(self, property: str, data_collector: 'DataCollector', frequency: str, interval: Optional[timedelta]=None) -> None: self.property = property self.data_collector = data_collector # might have to do something different for bitfields if frequency not in self.FREQUENCIES: raise AssertionError("Unknown frequency: %s" % (frequency,)) self.frequency = frequency if interval is not None: self.interval = interval elif frequency == CountStat.HOUR: self.interval = timedelta(hours=1) else: # frequency == CountStat.DAY self.interval = timedelta(days=1) def __str__(self) -> str: return "<CountStat: %s>" % (self.property,) class LoggingCountStat(CountStat): def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None: CountStat.__init__(self, property, DataCollector(output_table, None), frequency) class DependentCountStat(CountStat): def __init__(self, property: str, data_collector: 'DataCollector', frequency: str, interval: 
Optional[timedelta]=None, dependencies: List[str]=[]) -> None: CountStat.__init__(self, property, data_collector, frequency, interval=interval) self.dependencies = dependencies class DataCollector: def __init__(self, output_table: Type[BaseCount], pull_function: Optional[Callable[[str, datetime, datetime], int]]) -> None: self.output_table = output_table self.pull_function = pull_function ## CountStat-level operations ## def process_count_stat(stat: CountStat, fill_to_time: datetime) -> None: if stat.frequency == CountStat.HOUR: time_increment = timedelta(hours=1) elif stat.frequency == CountStat.DAY: time_increment = timedelta(days=1) else: raise AssertionError("Unknown frequency: %s" % (stat.frequency,)) verify_UTC(fill_to_time) if floor_to_hour(fill_to_time) != fill_to_time: raise ValueError("fill_to_time must be on an hour boundary: %s" % (fill_to_time,)) fill_state = FillState.objects.filter(property=stat.property).first() if fill_state is None: currently_filled = installation_epoch() fill_state = FillState.objects.create(property=stat.property, end_time=currently_filled, state=FillState.DONE) logger.info("INITIALIZED %s %s" % (stat.property, currently_filled)) elif fill_state.state == FillState.STARTED: logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time)) do_delete_counts_at_hour(stat, fill_state.end_time) currently_filled = fill_state.end_time - time_increment do_update_fill_state(fill_state, currently_filled, FillState.DONE) logger.info("UNDO DONE %s" % (stat.property,)) elif fill_state.state == FillState.DONE: currently_filled = fill_state.end_time else: raise AssertionError("Unknown value for FillState.state: %s." % (fill_state.state,)) if isinstance(stat, DependentCountStat): for dependency in stat.dependencies: dependency_fill_time = last_successful_fill(dependency) if dependency_fill_time is None: logger.warning("DependentCountStat %s run before dependency %s." 
% (stat.property, dependency)) return fill_to_time = min(fill_to_time, dependency_fill_time) currently_filled = currently_filled + time_increment while currently_filled <= fill_to_time: logger.info("START %s %s" % (stat.property, currently_filled)) start = time.time() do_update_fill_state(fill_state, currently_filled, FillState.STARTED) do_fill_count_stat_at_hour(stat, currently_filled) do_update_fill_state(fill_state, currently_filled, FillState.DONE) end = time.time() currently_filled = currently_filled + time_increment logger.info("DONE %s (%dms)" % (stat.property, (end-start)*1000)) def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None: fill_state.end_time = end_time fill_state.state = state fill_state.save() # We assume end_time is valid (e.g. is on a day or hour boundary as appropriate) # and is timezone aware. It is the caller's responsibility to enforce this! def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime) -> None: start_time = end_time - stat.interval if not isinstance(stat, LoggingCountStat): timer = time.time() assert(stat.data_collector.pull_function is not None) rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time) logger.info("%s run pull_function (%dms/%sr)" % (stat.property, (time.time()-timer)*1000, rows_added)) do_aggregate_to_summary_table(stat, end_time) def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None: if isinstance(stat, LoggingCountStat): InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete() if stat.data_collector.output_table in [UserCount, StreamCount]: RealmCount.objects.filter(property=stat.property, end_time=end_time).delete() else: UserCount.objects.filter(property=stat.property, end_time=end_time).delete() StreamCount.objects.filter(property=stat.property, end_time=end_time).delete() RealmCount.objects.filter(property=stat.property, end_time=end_time).delete() 
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete() def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime) -> None: cursor = connection.cursor() # Aggregate into RealmCount output_table = stat.data_collector.output_table if output_table in (UserCount, StreamCount): realmcount_query = """ INSERT INTO analytics_realmcount (realm_id, value, property, subgroup, end_time) SELECT zerver_realm.id, COALESCE(sum(%(output_table)s.value), 0), '%(property)s', %(output_table)s.subgroup, %%(end_time)s FROM zerver_realm JOIN %(output_table)s ON zerver_realm.id = %(output_table)s.realm_id WHERE %(output_table)s.property = '%(property)s' AND %(output_table)s.end_time = %%(end_time)s GROUP BY zerver_realm.id, %(output_table)s.subgroup """ % {'output_table': output_table._meta.db_table, 'property': stat.property} start = time.time() cursor.execute(realmcount_query, {'end_time': end_time}) end = time.time() logger.info("%s RealmCount aggregation (%dms/%sr)" % ( stat.property, (end - start) * 1000, cursor.rowcount)) # Aggregate into InstallationCount installationcount_query = """ INSERT INTO analytics_installationcount (value, property, subgroup, end_time) SELECT sum(value), '%(property)s', analytics_realmcount.subgroup, %%(end_time)s FROM analytics_realmcount WHERE property = '%(property)s' AND end_time = %%(end_time)s GROUP BY analytics_realmcount.subgroup """ % {'property': stat.property} start = time.time() cursor.execute(installationcount_query, {'end_time': end_time}) end = time.time() logger.info("%s InstallationCount aggregation (%dms/%sr)" % ( stat.property, (end - start) * 1000, cursor.rowcount)) cursor.close() ## Utility functions called from outside counts.py ## # called from zerver/lib/actions.py; should not throw any errors def do_increment_logging_stat(zerver_object: Union[Realm, UserProfile, Stream], stat: CountStat, subgroup: Optional[Union[str, int, bool]], event_time: datetime, increment: int=1) -> None: table = 
stat.data_collector.output_table if table == RealmCount: id_args = {'realm': zerver_object} elif table == UserCount: id_args = {'realm': zerver_object.realm, 'user': zerver_object} else: # StreamCount id_args = {'realm': zerver_object.realm, 'stream': zerver_object} if stat.frequency == CountStat.DAY: end_time = ceiling_to_day(event_time) else: # CountStat.HOUR: end_time = ceiling_to_hour(event_time) row, created = table.objects.get_or_create( property=stat.property, subgroup=subgroup, end_time=end_time, defaults={'value': increment}, **id_args) if not created: row.value = F('value') + increment row.save(update_fields=['value']) def do_drop_all_analytics_tables() -> None: UserCount.objects.all().delete() StreamCount.objects.all().delete() RealmCount.objects.all().delete() InstallationCount.objects.all().delete() FillState.objects.all().delete() Anomaly.objects.all().delete() def do_drop_single_stat(property: str) -> None: UserCount.objects.filter(property=property).delete() StreamCount.objects.filter(property=property).delete() RealmCount.objects.filter(property=property).delete() InstallationCount.objects.filter(property=property).delete() FillState.objects.filter(property=property).delete() ## DataCollector-level operations ## def do_pull_by_sql_query(property: str, start_time: datetime, end_time: datetime, query: str, group_by: Optional[Tuple[models.Model, str]]) -> int: if group_by is None: subgroup = 'NULL' group_by_clause = '' else: subgroup = '%s.%s' % (group_by[0]._meta.db_table, group_by[1]) group_by_clause = ', ' + subgroup # We do string replacement here because cursor.execute will reject a # group_by_clause given as a param. # We pass in the datetimes as params to cursor.execute so that we don't have to # think about how to convert python datetimes to SQL datetimes. 
query_ = query % {'property': property, 'subgroup': subgroup, 'group_by_clause': group_by_clause} cursor = connection.cursor() cursor.execute(query_, {'time_start': start_time, 'time_end': end_time}) rowcount = cursor.rowcount cursor.close() return rowcount def sql_data_collector(output_table: Type[BaseCount], query: str, group_by: Optional[Tuple[models.Model, str]]) -> DataCollector: def pull_function(property: str, start_time: datetime, end_time: datetime) -> int: return do_pull_by_sql_query(property, start_time, end_time, query, group_by) return DataCollector(output_table, pull_function) def do_pull_minutes_active(property: str, start_time: datetime, end_time: datetime) -> int: user_activity_intervals = UserActivityInterval.objects.filter( end__gt=start_time, start__lt=end_time ).select_related( 'user_profile' ).values_list( 'user_profile_id', 'user_profile__realm_id', 'start', 'end') seconds_active = defaultdict(float) # type: Dict[Tuple[int, int], float] for user_id, realm_id, interval_start, interval_end in user_activity_intervals: start = max(start_time, interval_start) end = min(end_time, interval_end) seconds_active[(user_id, realm_id)] += (end - start).total_seconds() rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property, end_time=end_time, value=int(seconds // 60)) for ids, seconds in seconds_active.items() if seconds >= 60] UserCount.objects.bulk_create(rows) return len(rows) count_message_by_user_query = """ INSERT INTO analytics_usercount (user_id, realm_id, value, property, subgroup, end_time) SELECT zerver_userprofile.id, zerver_userprofile.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_userprofile JOIN zerver_message ON zerver_userprofile.id = zerver_message.sender_id WHERE zerver_userprofile.date_joined < %%(time_end)s AND zerver_message.pub_date >= %%(time_start)s AND zerver_message.pub_date < %%(time_end)s GROUP BY zerver_userprofile.id %(group_by_clause)s """ # Note: ignores the group_by / 
group_by_clause. count_message_type_by_user_query = """ INSERT INTO analytics_usercount (realm_id, user_id, value, property, subgroup, end_time) SELECT realm_id, id, SUM(count) AS value, '%(property)s', message_type, %%(time_end)s FROM ( SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*), CASE WHEN zerver_recipient.type = 1 THEN 'private_message' WHEN zerver_recipient.type = 3 THEN 'huddle_message' WHEN zerver_stream.invite_only = TRUE THEN 'private_stream' ELSE 'public_stream' END message_type FROM zerver_userprofile JOIN zerver_message ON zerver_userprofile.id = zerver_message.sender_id AND zerver_message.pub_date >= %%(time_start)s AND zerver_message.pub_date < %%(time_end)s JOIN zerver_recipient ON zerver_message.recipient_id = zerver_recipient.id LEFT JOIN zerver_stream ON zerver_recipient.type_id = zerver_stream.id GROUP BY zerver_userprofile.realm_id, zerver_userprofile.id, zerver_recipient.type, zerver_stream.invite_only ) AS subquery GROUP BY realm_id, id, message_type """ # This query joins to the UserProfile table since all current queries that # use this also subgroup on UserProfile.is_bot. If in the future there is a # stat that counts messages by stream and doesn't need the UserProfile # table, consider writing a new query for efficiency. 
count_message_by_stream_query = """ INSERT INTO analytics_streamcount (stream_id, realm_id, value, property, subgroup, end_time) SELECT zerver_stream.id, zerver_stream.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_stream JOIN zerver_recipient ON zerver_stream.id = zerver_recipient.type_id JOIN zerver_message ON zerver_recipient.id = zerver_message.recipient_id JOIN zerver_userprofile ON zerver_message.sender_id = zerver_userprofile.id WHERE zerver_stream.date_created < %%(time_end)s AND zerver_recipient.type = 2 AND zerver_message.pub_date >= %%(time_start)s AND zerver_message.pub_date < %%(time_end)s GROUP BY zerver_stream.id %(group_by_clause)s """ # Hardcodes the query needed by active_users:is_bot:day, since that is # currently the only stat that uses this. count_user_by_realm_query = """ INSERT INTO analytics_realmcount (realm_id, value, property, subgroup, end_time) SELECT zerver_realm.id, count(*),'%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_realm JOIN zerver_userprofile ON zerver_realm.id = zerver_userprofile.realm_id WHERE zerver_realm.date_created < %%(time_end)s AND zerver_userprofile.date_joined >= %%(time_start)s AND zerver_userprofile.date_joined < %%(time_end)s AND zerver_userprofile.is_active = TRUE GROUP BY zerver_realm.id %(group_by_clause)s """ # Currently hardcodes the query needed for active_users_audit:is_bot:day. # Assumes that a user cannot have two RealmAuditLog entries with the same event_time and # event_type in ['user_created', 'user_deactivated', etc]. # In particular, it's important to ensure that migrations don't cause that to happen. 
check_realmauditlog_by_user_query = """ INSERT INTO analytics_usercount (user_id, realm_id, value, property, subgroup, end_time) SELECT ral1.modified_user_id, ral1.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_realmauditlog ral1 JOIN ( SELECT modified_user_id, max(event_time) AS max_event_time FROM zerver_realmauditlog WHERE event_type in ('user_created', 'user_deactivated', 'user_activated', 'user_reactivated') AND event_time < %%(time_end)s GROUP BY modified_user_id ) ral2 ON ral1.event_time = max_event_time AND ral1.modified_user_id = ral2.modified_user_id JOIN zerver_userprofile ON ral1.modified_user_id = zerver_userprofile.id WHERE ral1.event_type in ('user_created', 'user_activated', 'user_reactivated') """ check_useractivityinterval_by_user_query = """ INSERT INTO analytics_usercount (user_id, realm_id, value, property, subgroup, end_time) SELECT zerver_userprofile.id, zerver_userprofile.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_userprofile JOIN zerver_useractivityinterval ON zerver_userprofile.id = zerver_useractivityinterval.user_profile_id WHERE zerver_useractivityinterval.end >= %%(time_start)s AND zerver_useractivityinterval.start < %%(time_end)s GROUP BY zerver_userprofile.id %(group_by_clause)s """ count_realm_active_humans_query = """ INSERT INTO analytics_realmcount (realm_id, value, property, subgroup, end_time) SELECT usercount1.realm_id, count(*), '%(property)s', NULL, %%(time_end)s FROM ( SELECT realm_id, user_id FROM analytics_usercount WHERE property = 'active_users_audit:is_bot:day' AND subgroup = 'false' AND end_time = %%(time_end)s ) usercount1 JOIN ( SELECT realm_id, user_id FROM analytics_usercount WHERE property = '15day_actives::day' AND end_time = %%(time_end)s ) usercount2 ON usercount1.user_id = usercount2.user_id GROUP BY usercount1.realm_id """ # Currently unused and untested count_stream_by_realm_query = """ INSERT INTO analytics_realmcount (realm_id, value, property, subgroup, 
end_time) SELECT zerver_realm.id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s FROM zerver_realm JOIN zerver_stream ON zerver_realm.id = zerver_stream.realm_id AND WHERE zerver_realm.date_created < %%(time_end)s AND zerver_stream.date_created >= %%(time_start)s AND zerver_stream.date_created < %%(time_end)s GROUP BY zerver_realm.id %(group_by_clause)s """ ## CountStat declarations ## count_stats_ = [ # Messages Sent stats # Stats that count the number of messages sent in various ways. # These are also the set of stats that read from the Message table. CountStat('messages_sent:is_bot:hour', sql_data_collector(UserCount, count_message_by_user_query, (UserProfile, 'is_bot')), CountStat.HOUR), CountStat('messages_sent:message_type:day', sql_data_collector(UserCount, count_message_type_by_user_query, None), CountStat.DAY), CountStat('messages_sent:client:day', sql_data_collector(UserCount, count_message_by_user_query, (Message, 'sending_client_id')), CountStat.DAY), CountStat('messages_in_stream:is_bot:day', sql_data_collector(StreamCount, count_message_by_stream_query, (UserProfile, 'is_bot')), CountStat.DAY), # Number of Users stats # Stats that count the number of active users in the UserProfile.is_active sense. # 'active_users_audit:is_bot:day' is the canonical record of which users were # active on which days (in the UserProfile.is_active sense). # Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected. CountStat('active_users_audit:is_bot:day', sql_data_collector(UserCount, check_realmauditlog_by_user_query, (UserProfile, 'is_bot')), CountStat.DAY), # Sanity check on 'active_users_audit:is_bot:day', and a archetype for future LoggingCountStats. # In RealmCount, 'active_users_audit:is_bot:day' should be the partial # sum sequence of 'active_users_log:is_bot:day', for any realm that # started after the latter stat was introduced. 
LoggingCountStat('active_users_log:is_bot:day', RealmCount, CountStat.DAY), # Another sanity check on 'active_users_audit:is_bot:day'. Is only an # approximation, e.g. if a user is deactivated between the end of the # day and when this stat is run, they won't be counted. However, is the # simplest of the three to inspect by hand. CountStat('active_users:is_bot:day', sql_data_collector(RealmCount, count_user_by_realm_query, (UserProfile, 'is_bot')), CountStat.DAY, interval=TIMEDELTA_MAX), # User Activity stats # Stats that measure user activity in the UserActivityInterval sense. CountStat('1day_actives::day', sql_data_collector(UserCount, check_useractivityinterval_by_user_query, None), CountStat.DAY, interval=timedelta(days=1)-UserActivityInterval.MIN_INTERVAL_LENGTH), CountStat('15day_actives::day', sql_data_collector(UserCount, check_useractivityinterval_by_user_query, None), CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH), CountStat('minutes_active::day', DataCollector(UserCount, do_pull_minutes_active), CountStat.DAY), # Rate limiting stats # Used to limit the number of invitation emails sent by a realm LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY), # Dependent stats # Must come after their dependencies. # Canonical account of the number of active humans in a realm on each day. DependentCountStat('realm_active_humans::day', sql_data_collector(RealmCount, count_realm_active_humans_query, None), CountStat.DAY, dependencies=['active_users_audit:is_bot:day', '15day_actives::day']) ] COUNT_STATS = OrderedDict([(stat.property, stat) for stat in count_stats_])
[ "str", "'DataCollector'", "str", "str", "Type[BaseCount]", "str", "str", "'DataCollector'", "str", "Type[BaseCount]", "Optional[Callable[[str, datetime, datetime], int]]", "CountStat", "datetime", "FillState", "datetime", "int", "CountStat", "datetime", "CountStat", "datetime", "CountStat", "datetime", "Union[Realm, UserProfile, Stream]", "CountStat", "Optional[Union[str, int, bool]]", "datetime", "str", "str", "datetime", "datetime", "str", "Optional[Tuple[models.Model, str]]", "Type[BaseCount]", "str", "Optional[Tuple[models.Model, str]]", "str", "datetime", "datetime", "str", "datetime", "datetime" ]
[ 1101, 1122, 1150, 1911, 1930, 1958, 2132, 2153, 2181, 2465, 2514, 2723, 2748, 5257, 5278, 5295, 5590, 5611, 6118, 6139, 6859, 6880, 8978, 9019, 9070, 9115, 10246, 10630, 10647, 10667, 10684, 10724, 11580, 11604, 11642, 11728, 11745, 11765, 11960, 11977, 11997 ]
[ 1104, 1137, 1153, 1914, 1945, 1961, 2135, 2168, 2184, 2480, 2564, 2732, 2756, 5266, 5286, 5298, 5599, 5619, 6127, 6147, 6868, 6888, 9011, 9028, 9101, 9123, 10249, 10633, 10655, 10675, 10687, 10758, 11595, 11607, 11676, 11731, 11753, 11773, 11963, 11985, 12005 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/lib/fixtures.py
from math import sqrt from random import gauss, random, seed from typing import List from analytics.lib.counts import CountStat def generate_time_series_data(days: int=100, business_hours_base: float=10, non_business_hours_base: float=10, growth: float=1, autocorrelation: float=0, spikiness: float=1, holiday_rate: float=0, frequency: str=CountStat.DAY, partial_sum: bool=False, random_seed: int=26) -> List[int]: """ Generate semi-realistic looking time series data for testing analytics graphs. days -- Number of days of data. Is the number of data points generated if frequency is CountStat.DAY. business_hours_base -- Average value during a business hour (or day) at beginning of time series, if frequency is CountStat.HOUR (CountStat.DAY, respectively). non_business_hours_base -- The above, for non-business hours/days. growth -- Ratio between average values at end of time series and beginning of time series. autocorrelation -- Makes neighboring data points look more like each other. At 0 each point is unaffected by the previous point, and at 1 each point is a deterministic function of the previous point. spikiness -- 0 means no randomness (other than holiday_rate), higher values increase the variance. holiday_rate -- Fraction of days randomly set to 0, largely for testing how we handle 0s. frequency -- Should be CountStat.HOUR or CountStat.DAY. partial_sum -- If True, return partial sum of the series. random_seed -- Seed for random number generator. 
""" if frequency == CountStat.HOUR: length = days*24 seasonality = [non_business_hours_base] * 24 * 7 for day in range(5): for hour in range(8): seasonality[24*day + hour] = business_hours_base holidays = [] for i in range(days): holidays.extend([random() < holiday_rate] * 24) elif frequency == CountStat.DAY: length = days seasonality = [8*business_hours_base + 16*non_business_hours_base] * 5 + \ [24*non_business_hours_base] * 2 holidays = [random() < holiday_rate for i in range(days)] else: raise AssertionError("Unknown frequency: %s" % (frequency,)) if length < 2: raise AssertionError("Must be generating at least 2 data points. " "Currently generating %s" % (length,)) growth_base = growth ** (1. / (length-1)) values_no_noise = [seasonality[i % len(seasonality)] * (growth_base**i) for i in range(length)] seed(random_seed) noise_scalars = [gauss(0, 1)] for i in range(1, length): noise_scalars.append(noise_scalars[-1]*autocorrelation + gauss(0, 1)*(1-autocorrelation)) values = [0 if holiday else int(v + sqrt(v)*noise_scalar*spikiness) for v, noise_scalar, holiday in zip(values_no_noise, noise_scalars, holidays)] if partial_sum: for i in range(1, length): values[i] = values[i-1] + values[i] return [max(v, 0) for v in values]
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/lib/time_utils.py
from datetime import datetime, timedelta from typing import List, Optional from analytics.lib.counts import CountStat from zerver.lib.timestamp import floor_to_day, floor_to_hour, verify_UTC # If min_length is None, returns end_times from ceiling(start) to floor(end), inclusive. # If min_length is greater than 0, pads the list to the left. # So informally, time_range(Sep 20, Sep 22, day, None) returns [Sep 20, Sep 21, Sep 22], # and time_range(Sep 20, Sep 22, day, 5) returns [Sep 18, Sep 19, Sep 20, Sep 21, Sep 22] def time_range(start: datetime, end: datetime, frequency: str, min_length: Optional[int]) -> List[datetime]: verify_UTC(start) verify_UTC(end) if frequency == CountStat.HOUR: end = floor_to_hour(end) step = timedelta(hours=1) elif frequency == CountStat.DAY: end = floor_to_day(end) step = timedelta(days=1) else: raise AssertionError("Unknown frequency: %s" % (frequency,)) times = [] if min_length is not None: start = min(start, end - (min_length-1)*step) current = end while current >= start: times.append(current) current -= step return list(reversed(times))
[ "datetime", "datetime", "str", "Optional[int]" ]
[ 545, 560, 581, 613 ]
[ 553, 568, 584, 626 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/__init__.py
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/__init__.py
[]
[]
[]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/analyze_mit.py
import datetime
import logging
import time
from typing import Any, Dict

from django.core.management.base import BaseCommand, CommandParser

from zerver.lib.timestamp import timestamp_to_datetime
from zerver.models import Message, Recipient

def compute_stats(log_level: int) -> None:
    """Log per-user and per-client statistics for MIT Zephyr usage.

    Looks at stream messages sent in the "zephyr" realm over the last week,
    excluding known bot/daemon senders, and reports (via the logging module
    at the given log_level) how much of each user's traffic came through
    Zulip proper versus the zephyr_mirror client.
    """
    logger = logging.getLogger()
    logger.setLevel(log_level)

    one_week_ago = timestamp_to_datetime(time.time()) - datetime.timedelta(weeks=1)
    # Stream messages from the MIT (zephyr) realm in the last week.
    mit_query = Message.objects.filter(sender__realm__string_id="zephyr",
                                       recipient__type=Recipient.STREAM,
                                       pub_date__gt=one_week_ago)
    # Exclude automated senders so the stats reflect human activity.
    for bot_sender_start in ["imap.", "rcmd.", "sys."]:
        mit_query = mit_query.exclude(sender__email__startswith=(bot_sender_start))
    # Filtering for "/" covers tabbott/extra@ and all the daemon/foo bots.
    mit_query = mit_query.exclude(sender__email__contains=("/"))
    mit_query = mit_query.exclude(sender__email__contains=("aim.com"))
    mit_query = mit_query.exclude(
        sender__email__in=["rss@mit.edu", "bash@mit.edu", "apache@mit.edu",
                           "bitcoin@mit.edu", "lp@mit.edu", "clocks@mit.edu",
                           "root@mit.edu", "nagios@mit.edu",
                           "www-data|local-realm@mit.edu"])
    # email -> {client name -> message count}
    user_counts = {}  # type: Dict[str, Dict[str, int]]
    for m in mit_query.select_related("sending_client", "sender"):
        email = m.sender.email
        user_counts.setdefault(email, {})
        user_counts[email].setdefault(m.sending_client.name, 0)
        user_counts[email][m.sending_client.name] += 1

    # Roll the per-user/per-client counts up into per-client and per-user totals.
    total_counts = {}  # type: Dict[str, int]
    total_user_counts = {}  # type: Dict[str, int]
    for email, counts in user_counts.items():
        total_user_counts.setdefault(email, 0)
        for client_name, count in counts.items():
            total_counts.setdefault(client_name, 0)
            total_counts[client_name] += count
            total_user_counts[email] += count

    logging.debug("%40s | %10s | %s" % ("User", "Messages", "Percentage Zulip"))
    # For each cohort size, the average "percent Zulip" among the top-N
    # most active senders.
    top_percents = {}  # type: Dict[int, float]
    for size in [10, 25, 50, 100, 200, len(total_user_counts.keys())]:
        top_percents[size] = 0.0
    # Iterate users from most to least active.
    for i, email in enumerate(sorted(total_user_counts.keys(),
                                     key=lambda x: -total_user_counts[x])):
        # Anything not sent via zephyr_mirror counts as "Zulip" traffic.
        percent_zulip = round(100 - (user_counts[email].get("zephyr_mirror", 0)) *
                              100. / total_user_counts[email], 1)
        for size in top_percents.keys():
            top_percents.setdefault(size, 0)
            if i < size:
                top_percents[size] += (percent_zulip * 1.0 / size)

        logging.debug("%40s | %10s | %s%%" % (email, total_user_counts[email],
                                              percent_zulip))

    logging.info("")
    for size in sorted(top_percents.keys()):
        logging.info("Top %6s | %s%%" % (size, round(top_percents[size], 1)))

    grand_total = sum(total_counts.values())
    # NOTE(review): bare print of the grand total alongside the logging
    # output — presumably intentional script output; confirm.
    print(grand_total)
    logging.info("%15s | %s" % ("Client", "Percentage"))
    for client in total_counts.keys():
        logging.info("%15s | %s%%" % (client, round(100. * total_counts[client] / grand_total, 1)))

class Command(BaseCommand):
    help = "Compute statistics on MIT Zephyr usage."

    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument('--verbose', default=False, action='store_true')

    def handle(self, *args: Any, **options: Any) -> None:
        # --verbose switches the report from INFO to DEBUG detail
        # (per-user rows are emitted at DEBUG level).
        level = logging.INFO
        if options["verbose"]:
            level = logging.DEBUG
        compute_stats(level)
[ "int", "CommandParser", "Any", "Any" ]
[ 271, 3380, 3510, 3526 ]
[ 274, 3393, 3513, 3529 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/analyze_user_activity.py
import datetime
from typing import Any, Dict

from django.core.management.base import BaseCommand, CommandParser
from django.utils.timezone import utc

from zerver.lib.statistics import seconds_usage_between
from zerver.models import UserProfile

def analyze_activity(options: Dict[str, Any]) -> None:
    """Print per-user online duration over the window given by the options.

    Expects 'date' (YYYY-MM-DD start of the window), 'duration' (length in
    days), and 'realm' (optional realm string_id filter) in options.
    """
    window_start = datetime.datetime.strptime(options["date"], "%Y-%m-%d").replace(tzinfo=utc)
    window_end = window_start + datetime.timedelta(days=options["duration"])

    profiles = UserProfile.objects.all()
    if options["realm"]:
        profiles = profiles.filter(realm__string_id=options["realm"])

    print("Per-user online duration:\n")
    cumulative = datetime.timedelta(0)
    for profile in profiles:
        usage = seconds_usage_between(profile, window_start, window_end)
        # Skip users with no recorded activity in the window.
        if usage != datetime.timedelta(0):
            cumulative += usage
            print("%-*s%s" % (37, profile.email, usage,))

    print("\nTotal Duration: %s" % (cumulative,))
    print("\nTotal Duration in minutes: %s" % (cumulative.total_seconds() / 60.,))
    print("Total Duration amortized to a month: %s" % (cumulative.total_seconds() * 30. / 60.,))

class Command(BaseCommand):
    help = """Report analytics of user activity on a per-user and realm basis.

This command aggregates user activity data that is collected by each user using Zulip. It attempts
to approximate how much each user has been using Zulip per day, measured by recording each 15 minute
period where some activity has occurred (mouse move or keyboard activity).

It will correctly not count server-initiated reloads in the activity statistics.

The duration flag can be used to control how many days to show usage duration for

Usage: ./manage.py analyze_user_activity [--realm=zulip] [--date=2013-09-10] [--duration=1]

By default, if no date is selected 2013-09-10 is used. If no realm is provided, information
is shown for all realms"""

    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument('--realm', action='store')
        parser.add_argument('--date', action='store', default="2013-09-06")
        parser.add_argument('--duration', action='store', default=1, type=int,
                            help="How many days to show usage information for")

    def handle(self, *args: Any, **options: Any) -> None:
        analyze_activity(options)
[ "Dict[str, Any]", "CommandParser", "Any", "Any" ]
[ 277, 2075, 2418, 2434 ]
[ 291, 2088, 2421, 2437 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/check_analytics_state.py
from argparse import ArgumentParser
from datetime import timedelta

from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now

from analytics.models import InstallationCount, installation_epoch, \
    last_successful_fill
from analytics.lib.counts import COUNT_STATS, CountStat
from zerver.lib.timestamp import floor_to_hour, floor_to_day, verify_UTC, \
    TimezoneNotUTCException
from zerver.models import Realm

import os
import sys
import time

from typing import Any, Dict

# Nagios-style status codes, written into the state file below.
states = {
    0: "OK",
    1: "WARNING",
    2: "CRITICAL",
    3: "UNKNOWN"
}

class Command(BaseCommand):
    help = """Checks FillState table.

    Run as a cron job that runs every hour."""

    def handle(self, *args: Any, **options: Any) -> None:
        """Evaluate FillState health and publish it for Nagios to read."""
        fill_state = self.get_fill_state()
        status = fill_state['status']
        message = fill_state['message']

        state_file_path = "/var/lib/nagios_state/check-analytics-state"
        state_file_tmp = state_file_path + "-tmp"

        # Write to a temp file and rename so Nagios never reads a
        # partially written state file (rename is atomic on POSIX).
        with open(state_file_tmp, "w") as f:
            f.write("%s|%s|%s|%s\n" % (
                int(time.time()), status, states[status], message))
        os.rename(state_file_tmp, state_file_path)

    def get_fill_state(self) -> Dict[str, Any]:
        """Return {'status': <0|1|2>, 'message': ...} describing FillState health.

        For each registered CountStat, checks that the last successful fill
        is in UTC, lies on the stat's hour/day boundary, and is recent
        enough: one missed fill yields WARNING, two or more CRITICAL.
        """
        if not Realm.objects.exists():
            return {'status': 0, 'message': 'No realms exist, so not checking FillState.'}

        warning_unfilled_properties = []
        critical_unfilled_properties = []
        for property, stat in COUNT_STATS.items():
            last_fill = last_successful_fill(property)
            if last_fill is None:
                # Never filled; measure staleness from the installation epoch.
                last_fill = installation_epoch()
            try:
                verify_UTC(last_fill)
            except TimezoneNotUTCException:
                return {'status': 2, 'message': 'FillState not in UTC for %s' % (property,)}

            # Thresholds allow one missed fill (WARNING) before escalating
            # to roughly two missed fills (CRITICAL).
            if stat.frequency == CountStat.DAY:
                floor_function = floor_to_day
                warning_threshold = timedelta(hours=26)
                critical_threshold = timedelta(hours=50)
            else:  # CountStat.HOUR
                floor_function = floor_to_hour
                warning_threshold = timedelta(minutes=90)
                critical_threshold = timedelta(minutes=150)

            if floor_function(last_fill) != last_fill:
                return {'status': 2, 'message': 'FillState not on %s boundary for %s' %
                        (stat.frequency, property)}

            time_to_last_fill = timezone_now() - last_fill
            if time_to_last_fill > critical_threshold:
                critical_unfilled_properties.append(property)
            elif time_to_last_fill > warning_threshold:
                warning_unfilled_properties.append(property)

        if len(critical_unfilled_properties) == 0 and len(warning_unfilled_properties) == 0:
            return {'status': 0, 'message': 'FillState looks fine.'}
        if len(critical_unfilled_properties) == 0:
            return {'status': 1, 'message': 'Missed filling %s once.' %
                    (', '.join(warning_unfilled_properties),)}
        return {'status': 2, 'message': 'Missed filling %s once. Missed filling %s at least twice.' %
                (', '.join(warning_unfilled_properties),
                 ', '.join(critical_unfilled_properties))}
[ "Any", "Any" ]
[ 750, 766 ]
[ 753, 769 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/clear_analytics_tables.py
import sys
from argparse import ArgumentParser
from typing import Any

from django.core.management.base import BaseCommand

from analytics.lib.counts import do_drop_all_analytics_tables

class Command(BaseCommand):
    help = """Clear analytics tables."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        parser.add_argument('--force', action='store_true',
                            help="Clear analytics tables.")

    def handle(self, *args: Any, **options: Any) -> None:
        """Drop all analytics data; refuses to act without --force."""
        if not options['force']:
            print("Would delete all data from analytics tables (!); use --force to do so.")
            sys.exit(1)
        do_drop_all_analytics_tables()
[ "ArgumentParser", "Any", "Any" ]
[ 293, 495, 511 ]
[ 307, 498, 514 ]
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip
analytics/management/commands/clear_single_stat.py
import sys
from argparse import ArgumentParser
from typing import Any

from django.core.management.base import BaseCommand

from analytics.lib.counts import COUNT_STATS, do_drop_single_stat

class Command(BaseCommand):
    help = """Clear analytics tables."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        parser.add_argument('--force', action='store_true',
                            help="Actually do it.")
        parser.add_argument('--property', type=str,
                            help="The property of the stat to be cleared.")

    def handle(self, *args: Any, **options: Any) -> None:
        """Drop the analytics rows for one stat; requires a valid --property and --force."""
        stat_property = options['property']
        if stat_property not in COUNT_STATS:
            print("Invalid property: %s" % (stat_property,))
            sys.exit(1)
        if not options['force']:
            print("No action taken. Use --force.")
            sys.exit(1)

        do_drop_single_stat(stat_property)
[ "ArgumentParser", "Any", "Any" ]
[ 297, 647, 663 ]
[ 311, 650, 666 ]