#!/usr/bin/env python
# -*- coding: utf-8 -*-
def connect_gridfs_mongodb(hostname=None,db_name=None):
import pymongo, gridfs
if not hostname:
hostname='127.0.0.1'
try:
mongo = pymongo.MongoClient(hostname, waitQueueMultiple=10)
except pymongo.errors.ConnectionFailure:
hostname = '192.168.20.59'
mongo = pymongo.MongoClient(hostname, waitQueueMultiple=10)
    mongo_db = mongo[db_name]
    mongo_db.authenticate('mongo', 'mongo')
    fs = gridfs.GridFS(mongo_db)
return mongo_db, fs
def insert_filerecord_pymongo(db_name=None, collection_name=None, filename=None, metadata=None, colorstyle=None, alt=None, format=None, timestamp=None, **kwargs):
# Insert a New Document
import pymongo
mongo = pymongo.MongoClient('127.0.0.1', waitQueueMultiple=10)
mongo_db = mongo[db_name]
mongo_collection = mongo_db[collection_name]
# Returns the '_id' key associated with the newly created document
new_insertobj_id = mongo_collection.insert({'colorstyle': colorstyle,'format': format,'metadata': metadata,'alt': alt, 'upload_ct': 1,'timestamp': timestamp})
print "Inserted: {0}\nImageNumber: {1}\nFormat: {2}\nID: {3}".format(colorstyle,alt, format,new_insertobj_id)
return new_insertobj_id
def update_filerecord_pymongo(db_name=None, collection_name=None, filename=None, filepath=None, metadata=None, colorstyle=None, alt=None, format=None, timestamp=None, **kwargs):
    # Insert or refresh the file record for a given colorstyle
import os
import pymongo, bson
from bson import Binary, Code
from bson.json_util import dumps
import datetime
mongo_db, fs = connect_gridfs_mongodb(db_name=db_name)
if fs:
collection_name = 'fs.files'
if not alt:
alt = '1'
tmpfilename = str(filepath.split('/')[-1])
colorstyle = str(tmpfilename[:9])
image_number = str(tmpfilename.split('.')[-2][-1])
alt = image_number
content_type = str(tmpfilename.split('.')[-1]).lower().replace('jpg', 'jpeg')
if not timestamp:
timestamp = datetime.datetime.now()
mongo_collection = mongo_db[collection_name]
    key = {'colorstyle': colorstyle}
    key_str = key.keys()[0]
    check = mongo_collection.find({key_str: colorstyle}).count()
    if check == 1:
        print 'REFRESH IT ', check
        # refresh the existing record: keep the lowest alt and latest timestamp,
        # and increment the upload counter
        data = {
            "$set": {
                'colorstyle': colorstyle,
                'format': format,
                'metadata': metadata,
                'content_type': content_type,
            },
            "$min": {'alt': alt},
            "$max": {'timestamp': timestamp},
            "$inc": {'upload_ct': 1},
        }
    else:
        print 'NEW IT ', check
        data = {"$set": {'colorstyle': colorstyle, 'format': format, 'metadata': metadata, 'alt': alt, 'upload_ct': 1, 'timestamp': timestamp}}
    try:
        mongo_collection.create_index("md5", unique=True, sparse=False, background=True)
    except pymongo.errors.DuplicateKeyError:
        print ' DuplicateKey Error', key_str
    new_insertobj_id = mongo_collection.update(key, data, upsert=True, multi=True)
    print "Upserted: {0}\nImageNumber: {1}\nFormat: {2}\nResult: {3}".format(colorstyle, alt, format, new_insertobj_id)
    return new_insertobj_id
def get_duplicate_records(db_name=None, collection_name=None):
    # Aggregate documents that share the same filename/md5 pair
import pymongo, bson, datetime
from bson import Binary, Code
from bson.json_util import dumps
db, fs = connect_gridfs_mongodb(db_name=db_name)
mongo_collection = db[collection_name]
    pipeline = [
        {"$group": {"_id": {"firstField": "$filename", "secondField": "$md5"},
                    "uniqueIds": {"$addToSet": "$_id"},
                    "count": {"$sum": 1}}},
        {"$match": {"count": {"$gt": 1}}},
    ]
    res = mongo_collection.aggregate(pipeline)
return res
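# A possible way to consume get_duplicate_records(), kept as a commented sketch.
# Depending on the pymongo version, aggregate() returns either a dict with a
# 'result' key (pymongo 2.x) or a cursor (pymongo 3.x+); the database and
# collection names below are illustrative.
#
#   def print_duplicate_groups(db_name='gridfs_file7'):
#       res = get_duplicate_records(db_name=db_name, collection_name='fs.files')
#       groups = res.get('result', []) if isinstance(res, dict) else res
#       for group in groups:
#           print group['_id'], group['count'], group['uniqueIds']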
def retrieve_last_instance_gridfs(filepath=None, db_name=None):
db, fs = connect_gridfs_mongodb(db_name=db_name)
return fs
def find_record_gridfs(key=None, md5checksum=None, db_name=None, collection_name=None):
import pymongo, bson, datetime
from bson import Binary, Code
from bson.json_util import dumps
db, fs = connect_gridfs_mongodb(db_name=db_name)
mongo_collection = db[collection_name]
if not key:
key = {'md5checksum': md5checksum}
key_str = key.keys()[0]
key_val = key.values()[0]
check = mongo_collection.find({key_str: key_val}).count()
return check
def insert_file_gridfs(filepath=None, metadata=None, db_name=None, **kwargs):
import os
db, fs = connect_gridfs_mongodb(db_name=db_name)
try:
filename = os.path.basename(filepath)
ext = filename.split('.')[-1].lower()
if ext == 'jpg' or ext == 'jpeg':
content_type = 'image/jpeg'
elif ext == 'tif' or ext == 'tiff':
content_type= 'image/tiff'
else:
content_type= 'image/' + str(ext)
        if not find_record_gridfs(key={"filename": filename}, db_name=db_name, collection_name='fs.files'):
            try:
                with fs.new_file(filename=filename, content_type=content_type, metadata=metadata) as fp:
                    # read the source file in binary mode so image data is stored unmodified
                    with open(filepath, 'rb') as filedata:
                        fp.write(filedata.read())
return fp, db
except IOError:
print ' IO ERROR '
return False
else:
r = find_record_gridfs(key={"filename": filename}, db_name=db_name, collection_name='fs.files')
print r
except OSError:
print 'Failed ', filepath
def update_file_gridfs(filepath=None, metadata=None, db_name=None, **kwargs):
import os
db, fs = connect_gridfs_mongodb(db_name=db_name)
try:
filename = os.path.basename(filepath)
ext = filename.split('.')[-1].lower()
if ext == 'jpg' or ext == 'jpeg':
content_type = 'image/jpeg'
elif ext == 'tif' or ext == 'tiff':
content_type= 'image/tiff'
else:
content_type= 'image/' + str(ext)
        if not find_record_gridfs(key={"filename": filename}, db_name=db_name, collection_name='fs.files'):
            try:
                with fs.new_file(filename=filename, content_type=content_type, metadata=metadata) as fp:
                    # read the source file in binary mode so image data is stored unmodified
                    with open(filepath, 'rb') as filedata:
                        fp.write(filedata.read())
return fp, db
except IOError:
print ' IO ERROR '
return False
else:
r = find_record_gridfs(key={"filename": filename}, db_name=db_name, collection_name='fs.files')
update_filerecord_pymongo(filepath=filepath,metadata=metadata,db_name=db_name)
print r
except OSError:
print 'Failed ', filepath
def main(filepath=None,metadata=None,db_name=None):
print filepath
if not db_name:
db_name = 'gridfs_file7'
insert_res = insert_file_gridfs(filepath=filepath,metadata=metadata,db_name=db_name)
try:
return insert_res.items()
except AttributeError:
return insert_res
if __name__ == '__main__':
    import sys
    try:
        filepath = sys.argv[1]
    except IndexError:
        print 'No File supplied for insert'
        sys.exit(1)
    # use the same default database name that main() falls back to
    res = insert_file_gridfs(filepath=filepath, db_name='gridfs_file7')
    if res:
        print res[0]._id
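# Minimal usage sketch. The module name and file path are illustrative, and a
# MongoDB instance reachable with the credentials hard-coded in
# connect_gridfs_mongodb() is assumed:
#
#   python gridfs_insert.py /path/to/123456789_1.jpg
#
# or, from another module:
#
#   from gridfs_insert import main
#   main(filepath='/path/to/123456789_1.jpg', metadata={'source': 'studio'})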
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class QueryKeysOperations(object):
"""QueryKeysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.search.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create(
self,
resource_group_name, # type: str
search_service_name, # type: str
name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.QueryKey"
"""Generates a new query key for the specified search service. You can create up to 50 query keys
per service.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param name: The name of the new query API key.
:type name: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QueryKey, or the result of cls(response)
:rtype: ~azure.mgmt.search.models.QueryKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.QueryKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('QueryKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/createQueryKey/{name}'} # type: ignore
def list_by_search_service(
self,
resource_group_name, # type: str
search_service_name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListQueryKeysResult"]
"""Returns the list of query API keys for the given Azure Cognitive Search service.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListQueryKeysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.search.models.ListQueryKeysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListQueryKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_search_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListQueryKeysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_search_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/listQueryKeys'} # type: ignore
def delete(
self,
resource_group_name, # type: str
search_service_name, # type: str
key, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the specified query key. Unlike admin keys, query keys are not regenerated. The process
for regenerating a query key is to delete and then recreate it.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param key: The query key to be deleted. Query keys are identified by value, not by name.
:type key: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'key': self._serialize.url("key", key, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/deleteQueryKey/{key}'} # type: ignore
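# Usage sketch (not part of the generated module): these operations are normally
# reached through SearchManagementClient rather than instantiated directly. The
# credential class and resource names below are placeholder assumptions.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.search import SearchManagementClient
#
#   client = SearchManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   key = client.query_keys.create("my-resource-group", "my-search-service", "my-query-key")
#   for query_key in client.query_keys.list_by_search_service("my-resource-group", "my-search-service"):
#       print(query_key.name, query_key.key)
#   client.query_keys.delete("my-resource-group", "my-search-service", key.key)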
# commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.util import (
Actor,
Iterable,
Stats,
)
from git.diff import Diffable
from tree import Tree
from gitdb import IStream
from cStringIO import StringIO
import base
from gitdb.util import (
hex_to_bin
)
from util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date
)
from time import (
time,
altzone
)
import os
import sys
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = 'i18n.commitencoding'
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type = "commit"
__slots__ = ("tree",
"author", "authored_date", "author_tz_offset",
"committer", "committed_date", "committer_tz_offset",
"message", "parents", "encoding")
_id_attribute_ = "binsha"
def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
committer=None, committed_date=None, committer_tz_offset=None,
message=None, parents=None, encoding=None):
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
        :param parents: tuple( Commit, ... )
            is a list or tuple of Commit objects (or commit ids) which are our
            parent(s) in the commit dependency graph
:param tree: Tree
Tree object
:param author: Actor
is the author string ( will be implicitly converted into an Actor object )
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer string
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
        :param committer_tz_offset: int_seconds_west_of_utc
            is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:return: git.Commit
:note: Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit,self).__init__(repo, binsha)
        if tree is not None:
            assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
            self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
@classmethod
def _get_intermediate_items(cls, commit):
return commit.parents
def _set_cache_(self, attr):
if attr in Commit.__slots__:
            # read the data in one chunk, it's faster - then provide a file wrapper
binsha, typename, self.size, stream = self.repo.odb.stream(self.binsha)
self._deserialize(StringIO(stream.read()))
else:
super(Commit, self)._set_cache_(attr)
# END handle attrs
@property
def summary(self):
""":return: First line of the commit message"""
return self.message.split('\n', 1)[0]
def count(self, paths='', **kwargs):
"""Count the number of commits reachable from this commit
:param paths:
            is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
            the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
else:
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
@property
def name_rev(self):
"""
:return:
            String describing the commit's hex sha based on the closest Reference.
Mostly useful for UI purposes"""
return self.repo.git.name_rev(self)
@classmethod
def iter_items(cls, repo, rev, paths='', **kwargs):
"""Find all commits matching the given criteria.
:param repo: is the Repo
:param rev: revision specifier, see git-rev-parse for viable options
:param paths:
            is an optional path or list of paths, if set only Commits that include the path
or paths will be considered
:param kwargs:
optional keyword arguments to git rev-list where
``max_count`` is the maximum number of commits to fetch
``skip`` is the number of commits to skip
``since`` all commits since i.e. '1970-01-01'
:return: iterator yielding Commit items"""
if 'pretty' in kwargs:
raise ValueError("--pretty cannot be used as parsing expects single sha's only")
# END handle pretty
args = list()
if paths:
args.extend(('--', paths))
# END if paths
proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
return cls._iter_from_process_or_stream(repo, proc)
def iter_parents(self, paths='', **kwargs):
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
skip = kwargs.get("skip", 1)
if skip == 0: # skip ourselves
skip = 1
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs)
@property
def stats(self):
"""Create a git stat from changes between this commit and its first parent
or from all changes done if this is the very first commit.
:return: git.Stats"""
if not self.parents:
text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
text2 = ""
for line in text.splitlines()[1:]:
(insertions, deletions, filename) = line.split("\t")
text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
text = text2
else:
text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
return Stats._list_from_string(self.repo, text)
@classmethod
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
"""Parse out commit information into a list of Commit objects
We expect one-line per commit, and parse the actual commit information directly
        from our lightning-fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
stream = proc_or_stream
if not hasattr(stream,'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, rest = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
# END for each line in stream
@classmethod
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False):
"""Commit the given tree, creating a commit object.
:param repo: Repo object the commit should be part of
:param tree: Tree object or hex or bin sha
the tree of the new commit
:param message: Commit message. It may be an empty string if no message is provided.
It will be converted to a string in any case.
:param parent_commits:
Optional Commit objects to use as parents for the new commit.
If empty list, the commit will have no parents at all and become
a root commit.
            If None, the current head commit will be the parent of the
new commit object
:param head:
If True, the HEAD will be advanced to the new commit automatically.
Else the HEAD will remain pointing on the previous commit. This could
lead to undesired results when diffing files.
:return: Commit object representing the new commit
:note:
            Additional information about the committer and author is taken from the
environment or from the git configuration, see git-commit-tree for
more information"""
parents = parent_commits
if parent_commits is None:
try:
parent_commits = [ repo.head.commit ]
except ValueError:
# empty repositories have no head commit
parent_commits = list()
# END handle parent commits
# END if parent commits are unset
# retrieve all additional information, create a commit object, and
# serialize it
# Generally:
# * Environment variables override configuration values
# * Sensible defaults are set according to the git documentation
        # COMMITTER AND AUTHOR INFO
cr = repo.config_reader()
env = os.environ
committer = Actor.committer(cr)
author = Actor.author(cr)
# PARSE THE DATES
unix_time = int(time())
offset = altzone
author_date_str = env.get(cls.env_author_date, '')
if author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
# END set author time
committer_date_str = env.get(cls.env_committer_date, '')
if committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
# END set committer time
# assume utf8 encoding
enc_section, enc_option = cls.conf_encoding.split('.')
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
# if the tree is no object, make sure we create one - otherwise
# the created commit object is invalid
if isinstance(tree, str):
tree = repo.tree(tree)
# END tree conversion
# CREATE NEW COMMIT
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
stream = StringIO()
new_commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
new_commit.binsha = istream.binsha
if head:
# need late import here, importing git at the very beginning throws
# as well ...
import git.refs
try:
repo.head.set_commit(new_commit, logmsg="commit: %s" % message)
except ValueError:
# head is not yet set to the ref our HEAD points to
# Happens on first commit
import git.refs
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
# END handle empty repositories
# END advance head handling
return new_commit
#{ Serializable Implementation
def _serialize(self, stream):
write = stream.write
write("tree %s\n" % self.tree)
for p in self.parents:
write("parent %s\n" % p)
a = self.author
aname = a.name
if isinstance(aname, unicode):
aname = aname.encode(self.encoding)
# END handle unicode in name
c = self.committer
fmt = "%s %s <%s> %s %s\n"
write(fmt % ("author", aname, a.email,
self.authored_date,
altz_to_utctz_str(self.author_tz_offset)))
# encode committer
aname = c.name
if isinstance(aname, unicode):
aname = aname.encode(self.encoding)
# END handle unicode in name
write(fmt % ("committer", aname, c.email,
self.committed_date,
altz_to_utctz_str(self.committer_tz_offset)))
if self.encoding != self.default_encoding:
write("encoding %s\n" % self.encoding)
write("\n")
        # write plain bytes, be sure it's encoded according to our encoding
if isinstance(self.message, unicode):
write(self.message.encode(self.encoding))
else:
write(self.message)
# END handle encoding
return self
def _deserialize(self, stream):
""":param from_rev_list: if true, the stream format is coming from the rev-list command
Otherwise it is assumed to be a plain data stream from our object"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id<<12, '')
self.parents = list()
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith('parent'):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1])))
# END for each parent line
self.parents = tuple(self.parents)
self.author, self.authored_date, self.author_tz_offset = parse_actor_and_date(next_line)
self.committer, self.committed_date, self.committer_tz_offset = parse_actor_and_date(readline())
# we may now have the gpgsig line, the coding line, or an empty line followed by the optional message
line = readline().strip()
if line.startswith("gpgsig"):
while line != "-----END PGP SIGNATURE-----":
line = readline().strip()
enc = readline()
else:
enc = line
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
# read encoding or empty line to separate message
enc = enc.strip()
if enc:
self.encoding = enc[enc.find(' ')+1:]
# now comes the message separator
readline()
# END handle encoding
# decode the authors name
try:
self.author.name = self.author.name.decode(self.encoding)
except UnicodeDecodeError:
print >> sys.stderr, "Failed to decode author name '%s' using encoding %s" % (self.author.name, self.encoding)
# END handle author's encoding
# decode committer name
try:
self.committer.name = self.committer.name.decode(self.encoding)
except UnicodeDecodeError:
print >> sys.stderr, "Failed to decode committer name '%s' using encoding %s" % (self.committer.name, self.encoding)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding)
except UnicodeDecodeError:
print >> sys.stderr, "Failed to decode message '%s' using encoding %s" % (self.message, self.encoding)
# END exception handling
return self
#} END serializable implementation
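# A brief usage sketch (not part of this module); it assumes a git repository in
# the current working directory and uses the top-level GitPython 'Repo' entry point:
#
#   from git import Repo
#
#   repo = Repo('.')
#   for commit in Commit.iter_items(repo, 'master', max_count=5):
#       print commit.hexsha, commit.summary
#       print commit.stats.total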
import unittest
import numpy as np
from pycqed.instrument_drivers.meta_instrument import kernel_object as ko
from pycqed.measurement import kernel_functions as kf
from qcodes import station
"""
Kernel TODO's:
- Rename distortions class
- path for saving the kernels should not be the notebook directory
- test saving and loading
- add calculate only if parameters changed option
- Way to include RT corrections or any generic file
- Change parameters to SI units
- Automatically pick order of distortions to speed
up convolutions
- Add shortening of kernel if possible
"""
class Test_KernelObject(unittest.TestCase):
@classmethod
def setUpClass(self):
self.station = station.Station()
self.k0 = ko.DistortionKernel('k0')
self.k1 = ko.DistortionKernel('k1')
self.station.add_component(self.k0)
self.station.add_component(self.k1)
self.k0.sampling_rate(1e9)
self.k1.sampling_rate(1e9)
def test_skin_kernel(self):
self.k0.skineffect_alpha(0.1)
self.k0.skineffect_length(40e-9)
kObj_skin = self.k0.get_skin_kernel()
kf_skin = kf.skin_kernel(alpha=.1, length=40)
np.testing.assert_almost_equal(kObj_skin, kf_skin)
def test_bounce_kernel(self):
bl = 40e-9
ba = .2
bt = 12e-9
self.k0.bounce_amp_1(ba)
self.k0.bounce_tau_1(bt)
self.k0.bounce_length_1(bl)
kObj_bounce = self.k0.get_bounce_kernel_1()
kf_bounce = kf.bounce_kernel(amp=ba, time=bt*1e9, length=bl*1e9)
np.testing.assert_almost_equal(kObj_bounce, kf_bounce)
def test_decay_kernel(self):
dA = 3
dtau = 15e-9
dl = 100e-9
for i in [1, 2]:
self.k0.set('decay_amp_{}'.format(i), dA)
self.k0.set('decay_tau_{}'.format(i), dtau)
self.k0.set('decay_length_{}'.format(i), dl)
kObj_dec1 = self.k0.get_decay_kernel_1()
kObj_dec2 = self.k0.get_decay_kernel_1()
kf_dec = kf.decay_kernel(amp=dA, tau=dtau*1e9, length=dl*1e9)
np.testing.assert_almost_equal(kf_dec, kObj_dec1)
np.testing.assert_almost_equal(kf_dec, kObj_dec2)
def test_config_changed_flag(self):
print('config_changed_flag')
self.k0.decay_amp_1(.9)
self.assertEqual(self.k0.config_changed(), True)
self.k0.kernel()
self.assertEqual(self.k0.config_changed(), False)
self.k0.decay_amp_1(.9)
self.assertEqual(self.k0.config_changed(), False)
self.k0.decay_amp_1(.91)
self.assertEqual(self.k0.config_changed(), True)
def test_kernel_loading(self):
pass
# FIXME: this preloaded kernel should be added to the repo and the test
# restored
# datadir = os.path.join(pq.__path__[0], 'tests', 'test_data',
# 'test_kernels')
# self.k0.kernel_dir(datadir)
# print(self.k0.kernel_dir())
# self.k0.kernel_list(['precompiled_RT_20161206.txt'])
# kernel = self.k0.kernel()
# def test_convolve_kernels(self):
# kernel_list
# self.k0.convolve_kernel(kernel_list, length)
def test_convolve_kernel(self):
pass
# def test_kernel_loading(self):
# self.k0.corrections_length(50) # ns todo rescale.
# self.k0.kernel_to_cache()
# self.k0.get_corrections_kernel()
# def test_smart_loading(self):
# pass
@classmethod
def tearDownClass(self):
self.k0.close()
class Test_Kernel_functions(unittest.TestCase):
def test_bounce(self):
t0 = np.arange(100)
y0 = kf.bounce(t0, .2, 20, sampling_rate=1)
t1 = np.arange(100)/1e9
y1 = kf.bounce(t1, .2, 20/1e9, sampling_rate=1e9)
np.testing.assert_almost_equal(y0, y1)
expected_bounce = np.concatenate([np.ones(20)*.8, np.ones(80)])
np.testing.assert_almost_equal(expected_bounce, y1)
def test_bounce_kernel(self):
amp = 0.1
tau = 10e-9
length = 100e-9
sampling_rate = 1e9
ker_real = kf.bounce_kernel(amp=amp, time=tau, length=length,
sampling_rate=sampling_rate)
ker_sampled = kf.bounce_kernel(amp=amp, time=tau*sampling_rate,
length=length*sampling_rate,
sampling_rate=1)
np.testing.assert_almost_equal(ker_real, ker_sampled)
nr_samples = int(length*sampling_rate)
t_kernel = np.arange(nr_samples)/sampling_rate
bounce = kf.bounce(t_kernel, amp=amp, time=tau,
sampling_rate=sampling_rate)
y_corr0 = np.convolve(ker_real, bounce)
np.testing.assert_almost_equal(y_corr0[10:80], np.ones(70), decimal=2)
def test_bounce_kernel_2p4GS(self):
amp = 0.1
tau = 10e-9
length = 100e-9
sampling_rate = 2.4e9
ker_real = kf.bounce_kernel(amp=amp, time=tau, length=length,
sampling_rate=sampling_rate)
nr_samples = int(length*sampling_rate)
t_kernel = np.arange(nr_samples)/sampling_rate
bounce = kf.bounce(t_kernel, amp=amp, time=tau,
sampling_rate=sampling_rate)
y_corr0 = np.convolve(ker_real, bounce)
np.testing.assert_almost_equal(y_corr0[10:80], np.ones(70), decimal=2)
def test_decay_kernel(self):
A = -.4
tau = 10e-9
x = np.arange(200)/1e9
y_signal = 1 + A * np.exp(-x/tau)
sampling_rate = 1e9
kf_dec = kf.decay_kernel(
amp=A, tau=tau, length=100e-6, sampling_rate=sampling_rate)
kf_dec_2 = kf.decay_kernel(amp=A, tau=tau*sampling_rate,
length=100e-6*sampling_rate,
sampling_rate=1)
y_corr0 = np.convolve(y_signal, kf_dec)
y_corr1 = np.convolve(y_signal, kf_dec_2)
np.testing.assert_almost_equal(y_corr0, y_corr1)
# Test that the correction produces a square wave
# not the entire wave is tested as the beginning shows a small
# imperfection
np.testing.assert_almost_equal(y_corr0[10:80], np.ones(70), decimal=2)
# Testing on a different sampling rate
sampling_rate = 2.4e9
offset = .95
x24GS = np.arange(200)/sampling_rate
y24Gs_signal = A * np.exp(-x24GS/tau) + offset
kf_dec = kf.decay_kernel(
amp=A, tau=tau, length=100e-6, offset=offset,
sampling_rate=sampling_rate)
y24Gs_corr0 = np.convolve(y24Gs_signal, kf_dec)
np.testing.assert_almost_equal(y24Gs_corr0[10:80], np.ones(70),
decimal=2)
def test_decay_small_offset(self):
A = 1
tau = 4e-6
sampling_rate = 2.4e9
offset = 0.2
x24GS = np.arange(200)/sampling_rate
y24Gs_signal = A * np.exp(-x24GS/tau) + offset
kf_dec = kf.decay_kernel(
amp=A, tau=tau, length=100e-6, offset=offset,
sampling_rate=sampling_rate)
y24Gs_corr0 = np.convolve(y24Gs_signal, kf_dec)
np.testing.assert_almost_equal(y24Gs_corr0[10:80], np.ones(70),
decimal=2)
def test_heaviside(self):
hs = kf.heaviside(np.array([-1, -.5, 0, 1, 2]))
np.testing.assert_almost_equal(hs, [0, 0, 1, 1, 1])
def test_square(self):
sq = kf.square(np.arange(-2, 5), 3)
np.testing.assert_almost_equal(sq, [0, 0, 1, 1, 1, 0, 0])
def test_skin_kernel(self):
skin_kernel_test = kf.skin_kernel(alpha=.1, length=40)
known_skin_vals = np.array([
1.00540222e+00, -1.59080709e-03, -7.02241770e-04,
-4.17894781e-04, -2.84886822e-04, -2.10146281e-04,
-1.63242389e-04, -1.31535177e-04, -1.08919606e-04,
-9.21203433e-05, -7.92379832e-05, -6.91027435e-05,
-6.09587865e-05, -5.42982090e-05, -4.87683793e-05,
-4.41176036e-05, -4.01619210e-05, -3.67640800e-05,
-3.38198160e-05, -3.12486520e-05, -2.89875850e-05,
-2.69866621e-05, -2.52058216e-05, -2.36126000e-05,
-2.21804419e-05, -2.08874370e-05, -1.97153637e-05,
-1.86489578e-05, -1.76753461e-05, -1.67836041e-05,
-1.59644070e-05, -1.52097526e-05, -1.45127390e-05,
-1.38673850e-05, -1.32684847e-05, -1.27114874e-05,
-1.21924004e-05, -1.17077070e-05, -1.12542990e-05,
-1.08294205e-05])
np.testing.assert_array_almost_equal(
skin_kernel_test, known_skin_vals, decimal=7)
def test_poly_kernel(self):
test_kernel = kf.poly_kernel([0, 0, 1], length=40)
known_vals = np.zeros(40)
known_vals[0] = 1
np.testing.assert_array_almost_equal(
test_kernel, known_vals, decimal=7)
coeffs = [1, 0, 1]
length = 10e-9
sampling_rate = 1e9
test_kernel = kf.poly_kernel(coeffs, length=length*sampling_rate,
sampling_rate=1)
known_vals = np.arange(10)*2-1
known_vals[0] = 1
np.testing.assert_array_almost_equal(
test_kernel, known_vals, decimal=7)
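# Illustration (not part of the test suite): the property these kernel tests rely
# on is that convolving a distorted step response with the matching correction
# kernel recovers an approximately flat unit response, e.g. for the decay kernel:
#
#   import numpy as np
#   from pycqed.measurement import kernel_functions as kf
#
#   sampling_rate = 1e9
#   t = np.arange(200) / sampling_rate
#   distorted_step = 1 - 0.4 * np.exp(-t / 10e-9)   # step with an exponential sag
#   kernel = kf.decay_kernel(amp=-0.4, tau=10e-9, length=100e-6,
#                            sampling_rate=sampling_rate)
#   corrected = np.convolve(distorted_step, kernel)
#   # corrected[10:80] is close to np.ones(70), as test_decay_kernel asserts above.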
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import six
import jsonschema
from jsonschema import _validators
from jsonschema.validators import create
from st2common.exceptions.action import InvalidActionParameterException
from st2common.util import jsonify
from st2common.util.misc import deep_update
__all__ = [
'get_validator',
'get_draft_schema',
'get_action_parameters_schema',
'get_schema_for_action_parameters',
'get_schema_for_resource_parameters',
'is_property_type_single',
'is_property_type_list',
'is_property_type_anyof',
'is_property_type_oneof',
'is_property_nullable',
'is_attribute_type_array',
'is_attribute_type_object',
'validate'
]
# https://github.com/json-schema/json-schema/blob/master/draft-04/schema
# The source material is licensed under the AFL or BSD license.
# Both draft 4 and the custom schema have additionalProperties set to false by default.
# The custom schema differs from draft 4 with the extension of position, immutable,
# and draft 3 version of required.
PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))
SCHEMAS = {
'draft4': jsonify.load_file(os.path.join(PATH, 'draft4.json')),
'custom': jsonify.load_file(os.path.join(PATH, 'custom.json')),
# Custom schema for action params which doesn't allow parameter "type" attribute to be array
'action_params': jsonify.load_file(os.path.join(PATH, 'action_params.json'))
}
SCHEMA_ANY_TYPE = {
"anyOf": [
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"}
]
}
RUNNER_PARAM_OVERRIDABLE_ATTRS = [
'default',
'description',
'enum',
'immutable',
'required'
]
def get_draft_schema(version='custom', additional_properties=False):
schema = copy.deepcopy(SCHEMAS[version])
if additional_properties and 'additionalProperties' in schema:
del schema['additionalProperties']
return schema
def get_action_parameters_schema(additional_properties=False):
"""
Return a generic schema which is used for validating action parameters definition.
"""
return get_draft_schema(version='action_params', additional_properties=additional_properties)
CustomValidator = create(
meta_schema=get_draft_schema(version='custom', additional_properties=True),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"allOf": _validators.allOf_draft4,
u"anyOf": _validators.anyOf_draft4,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
u"format": _validators.format,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maxProperties": _validators.maxProperties_draft4,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minProperties": _validators.minProperties_draft4,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
u"not": _validators.not_draft4,
u"oneOf": _validators.oneOf_draft4,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _validators.properties_draft3,
u"type": _validators.type_draft4,
u"uniqueItems": _validators.uniqueItems,
},
version="custom_validator",
)
def is_property_type_single(property_schema):
return (isinstance(property_schema, dict) and
'anyOf' not in property_schema.keys() and
'oneOf' not in property_schema.keys() and
not isinstance(property_schema.get('type', 'string'), list))
def is_property_type_list(property_schema):
return (isinstance(property_schema, dict) and
isinstance(property_schema.get('type', 'string'), list))
def is_property_type_anyof(property_schema):
return isinstance(property_schema, dict) and 'anyOf' in property_schema.keys()
def is_property_type_oneof(property_schema):
return isinstance(property_schema, dict) and 'oneOf' in property_schema.keys()
def is_property_nullable(property_type_schema):
# For anyOf and oneOf, the property_schema is a list of types.
if isinstance(property_type_schema, list):
return len([t for t in property_type_schema
if ((isinstance(t, six.string_types) and t == 'null') or
(isinstance(t, dict) and t.get('type', 'string') == 'null'))]) > 0
return (isinstance(property_type_schema, dict) and
property_type_schema.get('type', 'string') == 'null')
def is_attribute_type_array(attribute_type):
return (attribute_type == 'array' or
(isinstance(attribute_type, list) and 'array' in attribute_type))
def is_attribute_type_object(attribute_type):
return (attribute_type == 'object' or
(isinstance(attribute_type, list) and 'object' in attribute_type))
def assign_default_values(instance, schema):
"""
Assign default values on the provided instance based on the schema default specification.
"""
instance = copy.deepcopy(instance)
instance_is_dict = isinstance(instance, dict)
instance_is_array = isinstance(instance, list)
if not instance_is_dict and not instance_is_array:
return instance
properties = schema.get('properties', {})
for property_name, property_data in six.iteritems(properties):
has_default_value = 'default' in property_data
default_value = property_data.get('default', None)
        # Assign the default value on the instance so the validation doesn't fail if
        # "required" is true but the value is not provided
if has_default_value:
if instance_is_dict and instance.get(property_name, None) is None:
instance[property_name] = default_value
elif instance_is_array:
for index, _ in enumerate(instance):
if instance[index].get(property_name, None) is None:
instance[index][property_name] = default_value
# Support for nested properties (array and object)
attribute_type = property_data.get('type', None)
schema_items = property_data.get('items', {})
# Array
if (is_attribute_type_array(attribute_type) and
schema_items and schema_items.get('properties', {})):
array_instance = instance.get(property_name, None)
array_schema = schema['properties'][property_name]['items']
if array_instance is not None:
# Note: We don't perform subschema assignment if no value is provided
instance[property_name] = assign_default_values(instance=array_instance,
schema=array_schema)
# Object
if is_attribute_type_object(attribute_type) and property_data.get('properties', {}):
object_instance = instance.get(property_name, None)
object_schema = schema['properties'][property_name]
if object_instance is not None:
# Note: We don't perform subschema assignment if no value is provided
instance[property_name] = assign_default_values(instance=object_instance,
schema=object_schema)
return instance
def modify_schema_allow_default_none(schema):
"""
Manipulate the provided schema so None is also an allowed value for each attribute which
defines a default value of None.
"""
schema = copy.deepcopy(schema)
properties = schema.get('properties', {})
for property_name, property_data in six.iteritems(properties):
is_optional = not property_data.get('required', False)
has_default_value = 'default' in property_data
default_value = property_data.get('default', None)
property_schema = schema['properties'][property_name]
if (has_default_value or is_optional) and default_value is None:
            # If the property is anyOf or oneOf then it has to be processed differently.
if (is_property_type_anyof(property_schema) and
not is_property_nullable(property_schema['anyOf'])):
property_schema['anyOf'].append({'type': 'null'})
elif (is_property_type_oneof(property_schema) and
not is_property_nullable(property_schema['oneOf'])):
property_schema['oneOf'].append({'type': 'null'})
elif (is_property_type_list(property_schema) and
not is_property_nullable(property_schema.get('type'))):
property_schema['type'].append('null')
elif (is_property_type_single(property_schema) and
not is_property_nullable(property_schema.get('type'))):
property_schema['type'] = [property_schema.get('type', 'string'), 'null']
# Support for nested properties (array and object)
attribute_type = property_data.get('type', None)
schema_items = property_data.get('items', {})
# Array
if (is_attribute_type_array(attribute_type) and
schema_items and schema_items.get('properties', {})):
array_schema = schema_items
array_schema = modify_schema_allow_default_none(schema=array_schema)
schema['properties'][property_name]['items'] = array_schema
# Object
if is_attribute_type_object(attribute_type) and property_data.get('properties', {}):
object_schema = property_data
object_schema = modify_schema_allow_default_none(schema=object_schema)
schema['properties'][property_name] = object_schema
return schema
def validate(instance, schema, cls=None, use_default=True, allow_default_none=False, *args,
**kwargs):
"""
Custom validate function which supports default arguments combined with the "required"
property.
Note: This function returns cleaned instance with default values assigned.
:param use_default: True to support the use of the optional "default" property.
:type use_default: ``bool``
"""
instance = copy.deepcopy(instance)
schema_type = schema.get('type', None)
instance_is_dict = isinstance(instance, dict)
if use_default and allow_default_none:
schema = modify_schema_allow_default_none(schema=schema)
if use_default and schema_type == 'object' and instance_is_dict:
instance = assign_default_values(instance=instance, schema=schema)
# pylint: disable=assignment-from-no-return
jsonschema.validate(instance=instance, schema=schema, cls=cls, *args, **kwargs)
return instance
VALIDATORS = {
'draft4': jsonschema.Draft4Validator,
'custom': CustomValidator
}
def get_validator(version='custom'):
validator = VALIDATORS[version]
return validator
def validate_runner_parameter_attribute_override(action_ref, param_name, attr_name,
runner_param_attr_value, action_param_attr_value):
"""
Validate that the provided parameter from the action schema can override the
runner parameter.
"""
param_values_are_the_same = action_param_attr_value == runner_param_attr_value
if (attr_name not in RUNNER_PARAM_OVERRIDABLE_ATTRS and not param_values_are_the_same):
raise InvalidActionParameterException(
'The attribute "%s" for the runner parameter "%s" in action "%s" '
'cannot be overridden.' % (attr_name, param_name, action_ref))
return True
def get_schema_for_action_parameters(action_db):
"""
Dynamically construct JSON schema for the provided action from the parameters metadata.
Note: This schema is used to validate parameters which are passed to the action.
"""
from st2common.util.action_db import get_runnertype_by_name
runner_type = get_runnertype_by_name(action_db.runner_type['name'])
    # Note: We need to perform a deep merge because a user can override just a single
    # attribute of a parameter in the action metadata.
parameters_schema = {}
deep_update(parameters_schema, runner_type.runner_parameters)
deep_update(parameters_schema, action_db.parameters)
    # Perform validation, make sure the user is not providing parameters which can't
    # be overridden
runner_parameter_names = runner_type.runner_parameters.keys()
for name, schema in six.iteritems(action_db.parameters):
if name not in runner_parameter_names:
continue
for attribute, value in six.iteritems(schema):
runner_param_value = runner_type.runner_parameters[name].get(attribute)
validate_runner_parameter_attribute_override(action_ref=action_db.ref,
param_name=name,
attr_name=attribute,
runner_param_attr_value=runner_param_value,
action_param_attr_value=value)
schema = get_schema_for_resource_parameters(parameters_schema=parameters_schema)
if parameters_schema:
schema['title'] = action_db.name
if action_db.description:
schema['description'] = action_db.description
return schema
def get_schema_for_resource_parameters(parameters_schema):
"""
Dynamically construct JSON schema for the provided resource from the parameters metadata.
"""
def normalize(x):
return {k: v if v else SCHEMA_ANY_TYPE for k, v in six.iteritems(x)}
schema = {}
properties = {}
properties.update(normalize(parameters_schema))
if properties:
schema['type'] = 'object'
schema['properties'] = properties
schema['additionalProperties'] = False
return schema
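# A small usage sketch (illustrative only): validating a parameter dict against a
# schema that declares a default, with use_default filling in the missing value.
#
#   schema = {
#       'type': 'object',
#       'properties': {
#           'timeout': {'type': 'integer', 'default': 60, 'required': True},
#           'host': {'type': 'string'},
#       },
#       'additionalProperties': False,
#   }
#   cleaned = validate({'host': 'example.org'}, schema, cls=get_validator(),
#                      use_default=True)
#   # cleaned == {'host': 'example.org', 'timeout': 60}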
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from senlinclient import plugin
from senlinclient.v1 import client
@mock.patch.object(plugin, 'create_connection')
class ClientTest(testtools.TestCase):
def setUp(self):
super(ClientTest, self).setUp()
self.conn = mock.Mock()
self.service = mock.Mock()
self.conn.cluster = self.service
def test_init_default(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
self.assertEqual(self.conn, sc.conn)
self.assertEqual(self.service, sc.service)
mock_conn.assert_called_once_with(prof=None, user_agent=None)
def test_init_with_params(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client(prof='FOO', user_agent='BAR', zoo='LARR')
self.assertEqual(self.conn, sc.conn)
self.assertEqual(self.service, sc.service)
mock_conn.assert_called_once_with(prof='FOO', user_agent='BAR',
zoo='LARR')
def test_profile_types(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.profile_types(foo='bar')
self.assertEqual(self.service.profile_types.return_value, res)
self.service.profile_types.assert_called_once_with(foo='bar')
def test_get_profile_type(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_profile_type('FOOBAR')
self.assertEqual(self.service.get_profile_type.return_value, res)
self.service.get_profile_type.assert_called_once_with('FOOBAR')
def test_profiles(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.profiles(foo='bar')
self.assertEqual(self.service.profiles.return_value, res)
self.service.profiles.assert_called_once_with(foo='bar')
def test_get_profile(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_profile('FOOBAR')
self.assertEqual(self.service.get_profile.return_value, res)
self.service.get_profile.assert_called_once_with('FOOBAR')
def test_update_profile(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.update_profile('FAKE_ID', foo='bar')
self.assertEqual(self.service.update_profile.return_value, res)
self.service.update_profile.assert_called_once_with('FAKE_ID',
foo='bar')
def test_delete_profile(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_profile('FAKE_ID')
self.assertEqual(self.service.delete_profile.return_value, res)
self.service.delete_profile.assert_called_once_with(
'FAKE_ID', True)
def test_delete_profile_ignore_missing(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_profile('FAKE_ID', False)
self.assertEqual(self.service.delete_profile.return_value, res)
self.service.delete_profile.assert_called_once_with(
'FAKE_ID', False)
def test_policy_types(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.policy_types(foo='bar')
self.assertEqual(self.service.policy_types.return_value, res)
self.service.policy_types.assert_called_once_with(foo='bar')
def test_get_policy_type(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_policy_type('FOOBAR')
self.assertEqual(self.service.get_policy_type.return_value, res)
self.service.get_policy_type.assert_called_once_with('FOOBAR')
def test_policies(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.policies(foo='bar')
self.assertEqual(self.service.policies.return_value, res)
self.service.policies.assert_called_once_with(foo='bar')
def test_get_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_policy('FOOBAR')
self.assertEqual(self.service.get_policy.return_value, res)
self.service.get_policy.assert_called_once_with('FOOBAR')
def test_update_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.update_policy('FAKE_ID', foo='bar')
self.assertEqual(self.service.update_policy.return_value, res)
self.service.update_policy.assert_called_once_with(
'FAKE_ID', foo='bar')
def test_delete_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_policy('FAKE_ID')
self.assertEqual(self.service.delete_policy.return_value, res)
self.service.delete_policy.assert_called_once_with(
'FAKE_ID', True)
def test_delete_policy_ignore_missing(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_policy('FAKE_ID', False)
self.assertEqual(self.service.delete_policy.return_value, res)
self.service.delete_policy.assert_called_once_with(
'FAKE_ID', False)
def test_clusters(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.clusters(foo='bar')
self.assertEqual(self.service.clusters.return_value, res)
self.service.clusters.assert_called_once_with(foo='bar')
def test_get_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_cluster('FOOBAR')
self.assertEqual(self.service.get_cluster.return_value, res)
self.service.get_cluster.assert_called_once_with('FOOBAR')
def test_create_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.create_cluster(name='FOO', bar='zoo')
self.assertEqual(self.service.create_cluster.return_value, res)
self.service.create_cluster.assert_called_once_with(
name='FOO', bar='zoo')
def test_update_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.update_cluster('FAKE_ID', foo='bar')
self.assertEqual(self.service.update_cluster.return_value, res)
self.service.update_cluster.assert_called_once_with(
'FAKE_ID', foo='bar')
def test_delete_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_cluster('FAKE_ID', True)
self.assertEqual(self.service.delete_cluster.return_value, res)
self.service.delete_cluster.assert_called_once_with(
'FAKE_ID', True, False)
def test_delete_cluster_ignore_missing(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_cluster('FAKE_ID', True, False)
self.assertEqual(self.service.delete_cluster.return_value, res)
self.service.delete_cluster.assert_called_once_with(
'FAKE_ID', True, False)
def test_cluster_add_nodes(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_add_nodes('FAKE_ID', ['NODE1', 'NODE2'])
self.assertEqual(self.service.add_nodes_to_cluster.return_value, res)
self.service.add_nodes_to_cluster.assert_called_once_with(
'FAKE_ID', ['NODE1', 'NODE2'])
def test_cluster_del_nodes(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_del_nodes('FAKE_ID', ['NODE1', 'NODE2'])
self.assertEqual(self.service.remove_nodes_from_cluster.return_value,
res)
self.service.remove_nodes_from_cluster.assert_called_once_with(
'FAKE_ID', ['NODE1', 'NODE2'])
def test_cluster_resize(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_resize('FAKE_ID', foo='bar', zoo=1)
self.assertEqual(self.service.resize_cluster.return_value, res)
self.service.resize_cluster.assert_called_once_with(
'FAKE_ID', foo='bar', zoo=1)
def test_cluster_scale_in(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_scale_in('FAKE_ID', 3)
self.assertEqual(self.service.scale_in_cluster.return_value, res)
self.service.scale_in_cluster.assert_called_once_with(
'FAKE_ID', 3)
def test_cluster_scale_out(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_scale_out('FAKE_ID', 3)
self.assertEqual(self.service.scale_out_cluster.return_value, res)
self.service.scale_out_cluster.assert_called_once_with(
'FAKE_ID', 3)
def test_cluster_policies(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_policies('CLUSTER', foo='bar')
self.assertEqual(self.service.cluster_policies.return_value, res)
self.service.cluster_policies.assert_called_once_with(
'CLUSTER', foo='bar')
def test_get_cluster_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_cluster_policy('PID', 'CID')
self.assertEqual(self.service.get_cluster_policy.return_value, res)
self.service.get_cluster_policy.assert_called_once_with(
'PID', 'CID')
def test_cluster_attach_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_attach_policy('FOO', 'BAR', zoo='car')
self.assertEqual(self.service.attach_policy_to_cluster.return_value,
res)
self.service.attach_policy_to_cluster.assert_called_once_with(
'FOO', 'BAR', zoo='car')
def test_cluster_detach_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_detach_policy('FOO', 'BAR')
self.assertEqual(self.service.detach_policy_from_cluster.return_value,
res)
self.service.detach_policy_from_cluster.assert_called_once_with(
'FOO', 'BAR')
def test_cluster_update_policy(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.cluster_update_policy('FOO', 'BAR', foo='bar')
self.assertEqual(self.service.update_cluster_policy.return_value, res)
self.service.update_cluster_policy.assert_called_once_with(
'FOO', 'BAR', foo='bar')
def test_check_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.check_cluster('FAKE_CLUSTER_ID')
self.assertEqual(self.service.check_cluster.return_value, res)
self.service.check_cluster.assert_called_once_with('FAKE_CLUSTER_ID')
def test_recover_cluster(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.recover_cluster('FAKE_CLUSTER_ID')
self.assertEqual(self.service.recover_cluster.return_value, res)
self.service.recover_cluster.assert_called_once_with(
'FAKE_CLUSTER_ID')
def test_nodes(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.nodes(foo='bar')
self.assertEqual(self.service.nodes.return_value, res)
self.service.nodes.assert_called_once_with(foo='bar')
def test_get_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_node('FOOBAR')
self.assertEqual(self.service.get_node.return_value, res)
self.service.get_node.assert_called_once_with('FOOBAR', details=False)
def test_get_node_with_details(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_node('FOOBAR', details=True)
self.assertEqual(self.service.get_node.return_value, res)
self.service.get_node.assert_called_once_with(
'FOOBAR', details=True)
def test_create_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.create_node(name='FAKE_NAME', foo='bar')
self.assertEqual(self.service.create_node.return_value, res)
self.service.create_node.assert_called_once_with(
name='FAKE_NAME', foo='bar')
def test_update_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.update_node('FAKE_ID', foo='bar')
self.assertEqual(self.service.update_node.return_value, res)
self.service.update_node.assert_called_once_with(
'FAKE_ID', foo='bar')
def test_delete_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_node('FAKE_ID', True)
self.assertEqual(self.service.delete_node.return_value, res)
self.service.delete_node.assert_called_once_with(
'FAKE_ID', True, False)
def test_check_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.check_node('FAKE_ID')
self.assertEqual(self.service.check_node.return_value, res)
self.service.check_node.assert_called_once_with('FAKE_ID')
def test_recover_node(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.recover_node('FAKE_ID')
self.assertEqual(self.service.recover_node.return_value, res)
self.service.recover_node.assert_called_once_with(
'FAKE_ID')
def test_delete_node_ignore_missing(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_node('FAKE_ID', True, False)
self.assertEqual(self.service.delete_node.return_value, res)
self.service.delete_node.assert_called_once_with(
'FAKE_ID', True, False)
def test_receivers(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.receivers(foo='bar')
self.assertEqual(self.service.receivers.return_value, res)
self.service.receivers.assert_called_once_with(foo='bar')
def test_get_receiver(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_receiver('FOOBAR')
self.assertEqual(self.service.get_receiver.return_value, res)
self.service.get_receiver.assert_called_once_with('FOOBAR')
def test_create_receiver(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.create_receiver(name='FAKE_NAME', foo='bar')
self.assertEqual(self.service.create_receiver.return_value, res)
self.service.create_receiver.assert_called_once_with(
name='FAKE_NAME', foo='bar')
def test_delete_receiver(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_receiver('FAKE_ID')
self.assertEqual(self.service.delete_receiver.return_value, res)
self.service.delete_receiver.assert_called_once_with(
'FAKE_ID', True)
def test_delete_receiver_ignore_missing(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.delete_receiver('FAKE_ID', False)
self.assertEqual(self.service.delete_receiver.return_value, res)
self.service.delete_receiver.assert_called_once_with(
'FAKE_ID', False)
def test_actions(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.actions(foo='bar')
self.assertEqual(self.service.actions.return_value, res)
self.service.actions.assert_called_once_with(foo='bar')
def test_get_action(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_action('FOOBAR')
self.assertEqual(self.service.get_action.return_value, res)
self.service.get_action.assert_called_once_with('FOOBAR')
def test_events(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.events(foo='bar')
self.assertEqual(self.service.events.return_value, res)
self.service.events.assert_called_once_with(foo='bar')
def test_get_event(self, mock_conn):
mock_conn.return_value = self.conn
sc = client.Client()
res = sc.get_event('FOOBAR')
self.assertEqual(self.service.get_event.return_value, res)
self.service.get_event.assert_called_once_with('FOOBAR')
|
|
# (C) Datadog, Inc. 2010-2016
# (C) Luca Cipriani <luca@c9.io> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import time
# 3p
import psutil
# project
from checks import AgentCheck
from config import _is_affirmative
from utils.platform import Platform
DEFAULT_AD_CACHE_DURATION = 120
DEFAULT_PID_CACHE_DURATION = 120
ATTR_TO_METRIC = {
'thr': 'threads',
'cpu': 'cpu.pct',
'rss': 'mem.rss',
'vms': 'mem.vms',
'real': 'mem.real',
'open_fd': 'open_file_descriptors',
'open_handle': 'open_handles', # win32 only
    'r_count': 'ioread_count', # FIXME: namespace me correctly (6.x), io.r_count
    'w_count': 'iowrite_count', # FIXME: namespace me correctly (6.x), io.w_count
    'r_bytes': 'ioread_bytes', # FIXME: namespace me correctly (6.x), io.r_bytes
    'w_bytes': 'iowrite_bytes', # FIXME: namespace me correctly (6.x), io.w_bytes
'ctx_swtch_vol': 'voluntary_ctx_switches', # FIXME: namespace me correctly (6.x), ctx_swt.voluntary
'ctx_swtch_invol': 'involuntary_ctx_switches', # FIXME: namespace me correctly (6.x), ctx_swt.involuntary
'run_time': 'run_time',
'mem_pct': 'mem.pct'
}
ATTR_TO_METRIC_RATE = {
'minflt': 'mem.page_faults.minor_faults',
'cminflt': 'mem.page_faults.children_minor_faults',
'majflt': 'mem.page_faults.major_faults',
'cmajflt': 'mem.page_faults.children_major_faults'
}
class ProcessCheck(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# ad stands for access denied
# We cache the PIDs getting this error and don't iterate on them
# more often than `access_denied_cache_duration`
# This cache is for all PIDs so it's global, but it should
# be refreshed by instance
self.last_ad_cache_ts = {}
self.ad_cache = set()
self.access_denied_cache_duration = int(
init_config.get(
'access_denied_cache_duration',
DEFAULT_AD_CACHE_DURATION
)
)
# By default cache the PID list for a while
# Sometimes it's not wanted b/c it can mess with no-data monitoring
# This cache is indexed per instance
self.last_pid_cache_ts = {}
self.pid_cache = {}
self.pid_cache_duration = int(
init_config.get(
'pid_cache_duration',
DEFAULT_PID_CACHE_DURATION
)
)
self._conflicting_procfs = False
self._deprecated_init_procfs = False
if Platform.is_linux():
procfs_path = init_config.get('procfs_path')
if procfs_path:
if 'procfs_path' in agentConfig and procfs_path != agentConfig.get('procfs_path').rstrip('/'):
self._conflicting_procfs = True
else:
self._deprecated_init_procfs = True
psutil.PROCFS_PATH = procfs_path
# Process cache, indexed by instance
self.process_cache = defaultdict(dict)
def should_refresh_ad_cache(self, name):
now = time.time()
return now - self.last_ad_cache_ts.get(name, 0) > self.access_denied_cache_duration
def should_refresh_pid_cache(self, name):
now = time.time()
return now - self.last_pid_cache_ts.get(name, 0) > self.pid_cache_duration
def find_pids(self, name, search_string, exact_match, ignore_ad=True):
"""
        Create a set of pids for the selected processes, matching each
        process name or cmdline against the strings in search_string.
"""
if not self.should_refresh_pid_cache(name):
return self.pid_cache[name]
ad_error_logger = self.log.debug
if not ignore_ad:
ad_error_logger = self.log.error
refresh_ad_cache = self.should_refresh_ad_cache(name)
matching_pids = set()
for proc in psutil.process_iter():
# Skip access denied processes
if not refresh_ad_cache and proc.pid in self.ad_cache:
continue
found = False
for string in search_string:
try:
# FIXME 6.x: All has been deprecated from the doc, should be removed
if string == 'All':
found = True
if exact_match:
if proc.name() == string:
found = True
else:
cmdline = proc.cmdline()
if string in ' '.join(cmdline):
found = True
except psutil.NoSuchProcess:
self.log.warning('Process disappeared while scanning')
except psutil.AccessDenied as e:
ad_error_logger('Access denied to process with PID %s', proc.pid)
ad_error_logger('Error: %s', e)
if refresh_ad_cache:
self.ad_cache.add(proc.pid)
if not ignore_ad:
raise
else:
if refresh_ad_cache:
self.ad_cache.discard(proc.pid)
if found:
matching_pids.add(proc.pid)
break
self.pid_cache[name] = matching_pids
self.last_pid_cache_ts[name] = time.time()
if refresh_ad_cache:
self.last_ad_cache_ts[name] = time.time()
return matching_pids
def psutil_wrapper(self, process, method, accessors, *args, **kwargs):
"""
        A psutil wrapper that calls either
        * process.method(*args, **kwargs) and returns the result, or
        * process.method(*args, **kwargs).accessor for each accessor in the
          given list, returning the results in a dictionary indexed by
          accessor name.
"""
if accessors is None:
result = None
else:
result = {}
        # Ban certain methods that we know fail
if method == 'memory_info_ex'\
and (Platform.is_win32() or Platform.is_solaris()):
return result
elif method == 'num_fds' and not Platform.is_unix():
return result
elif method == 'num_handles' and not Platform.is_win32():
return result
try:
res = getattr(process, method)(*args, **kwargs)
if accessors is None:
result = res
else:
for acc in accessors:
try:
result[acc] = getattr(res, acc)
except AttributeError:
self.log.debug("psutil.%s().%s attribute does not exist", method, acc)
except (NotImplementedError, AttributeError):
self.log.debug("psutil method %s not implemented", method)
except psutil.AccessDenied:
self.log.debug("psutil was denied acccess for method %s", method)
except psutil.NoSuchProcess:
self.warning("Process {0} disappeared while scanning".format(process.pid))
return result
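    # Hedged usage sketch for psutil_wrapper() above (`process` is any
    # psutil.Process instance; the values shown are illustrative only):
    #
    #     self.psutil_wrapper(process, 'memory_percent', None)
    #     # -> 12.3
    #     self.psutil_wrapper(process, 'memory_info', ['rss', 'vms'])
    #     # -> {'rss': 104857600, 'vms': 524288000}
    #
    # Unsupported or denied calls are logged and the wrapper returns None
    # (or an empty/partial dict when accessors are given) instead of raising.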
def get_process_state(self, name, pids):
st = defaultdict(list)
# Remove from cache the processes that are not in `pids`
cached_pids = set(self.process_cache[name].keys())
pids_to_remove = cached_pids - pids
for pid in pids_to_remove:
del self.process_cache[name][pid]
for pid in pids:
st['pids'].append(pid)
new_process = False
# If the pid's process is not cached, retrieve it
if pid not in self.process_cache[name] or not self.process_cache[name][pid].is_running():
new_process = True
try:
self.process_cache[name][pid] = psutil.Process(pid)
self.log.debug('New process in cache: %s' % pid)
# Skip processes dead in the meantime
except psutil.NoSuchProcess:
self.warning('Process %s disappeared while scanning' % pid)
# reset the PID cache now, something changed
self.last_pid_cache_ts[name] = 0
continue
p = self.process_cache[name][pid]
meminfo = self.psutil_wrapper(p, 'memory_info', ['rss', 'vms'])
st['rss'].append(meminfo.get('rss'))
st['vms'].append(meminfo.get('vms'))
mem_percent = self.psutil_wrapper(p, 'memory_percent', None)
st['mem_pct'].append(mem_percent)
# will fail on win32 and solaris
shared_mem = self.psutil_wrapper(p, 'memory_info_ex', ['shared']).get('shared')
if shared_mem is not None and meminfo.get('rss') is not None:
st['real'].append(meminfo['rss'] - shared_mem)
else:
st['real'].append(None)
ctxinfo = self.psutil_wrapper(p, 'num_ctx_switches', ['voluntary', 'involuntary'])
st['ctx_swtch_vol'].append(ctxinfo.get('voluntary'))
st['ctx_swtch_invol'].append(ctxinfo.get('involuntary'))
st['thr'].append(self.psutil_wrapper(p, 'num_threads', None))
cpu_percent = self.psutil_wrapper(p, 'cpu_percent', None)
if not new_process:
# psutil returns `0.` for `cpu_percent` the first time it's sampled on a process,
# so save the value only on non-new processes
st['cpu'].append(cpu_percent)
st['open_fd'].append(self.psutil_wrapper(p, 'num_fds', None))
st['open_handle'].append(self.psutil_wrapper(p, 'num_handles', None))
ioinfo = self.psutil_wrapper(p, 'io_counters', ['read_count', 'write_count', 'read_bytes', 'write_bytes'])
st['r_count'].append(ioinfo.get('read_count'))
st['w_count'].append(ioinfo.get('write_count'))
st['r_bytes'].append(ioinfo.get('read_bytes'))
st['w_bytes'].append(ioinfo.get('write_bytes'))
pagefault_stats = self.get_pagefault_stats(pid)
if pagefault_stats is not None:
(minflt, cminflt, majflt, cmajflt) = pagefault_stats
st['minflt'].append(minflt)
st['cminflt'].append(cminflt)
st['majflt'].append(majflt)
st['cmajflt'].append(cmajflt)
else:
st['minflt'].append(None)
st['cminflt'].append(None)
st['majflt'].append(None)
st['cmajflt'].append(None)
#calculate process run time
create_time = self.psutil_wrapper(p, 'create_time', None)
if create_time is not None:
now = time.time()
run_time = now - create_time
st['run_time'].append(run_time)
return st
def get_pagefault_stats(self, pid):
if not Platform.is_linux():
return None
def file_to_string(path):
with open(path, 'r') as f:
res = f.read()
return res
# http://man7.org/linux/man-pages/man5/proc.5.html
try:
data = file_to_string('/%s/%s/stat' % (psutil.PROCFS_PATH, pid))
except Exception:
            self.log.debug('error getting proc stats: file_to_string failed '
                           'for /%s/%s/stat' % (psutil.PROCFS_PATH, pid))
return None
return map(lambda i: int(i), data.split()[9:13])
def check(self, instance):
name = instance.get('name', None)
tags = instance.get('tags', [])
exact_match = _is_affirmative(instance.get('exact_match', True))
search_string = instance.get('search_string', None)
ignore_ad = _is_affirmative(instance.get('ignore_denied_access', True))
pid = instance.get('pid')
pid_file = instance.get('pid_file', None)
if self._conflicting_procfs:
self.warning('The `procfs_path` defined in `process.yaml` is different from the one defined in '
'`datadog.conf`. This is currently not supported by the Agent. Defaulting to the '
'value defined in `datadog.conf`: {}'.format(psutil.PROCFS_PATH))
elif self._deprecated_init_procfs:
self.warning('DEPRECATION NOTICE: Specifying `procfs_path` in `process.yaml` is deprecated. '
'Please specify it in `datadog.conf` instead')
if not isinstance(search_string, list) and pid is None and pid_file is None:
raise ValueError('"search_string" or "pid" or "pid_file" parameter is required')
# FIXME 6.x remove me
if search_string is not None:
if "All" in search_string:
                self.warning('Deprecated: Having "All" in your search_string will '
'greatly reduce the performance of the check and '
'will be removed in a future version of the agent.')
if name is None:
raise KeyError('The "name" of process groups is mandatory')
if search_string is not None:
pids = self.find_pids(
name,
search_string,
exact_match,
ignore_ad=ignore_ad
)
elif pid is not None:
# we use Process(pid) as a means to search, if pid not found
# psutil.NoSuchProcess is raised.
pids = set([psutil.Process(pid).pid])
elif pid_file is not None:
with open(pid_file, 'r') as file_pid:
pid_line = file_pid.readline().strip()
pids = set([psutil.Process(int(pid_line)).pid])
else:
raise ValueError('The "search_string" or "pid" options are required for process identification')
proc_state = self.get_process_state(name, pids)
# FIXME 6.x remove the `name` tag
tags.extend(['process_name:%s' % name, name])
self.log.debug('ProcessCheck: process %s analysed', name)
self.gauge('system.processes.number', len(pids), tags=tags)
for attr, mname in ATTR_TO_METRIC.iteritems():
vals = [x for x in proc_state[attr] if x is not None]
# skip []
if vals:
if attr == 'run_time':
self.gauge('system.processes.%s.avg' % mname, sum(vals)/len(vals), tags=tags)
self.gauge('system.processes.%s.max' % mname, max(vals), tags=tags)
self.gauge('system.processes.%s.min' % mname, min(vals), tags=tags)
# FIXME 6.x: change this prefix?
else:
self.gauge('system.processes.%s' % mname, sum(vals), tags=tags)
for attr, mname in ATTR_TO_METRIC_RATE.iteritems():
vals = [x for x in proc_state[attr] if x is not None]
if vals:
self.rate('system.processes.%s' % mname, sum(vals), tags=tags)
self._process_service_check(name, len(pids), instance.get('thresholds', None))
def _process_service_check(self, name, nb_procs, bounds):
'''
        Report a service check for each process group in search_string:
        OK if the process count is within the warning thresholds,
        WARNING if it is outside the warning thresholds, and
        CRITICAL if it is outside the critical thresholds.
'''
tag = ["process:%s" % name]
status = AgentCheck.OK
message_str = "PROCS %s: %s processes found for %s"
status_str = {
AgentCheck.OK: "OK",
AgentCheck.WARNING: "WARNING",
AgentCheck.CRITICAL: "CRITICAL"
}
if not bounds and nb_procs < 1:
status = AgentCheck.CRITICAL
elif bounds:
warning = bounds.get('warning', [1, float('inf')])
critical = bounds.get('critical', [1, float('inf')])
if warning[1] < nb_procs or nb_procs < warning[0]:
status = AgentCheck.WARNING
if critical[1] < nb_procs or nb_procs < critical[0]:
status = AgentCheck.CRITICAL
self.service_check(
"process.up",
status,
tags=tag,
message=message_str % (status_str[status], nb_procs, name)
)
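# Hedged sketch of the threshold logic documented in _process_service_check()
# above; the warning/critical bounds are hypothetical and nothing from a
# running Agent is required.
if __name__ == '__main__':
    warning, critical = [1, 4], [1, 10]
    for nb_procs in (0, 3, 6, 12):
        status = 'OK'
        if warning[1] < nb_procs or nb_procs < warning[0]:
            status = 'WARNING'
        if critical[1] < nb_procs or nb_procs < critical[0]:
            status = 'CRITICAL'
        print '%2d processes -> %s' % (nb_procs, status)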
|
|
"""
These managers define helper methods for accessing the database from
Comm system components.
"""
from __future__ import print_function
from django.db.models import Q
from evennia.typeclasses.managers import (TypedObjectManager, TypeclassManager)
from evennia.utils import logger
_GA = object.__getattribute__
_AccountDB = None
_ObjectDB = None
_ChannelDB = None
_SESSIONS = None
# error class
class CommError(Exception):
"""
    Raised by the comm system to allow feedback to the player when caught.
"""
pass
#
# helper functions
#
def dbref(inp, reqhash=True):
"""
Valid forms of dbref (database reference number) are either a
string '#N' or an integer N.
Args:
inp (int or str): A possible dbref to check syntactically.
reqhash (bool): Require an initial hash `#` to accept.
Returns:
is_dbref (int or None): The dbref integer part if a valid
dbref, otherwise `None`.
"""
if reqhash and not (isinstance(inp, basestring) and inp.startswith("#")):
return None
if isinstance(inp, basestring):
inp = inp.lstrip('#')
try:
if int(inp) < 0:
return None
except Exception:
return None
return inp
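# Hedged usage sketch for dbref() above:
#
#     dbref("#12")               -> "12"
#     dbref("12")                -> None   (leading '#' required by default)
#     dbref(12, reqhash=False)   -> 12
#     dbref("-3", reqhash=False) -> None   (negative ids are rejected)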
def identify_object(inp):
"""
    Helper function. Identify whether an entity is an account, object,
    channel, string or dbref and return it together with its type identifier.
Args:
        inp (any): Entity to be identified.
Returns:
identified (tuple): This is a tuple with (`inp`, identifier)
where `identifier` is one of "account", "object", "channel",
"string", "dbref" or None.
"""
if hasattr(inp, "__dbclass__"):
clsname = inp.__dbclass__.__name__
if clsname == "AccountDB":
return inp, "account"
elif clsname == "ObjectDB":
return inp, "object"
elif clsname == "ChannelDB":
return inp, "channel"
if isinstance(inp, basestring):
return inp, "string"
elif dbref(inp):
return dbref(inp), "dbref"
else:
return inp, None
def to_object(inp, objtype='account'):
"""
Locates the object related to the given accountname or channel key.
If input was already the correct object, return it.
Args:
inp (any): The input object/string
objtype (str): Either 'account' or 'channel'.
Returns:
obj (object): The correct object related to `inp`.
"""
obj, typ = identify_object(inp)
if typ == objtype:
return obj
if objtype == 'account':
if typ == 'object':
return obj.account
if typ == 'string':
return _AccountDB.objects.get(user_username__iexact=obj)
if typ == 'dbref':
return _AccountDB.objects.get(id=obj)
logger.log_err("%s %s %s %s %s" % (objtype, inp, obj, typ, type(inp)))
raise CommError()
elif objtype == 'object':
if typ == 'account':
return obj.obj
if typ == 'string':
return _ObjectDB.objects.get(db_key__iexact=obj)
if typ == 'dbref':
return _ObjectDB.objects.get(id=obj)
logger.log_err("%s %s %s %s %s" % (objtype, inp, obj, typ, type(inp)))
raise CommError()
elif objtype == 'channel':
if typ == 'string':
return _ChannelDB.objects.get(db_key__iexact=obj)
if typ == 'dbref':
return _ChannelDB.objects.get(id=obj)
logger.log_err("%s %s %s %s %s" % (objtype, inp, obj, typ, type(inp)))
raise CommError()
# an unknown
return None
#
# Msg manager
#
class MsgManager(TypedObjectManager):
"""
This MsgManager implements methods for searching and manipulating
Messages directly from the database.
These methods will all return database objects (or QuerySets)
directly.
A Message represents one unit of communication, be it over a
Channel or via some form of in-game mail system. Like an e-mail,
it always has a sender and can have any number of receivers (some
of which may be Channels).
"""
def identify_object(self, inp):
"""
Wrapper to identify_object if accessing via the manager directly.
Args:
            inp (any): Entity to be identified.
Returns:
identified (tuple): This is a tuple with (`inp`, identifier)
where `identifier` is one of "account", "object", "channel",
"string", "dbref" or None.
"""
return identify_object(inp)
def get_message_by_id(self, idnum):
"""
Retrieve message by its id.
Args:
idnum (int or str): The dbref to retrieve.
Returns:
message (Msg): The message.
"""
try:
return self.get(id=self.dbref(idnum, reqhash=False))
except Exception:
return None
def get_messages_by_sender(self, sender, exclude_channel_messages=False):
"""
        Get all messages sent by one entity - this could be either an
        account or an object.
Args:
sender (Account or Object): The sender of the message.
exclude_channel_messages (bool, optional): Only return messages
not aimed at a channel (that is, private tells for example)
Returns:
messages (list): List of matching messages
Raises:
CommError: For incorrect sender types.
"""
obj, typ = identify_object(sender)
if exclude_channel_messages:
# explicitly exclude channel recipients
if typ == 'account':
return list(self.filter(db_sender_accounts=obj,
db_receivers_channels__isnull=True).exclude(db_hide_from_accounts=obj))
elif typ == 'object':
return list(self.filter(db_sender_objects=obj,
db_receivers_channels__isnull=True).exclude(db_hide_from_objects=obj))
else:
raise CommError
else:
# get everything, channel or not
if typ == 'account':
return list(self.filter(db_sender_accounts=obj).exclude(db_hide_from_accounts=obj))
elif typ == 'object':
return list(self.filter(db_sender_objects=obj).exclude(db_hide_from_objects=obj))
else:
raise CommError
def get_messages_by_receiver(self, recipient):
"""
Get all messages sent to one given recipient.
Args:
recipient (Object, Account or Channel): The recipient of the messages to search for.
Returns:
messages (list): Matching messages.
Raises:
CommError: If the `recipient` is not of a valid type.
"""
obj, typ = identify_object(recipient)
if typ == 'account':
return list(self.filter(db_receivers_accounts=obj).exclude(db_hide_from_accounts=obj))
elif typ == 'object':
return list(self.filter(db_receivers_objects=obj).exclude(db_hide_from_objects=obj))
elif typ == 'channel':
return list(self.filter(db_receivers_channels=obj).exclude(db_hide_from_channels=obj))
else:
raise CommError
def get_messages_by_channel(self, channel):
"""
Get all persistent messages sent to one channel.
Args:
channel (Channel): The channel to find messages for.
Returns:
messages (list): Persistent Msg objects saved for this channel.
"""
return self.filter(db_receivers_channels=channel).exclude(db_hide_from_channels=channel)
def search_message(self, sender=None, receiver=None, freetext=None, dbref=None):
"""
Search the message database for particular messages. At least
one of the arguments must be given to do a search.
Args:
sender (Object or Account, optional): Get messages sent by a particular account or object
receiver (Object, Account or Channel, optional): Get messages
                received by a certain account, object or channel.
freetext (str): Search for a text string in a message. NOTE:
This can potentially be slow, so make sure to supply one of
the other arguments to limit the search.
dbref (int): The exact database id of the message. This will override
all other search criteria since it's unique and
always gives only one match.
Returns:
messages (list or Msg): A list of message matches or a single match if `dbref` was given.
"""
# unique msg id
if dbref:
            msg = self.filter(id=dbref)
if msg:
return msg[0]
# We use Q objects to gradually build up the query - this way we only
# need to do one database lookup at the end rather than gradually
        # refining with multiple filters. Django Note: Q objects can be
# combined with & and | (=AND,OR). ~ negates the queryset
# filter by sender
sender, styp = identify_object(sender)
if styp == 'account':
sender_restrict = Q(db_sender_accounts=sender) & ~Q(db_hide_from_accounts=sender)
elif styp == 'object':
sender_restrict = Q(db_sender_objects=sender) & ~Q(db_hide_from_objects=sender)
else:
sender_restrict = Q()
# filter by receiver
receiver, rtyp = identify_object(receiver)
if rtyp == 'account':
receiver_restrict = Q(db_receivers_accounts=receiver) & ~Q(db_hide_from_accounts=receiver)
elif rtyp == 'object':
receiver_restrict = Q(db_receivers_objects=receiver) & ~Q(db_hide_from_objects=receiver)
elif rtyp == 'channel':
receiver_restrict = Q(db_receivers_channels=receiver) & ~Q(db_hide_from_channels=receiver)
else:
receiver_restrict = Q()
# filter by full text
if freetext:
fulltext_restrict = Q(db_header__icontains=freetext) | Q(db_message__icontains=freetext)
else:
fulltext_restrict = Q()
# execute the query
return list(self.filter(sender_restrict & receiver_restrict & fulltext_restrict))
# back-compatibility alias
message_search = search_message
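    # Hedged usage sketch for search_message() above (in Evennia this manager
    # is exposed as Msg.objects; `account` and `channel` are illustrative):
    #
    #     Msg.objects.search_message(sender=account)
    #     Msg.objects.search_message(receiver=channel, freetext="hello")
    #     Msg.objects.search_message(dbref=42)    # exact-id lookup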
#
# Channel manager
#
class ChannelDBManager(TypedObjectManager):
"""
This ChannelManager implements methods for searching and
manipulating Channels directly from the database.
These methods will all return database objects (or QuerySets)
directly.
    A Channel is an in-game venue for communication. It's essentially a
    representation of a re-sender: users send Messages to the
    Channel, and the Channel re-sends those messages to all users
subscribed to the Channel.
"""
def get_all_channels(self):
"""
Get all channels.
Returns:
channels (list): All channels in game.
"""
return self.all()
def get_channel(self, channelkey):
"""
Return the channel object if given its key.
Also searches its aliases.
Args:
channelkey (str): Channel key to search for.
Returns:
channel (Channel or None): A channel match.
"""
# first check the channel key
channels = self.filter(db_key__iexact=channelkey)
if not channels:
# also check aliases
channels = [channel for channel in self.all()
if channelkey in channel.aliases.all()]
if channels:
return channels[0]
return None
def get_subscriptions(self, subscriber):
"""
Return all channels a given entity is subscribed to.
Args:
subscriber (Object or Account): The one subscribing.
Returns:
            subscriptions (list): Channels subscribed to.
"""
clsname = subscriber.__dbclass__.__name__
if clsname == "AccountDB":
return subscriber.account_subscription_set.all()
if clsname == "ObjectDB":
return subscriber.object_subscription_set.all()
return []
def search_channel(self, ostring, exact=True):
"""
Search the channel database for a particular channel.
Args:
ostring (str): The key or database id of the channel.
exact (bool, optional): Require an exact (but not
case sensitive) match.
"""
channels = []
if not ostring:
return channels
try:
# try an id match first
dbref = int(ostring.strip('#'))
channels = self.filter(id=dbref)
except Exception:
# Usually because we couldn't convert to int - not a dbref
pass
if not channels:
# no id match. Search on the key.
if exact:
channels = self.filter(db_key__iexact=ostring)
else:
channels = self.filter(db_key__icontains=ostring)
if not channels:
# still no match. Search by alias.
channels = [channel for channel in self.all()
                        if ostring.lower() in [a.lower() for a in channel.aliases.all()]]
return channels
# back-compatibility alias
channel_search = search_channel
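    # Hedged usage sketch for search_channel() above (exposed as
    # ChannelDB.objects in Evennia; the keys are illustrative):
    #
    #     ChannelDB.objects.search_channel("public")            # exact key match
    #     ChannelDB.objects.search_channel("pub", exact=False)  # substring match
    #     ChannelDB.objects.search_channel("#3")                # id match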
class ChannelManager(ChannelDBManager, TypeclassManager):
"""
Wrapper to group the typeclass manager to a consistent name.
"""
pass
|
|
from django.conf import settings
from django.http import *
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.contrib.auth import logout
from django.core.context_processors import csrf
from django.template import RequestContext
from profile.models import *
from posts.models import *
from utils import *
import math
import tweepy
def index(request):
"""
main view of app, either public page or dashboard
"""
    # if already authorised, go to the dashboard; otherwise show the public page
if check_key(request):
return HttpResponseRedirect(reverse('dashboard'))
else:
return HttpResponseRedirect('/public')
def unauth(request):
"""
logout and remove all session data
"""
if check_key(request):
api = get_api(request)
request.session.clear()
logout(request)
return HttpResponseRedirect(reverse('index'))
def yours(request, page=1):
"""
your posts
"""
if check_key(request):
user = get_api(request)
if request.GET.get('page'):
page = int(request.GET.get('page'))
else:
page = 1
post_item = Post.my_posts(user,user,page)
posts = post_item[0]
total_posts = post_item[1]
next_page = page + 1
prev_page = page - 1
post_count = math.floor(len(posts) / PAGE_LIMIT)
return render_to_response('profile/dashboard.html', {
'posts' : posts,
'user' : user,
'title' : "Your Posts",
'next_page' : next_page,
'prev_page' : prev_page,
'post_count': post_count,
'total_posts' : total_posts,
}, context_instance=RequestContext(request))
else:
return HttpResponseRedirect(reverse('index'))
def dashboard(request, page=1):
"""
    dashboard of posts from followed users
"""
if check_key(request):
user = get_api(request)
if request.GET.get('page'):
page = int(request.GET.get('page'))
else:
page = 1
post_item = Post.dashboard_posts(user,page)
posts = post_item[0]
total_posts = post_item[1]
next_page = page + 1
prev_page = page - 1
post_count = math.floor(len(posts) / PAGE_LIMIT)
return render_to_response('profile/dashboard.html', {
'posts' : posts,
'user' : user,
'title' : 'Dashboard',
'next_page' : next_page,
'prev_page' : prev_page,
'post_count': post_count,
'total_posts' : total_posts,
}, context_instance=RequestContext(request))
else:
return HttpResponseRedirect(reverse('index'))
def user_view(request, user_id, page=1):
"""
display user posts
"""
public_user = Profile.objects(id=user_id).first()
if check_key(request):
user = get_api(request)
else:
user = None
post_item = Post.my_posts(public_user,user,page)
posts = post_item[0]
total_posts = post_item[1]
next_page = page + 1
prev_page = page - 1
post_count = math.floor(len(posts) / PAGE_LIMIT)
return render_to_response('profile/user_view.html', {
'posts' : posts,
'public_user' : public_user,
'user' : user,
'next_page' : next_page,
'prev_page' : prev_page,
'post_count': post_count,
'total_posts' : total_posts,
}, context_instance=RequestContext(request))
def follow(request, user_id):
"""
follow a user
"""
if check_key(request):
user = get_api(request)
follow_user = Profile.objects(id=user_id).first()
user.follow(follow_user)
return HttpResponseRedirect(reverse('community'))
else:
return HttpResponseRedirect(reverse('index'))
def unfollow(request, user_id):
"""
unfollow a user
"""
if check_key(request):
user = get_api(request)
unfollow_user = Profile.objects(id=user_id).first()
user.unfollow(unfollow_user)
return HttpResponseRedirect(reverse('community'))
else:
return HttpResponseRedirect(reverse('index'))
def community(request):
"""
display users
"""
users = Profile.objects
if check_key(request):
user = get_api(request)
else:
user = None
return render_to_response('profile/community.html', {
'users' : users,
'user' : user,
}, context_instance=RequestContext(request))
def auth(request):
# start the OAuth process, set up a handler with our details
oauth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
# direct the user to the authentication url
auth_url = oauth.get_authorization_url()
response = HttpResponseRedirect(auth_url)
# store the request token
request.session['unauthed_token_tw'] = (oauth.request_token.key, oauth.request_token.secret)
return response
def callback(request):
verifier = request.GET.get('oauth_verifier')
oauth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
token = request.session.get('unauthed_token_tw', None)
# remove the request token now we don't need it
    request.session.pop('unauthed_token_tw', None)
oauth.set_request_token(token[0], token[1])
# get the access token and store
    try:
        oauth.get_access_token(verifier)
    except tweepy.TweepError:
        print 'Error, failed to get access token'
        return HttpResponseRedirect(reverse('index'))
    # check if user already exists, and load up their info, else set them up in the db
    request.session['access_key_tw'] = oauth.access_token.key
    request.session['access_secret_tw'] = oauth.access_token.secret
    response = HttpResponseRedirect(reverse('dashboard'))
    return response
def check_key(request):
"""
    Check whether an access key or user profile is already stored in the
    session; if so, the user has already been through the OAuth flow.
"""
try:
if not request.session.get('profile', None):
if not request.session.get('access_key_tw', None):
return False
except KeyError:
return False
return True
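# Hedged sketch of how a view in this module combines check_key() and
# get_api() (get_api comes from utils above; its exact behaviour is assumed):
#
#     def example_view(request):
#         if not check_key(request):
#             return HttpResponseRedirect(reverse('index'))
#         user = get_api(request)
#         return render_to_response('profile/dashboard.html', {'user': user},
#                                   context_instance=RequestContext(request))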
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import weakref
from .. import gloo
from .. import app
from .node import Node
from ..visuals.transforms import STTransform, TransformCache
from ..color import Color
from ..util import logger
from ..util.profiler import Profiler
from .subscene import SubScene
from .events import SceneDrawEvent, SceneMouseEvent
from .widgets import Widget
class SceneCanvas(app.Canvas):
"""A Canvas that automatically draws the contents of a scene
Parameters
----------
title : str
The widget title
size : (width, height)
The size of the window.
position : (x, y)
The position of the window in screen coordinates.
show : bool
Whether to show the widget immediately. Default False.
autoswap : bool
Whether to swap the buffers automatically after a draw event.
Default True. If True, the ``swap_buffers`` Canvas method will
be called last (by default) by the ``canvas.draw`` event handler.
app : Application | str
Give vispy Application instance to use as a backend.
(vispy.app is used by default.) If str, then an application
using the chosen backend (e.g., 'pyglet') will be created.
Note the canvas application can be accessed at ``canvas.app``.
create_native : bool
Whether to create the widget immediately. Default True.
vsync : bool
Enable vertical synchronization.
resizable : bool
Allow the window to be resized.
decorate : bool
Decorate the window. Default True.
fullscreen : bool | int
If False, windowed mode is used (default). If True, the default
monitor is used. If int, the given monitor number is used.
config : dict
A dict with OpenGL configuration options, which is combined
with the default configuration options and used to initialize
the context. See ``canvas.context.config`` for possible
options.
shared : Canvas | GLContext | None
An existing canvas or context to share OpenGL objects with.
keys : str | dict | None
Default key mapping to use. If 'interactive', escape and F11 will
close the canvas and toggle full-screen mode, respectively.
If dict, maps keys to functions. If dict values are strings,
they are assumed to be ``Canvas`` methods, otherwise they should
be callable.
parent : widget-object
The parent widget if this makes sense for the used backend.
dpi : float | None
Resolution in dots-per-inch to use for the canvas. If dpi is None,
then the value will be determined by querying the global config first,
and then the operating system.
always_on_top : bool
If True, try to create the window in always-on-top mode.
px_scale : int > 0
A scale factor to apply between logical and physical pixels in addition
to the actual scale factor determined by the backend. This option
allows the scale factor to be adjusted for testing.
bgcolor : Color
The background color to use.
See also
--------
vispy.app.Canvas
Notes
-----
Receives the following events:
* initialize
* resize
* draw
* mouse_press
* mouse_release
* mouse_double_click
* mouse_move
* mouse_wheel
* key_press
* key_release
* stylus
* touch
* close
The ordering of the mouse_double_click, mouse_press, and mouse_release
events are not guaranteed to be consistent between backends. Only certain
backends natively support double-clicking (currently Qt and WX); on other
backends, they are detected manually with a fixed time delay.
This can cause problems with accessibility, as increasing the OS detection
time or using a dedicated double-click button will not be respected.
"""
def __init__(self, title='Vispy canvas', size=(800, 600), position=None,
show=False, autoswap=True, app=None, create_native=True,
vsync=False, resizable=True, decorate=True, fullscreen=False,
config=None, shared=None, keys=None, parent=None, dpi=None,
always_on_top=False, px_scale=1, bgcolor='black'):
self._fb_stack = [] # for storing information about framebuffers used
self._vp_stack = [] # for storing information about viewports used
self._scene = None
# A default widget that follows the shape of the canvas
self._central_widget = None
self._bgcolor = Color(bgcolor).rgba
super(SceneCanvas, self).__init__(
title, size, position, show, autoswap, app, create_native, vsync,
resizable, decorate, fullscreen, config, shared, keys, parent, dpi,
always_on_top, px_scale)
self.events.mouse_press.connect(self._process_mouse_event)
self.events.mouse_move.connect(self._process_mouse_event)
self.events.mouse_release.connect(self._process_mouse_event)
self.events.mouse_wheel.connect(self._process_mouse_event)
# Collection of transform caches; one for each root visual used in
# self.draw_visual(...)
self._transform_caches = weakref.WeakKeyDictionary()
# Set up default node stack: ndc -> fb -> canvas -> scene
self.render_cs = Node(name="render_cs")
self.framebuffer_cs = Node(parent=self.render_cs,
name="framebuffer_cs")
self.framebuffer_cs.transform = STTransform()
self.canvas_cs = Node(parent=self.framebuffer_cs,
name="canvas_cs")
self.canvas_cs.transform = STTransform()
# By default, the document coordinate system is the canvas.
self.canvas_cs.document = self.canvas_cs
self.scene = SubScene(parent=self.canvas_cs)
@property
def scene(self):
""" The SubScene object that represents the root node of the
scene graph to be displayed.
"""
return self._scene
@scene.setter
def scene(self, e):
if self._scene is not None:
self._scene.events.update.disconnect(self._scene_update)
self._scene = e
self._scene.events.update.connect(self._scene_update)
@property
def central_widget(self):
""" Returns the default widget that occupies the entire area of the
canvas.
"""
if self._central_widget is None:
self._central_widget = Widget(size=self.size, parent=self.scene)
return self._central_widget
def _scene_update(self, event):
self.update()
def on_draw(self, event):
"""Draw handler
Parameters
----------
event : instance of Event
The draw event.
"""
if self._scene is None:
return # Can happen on initialization
logger.debug('Canvas draw')
self._draw_scene()
def render(self, region=None, size=None):
""" Render the scene to an offscreen buffer and return the image array.
Parameters
----------
region : tuple | None
Specifies the region of the canvas to render. Format is
(x, y, w, h). By default, the entire canvas is rendered.
size : tuple | None
Specifies the size of the image array to return. If no size is
given, then the size of the *region* is used. This argument allows
the scene to be rendered at resolutions different from the native
canvas resolution.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
"""
# Set up a framebuffer to render to
offset = (0, 0) if region is None else region[:2]
csize = self.size if region is None else region[2:]
size = csize if size is None else size
fbo = gloo.FrameBuffer(color=gloo.RenderBuffer(size[::-1]),
depth=gloo.RenderBuffer(size[::-1]))
self.push_fbo(fbo, offset, csize)
try:
self._draw_scene(viewport=(0, 0) + size)
return fbo.read()
finally:
self.pop_fbo()
def _draw_scene(self, viewport=None):
self.context.clear(color=self._bgcolor, depth=True)
# Draw the scene, but first disconnect its change signal--
# any changes that take place during the paint should not trigger
# a subsequent repaint.
with self.scene.events.update.blocker(self._scene_update):
self.draw_visual(self.scene, viewport=viewport)
def draw_visual(self, visual, event=None, viewport=None):
""" Draw a visual to the canvas or currently active framebuffer.
Parameters
----------
visual : Visual
The visual to draw
event : None or DrawEvent
Optionally specifies the original canvas draw event that initiated
this draw.
viewport : tuple | None
Optionally specifies the viewport to use. If None, the entire
physical size is used.
"""
self.set_current()
prof = Profiler()
nfb = len(self._fb_stack)
nvp = len(self._vp_stack)
# Create draw event, which keeps track of the path of transforms
self._process_node_count = 0 # for debugging
# Get the cache of transforms used for this visual
tr_cache = self._transform_caches.setdefault(visual, TransformCache())
# and mark the entire cache as aged
tr_cache.roll()
prof('roll transform cache')
scene_event = SceneDrawEvent(canvas=self, event=event,
transform_cache=tr_cache)
prof('create SceneDrawEvent')
vp = (0, 0) + self.physical_size if viewport is None else viewport
scene_event.push_viewport(vp)
prof('push_viewport')
try:
# Force update of transforms on base entities
# TODO: this should happen as a reaction to resize, push_viewport,
# etc.; not here. (but note the transforms must change
# following push_viewport)
self.fb_ndc_transform
self.canvas_fb_transform
scene_event.push_node(self.render_cs)
scene_event.push_node(self.framebuffer_cs)
scene_event.push_node(self.canvas_cs)
scene_event.push_node(visual)
prof('initialize event scenegraph')
visual.draw(scene_event)
prof('draw scene')
finally:
scene_event.pop_viewport()
if len(self._vp_stack) > nvp:
logger.warning("Viewport stack not fully cleared after draw.")
if len(self._fb_stack) > nfb:
logger.warning("Framebuffer stack not fully cleared after draw.")
def _process_mouse_event(self, event):
prof = Profiler()
tr_cache = self._transform_caches.setdefault(self.scene,
TransformCache())
scene_event = SceneMouseEvent(canvas=self, event=event,
transform_cache=tr_cache)
scene_event.push_node(self.render_cs)
scene_event.push_node(self.framebuffer_cs)
scene_event.push_node(self.canvas_cs)
scene_event.push_node(self._scene)
prof('prepare mouse event')
self._scene._process_mouse_event(scene_event)
prof('process')
# If something in the scene handled the scene_event, then we mark
# the original event accordingly.
event.handled = scene_event.handled
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
if self._central_widget is not None:
self._central_widget.size = self.size
# -------------------------------------------------- transform handling ---
def push_viewport(self, viewport):
""" Push a viewport on the stack
It is the responsibility of the caller to ensure the given values are
int. The viewport's origin is defined relative to the current
viewport.
Parameters
----------
viewport : tuple
The viewport as (x, y, w, h).
"""
vp = list(viewport)
# Normalize viewport before setting;
if vp[2] < 0:
vp[0] += vp[2]
vp[2] *= -1
if vp[3] < 0:
vp[1] += vp[3]
vp[3] *= -1
self._vp_stack.append(vp)
self.fb_ndc_transform # update!
# Apply
try:
self._set_viewport(vp)
except:
self._vp_stack.pop()
self.fb_ndc_transform # update!
raise
def pop_viewport(self):
""" Pop a viewport from the stack.
"""
vp = self._vp_stack.pop()
# Activate latest
if len(self._vp_stack) > 0:
self._set_viewport(self._vp_stack[-1])
self.fb_ndc_transform # update!
return vp
def _set_viewport(self, vp):
self.context.set_viewport(*vp)
def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack, together with the new viewport.
and the transform to the FBO.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer.
offset : tuple
The offset.
csize : tuple
The size to use.
"""
self._fb_stack.append((fbo, offset, csize))
self.canvas_fb_transform # update!
# Apply
try:
fbo.activate()
h, w = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h))
except Exception:
self._fb_stack.pop()
raise
def pop_fbo(self):
""" Pop an FBO from the stack.
"""
fbo = self._fb_stack.pop()
fbo[0].deactivate()
self.pop_viewport()
if len(self._fb_stack) > 0:
old_fbo = self._fb_stack[-1]
old_fbo[0].activate()
self.canvas_fb_transform # update!
return fbo
def _current_framebuffer(self):
""" Return (fbo, origin, canvas_size) for the current
FBO on the stack, or for the canvas if there is no FBO.
"""
if len(self._fb_stack) == 0:
return None, (0, 0), self.size
else:
return self._fb_stack[-1]
@property
def canvas_fb_transform(self):
""" The transform that maps from the canvas coordinate system to the
current framebuffer coordinate system.
The framebuffer coordinate
system is used for antialiasing calculations, and is also the
system used when specifying coordinates for glViewport
(or gloo.set_viewport). Its origin is in the lower-left corner (as
opposed to the document / canvas coordinate system, which has its
origin in the upper-left corner).
Often the canvas and framebuffer coordinate systems are identical.
However, some systems with high-resolution
displays may use framebuffers with higher resolution than the reported
size of the canvas. Likewise, when rendering to an FBO, the resolution
and offset of the framebuffer may not match the canvas.
"""
fbo, offset, csize = self._current_framebuffer()
if fbo is None:
fbsize = self.physical_size
else:
fbsize = fbo.color_buffer.shape
# image shape is (rows, cols), unlike canvas shape.
fbsize = fbsize[1], fbsize[0]
map_from = [list(offset), [offset[0] + csize[0], offset[1] + csize[1]]]
map_to = [[0, fbsize[1]], [fbsize[0], 0]]
self.canvas_cs.transform.set_mapping(map_from, map_to)
return self.canvas_cs.transform
@property
def fb_ndc_transform(self):
""" The transform that maps from the framebuffer coordinate system to
normalized device coordinates (which is the obligatory output
coordinate system for all vertex shaders). This transform accounts for
the current glViewport.
"""
offset, csize, fbsize = self._current_framebuffer()
x, y, w, h = self._vp_stack[-1]
map_from = [[x, y], [x+w, y+h]]
map_to = [[-1, -1], [1, 1]]
self.framebuffer_cs.transform.set_mapping(map_from, map_to)
return self.framebuffer_cs.transform
@property
def render_transform(self):
""" The transform that maps from the Canvas pixel coordinate system
<(0, 0) at top-left, (w, h) at bottom-right> to normalized device
coordinates within the current glViewport and FBO.
Most visuals should use this transform when drawing.
"""
return self.fb_ndc_transform * self.canvas_fb_transform
@property
def bgcolor(self):
return Color(self._bgcolor)
@bgcolor.setter
def bgcolor(self, color):
self._bgcolor = Color(color).rgba
if hasattr(self, '_backend'):
self.update()
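# Hedged usage sketch (assumes a working vispy backend is installed): render
# the empty scene offscreen and inspect the returned image, as described in
# SceneCanvas.render() above.
if __name__ == '__main__':
    canvas = SceneCanvas(size=(400, 300), bgcolor='white', show=False)
    canvas.central_widget           # instantiate the default root widget
    img = canvas.render()           # ubyte array of shape (300, 400, 4)
    print(img.shape)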
|
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for :mod:`datetime`."""
import calendar
import datetime
import re
import pytz
_UTC_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
_RFC3339_MICROS = '%Y-%m-%dT%H:%M:%S.%fZ'
_RFC3339_NO_FRACTION = '%Y-%m-%dT%H:%M:%S'
# datetime.strptime cannot handle nanosecond precision: parse w/ regex
_RFC3339_NANOS = re.compile(r"""
(?P<no_fraction>
\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2} # YYYY-MM-DDTHH:MM:SS
)
( # Optional decimal part
\. # decimal point
(?P<nanos>\d{1,9}) # nanoseconds, maybe truncated
)?
Z # Zulu
""", re.VERBOSE)
def utcnow():
"""A :meth:`datetime.datetime.utcnow()` alias to allow mocking in tests."""
return datetime.datetime.utcnow()
def to_milliseconds(value):
"""Convert a zone-aware datetime to milliseconds since the unix epoch.
Args:
        value (datetime.datetime): The datetime to convert.
Returns:
int: Milliseconds since the unix epoch.
"""
micros = to_microseconds(value)
return micros // 1000
def from_microseconds(value):
"""Convert timestamp in microseconds since the unix epoch to datetime.
Args:
value (float): The timestamp to convert, in microseconds.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
"""
return _UTC_EPOCH + datetime.timedelta(microseconds=value)
def to_microseconds(value):
"""Convert a datetime to microseconds since the unix epoch.
Args:
        value (datetime.datetime): The datetime to convert.
Returns:
int: Microseconds since the unix epoch.
"""
if not value.tzinfo:
value = value.replace(tzinfo=pytz.utc)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(pytz.utc)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
def from_iso8601_date(value):
"""Convert a ISO8601 date string to a date.
Args:
value (str): The ISO8601 date string.
Returns:
datetime.date: A date equivalent to the date string.
"""
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
def from_iso8601_time(value):
"""Convert a zoneless ISO8601 time string to a time.
Args:
value (str): The ISO8601 time string.
Returns:
datetime.time: A time equivalent to the time string.
"""
return datetime.datetime.strptime(value, '%H:%M:%S').time()
def from_rfc3339(value):
"""Convert a microsecond-precision timestamp to datetime.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
"""
return datetime.datetime.strptime(
value, _RFC3339_MICROS).replace(tzinfo=pytz.utc)
def from_rfc3339_nanos(value):
"""Convert a nanosecond-precision timestamp to a native datetime.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
Raises:
ValueError: If the timestamp does not match the RFC 3339
regular expression.
"""
with_nanos = _RFC3339_NANOS.match(value)
if with_nanos is None:
raise ValueError(
'Timestamp: {!r}, does not match pattern: {!r}'.format(
value, _RFC3339_NANOS.pattern))
bare_seconds = datetime.datetime.strptime(
with_nanos.group('no_fraction'), _RFC3339_NO_FRACTION)
fraction = with_nanos.group('nanos')
if fraction is None:
micros = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000
return bare_seconds.replace(microsecond=micros, tzinfo=pytz.utc)
def to_rfc3339(value, ignore_zone=True):
"""Convert a datetime to an RFC3339 timestamp string.
Args:
value (datetime.datetime):
The datetime object to be converted to a string.
ignore_zone (bool): If True, then the timezone (if any) of the
datetime object is ignored and the datetime is treated as UTC.
Returns:
        str: The RFC3339 formatted string representing the datetime.
"""
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS)
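# Illustrative sketch (hypothetical helper, not part of the original module):
# round-tripping a microsecond-precision timestamp through the helpers above.
def _example_rfc3339_roundtrip():
    stamp = from_rfc3339('2017-01-01T12:30:45.123456Z')
    assert to_milliseconds(stamp) == 1483273845123
    assert to_rfc3339(stamp) == '2017-01-01T12:30:45.123456Z'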
class DatetimeWithNanoseconds(datetime.datetime):
"""Track nanosecond in addition to normal datetime attrs.
Nanosecond can be passed only as a keyword argument.
"""
__slots__ = ('_nanosecond',)
# pylint: disable=arguments-differ
def __new__(cls, *args, **kw):
nanos = kw.pop('nanosecond', 0)
if nanos > 0:
if 'microsecond' in kw:
raise TypeError(
"Specify only one of 'microsecond' or 'nanosecond'")
kw['microsecond'] = nanos // 1000
inst = datetime.datetime.__new__(cls, *args, **kw)
inst._nanosecond = nanos or 0
return inst
# pylint: disable=arguments-differ
@property
def nanosecond(self):
"""Read-only: nanosecond precision."""
return self._nanosecond
def rfc3339(self):
"""Return an RFC 3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC 3339 spec.
"""
if self._nanosecond == 0:
return to_rfc3339(self)
        # Left-pad to nine digits so that e.g. 1500ns renders as '.0000015',
        # not '.15'; trailing zeros are then stripped as before.
        nanos = str(self._nanosecond).rjust(9, '0').rstrip('0')
return '{}.{}Z'.format(self.strftime(_RFC3339_NO_FRACTION), nanos)
@classmethod
def from_rfc3339(cls, stamp):
"""Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (str): RFC 3339 stamp, with up to nanosecond precision
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp string
Raises:
ValueError: if `stamp` does not match the expected format
"""
with_nanos = _RFC3339_NANOS.match(stamp)
if with_nanos is None:
raise ValueError(
'Timestamp: {}, does not match pattern: {}'.format(
stamp, _RFC3339_NANOS.pattern))
bare = datetime.datetime.strptime(
with_nanos.group('no_fraction'), _RFC3339_NO_FRACTION)
fraction = with_nanos.group('nanos')
if fraction is None:
nanos = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
return cls(bare.year, bare.month, bare.day,
bare.hour, bare.minute, bare.second,
nanosecond=nanos, tzinfo=pytz.UTC)
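# Illustrative sketch (hypothetical helper, not part of the original module):
# nanosecond precision is preserved by the class above even though the
# underlying datetime only stores microseconds.
def _example_nanosecond_roundtrip():
    stamp = DatetimeWithNanoseconds.from_rfc3339('2017-01-01T12:30:45.123456789Z')
    assert stamp.nanosecond == 123456789
    assert stamp.microsecond == 123456   # truncated microsecond view
    assert stamp.rfc3339() == '2017-01-01T12:30:45.123456789Z'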
|
|
from __future__ import annotations
import numbers
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas._typing import (
Dtype,
NpDtype,
Scalar,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.missing import isna
from pandas.core import (
nanops,
ops,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.strings.object_array import ObjectStringArrayMixin
class PandasArray(
OpsMixin,
NDArrayBackedExtensionArray,
NDArrayOperatorsMixin,
ObjectStringArrayMixin,
):
"""
A pandas ExtensionArray for NumPy data.
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Attributes
----------
None
Methods
-------
None
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
# that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
_ndarray: np.ndarray
_dtype: PandasDtype
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values: np.ndarray | PandasArray, copy: bool = False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"'values' must be a NumPy array, not {type(values).__name__}"
)
if values.ndim == 0:
# Technically we support 2, but do not advertise that fact.
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
values = values.copy()
dtype = PandasDtype(values.dtype)
super().__init__(values, dtype)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> PandasArray:
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
# error: Argument "dtype" to "asarray" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object],
# None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type]
if (
result.ndim > 1
and not hasattr(scalars, "dtype")
and (dtype is None or dtype == object)
):
# e.g. list-of-tuples
result = construct_1d_object_array_from_listlike(scalars)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original) -> PandasArray:
return cls(values)
def _from_backing_data(self, arr: np.ndarray) -> PandasArray:
return type(self)(arr)
# ------------------------------------------------------------------------
# Data
@property
def dtype(self) -> PandasDtype:
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# Lightly modified version of
# https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get("out", ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use PandasArray instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
if ufunc not in [np.logical_or, np.bitwise_or, np.bitwise_xor]:
# For binary ops, use our custom dunder methods
# We haven't implemented logical dunder funcs, so exclude these
# to avoid RecursionError
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._ndarray if isinstance(x, PandasArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == "at":
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def isna(self) -> np.ndarray:
return isna(self._ndarray)
def _validate_scalar(self, fill_value):
if fill_value is None:
# Primarily for subclasses
fill_value = self.dtype.na_value
return fill_value
def _values_for_factorize(self) -> tuple[np.ndarray, int]:
return self._ndarray, -1
# ------------------------------------------------------------------------
# Reductions
def any(
self,
*,
axis: int | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_any((), {"out": out, "keepdims": keepdims})
result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def all(
self,
*,
axis: int | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_all((), {"out": out, "keepdims": keepdims})
result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def min(self, *, axis: int | None = None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_min((), kwargs)
result = nanops.nanmin(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def max(self, *, axis: int | None = None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_max((), kwargs)
result = nanops.nanmax(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def sum(
self, *, axis: int | None = None, skipna: bool = True, min_count=0, **kwargs
) -> Scalar:
nv.validate_sum((), kwargs)
result = nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def prod(
self, *, axis: int | None = None, skipna: bool = True, min_count=0, **kwargs
) -> Scalar:
nv.validate_prod((), kwargs)
result = nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def mean(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def median(
self,
*,
axis: int | None = None,
out=None,
overwrite_input: bool = False,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_median(
(), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
)
result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def std(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
ddof=1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
)
result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def var(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
ddof=1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
)
result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def sem(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
ddof=1,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
)
result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def kurt(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
)
result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def skew(
self,
*,
axis: int | None = None,
dtype: NpDtype | None = None,
out=None,
keepdims: bool = False,
skipna: bool = True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
)
result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
result = np.asarray(self._ndarray, dtype=dtype)
if (copy or na_value is not lib.no_default) and result is self._ndarray:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
# ------------------------------------------------------------------------
# Ops
def __invert__(self) -> PandasArray:
return type(self)(~self._ndarray)
def _cmp_method(self, other, op):
if isinstance(other, PandasArray):
other = other._ndarray
other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
pd_op = ops.get_array_op(op)
other = ensure_wrapped_if_datetimelike(other)
with np.errstate(all="ignore"):
result = pd_op(self._ndarray, other)
if op is divmod or op is ops.rdivmod:
a, b = result
if isinstance(a, np.ndarray):
# for e.g. op vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
return a, b
if isinstance(result, np.ndarray):
# for e.g. multiplication vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
return self._wrap_ndarray_result(result)
return result
_arith_method = _cmp_method
def _wrap_ndarray_result(self, result: np.ndarray):
# If we have timedelta64[ns] result, return a TimedeltaArray instead
# of a PandasArray
if result.dtype == "timedelta64[ns]":
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray._simple_new(result)
return type(self)(result)
# ------------------------------------------------------------------------
# String methods interface
_str_na_value = np.nan
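# Illustrative sketch (hypothetical helper, not part of the original module):
# wrapping a 1-D ndarray and exercising the reduction and ufunc interfaces
# defined above.
def _example_pandas_array_usage():
    arr = PandasArray(np.array([1.0, 2.0, np.nan]))
    assert arr.isna().tolist() == [False, False, True]
    assert arr.sum() == 3.0                # skipna=True by default (nanops.nansum)
    boxed = np.add(arr, 1.0)               # dispatched through __array_ufunc__
    assert isinstance(boxed, PandasArray)  # array-like results are re-boxed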
|
|
from __future__ import absolute_import, print_function
import cython
from .. import __version__
import re, os, sys, time
from glob import iglob
try:
import gzip
gzip_open = gzip.open
gzip_ext = '.gz'
except ImportError:
gzip_open = open
gzip_ext = ''
import shutil
import subprocess
try:
import hashlib
except ImportError:
import md5 as hashlib
try:
from io import open as io_open
except ImportError:
from codecs import open as io_open
try:
from os.path import relpath as _relpath
except ImportError:
# Py<2.6
def _relpath(path, start=os.path.curdir):
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
from distutils.extension import Extension
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
safe_makedirs, copy_file_to_dir_if_newer, is_package_dir)
from ..Compiler.Main import Context, CompilationOptions, default_options
join_path = cached_function(os.path.join)
copy_once_if_newer = cached_function(copy_file_to_dir_if_newer)
safe_makedirs_once = cached_function(safe_makedirs)
if sys.version_info[0] < 3:
# stupid Py2 distutils enforces str type in list of sources
_fs_encoding = sys.getfilesystemencoding()
if _fs_encoding is None:
_fs_encoding = sys.getdefaultencoding()
def encode_filename_in_py2(filename):
if not isinstance(filename, bytes):
return filename.encode(_fs_encoding)
return filename
else:
def encode_filename_in_py2(filename):
return filename
basestring = str
def extended_iglob(pattern):
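    """Expand a glob pattern that may additionally contain '{a,b,c}'
    alternation groups and '**/' for recursive directory matching;
    duplicate matches are filtered out.
    """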
if '{' in pattern:
m = re.match('(.*){([^}]+)}(.*)', pattern)
if m:
before, switch, after = m.groups()
for case in switch.split(','):
for path in extended_iglob(before + case + after):
yield path
return
if '**/' in pattern:
seen = set()
first, rest = pattern.split('**/', 1)
if first:
first = iglob(first+'/')
else:
first = ['']
for root in first:
for path in extended_iglob(join_path(root, rest)):
if path not in seen:
seen.add(path)
yield path
for path in extended_iglob(join_path(root, '*', '**/' + rest)):
if path not in seen:
seen.add(path)
yield path
else:
for path in iglob(pattern):
yield path
def nonempty(it, error_msg="expected non-empty iterator"):
empty = True
for value in it:
empty = False
yield value
if empty:
raise ValueError(error_msg)
@cached_function
def file_hash(filename):
path = os.path.normpath(filename.encode("UTF-8"))
    # hashlib requires bytes (not str) under Python 3.
    m = hashlib.md5((str(len(path)) + ":").encode("UTF-8"))
m.update(path)
f = open(filename, 'rb')
try:
data = f.read(65000)
while data:
m.update(data)
data = f.read(65000)
finally:
f.close()
return m.hexdigest()
def parse_list(s):
"""
>>> parse_list("")
[]
>>> parse_list("a")
['a']
>>> parse_list("a b c")
['a', 'b', 'c']
>>> parse_list("[a, b, c]")
['a', 'b', 'c']
>>> parse_list('a " " b')
['a', ' ', 'b']
>>> parse_list('[a, ",a", "a,", ",", ]')
['a', ',a', 'a,', ',']
"""
if len(s) >= 2 and s[0] == '[' and s[-1] == ']':
s = s[1:-1]
delimiter = ','
else:
delimiter = ' '
s, literals = strip_string_literals(s)
def unquote(literal):
literal = literal.strip()
if literal[0] in "'\"":
return literals[literal[1:-1]]
else:
return literal
return [unquote(item) for item in s.split(delimiter) if item.strip()]
transitive_str = object()
transitive_list = object()
distutils_settings = {
'name': str,
'sources': list,
'define_macros': list,
'undef_macros': list,
'libraries': transitive_list,
'library_dirs': transitive_list,
'runtime_library_dirs': transitive_list,
'include_dirs': transitive_list,
'extra_objects': list,
'extra_compile_args': transitive_list,
'extra_link_args': transitive_list,
'export_symbols': list,
'depends': transitive_list,
'language': transitive_str,
}
@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
if isinstance(source, basestring):
start = 0
while True:
end = source.find('\n', start)
if end == -1:
yield source[start:]
return
yield source[start:end]
start = end+1
else:
for line in source:
yield line
class DistutilsInfo(object):
def __init__(self, source=None, exn=None):
self.values = {}
if source is not None:
for line in line_iter(source):
line = line.lstrip()
if not line:
continue
if line[0] != '#':
break
line = line[1:].lstrip()
if line[:10] == 'distutils:':
key, _, value = [s.strip() for s in line[10:].partition('=')]
type = distutils_settings[key]
if type in (list, transitive_list):
value = parse_list(value)
if key == 'define_macros':
value = [tuple(macro.split('=', 1))
if '=' in macro else (macro, None)
for macro in value]
self.values[key] = value
elif exn is not None:
for key in distutils_settings:
if key in ('name', 'sources'):
continue
value = getattr(exn, key, None)
if value:
self.values[key] = value
def merge(self, other):
if other is None:
return self
for key, value in other.values.items():
type = distutils_settings[key]
if type is transitive_str and key not in self.values:
self.values[key] = value
elif type is transitive_list:
if key in self.values:
# Change a *copy* of the list (Trac #845)
all = self.values[key][:]
for v in value:
if v not in all:
all.append(v)
value = all
self.values[key] = value
return self
def subs(self, aliases):
if aliases is None:
return self
resolved = DistutilsInfo()
for key, value in self.values.items():
type = distutils_settings[key]
if type in [list, transitive_list]:
new_value_list = []
for v in value:
if v in aliases:
v = aliases[v]
if isinstance(v, list):
new_value_list += v
else:
new_value_list.append(v)
value = new_value_list
else:
if value in aliases:
value = aliases[value]
resolved.values[key] = value
return resolved
def apply(self, extension):
for key, value in self.values.items():
type = distutils_settings[key]
if type in [list, transitive_list]:
value = getattr(extension, key) + list(value)
setattr(extension, key, value)
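# Illustrative sketch (hypothetical helper, not part of the original module):
# the "# distutils:" header comments that DistutilsInfo() extracts from a
# source file.
def _example_distutils_info():
    source = "# distutils: language = c++\n# distutils: libraries = m stdc++\n"
    info = DistutilsInfo(source)
    assert info.values == {'language': 'c++', 'libraries': ['m', 'stdc++']}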
@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
def strip_string_literals(code, prefix='__Pyx_L'):
"""
    Normalizes every string literal (and comment body) to a label of the
    form '__Pyx_Lxxx_', returning the normalized code and a mapping of
    labels to the original string literals.
"""
new_code = []
literals = {}
counter = 0
start = q = 0
in_quote = False
hash_mark = single_q = double_q = -1
code_len = len(code)
quote_type = quote_len = None
while True:
if hash_mark < q:
hash_mark = code.find('#', q)
if single_q < q:
single_q = code.find("'", q)
if double_q < q:
double_q = code.find('"', q)
q = min(single_q, double_q)
if q == -1:
q = max(single_q, double_q)
# We're done.
if q == -1 and hash_mark == -1:
new_code.append(code[start:])
break
# Try to close the quote.
elif in_quote:
if code[q-1] == u'\\':
k = 2
while q >= k and code[q-k] == u'\\':
k += 1
if k % 2 == 0:
q += 1
continue
if code[q] == quote_type and (
quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
counter += 1
label = "%s%s_" % (prefix, counter)
literals[label] = code[start+quote_len:q]
full_quote = code[q:q+quote_len]
new_code.append(full_quote)
new_code.append(label)
new_code.append(full_quote)
q += quote_len
in_quote = False
start = q
else:
q += 1
# Process comment.
elif -1 != hash_mark and (hash_mark < q or q == -1):
new_code.append(code[start:hash_mark+1])
end = code.find('\n', hash_mark)
counter += 1
label = "%s%s_" % (prefix, counter)
if end == -1:
end_or_none = None
else:
end_or_none = end
literals[label] = code[hash_mark+1:end_or_none]
new_code.append(label)
if end == -1:
break
start = q = end
# Open the quote.
else:
if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
quote_len = 3
else:
quote_len = 1
in_quote = True
quote_type = code[q]
new_code.append(code[start:q])
start = q
q += quote_len
return "".join(new_code), literals
dependency_regex = re.compile(r"(?:^from +([0-9a-zA-Z_.]+) +cimport)|"
r"(?:^cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
r"(?:^cdef +extern +from +['\"]([^'\"]+)['\"])|"
r"(?:^include +['\"]([^'\"]+)['\"])", re.M)
def normalize_existing(base_path, rel_paths):
return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
@cached_function
def normalize_existing0(base_dir, rel_paths):
normalized = []
for rel in rel_paths:
path = join_path(base_dir, rel)
if path_exists(path):
normalized.append(os.path.normpath(path))
else:
normalized.append(rel)
return normalized
def resolve_depends(depends, include_dirs):
include_dirs = tuple(include_dirs)
resolved = []
for depend in depends:
path = resolve_depend(depend, include_dirs)
if path is not None:
resolved.append(path)
return resolved
@cached_function
def resolve_depend(depend, include_dirs):
if depend[0] == '<' and depend[-1] == '>':
return None
for dir in include_dirs:
path = join_path(dir, depend)
if path_exists(path):
return os.path.normpath(path)
return None
@cached_function
def package(filename):
dir = os.path.dirname(os.path.abspath(str(filename)))
if dir != filename and is_package_dir(dir):
return package(dir) + (os.path.basename(dir),)
else:
return ()
@cached_function
def fully_qualified_name(filename):
module = os.path.splitext(os.path.basename(filename))[0]
return '.'.join(package(filename) + (module,))
@cached_function
def parse_dependencies(source_filename):
# Actual parsing is way too slow, so we use regular expressions.
# The only catch is that we must strip comments and string
# literals ahead of time.
fh = Utils.open_source_file(source_filename, error_handling='ignore')
try:
source = fh.read()
finally:
fh.close()
distutils_info = DistutilsInfo(source)
source, literals = strip_string_literals(source)
source = source.replace('\\\n', ' ').replace('\t', ' ')
# TODO: pure mode
cimports = []
includes = []
externs = []
for m in dependency_regex.finditer(source):
cimport_from, cimport_list, extern, include = m.groups()
if cimport_from:
cimports.append(cimport_from)
elif cimport_list:
cimports.extend(x.strip() for x in cimport_list.split(","))
elif extern:
externs.append(literals[extern])
else:
includes.append(literals[include])
return cimports, includes, externs, distutils_info
class DependencyTree(object):
def __init__(self, context, quiet=False):
self.context = context
self.quiet = quiet
self._transitive_cache = {}
def parse_dependencies(self, source_filename):
if path_exists(source_filename):
source_filename = os.path.normpath(source_filename)
return parse_dependencies(source_filename)
@cached_method
def included_files(self, filename):
# This is messy because included files are textually included, resolving
# cimports (but not includes) relative to the including file.
all = set()
for include in self.parse_dependencies(filename)[1]:
include_path = join_path(os.path.dirname(filename), include)
if not path_exists(include_path):
include_path = self.context.find_include_file(include, None)
if include_path:
if '.' + os.path.sep in include_path:
include_path = os.path.normpath(include_path)
all.add(include_path)
all.update(self.included_files(include_path))
elif not self.quiet:
print("Unable to locate '%s' referenced from '%s'" % (filename, include))
return all
@cached_method
def cimports_and_externs(self, filename):
# This is really ugly. Nested cimports are resolved with respect to the
# includer, but includes are resolved with respect to the includee.
cimports, includes, externs = self.parse_dependencies(filename)[:3]
cimports = set(cimports)
externs = set(externs)
for include in self.included_files(filename):
included_cimports, included_externs = self.cimports_and_externs(include)
cimports.update(included_cimports)
externs.update(included_externs)
return tuple(cimports), normalize_existing(filename, externs)
def cimports(self, filename):
return self.cimports_and_externs(filename)[0]
def package(self, filename):
return package(filename)
def fully_qualified_name(self, filename):
return fully_qualified_name(filename)
@cached_method
def find_pxd(self, module, filename=None):
is_relative = module[0] == '.'
if is_relative and not filename:
raise NotImplementedError("New relative imports.")
if filename is not None:
module_path = module.split('.')
if is_relative:
module_path.pop(0) # just explicitly relative
package_path = list(self.package(filename))
while module_path and not module_path[0]:
try:
package_path.pop()
except IndexError:
return None # FIXME: error?
module_path.pop(0)
relative = '.'.join(package_path + module_path)
pxd = self.context.find_pxd_file(relative, None)
if pxd:
return pxd
if is_relative:
return None # FIXME: error?
return self.context.find_pxd_file(module, None)
@cached_method
def cimported_files(self, filename):
if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'):
pxd_list = [filename[:-4] + '.pxd']
else:
pxd_list = []
for module in self.cimports(filename):
if module[:7] == 'cython.' or module == 'cython':
continue
pxd_file = self.find_pxd(module, filename)
if pxd_file is not None:
pxd_list.append(pxd_file)
elif not self.quiet:
print("%s: cannot find cimported module '%s'" % (filename, module))
return tuple(pxd_list)
@cached_method
def immediate_dependencies(self, filename):
all = set([filename])
all.update(self.cimported_files(filename))
all.update(self.included_files(filename))
return all
def all_dependencies(self, filename):
return self.transitive_merge(filename, self.immediate_dependencies, set.union)
@cached_method
def timestamp(self, filename):
return os.path.getmtime(filename)
def extract_timestamp(self, filename):
return self.timestamp(filename), filename
def newest_dependency(self, filename):
return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])
def transitive_fingerprint(self, filename, extra=None):
try:
            # hashlib requires bytes (not str) under Python 3.
            m = hashlib.md5(__version__.encode('UTF-8'))
            m.update(file_hash(filename).encode('UTF-8'))
            for x in sorted(self.all_dependencies(filename)):
                if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
                    m.update(file_hash(x).encode('UTF-8'))
            if extra is not None:
                m.update(str(extra).encode('UTF-8'))
return m.hexdigest()
except IOError:
return None
def distutils_info0(self, filename):
info = self.parse_dependencies(filename)[3]
externs = self.cimports_and_externs(filename)[1]
if externs:
if 'depends' in info.values:
info.values['depends'] = list(set(info.values['depends']).union(externs))
else:
info.values['depends'] = list(externs)
return info
def distutils_info(self, filename, aliases=None, base=None):
return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
.subs(aliases)
.merge(base))
def transitive_merge(self, node, extract, merge):
try:
seen = self._transitive_cache[extract, merge]
except KeyError:
seen = self._transitive_cache[extract, merge] = {}
return self.transitive_merge_helper(
node, extract, merge, seen, {}, self.cimported_files)[0]
def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
if node in seen:
return seen[node], None
deps = extract(node)
if node in stack:
return deps, node
try:
stack[node] = len(stack)
loop = None
for next in outgoing(node):
sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
if sub_loop is not None:
if loop is not None and stack[loop] < stack[sub_loop]:
pass
else:
loop = sub_loop
deps = merge(deps, sub_deps)
if loop == node:
loop = None
if loop is None:
seen[node] = deps
return deps, loop
finally:
del stack[node]
_dep_tree = None
def create_dependency_tree(ctx=None, quiet=False):
global _dep_tree
if _dep_tree is None:
if ctx is None:
ctx = Context(["."], CompilationOptions(default_options))
_dep_tree = DependencyTree(ctx, quiet=quiet)
return _dep_tree
# This may be useful for advanced users?
def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
exclude_failures=False):
if language is not None:
print('Please put "# distutils: language=%s" in your .pyx or .pxd file(s)' % language)
if exclude is None:
exclude = []
if not isinstance(patterns, (list, tuple)):
patterns = [patterns]
explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
seen = set()
deps = create_dependency_tree(ctx, quiet=quiet)
to_exclude = set()
if not isinstance(exclude, list):
exclude = [exclude]
for pattern in exclude:
to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
module_list = []
module_metadata = {}
# workaround for setuptools
if 'setuptools' in sys.modules:
Extension_distutils = sys.modules['setuptools.extension']._Extension
Extension_setuptools = sys.modules['setuptools'].Extension
else:
# dummy class, in case we do not have setuptools
Extension_distutils = Extension
class Extension_setuptools(Extension): pass
for pattern in patterns:
if isinstance(pattern, str):
filepattern = pattern
template = None
name = '*'
base = None
exn_type = Extension
ext_language = language
elif isinstance(pattern, (Extension_distutils, Extension_setuptools)):
for filepattern in pattern.sources:
if os.path.splitext(filepattern)[1] in ('.py', '.pyx'):
break
else:
# ignore non-cython modules
module_list.append(pattern)
continue
template = pattern
name = template.name
base = DistutilsInfo(exn=template)
exn_type = template.__class__
ext_language = None # do not override whatever the Extension says
else:
msg = str("pattern is not of type str nor subclass of Extension (%s)"
" but of type %s and class %s" % (repr(Extension),
type(pattern),
pattern.__class__))
raise TypeError(msg)
for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
if os.path.abspath(file) in to_exclude:
continue
pkg = deps.package(file)
if '*' in name:
module_name = deps.fully_qualified_name(file)
if module_name in explicit_modules:
continue
else:
module_name = name
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
except Exception:
if exclude_failures:
continue
raise
if base is not None:
for key, value in base.values.items():
if key not in kwds:
kwds[key] = value
sources = [file]
if template is not None:
sources += [m for m in template.sources if m != filepattern]
if 'sources' in kwds:
# allow users to add .c files etc.
for source in kwds['sources']:
source = encode_filename_in_py2(source)
if source not in sources:
sources.append(source)
extra_sources = kwds['sources']
del kwds['sources']
else:
extra_sources = None
if 'depends' in kwds:
depends = resolve_depends(kwds['depends'], (kwds.get('include_dirs') or []) + ["."])
if template is not None:
# Always include everything from the template.
depends = set(template.depends).union(depends)
# Sort depends to make the metadata dump in the
# Cython-generated C code predictable.
kwds['depends'] = sorted(depends)
if ext_language and 'language' not in kwds:
kwds['language'] = ext_language
module_list.append(exn_type(
name=module_name,
sources=sources,
**kwds))
if extra_sources:
kwds['sources'] = extra_sources
module_metadata[module_name] = {'distutils': kwds, 'module_name': module_name}
m = module_list[-1]
seen.add(name)
return module_list, module_metadata
# This is the user-exposed entry point.
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
exclude_failures=False, **options):
"""
Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them.
As module list, pass either a glob pattern, a list of glob patterns or a list of
Extension objects. The latter allows you to configure the extensions separately
through the normal distutils options.
When using glob patterns, you can exclude certain module names explicitly
by passing them into the 'exclude' option.
To globally enable C++ mode, you can pass language='c++'. Otherwise, this
will be determined at a per-file level based on compiler directives. This
affects only modules found based on file names. Extension instances passed
into cythonize() will not be changed.
For parallel compilation, set the 'nthreads' option to the number of
concurrent builds.
For a broad 'try to compile' mode that ignores compilation failures and
simply excludes the failed extensions, pass 'exclude_failures=True'. Note
that this only really makes sense for compiling .py files which can also
be used without compilation.
Additional compilation options can be passed as keyword arguments.
"""
if exclude is None:
exclude = []
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
if options.get('cache'):
raise NotImplementedError("common_utility_include_dir does not yet work with caching")
safe_makedirs(options['common_utility_include_dir'])
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = c_options.create_context()
options = c_options
module_list, module_metadata = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
language=language,
aliases=aliases)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
modules_by_cfile = {}
to_compile = []
for m in module_list:
if build_dir:
            root = os.getcwd()  # distutils extension depends are relative to cwd
def copy_to_build_dir(filepath, root=root):
filepath_abs = os.path.abspath(filepath)
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
mod_dir = join_path(build_dir,
os.path.dirname(_relpath(filepath, root)))
copy_once_if_newer(filepath_abs, mod_dir)
for dep in m.depends:
copy_to_build_dir(dep)
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
safe_makedirs_once(dir)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet:
if source == dep:
print("Compiling %s because it changed." % source)
else:
print("Compiling %s because it depends on %s." % (source, dep))
if not force and hasattr(options, 'cache'):
extra = m.language
fingerprint = deps.transitive_fingerprint(source, extra)
else:
fingerprint = None
to_compile.append((priority, source, c_file, fingerprint, quiet,
options, not exclude_failures, module_metadata.get(m.name)))
new_sources.append(c_file)
if c_file not in modules_by_cfile:
modules_by_cfile[c_file] = [m]
else:
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if hasattr(options, 'cache'):
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
# Drop "priority" component of "to_compile" entries and add a
# simple progress indicator.
N = len(to_compile)
progress_fmt = "[{0:%d}/{1}] " % len(str(N))
for i in range(N):
progress = progress_fmt.format(i+1, N)
to_compile[i] = to_compile[i][1:] + (progress,)
if N <= 1:
nthreads = 0
if nthreads:
# Requires multiprocessing (or Python >= 2.6)
try:
import multiprocessing
pool = multiprocessing.Pool(
nthreads, initializer=_init_multiprocessing_helper)
except (ImportError, OSError):
print("multiprocessing required for parallel cythonization")
nthreads = 0
else:
# This is a bit more involved than it should be, because KeyboardInterrupts
# break the multiprocessing workers when using a normal pool.map().
# See, for example:
# http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
try:
result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
pool.close()
while not result.ready():
try:
result.get(99999) # seconds
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
pool.terminate()
raise
pool.join()
if not nthreads:
for args in to_compile:
cythonize_one(*args)
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.items():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print("Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if hasattr(options, 'cache'):
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
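# Illustrative setup.py-style usage of cythonize() (a hedged sketch; the file
# names and module layout below are hypothetical, not part of this module):
#
#   from distutils.core import setup
#   from Cython.Build import cythonize
#
#   setup(ext_modules=cythonize("src/*.pyx",
#                               exclude="src/skip_me.pyx",
#                               nthreads=4))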
if os.environ.get('XML_RESULTS'):
compile_result_dir = os.environ['XML_RESULTS']
def record_results(func):
def with_record(*args):
t = time.time()
success = True
try:
try:
func(*args)
except:
success = False
finally:
t = time.time() - t
module = fully_qualified_name(args[0])
name = "cythonize." + module
failures = 1 - success
if success:
failure_item = ""
else:
failure_item = "failure"
output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
output.write("""
<?xml version="1.0" ?>
<testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
<testcase classname="%(name)s" name="cythonize">
%(failure_item)s
</testcase>
</testsuite>
""".strip() % locals())
output.close()
return with_record
else:
def record_results(func):
return func
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True, embedded_metadata=None, progress=""):
from ..Compiler.Main import compile, default_options
from ..Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
try:
os.mkdir(options.cache)
except:
if not os.path.exists(options.cache):
raise
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file = join_path(
options.cache, "%s-%s%s" % (os.path.basename(c_file), fingerprint, gzip_ext))
if os.path.exists(fingerprint_file):
if not quiet:
print("%sFound compiled %s in cache" % (progress, pyx_file))
os.utime(fingerprint_file, None)
g = gzip_open(fingerprint_file, 'rb')
try:
f = open(c_file, 'wb')
try:
shutil.copyfileobj(g, f)
finally:
f.close()
finally:
g.close()
return
if not quiet:
print("%sCythonizing %s" % (progress, pyx_file))
if options is None:
options = CompilationOptions(default_options)
options.output_file = c_file
options.embedded_metadata = embedded_metadata
any_failures = 0
try:
result = compile([pyx_file], options)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
sys.stderr.write('%s\n' % e)
any_failures = 1
# XXX
import traceback
traceback.print_exc()
except Exception:
if raise_on_failure:
raise
import traceback
traceback.print_exc()
any_failures = 1
if any_failures:
if raise_on_failure:
raise CompileError(None, pyx_file)
elif os.path.exists(c_file):
os.remove(c_file)
elif fingerprint:
f = open(c_file, 'rb')
try:
g = gzip_open(fingerprint_file, 'wb')
try:
shutil.copyfileobj(f, g)
finally:
g.close()
finally:
f.close()
def cythonize_one_helper(m):
import traceback
try:
return cythonize_one(*m)
except Exception:
traceback.print_exc()
raise
def _init_multiprocessing_helper():
# KeyboardInterrupt kills workers, so don't let them get it
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cleanup_cache(cache, target_size, ratio=.85):
try:
p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
res = p.wait()
if res == 0:
total_size = 1024 * int(p.stdout.read().strip().split()[0])
if total_size < target_size:
return
except (OSError, ValueError):
pass
total_size = 0
all = []
for file in os.listdir(cache):
path = join_path(cache, file)
s = os.stat(path)
total_size += s.st_size
all.append((s.st_atime, s.st_size, path))
if total_size > target_size:
for time, size, file in reversed(sorted(all)):
os.unlink(file)
total_size -= size
if total_size < target_size * ratio:
break
|
|
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Dave St.Germain
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
is_3 = sys.version_info >= (3, 0)
if is_3:
import io
else:
import StringIO
try:
import cStringIO
except ImportError:
cStringIO = None
__all__ = ['jsmin', 'JavascriptMinify']
__version__ = '2.1.6'
def jsmin(js, **kwargs):
"""
returns a minified version of the javascript string
"""
if not is_3:
if cStringIO and not isinstance(js, unicode):
# strings can use cStringIO for a 3x performance
# improvement, but unicode (in python2) cannot
klass = cStringIO.StringIO
else:
klass = StringIO.StringIO
else:
klass = io.StringIO
ins = klass(js)
outs = klass()
JavascriptMinify(ins, outs, **kwargs).minify()
return outs.getvalue()
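# Illustrative sketch (hypothetical helper, not part of the original module):
# whitespace and the line comment are stripped while statements are preserved.
def _example_jsmin():
    minified = jsmin('var x = 1;  // a comment\nvar y = 2;')
    assert minified == 'var x=1;var y=2;'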
class JavascriptMinify(object):
"""
Minify an input stream of javascript, writing
to an output stream
"""
def __init__(self, instream=None, outstream=None, quote_chars="'\""):
self.ins = instream
self.outs = outstream
self.quote_chars = quote_chars
def minify(self, instream=None, outstream=None):
if instream and outstream:
self.ins, self.outs = instream, outstream
self.is_return = False
self.return_buf = ''
def write(char):
# all of this is to support literal regular expressions.
# sigh
if char in 'return':
self.return_buf += char
self.is_return = self.return_buf == 'return'
else:
self.return_buf = ''
self.outs.write(char)
if self.is_return:
self.return_buf = ''
read = self.ins.read
space_strings = "abcdefghijklmnopqrstuvwxyz"\
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\"
self.space_strings = space_strings
starters, enders = '{[(+-', '}])+-/' + self.quote_chars
newlinestart_strings = starters + space_strings + self.quote_chars
newlineend_strings = enders + space_strings + self.quote_chars
self.newlinestart_strings = newlinestart_strings
self.newlineend_strings = newlineend_strings
do_newline = False
do_space = False
escape_slash_count = 0
in_quote = ''
quote_buf = []
previous = ';'
previous_non_space = ';'
next1 = read(1)
while next1:
next2 = read(1)
if in_quote:
quote_buf.append(next1)
if next1 == in_quote:
numslashes = 0
for c in reversed(quote_buf[:-1]):
if c != '\\':
break
else:
numslashes += 1
if numslashes % 2 == 0:
in_quote = ''
write(''.join(quote_buf))
elif next1 in '\r\n':
next2, do_newline = self.newline(
previous_non_space, next2, do_newline)
elif next1 < '!':
if (previous_non_space in space_strings \
or previous_non_space > '~') \
and (next2 in space_strings or next2 > '~'):
do_space = True
elif previous_non_space in '-+' and next2 == previous_non_space:
# protect against + ++ or - -- sequences
do_space = True
elif self.is_return and next2 == '/':
# returning a regex...
write(' ')
elif next1 == '/':
if do_space:
write(' ')
if next2 == '/':
# Line comment: treat it as a newline, but skip it
next2 = self.line_comment(next1, next2)
next1 = '\n'
next2, do_newline = self.newline(
previous_non_space, next2, do_newline)
elif next2 == '*':
self.block_comment(next1, next2)
next2 = read(1)
if previous_non_space in space_strings:
do_space = True
next1 = previous
else:
if previous_non_space in '{(,=:[?!&|;' or self.is_return:
self.regex_literal(next1, next2)
# hackish: after regex literal next1 is still /
# (it was the initial /, now it's the last /)
next2 = read(1)
else:
write('/')
else:
if do_newline:
write('\n')
do_newline = False
do_space = False
if do_space:
do_space = False
write(' ')
write(next1)
if next1 in self.quote_chars:
in_quote = next1
quote_buf = []
if next1 >= '!':
previous_non_space = next1
if next1 == '\\':
escape_slash_count += 1
else:
escape_slash_count = 0
previous = next1
next1 = next2
def regex_literal(self, next1, next2):
assert next1 == '/' # otherwise we should not be called!
self.return_buf = ''
read = self.ins.read
write = self.outs.write
in_char_class = False
write('/')
next = next2
while next != '/' or in_char_class:
write(next)
if next == '\\':
write(read(1)) # whatever is next is escaped
elif next == '[':
write(read(1)) # character class cannot be empty
in_char_class = True
elif next == ']':
in_char_class = False
next = read(1)
write('/')
def line_comment(self, next1, next2):
assert next1 == next2 == '/'
read = self.ins.read
while next1 and next1 not in '\r\n':
next1 = read(1)
while next1 and next1 in '\r\n':
next1 = read(1)
return next1
def block_comment(self, next1, next2):
assert next1 == '/'
assert next2 == '*'
read = self.ins.read
# Skip past first /* and avoid catching on /*/...*/
next1 = read(1)
next2 = read(1)
while next1 != '*' or next2 != '/':
next1 = next2
next2 = read(1)
def newline(self, previous_non_space, next2, do_newline):
read = self.ins.read
if previous_non_space and (
previous_non_space in self.newlineend_strings
or previous_non_space > '~'):
while 1:
if next2 < '!':
next2 = read(1)
if not next2:
break
else:
if next2 in self.newlinestart_strings \
or next2 > '~' or next2 == '/':
do_newline = True
break
return next2, do_newline
|
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import traceback
import warnings
import zipfile
from distutils import sysconfig
from distutils.util import change_root
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.packaging import specifiers
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, get_stdlib, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version, canonicalize_name, normalize_path, dist_is_local,
)
from pip.utils.hashes import Hashes
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.ui import open_spinner
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
operators = specifiers.Specifier._operators.keys()
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
self.extras = ()
if isinstance(req, six.string_types):
try:
req = pkg_resources.Requirement.parse(req)
except pkg_resources.RequirementParseError:
if os.path.sep in req:
add_msg = "It looks like a path. Does it exist ?"
elif '=' in req and not any(op in req for op in operators):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = traceback.format_exc()
raise InstallationError(
"Invalid requirement: '%s'\n%s" % (req, add_msg))
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self._wheel_cache = wheel_cache
self.link = self.original_link = link
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
        # This holds the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
# Used to store the global directory where the _temp_build_dir should
# have been created. Cf _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
# Set True if a legitimate do-nothing-on-uninstall has happened - e.g.
# system site packages, stdlib packages.
self.nothing_to_uninstall = False
self.use_user_site = False
self.target_dir = None
self.options = options if options else {}
self.pycompile = pycompile
# Set to True after successful preparation of this requirement
self.prepared = False
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False, options=None, wheel_cache=None,
constraint=False):
from pip.index import Link
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
editable_options=editable_options,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache, constraint=constraint)
if extras:
res.extras = pkg_resources.Requirement.parse('__placeholder__' +
extras).extras
return res
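    # Illustrative (hedged) examples of inputs that from_line() accepts; the
    # names and paths below are hypothetical:
    #   InstallRequirement.from_line('requests==2.9.1')            # version specifier
    #   InstallRequirement.from_line('requests[security]')         # with extras
    #   InstallRequirement.from_line('./downloads/pkg-1.0.tar.gz') # local archive
    #   InstallRequirement.from_line('https://example.com/pkg-1.0.tar.gz')  # URL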
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade, require_hashes):
"""Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
If require_hashes is True, don't use the wheel cache, because cached
wheels, always built locally, have different hashes than the files
downloaded from the index server and thus throw false hash mismatches.
        Furthermore, cached wheels at present have nondeterministic contents due
to file modification times.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
if self._wheel_cache is not None and not require_hashes:
old_link = self.link
self.link = self._wheel_cache.cached_wheel(self.link, self.name)
if old_link != self.link:
logger.debug('Using cached wheel link: %s', self.link)
@property
def specifier(self):
return self.req.specifier
@property
def is_pinned(self):
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in ('==', '==='))
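    # Illustration (not part of pip, values made up): a requirement parsed
    # from 'some-package==1.2' has exactly one specifier whose operator is
    # '==', so is_pinned is True; 'some-package>=1.2' or
    # 'some-package>1.2,<2.0' would not be considered pinned.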
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
            # Once run_egg_info has run, we'll be able to fix it
            # via _correct_build_location().
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
if get_installed_version('setuptools') is None:
add_msg = "Please install setuptools."
else:
add_msg = traceback.format_exc()
# Setuptools is not available
raise InstallationError(
"Could not import setuptools which is required to "
"install from a source distribution.\n%s" % add_msg
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = SETUPTOOLS_SHIM % self.setup_py
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self._correct_build_location()
else:
metadata_name = canonicalize_name(self.pkg_info()["Name"])
if canonicalize_name(self.req.project_name) != metadata_name:
logger.warning(
'Running setup.py (path:%s) egg_info for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.setup_py, self.name, metadata_name, self.name
)
self.req = pkg_resources.Requirement.parse(metadata_name)
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
dist_path = normalize_path(dist.location)
if not dist_is_local(dist):
logger.info(
"Not uninstalling %s at %s, outside environment %s",
dist.key,
dist_path,
sys.prefix,
)
self.nothing_to_uninstall = True
return
if dist_path in get_stdlib():
logger.info(
"Not uninstalling %s at %s, as it is in the standard library.",
dist.key,
dist_path,
)
self.nothing_to_uninstall = True
return
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
        # The order of the uninstall cases matters: with 2 installs of the
        # same package, pip needs to uninstall the currently detected version.
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
paths_to_remove.add(path + '.pyo')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip10Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
if six.PY2:
options = {}
else:
options = {"delimiters": ('=', )}
config = configparser.SafeConfigParser(**options)
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
elif not self.nothing_to_uninstall:
logger.error(
"Can't commit %s, nothing uninstalled.", self.name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=[], root=None,
prefix=None):
if self.editable:
self.install_editable(
install_options, global_options, prefix=prefix)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root, prefix=prefix)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options += self.options.get('global_options', [])
install_options += self.options.get('install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable, "-u"]
install_args.append('-c')
install_args.append(SETUPTOOLS_SHIM % self.setup_py)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if prefix is not None:
install_args += ['--prefix', prefix]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
msg = 'Running setup.py install for %s' % (self.name,)
with open_spinner(msg) as spinner:
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
show_stdout=False,
spinner=spinner,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
                # command, so we're unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options,
global_options=(), prefix=None):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
if prefix:
prefix_param = ['--prefix={0}'.format(prefix)]
install_options = list(install_options) + prefix_param
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
SETUPTOOLS_SHIM % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=cwd,
show_stdout=False)
self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None, prefix=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
prefix=prefix,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
@property
def has_hash_options(self):
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.options.get('hashes', {}))
def hashes(self, trust_internet=True):
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
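# Hypothetical sketch (not part of pip): shows the shape of the data that
# hashes() builds above -- a dict mapping hash algorithm names to lists of
# acceptable hex digests, wrapped in a Hashes comparer. The digest values
# below are made-up placeholders.
def _example_hashes_shape():
    good_hashes = {
        'sha256': ['0123456789abcdef' * 4],    # e.g. from a --hash flag
        'md5': ['0123456789abcdef' * 2],       # e.g. from a #md5=... URL
    }
    return Hashes(good_hashes)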
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
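# Hypothetical usage sketch (not part of pip): _strip_postfix() on a few
# made-up names.
def _example_strip_postfix():
    assert _strip_postfix('django-piston-dev') == 'django-piston'
    assert _strip_postfix('django-piston-0.2.2') == 'django-piston'
    # Nothing to strip: the name is returned unchanged.
    assert _strip_postfix('django-piston') == 'django-piston'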
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
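# Hypothetical usage sketch (not part of pip): the package name is guessed
# from a conventional Subversion layout; the URLs are made up.
def _example_build_req_from_url():
    assert _build_req_from_url('http://svn.example.com/svn/Foobar/tags/1.0') == 'Foobar'
    assert _build_req_from_url('http://svn.example.com/svn/Foobar/trunk') == 'Foobar'
    # No tags/branches/trunk component: no name can be derived.
    assert _build_req_from_url('http://svn.example.com/svn/Foobar') is None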
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
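# Hypothetical usage sketch (not part of pip): how the query-string options
# of an editable URL are extracted; the URL is made up.
def _example_build_editable_options():
    url = 'git+https://example.com/repo.git#egg=Foobar&subdirectory=pkg'
    assert _build_editable_options(url) == {'egg': 'Foobar',
                                            'subdirectory': 'pkg'}
    # A URL without any options yields None.
    assert _build_editable_options('git+https://example.com/repo.git') is None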
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
        - a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
from pip.index import Link
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return package_name, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
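# Hypothetical usage sketch (not part of pip): with the usual pip VCS
# backends registered, parse_editable() splits an "-e" style requirement
# into (name, url, extras_override, editable_options); the URL is made up.
def _example_parse_editable():
    name, url, extras, options = parse_editable(
        'git+https://example.com/repo.git#egg=Foobar&subdirectory=sub')
    # Expected roughly:
    #   name    -> 'Foobar'
    #   url     -> 'git+https://example.com/repo.git#egg=Foobar&subdirectory=sub'
    #   extras  -> None
    #   options -> {'egg': 'Foobar', 'subdirectory': 'sub'}
    return name, url, extras, options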
|
|
"""
Copyright 2017 Pani Networks Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Functions dealing with VPC.
#
import datetime
import logging
import random
import boto.vpc
import boto.utils
from vpcrouter.errors import VpcRouteSetError
from vpcrouter.currentstate import CURRENT_STATE
from vpcrouter.utils import is_cidr_in_cidr
def get_ec2_meta_data():
"""
Get meta data about ourselves, if we are on an EC2 instance.
In particular, this returns the VPC ID and region of this instance.
If we are not on an EC2 instance it returns an empty dict.
"""
# The timeout is just for the connection attempt, but between retries there
# is an exponential back off in seconds. So, too many retries and it can
# possibly block for a very long time here. Main contributor to waiting
# time here is the number of retries, rather than the timeout time.
try:
md = boto.utils.get_instance_metadata(timeout=2, num_retries=2)
vpc_id = md['network']['interfaces']['macs'].values()[0]['vpc-id']
region = md['placement']['availability-zone'][:-1]
return {"vpc_id" : vpc_id, "region_name" : region}
    except Exception:
# Any problem while getting the meta data? Assume we are not on an EC2
# instance.
return {}
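# Illustration only (values made up): on an EC2 instance the returned dict
# has the rough shape {"vpc_id": "vpc-0123abcd", "region_name": "us-east-1"};
# outside of EC2 it is simply {}.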
def connect_to_region(region_name):
"""
Establish connection to AWS API.
"""
logging.debug("Connecting to AWS region '%s'" % region_name)
con = boto.vpc.connect_to_region(region_name)
if not con:
raise VpcRouteSetError("Could not establish connection to "
"region '%s'." % region_name)
return con
def _make_ip_subnet_lookup(vpc_info):
"""
Updates the vpc-info object with a lookup for IP -> subnet.
"""
    # We create a reverse lookup from the instances' private IP addresses to the
# subnets they are associated with. This is used later on in order to
# determine whether routes should be set in an RT: Is the RT's subnet
# associated with ANY of the IP addresses in the route spec? To make this
# easy, we collect the IPs and subnets of all EC2 instances in the VPC.
# Once we get a route spec, we create a list of subnets for only the
# cluster nodes. The assumption is that not all EC2 instances in the VPC
# necessarily belong to the cluster. We really want to narrow it down to
# the cluster nodes only. See make_cluster_node_subnet_list().
vpc_info['ip_subnet_lookup'] = {}
for instance in vpc_info['instances']:
for interface in instance.interfaces:
subnet_id = interface.subnet_id
for priv_addr in interface.private_ip_addresses:
vpc_info['ip_subnet_lookup'][priv_addr.private_ip_address] = \
subnet_id
def get_vpc_overview(con, vpc_id, region_name):
"""
Retrieve information for the specified VPC.
If no VPC ID was specified then just pick the first VPC we find.
Returns a dict with the VPC's zones, subnets, route tables and
instances.
"""
logging.debug("Retrieving information for VPC '%s'" % vpc_id)
d = {}
d['zones'] = con.get_all_zones()
# Find the specified VPC, or just use the first one
all_vpcs = con.get_all_vpcs()
if not all_vpcs:
raise VpcRouteSetError("Cannot find any VPCs.")
if not vpc_id:
# Just grab the first available VPC and use it, if no VPC specified
vpc = all_vpcs[0]
vpc_id = vpc.id
else:
# Search through the list of VPCs for the one with the specified ID
vpc = None
for v in all_vpcs:
if v.id == vpc_id:
vpc = v
break
if not vpc:
raise VpcRouteSetError("Cannot find specified VPC '%s' "
"in region '%s'." % (vpc_id, region_name))
d['vpc'] = vpc
vpc_filter = {"vpc-id" : vpc_id} # Will use this filter expression a lot
# Now find the subnets, route tables and instances within this VPC
d['subnets'] = con.get_all_subnets(filters=vpc_filter)
d['route_tables'] = con.get_all_route_tables(filters=vpc_filter)
# Route tables are associated with subnets. Maintain a lookup table from
# RT ID to (list of) subnets. This is necessary later on, because
# we only want to set a route in an RT if at least one of the ENIs we are
# dealing with is associated with the RT. That way, we don't have to set
# the route in all tables all the time.
d['rt_subnet_lookup'] = {}
for rt in d['route_tables']:
for assoc in rt.associations:
if hasattr(assoc, 'subnet_id'):
subnet_id = assoc.subnet_id
if subnet_id:
d['rt_subnet_lookup'].setdefault(rt.id, []). \
append(subnet_id)
# Get all the cluster instances (all EC2 instances associated with this
# VPC).
reservations = con.get_all_reservations(filters=vpc_filter)
d['instances'] = []
for r in reservations: # a reservation may have multiple instances
d['instances'].extend(r.instances)
    # Add a reverse lookup from the instances' private IP addresses to the
# subnets they are associated with. More info in the doc for that function.
_make_ip_subnet_lookup(d)
# Maintain a quick instance lookup for convenience
d['instance_by_id'] = {}
for i in d['instances']:
d['instance_by_id'][i.id] = i
return d
def find_instance_and_eni_by_ip(vpc_info, ip):
"""
Given a specific IP address, find the EC2 instance and ENI.
We need this information for setting the route.
    Returns instance and eni in a tuple.
"""
for instance in vpc_info['instances']:
for eni in instance.interfaces:
for pa in eni.private_ip_addresses:
if pa.private_ip_address == ip:
return instance, eni
raise VpcRouteSetError("Could not find instance/eni for '%s' "
"in VPC '%s'." % (ip, vpc_info['vpc'].id))
def get_instance_private_ip_from_route(instance, route):
"""
Find the private IP and ENI of an instance that's pointed to in a route.
Returns (ipaddr, eni) tuple.
"""
    ipaddr = None
    eni = None    # guard: an instance without interfaces would otherwise
                  # leave 'eni' undefined in the return expression below
    for eni in instance.interfaces:
        if eni.id == route.interface_id:
            ipaddr = eni.private_ip_address
            break
    return ipaddr, (eni if ipaddr else None)
def _choose_different_host(old_ip, ip_list, failed_ips, questionable_ips):
"""
Randomly choose a different host from a list of hosts.
Pick from fully healthy IPs first (neither failed nor questionable).
If we don't have any of those, pick from questionable ones next.
If no suitable hosts can be found in the list (if it's empty or all hosts
are in the failed_ips list) it will return None.
The old IP (if any) is passed in. We will try to avoid returning this same
old IP under the right circumstances. If no old IP is known, None can be
passed in for it instead.
"""
if not ip_list:
# We don't have any hosts to choose from.
return None
ip_set = set(ip_list)
failed_set = set(failed_ips)
# Consider only those questionable IPs that aren't also failed and make
# sure all of the ones in the questionable list are at least also present
# in the overall IP list.
questionable_set = set(questionable_ips).intersection(ip_set). \
difference(failed_set)
# Get all healthy IPs that are neither failed, nor questionable
healthy_ips = list(ip_set.difference(failed_set, questionable_set))
if healthy_ips:
# Return one of the completely healthy IPs
return random.choice(healthy_ips)
if questionable_set:
# Don't have any completely healthy ones, so return one of the
# questionable ones. Not perfect, but at least may still provide
# routing functionality for some time.
if old_ip not in questionable_set:
# We may be here because the original address was questionable. If
# only other questionable ones are available then there's no point
# changing the address. We only change if the old address wasn't
# one of the questionable ones already.
return random.choice(list(questionable_set))
# We got nothing...
return None
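# Hypothetical sketch (not part of vpc-router) illustrating the selection
# priority of _choose_different_host(); all IP addresses are made up.
def _example_choose_different_host():
    hosts = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
    failed = ["10.0.0.2"]
    questionable = ["10.0.0.3"]
    # A fully healthy host is always preferred.
    assert _choose_different_host("10.0.0.2", hosts,
                                  failed, questionable) == "10.0.0.1"
    # With no healthy host left, a questionable one is used as a fallback.
    assert _choose_different_host("10.0.0.2", ["10.0.0.2", "10.0.0.3"],
                                  failed, questionable) == "10.0.0.3"
    # Nothing usable at all: None is returned.
    assert _choose_different_host(None, [], failed, questionable) is None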
def _rt_state_update(route_table_id, dcidr, router_ip="(none)",
instance_id="(none)", eni_id="(none)",
old_router_ip="(none)", msg="(none)"):
"""
Store a message about a VPC route in the current state.
"""
buf = "inst: %s, eni: %s, r_ip: %-15s, o_r_ip: %-15s, msg: %s" % \
(instance_id, eni_id, router_ip, old_router_ip, msg)
CURRENT_STATE.vpc_state.setdefault('route_tables', {}). \
setdefault(route_table_id, {})[dcidr] = buf
def _update_route(dcidr, router_ip, old_router_ip,
vpc_info, con, route_table_id, update_reason):
"""
Update an existing route entry in the route table.
"""
instance = eni = None
try:
instance, eni = find_instance_and_eni_by_ip(vpc_info, router_ip)
logging.info("--- updating existing route in RT '%s' "
"%s -> %s (%s, %s) (old IP: %s, reason: %s)" %
(route_table_id, dcidr, router_ip,
instance.id, eni.id, old_router_ip, update_reason))
try:
con.replace_route(
route_table_id = route_table_id,
destination_cidr_block = dcidr,
instance_id = instance.id,
interface_id = eni.id)
except Exception as e:
raise Exception("replace_route failed: %s" % str(e))
CURRENT_STATE.routes[dcidr] = \
(router_ip, str(instance.id), str(eni.id))
except Exception as e:
msg = "*** failed to update route in RT '%s' %s -> %s (%s)" % \
(route_table_id, dcidr, old_router_ip, e.message)
update_reason += " [ERROR update route: %s]" % e.message
logging.error(msg)
_rt_state_update(route_table_id, dcidr, router_ip,
instance.id if instance else "(none)",
eni.id if eni else "(none)",
old_router_ip, update_reason)
def _add_new_route(dcidr, router_ip, vpc_info, con, route_table_id):
"""
Add a new route to the route table.
"""
try:
instance, eni = find_instance_and_eni_by_ip(vpc_info, router_ip)
# Only set the route if the RT is associated with any of the subnets
# used for the cluster.
rt_subnets = \
set(vpc_info['rt_subnet_lookup'].get(route_table_id, []))
cluster_node_subnets = \
set(vpc_info['cluster_node_subnets'])
if not rt_subnets or not rt_subnets.intersection(cluster_node_subnets):
logging.debug("--- skipping adding route in RT '%s' "
"%s -> %s (%s, %s) since RT's subnets (%s) are not "
"part of the cluster (%s)." %
(route_table_id, dcidr, router_ip, instance.id,
eni.id,
", ".join(rt_subnets) if rt_subnets else "none",
", ".join(cluster_node_subnets)))
return
logging.info("--- adding route in RT '%s' "
"%s -> %s (%s, %s)" %
(route_table_id, dcidr, router_ip, instance.id, eni.id))
con.create_route(route_table_id = route_table_id,
destination_cidr_block = dcidr,
instance_id = instance.id,
interface_id = eni.id)
CURRENT_STATE.routes[dcidr] = \
(router_ip, str(instance.id), str(eni.id))
_rt_state_update(route_table_id, dcidr, router_ip, instance.id, eni.id,
msg="Added route")
except Exception as e:
logging.error("*** failed to add route in RT '%s' "
"%s -> %s (%s)" %
(route_table_id, dcidr, router_ip, e.message))
_rt_state_update(route_table_id, dcidr,
msg="[ERROR add route: %s]" % e.message)
def _get_real_instance_if_mismatch(vpc_info, ipaddr, instance, eni):
"""
Return the real instance for the given IP address, if that instance is
different than the passed in instance or has a different eni.
If the ipaddr belongs to the same instance and eni that was passed in then
this returns None.
"""
# Careful! A route may be a black-hole route, which still has instance and
# ENI information for an instance that doesn't exist anymore. If a host was
# terminated and a new host got the same IP then this route won't be
# updated and will keep pointing to a non-existing node. So we find the
# instance by IP and check that the route really points to this instance.
inst_id = instance.id if instance else ""
eni_id = eni.id if eni else ""
if ipaddr:
real_instance, real_eni = \
find_instance_and_eni_by_ip(vpc_info, ipaddr)
if real_instance.id != inst_id or real_eni.id != eni_id:
return real_instance
return None
def _get_host_for_route(vpc_info, route, route_table, dcidr):
"""
Given a specific route, return information about the instance to which it
points.
Returns 3-tuple: instance-id, ipaddr, eni-id
Need to take care of scenarios where the instance isn't set anymore in the
route (the instance may have disappeared).
"""
class _CouldNotIdentifyHost(Exception):
# If we can't find both the instance as well as an eni for the route,
# we will raise this exception. In that case, we'll return None/unknown
# values, which indicate to the calling code that a new instance should
# be found.
pass
try:
# The instance_id in the route may be None. We can get this in case of
# a black-hole route.
if route.instance_id:
instance = vpc_info['instance_by_id'].get(route.instance_id)
if not instance:
logging.info("--- instance in route in RT '%s' can't "
"be found: %s -> ... (instance '%s')" %
(route_table.id, dcidr, route.instance_id))
raise _CouldNotIdentifyHost()
inst_id = instance.id
ipaddr, eni = get_instance_private_ip_from_route(instance, route)
if not eni:
logging.info("--- eni in route in RT '%s' can't "
"be found: %s -> %s (instance '%s')" %
(route_table.id, dcidr,
ipaddr if ipaddr else "(none)",
inst_id))
raise _CouldNotIdentifyHost()
eni_id = eni.id
# If route points to outdated instance, set ipaddr and eni to
# None to signal that route needs to be updated
real_instance = _get_real_instance_if_mismatch(
vpc_info, ipaddr, instance, eni)
if real_instance:
logging.info("--- obsoleted route in RT '%s' "
"%s -> %s (%s, %s) (new instance with same "
"IP address should be used: %s)" %
(route_table.id, dcidr, ipaddr, inst_id, eni_id,
real_instance.id))
# Setting the ipaddr and eni to None signals code further
# down that the route must be updated.
raise _CouldNotIdentifyHost()
else:
# This route didn't point to an instance anymore, probably
# a black hole route
logging.info("--- obsoleted route in RT '%s' "
"%s -> ... (doesn't point to instance anymore)" %
(route_table.id, dcidr))
raise _CouldNotIdentifyHost()
except _CouldNotIdentifyHost:
inst_id = eni_id = "(unknown)"
ipaddr = None
return inst_id, ipaddr, eni_id
def _is_cidr_in_ignore_routes(cidr):
"""
Checks the CIDR to see if it falls into any CIDRs specified via the
ignore_routes parameter.
This is used mostly to protect special routes to specific instances (for
example proxies, etc.), so that the vpc-router does not clean those up.
    This is only used to govern the cleanup of routes; it is not consulted
    when creating routes.
"""
for ignore_cidr in CURRENT_STATE.ignore_routes:
if is_cidr_in_cidr(cidr, ignore_cidr):
return True
return False
def _update_existing_routes(route_spec, failed_ips, questionable_ips,
vpc_info, con, routes_in_rts):
"""
Go over the existing routes and check whether they still match the spec.
If the chosen router has failed or is questionable or is not in the host
list anymore, the route needs to be updated. If the CIDR isn't in the spec
at all anymore then it needs to be deleted.
Keeps track of the routes we have seen in each RT and populates the
passed-in routes_in_rts dictionary with that info.
Returns a dict with the routers chosen for the various routes we
encountered.
"""
chosen_routers = {} # keep track of chosen routers for CIDRs
NONE_HEALTHY = "none-healthy" # used as marker in chosen_routers
for rt in vpc_info['route_tables']:
# Check whether this RT is associated with cluster node subnets. If it
# is not then we will perform NO operations on this RT. We will also
# not collect the routes from this RT.
rt_subnets = \
set(vpc_info.get('rt_subnet_lookup', {}).get(rt.id, []))
cluster_node_subnets = \
set(vpc_info.get('cluster_node_subnets', []))
if cluster_node_subnets and \
(not rt_subnets or
not rt_subnets.intersection(cluster_node_subnets)):
logging.debug("Skipping processing of RT '%s' since RT's "
"subnets (%s) are not part of cluster (%s)." %
(rt.id,
", ".join(rt_subnets) if rt_subnets else "none",
", ".join(cluster_node_subnets)))
continue
routes_in_rts[rt.id] = []
# Iterate over all the routes we find in each RT
for r in rt.routes:
dcidr = r.destination_cidr_block
if _is_cidr_in_ignore_routes(dcidr):
# A list of CIDRs may have been specified on the command line
# via the --ignore_routes option. If the destination CIDR of
# this route here is contained in any of those specified CIDRs
# then we will not touch or change this route. Often this is
# used to protect routes to special instances, such as
# proxies or NAT instances.
_rt_state_update(rt.id, dcidr,
msg="Ignored: Protected CIDR.")
continue
if r.instance_id is None and r.interface_id is None:
# There are some routes already present in the route table,
# which we don't need to mess with. Specifically, routes that
# aren't attached to a particular instance or interface.
# We skip those.
_rt_state_update(rt.id, dcidr,
msg="Ignored: Not a route to an instance")
continue
routes_in_rts[rt.id].append(dcidr) # remember we've seen the route
hosts = route_spec.get(dcidr) # eligible routers for CIDR
# Current router host for this CIDR/route.
inst_id, ipaddr, eni_id = \
_get_host_for_route(vpc_info, r, rt, dcidr)
if not hosts:
# The route isn't in the spec anymore and should be
# deleted.
logging.info("--- route not in spec, deleting in "
"RT '%s': %s -> ... (%s, %s)" %
(rt.id, dcidr, inst_id, eni_id))
con.delete_route(route_table_id = rt.id,
destination_cidr_block = dcidr)
if dcidr in CURRENT_STATE.routes:
del CURRENT_STATE.routes[dcidr]
continue
# We have a route and it's still in the spec. Do we need to update
# the router? Multiple reasons for that:
# - Router is not in the list of eligible hosts anymore
# - Router has failed or is questionable
# - In a different route table we used a different host as the
# router.
            # Seen this route in another RT before? This will be None if we've
            # not seen it before, and NONE_HEALTHY if we saw it before but
            # couldn't find a single healthy eligible router host for it.
stored_router_ip = chosen_routers.get(dcidr)
# Has our current router failed or is questionable?
ipaddr_should_be_replaced = ipaddr in failed_ips or \
ipaddr in questionable_ips
# Is the host not eligible anymore?
ipaddr_not_eligible = ipaddr not in hosts
shouldnt_use_ipaddr = \
ipaddr_should_be_replaced or ipaddr_not_eligible
if (not stored_router_ip or stored_router_ip == ipaddr) and \
not shouldnt_use_ipaddr:
# Haven't seen it before, or points to same router AND
# router is healthy: All good
if not stored_router_ip:
# Remember this IP as a suitable router for CIDR
chosen_routers[dcidr] = ipaddr
logging.info("--- route exists already in RT '%s': "
"%s -> %s (%s, %s)" %
(rt.id, dcidr,
ipaddr, inst_id, eni_id))
CURRENT_STATE.routes[dcidr] = (ipaddr, inst_id, eni_id)
_rt_state_update(rt.id, dcidr, ipaddr, inst_id, eni_id,
msg="Current: Route exist and up to date")
continue
if stored_router_ip == NONE_HEALTHY:
# We've tried to set a route for this before, but
# couldn't find any healthy hosts. Can't do anything and
# need to skip.
CURRENT_STATE.routes[dcidr] = (ipaddr, inst_id, eni_id)
_rt_state_update(rt.id, dcidr, ipaddr, inst_id, eni_id,
msg="None healthy, black hole: "
"Determined earlier")
continue
if stored_router_ip:
# Just use the router we've seen before. We know that'll work,
# because only healthy hosts make it into the chosen_routers
# dict.
new_router_ip = stored_router_ip
update_reason = "other RT used different IP"
else:
# Haven't seen this route in another RT, so we'll
# choose a new router
new_router_ip = _choose_different_host(ipaddr, hosts,
failed_ips,
questionable_ips)
if new_router_ip is None:
# Couldn't find healthy host to be router, forced
# to skip this one.
CURRENT_STATE.routes[dcidr] = (ipaddr, inst_id, eni_id)
chosen_routers[dcidr] = NONE_HEALTHY
logging.warning("--- cannot find available target "
"for route update %s! "
"Nothing I can do..." % (dcidr))
continue
chosen_routers[dcidr] = new_router_ip
update_reason = "old IP failed/questionable or " \
"not eligible anymore"
_update_route(dcidr, new_router_ip, ipaddr,
vpc_info, con, rt.id, update_reason)
return chosen_routers
def _add_missing_routes(route_spec, failed_ips, questionable_ips,
chosen_routers, vpc_info, con, routes_in_rts):
"""
Iterate over route spec and add all the routes we haven't set yet.
This relies on being told what routes we HAVE already. This is passed
in via the routes_in_rts dict.
Furthermore, some routes may be set in some RTs, but not in others. In that
case, we may already have seen which router was chosen for a certain route.
This information is passed in via the chosen_routers dict. We should choose
routers that were used before.
"""
for dcidr, hosts in route_spec.items():
new_router_ip = chosen_routers.get(dcidr)
# Look at the routes we have seen in each of the route tables.
for rt_id, dcidr_list in routes_in_rts.items():
if dcidr not in dcidr_list:
if not new_router_ip:
# We haven't chosen a target host for this CIDR.
new_router_ip = _choose_different_host(None, hosts,
failed_ips,
questionable_ips)
if not new_router_ip:
logging.warning("--- cannot find available target "
"for route addition %s! "
"Nothing I can do..." % (dcidr))
# Skipping the check on any further RT, breaking out to
# outer most loop over route spec
break
_add_new_route(dcidr, new_router_ip, vpc_info, con, rt_id)
def process_route_spec_config(con, vpc_info, route_spec,
failed_ips, questionable_ips):
"""
Look through the route spec and update routes accordingly.
Idea: Make sure we have a route for each CIDR.
If we have a route to any of the IP addresses for a given CIDR then we are
good. Otherwise, pick one (usually the first) IP and create a route to that
IP.
If a route points at a failed or questionable IP then a new candidate is
chosen, if possible.
"""
if CURRENT_STATE._stop_all:
logging.debug("Routespec processing. Stop requested, abort operation")
return
if failed_ips:
logging.debug("Route spec processing. Failed IPs: %s" %
",".join(failed_ips))
else:
logging.debug("Route spec processing. No failed IPs.")
# Iterate over all the routes in the VPC, check they are contained in
# the spec, update the routes as needed.
# Need to remember the routes we saw in different RTs, so that we can later
# add them, if needed.
routes_in_rts = {}
CURRENT_STATE.vpc_state.setdefault("time",
datetime.datetime.now().isoformat())
    # Passed through the functions and filled in, this state accumulates
    # information about all the routes we encountered in the VPC and what we
    # are doing with them. This is then available in the CURRENT_STATE.
chosen_routers = _update_existing_routes(route_spec,
failed_ips, questionable_ips,
vpc_info, con, routes_in_rts)
# Now go over all the routes in the spec and add those that aren't in VPC,
# yet.
_add_missing_routes(route_spec, failed_ips, questionable_ips,
chosen_routers,
vpc_info, con, routes_in_rts)
def make_cluster_node_subnet_list(vpc_info, route_spec):
"""
Create a list of subnets, which are associated with nodes in the current
route spec (cluster).
This is needed when later on we set routes, but only want to set routes in
those route tables, which are associated with a subnet that in turn is
associated with a cluster node (to avoid having to set the routes in all
RTs). We can check whether the RT's subnet is in this list here.
Returns list of subnet IDs.
"""
# VPC-info already contains a lookup between the EC2 instances of the VPC
# and their subnets ('ip_subnet_lookup'). This may also contain some
# non-cluster nodes, but we can use this to look up the subnet for each IP
# address of cluster nodes.
subnets = set()
for cidr_ignore, hosts in route_spec.items():
for host_addr in hosts:
subnet_id = vpc_info['ip_subnet_lookup'].get(host_addr)
if not subnet_id:
logging.warning("Did not find subnet associated with "
"host address '%s'!" % host_addr)
else:
subnets.add(subnet_id)
return list(subnets)
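# Hypothetical sketch (not part of vpc-router) showing the lookup performed
# by make_cluster_node_subnet_list(); subnet IDs and addresses are made up.
def _example_make_cluster_node_subnet_list():
    vpc_info = {'ip_subnet_lookup': {"10.0.1.5": "subnet-1",
                                     "10.0.2.5": "subnet-2",
                                     "10.0.3.5": "subnet-3"}}
    route_spec = {"10.55.0.0/16": ["10.0.1.5", "10.0.2.5"]}
    # Only the subnets of hosts that appear in the route spec are returned.
    assert sorted(make_cluster_node_subnet_list(vpc_info, route_spec)) == \
        ["subnet-1", "subnet-2"]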
def handle_spec(region_name, vpc_id, route_spec, failed_ips, questionable_ips):
"""
Connect to region and update routes according to route spec.
"""
if CURRENT_STATE._stop_all:
logging.debug("handle_spec: Stop requested, abort operation")
return
if not route_spec:
logging.debug("handle_spec: No route spec provided")
return
logging.debug("Handle route spec")
try:
con = connect_to_region(region_name)
vpc_info = get_vpc_overview(con, vpc_id, region_name)
vpc_info['cluster_node_subnets'] = \
make_cluster_node_subnet_list(vpc_info, route_spec)
process_route_spec_config(con, vpc_info, route_spec,
failed_ips, questionable_ips)
con.close()
except boto.exception.StandardError as e:
logging.warning("vpc-router could not set route: %s - %s" %
(e.message, e.args))
raise
except boto.exception.NoAuthHandlerFound:
logging.error("vpc-router could not authenticate")
|
|
# Copyright (C) 2003, 2004 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.set
# for convenience
S = dns.set.Set
class SimpleSetTestCase(unittest.TestCase):
def testLen1(self):
s1 = S()
self.failUnless(len(s1) == 0)
def testLen2(self):
s1 = S([1, 2, 3])
self.failUnless(len(s1) == 3)
def testLen3(self):
s1 = S([1, 2, 3, 3, 3])
self.failUnless(len(s1) == 3)
def testUnion1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion3(self):
s1 = S([1, 2, 3])
s2 = S([3, 4])
e = S([1, 2, 3, 4])
self.failUnless(s1 | s2 == e)
def testIntersection1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection2(self):
s1 = S([0, 1, 2, 3])
s2 = S([1, 2, 3, 4])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection3(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([])
self.failUnless(s1 & s2 == e)
def testIntersection4(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([])
self.failUnless(s1 & s2 == e)
def testDifference1(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference3(self):
s1 = S([1, 2, 3])
s2 = S([3, 2])
e = S([1])
self.failUnless(s1 - s2 == e)
def testDifference4(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
e = S([])
self.failUnless(s1 - s2 == e)
def testSubset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issubset(s2))
def testSubset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issubset(s1))
def testSubset3(self):
s1 = S([])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset4(self):
s1 = S([1])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issubset(s2))
def testSubset6(self):
s1 = S([1, 4])
s2 = S([1, 2, 3])
self.failUnless(not s1.issubset(s2))
def testSuperset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issuperset(s2))
def testSuperset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issuperset(s1))
def testSuperset3(self):
s1 = S([1, 2, 3])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset4(self):
s1 = S([1, 2, 3])
s2 = S([1])
self.failUnless(s1.issuperset(s2))
def testSuperset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset6(self):
s1 = S([1, 2, 3])
s2 = S([1, 4])
self.failUnless(not s1.issuperset(s2))
def testUpdate1(self):
s1 = S([1, 2, 3])
u = (4, 5, 6)
e = S([1, 2, 3, 4, 5, 6])
s1.update(u)
self.failUnless(s1 == e)
def testUpdate2(self):
s1 = S([1, 2, 3])
u = []
e = S([1, 2, 3])
s1.update(u)
self.failUnless(s1 == e)
def testGetitem(self):
s1 = S([1, 2, 3])
i0 = s1[0]
i1 = s1[1]
i2 = s1[2]
s2 = S([i0, i1, i2])
self.failUnless(s1 == s2)
def testGetslice(self):
s1 = S([1, 2, 3])
slice = s1[0:2]
self.failUnless(len(slice) == 2)
item = s1[2]
slice.append(item)
s2 = S(slice)
self.failUnless(s1 == s2)
def testDelitem(self):
s1 = S([1, 2, 3])
del s1[0]
i1 = s1[0]
i2 = s1[1]
self.failUnless(i1 != i2)
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
self.failUnless(i2 == 1 or i2 == 2 or i2 == 3)
def testDelslice(self):
s1 = S([1, 2, 3])
del s1[0:2]
i1 = s1[0]
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
if __name__ == '__main__':
unittest.main()
|
|
import ast
from os import path
from copy import deepcopy
from operator import lt, gt
from itertools import takewhile
import fnmatch
from .code_util import DocStringInheritor
__all__ = ['keywords', 'EvaluateQueries', 'eval_queries', 'queries_syntax_check', 'queries_preprocess']
keywords = [
'and',
'or',
'not in',
'not',
'only',
'endpoints_in',
'both_endpoints_in',
'anterior_of',
'posterior_of',
'medial_of',
'lateral_of',
'inferior_of',
'superior_of',
]
class FiberQueryInfo(object):
r"""
Information about a processed query
    Attributes
    ----------
    tracts : set
        set of tract indices resulting from the query
    labels : set
        set of labels resulting from the query
tracts_endpoints : (set, set)
sets of labels of where the tract endpoints are
"""
def __init__(self, tracts=None, labels=None, tracts_endpoints=None):
if tracts is None:
tracts = set()
if labels is None:
labels = set()
if tracts_endpoints is None:
tracts_endpoints = (set(), set())
self.tracts = tracts
self.labels = labels
self.tracts_endpoints = tracts_endpoints
def __getattribute__(self, name):
if name in (
'update', 'intersection_update', 'union', 'intersection',
'difference', 'difference_update'
):
return self.set_operation(name)
else:
return object.__getattribute__(self, name)
def copy(self):
return FiberQueryInfo(
self.tracts.copy(), self.labels.copy(),
(self.tracts_endpoints[0].copy(), self.tracts_endpoints[1].copy()),
# (self.labels_endpoints[0].copy(), self.labels_endpoints[1].copy()),
)
def set_operation(self, name):
def operation(tract_query_info):
tracts_op = getattr(self.tracts, name)
if name == 'intersection':
name_labels = 'union'
elif name == 'intersection_update':
name_labels = 'update'
else:
name_labels = name
labels_op = getattr(self.labels, name_labels)
new_tracts = tracts_op(tract_query_info.tracts)
new_labels = labels_op(tract_query_info.labels)
new_tracts_endpoints = (
getattr(self.tracts_endpoints[0], name)(tract_query_info.tracts_endpoints[0]),
getattr(self.tracts_endpoints[1], name)(tract_query_info.tracts_endpoints[1])
)
# new_labels_endpoints = (
# getattr(self.labels_endpoints[0], name_labels)(tract_query_info.labels_endpoints[0]),
# getattr(self.labels_endpoints[1], name_labels)(tract_query_info.labels_endpoints[1])
# )
if name.endswith('update'):
return self
else:
return FiberQueryInfo(
new_tracts, new_labels,
new_tracts_endpoints,
)
return operation
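# Editorial sketch (not part of the original module): thanks to the
# __getattribute__ hook above, FiberQueryInfo objects can be combined with the
# usual set-operation names, merging tracts and labels together. The values
# below are made up purely for illustration.
def _example_fiber_query_info_union():
    a = FiberQueryInfo(tracts=set([1, 2]), labels=set([10]))
    b = FiberQueryInfo(tracts=set([2, 3]), labels=set([11]))
    c = a.union(b)
    # the union keeps every tract and every label seen by either query
    assert c.tracts == set([1, 2, 3])
    assert c.labels == set([10, 11])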
class EndpointQueryInfo:
def __init__(
self,
endpoint_tracts=None,
endpoint_labels=None,
endpoint_points=None,
):
if endpoint_tracts is None:
endpoint_tracts = (set(), set())
if endpoint_labels is None:
endpoint_labels = (set(), set())
if endpoint_points is None:
endpoint_points = (set(), set())
self.endpoint_tracts = endpoint_tracts
self.endpoint_labels = endpoint_labels
self.endpoint_points = endpoint_points
def __getattribute__(self, name):
if name in (
'update', 'intersection_update', 'union', 'intersection',
'difference', 'difference_update'
):
return self.set_operation(name)
else:
return object.__getattribute__(self, name)
def set_operation(self, name):
def operation(endpoint_query_info):
tracts_op = (
getattr(self.endpoint_tracts[0], name),
getattr(self.endpoint_tracts[1], name)
)
labels_op = (
getattr(self.endpoint_labels[0], name),
getattr(self.endpoint_labels[1], name)
)
points_op = (
getattr(self.endpoint_points[0], name),
getattr(self.endpoint_points[1], name)
)
new_tracts = (
tracts_op[0](endpoint_query_info.endpoint_tracts[0]),
tracts_op[1](endpoint_query_info.endpoint_tracts[1])
)
new_labels = (
labels_op[0](endpoint_query_info.endpoint_labels[0]),
labels_op[1](endpoint_query_info.endpoint_labels[1])
)
new_points = (
points_op[0](endpoint_query_info.endpoint_points[0]),
points_op[1](endpoint_query_info.endpoint_points[1])
)
if name.endswith('update'):
return self
else:
return EndpointQueryInfo(new_tracts, new_labels, new_points)
return operation
class EvaluateQueries(ast.NodeVisitor):
r"""
This class implements the parser to process
White Matter Query Language modules. By inheriting from
:py:mod:`ast.NodeVisitor` it uses a syntax close to the
python language.
Every node expression visitor has the following signature
Parameters
----------
node : ast.Node
Returns
-------
tracts : set
        numbers of the tracts that result from this
query
labels : set
numbers of the labels that are traversed by
the tracts resulting from this query
"""
__metaclass__ = DocStringInheritor
relative_terms = [
'anterior_of',
'posterior_of',
'medial_of',
'lateral_of',
'inferior_of',
'superior_of'
]
def __init__(
self,
tractography_spatial_indexing,
):
self.tractography_spatial_indexing = tractography_spatial_indexing
self.evaluated_queries_info = {}
self.queries_to_save = set()
self.evaluating_endpoints = False
def visit_Module(self, node):
for line in node.body:
self.visit(line)
def visit_Compare(self, node):
if any(not isinstance(op, ast.NotIn) for op in node.ops):
raise TractQuerierSyntaxError(
"Invalid syntax in query line %d" % node.lineno
)
query_info = self.visit(node.left).copy()
for value in node.comparators:
query_info_ = self.visit(value)
query_info.difference_update(query_info_)
return query_info
def visit_BoolOp(self, node):
query_info = self.visit(node.values[0])
query_info = query_info.copy()
if isinstance(node.op, ast.Or):
for value in node.values[1:]:
query_info_ = self.visit(value)
query_info.update(query_info_)
elif isinstance(node.op, ast.And):
for value in node.values[1:]:
query_info_ = self.visit(value)
query_info.intersection_update(query_info_)
else:
return self.generic_visit(node)
return query_info
def visit_BinOp(self, node):
info_left = self.visit(node.left)
info_right = self.visit(node.right)
if isinstance(node.op, ast.Add):
return info_left.union(info_right)
if isinstance(node.op, ast.Mult):
return info_left.intersection(info_right)
if isinstance(node.op, ast.Sub):
return (
info_left.difference(info_right)
)
else:
return self.generic_visit(node)
def visit_UnaryOp(self, node):
query_info = self.visit(node.operand)
if isinstance(node.op, ast.Invert):
return FiberQueryInfo(
set(
tract for tract in query_info.tracts
if self.tractography_spatial_indexing.crossing_tracts_labels[tract].issubset(query_info.labels)
),
query_info.labels
)
elif isinstance(node.op, ast.UAdd):
return query_info
elif isinstance(node.op, ast.USub) or isinstance(node.op, ast.Not):
all_labels = set(self.tractography_spatial_indexing.crossing_labels_tracts.keys())
all_labels.difference_update(query_info.labels)
all_tracts = set().union(*tuple(
(self.tractography_spatial_indexing.crossing_labels_tracts[label] for label in all_labels)
))
new_info = FiberQueryInfo(all_tracts, all_labels)
return new_info
else:
raise TractQuerierSyntaxError(
"Syntax error in query line %d" % node.lineno)
def visit_Str(self, node):
query_info = FiberQueryInfo()
for name in fnmatch.filter(self.evaluated_queries_info.keys(), node.s):
query_info.update(self.evaluated_queries_info[name])
return query_info
def visit_Call(self, node):
# Single string argument function
if (
isinstance(node.func, ast.Name) and
            len(node.args) == 1 and
node.starargs is None and
node.keywords == [] and
node.kwargs is None
):
if (node.func.id.lower() == 'only'):
query_info = self.visit(node.args[0])
only_tracts = set(
tract for tract in query_info.tracts
if self.tractography_spatial_indexing.crossing_tracts_labels[tract].issubset(query_info.labels)
)
only_endpoints = tuple((
set(
tract for tract in query_info.tracts_endpoints[i]
if self.tractography_spatial_indexing.ending_tracts_labels[i][tract] in query_info.labels
)
for i in (0, 1)
))
return FiberQueryInfo(
only_tracts,
query_info.labels,
only_endpoints
)
elif (node.func.id.lower() == 'endpoints_in'):
query_info = self.visit(node.args[0])
new_tracts = query_info.tracts_endpoints[0].union(query_info.tracts_endpoints[1])
# tracts = set().union(set(
# tract for tract in query_info.tracts
# if (
# self.tractography_spatial_indexing.ending_tracts_labels[i][tract] in query_info.labels
# )
#))
# labels = set().union(
# *tuple((self.tractography_spatial_indexing.crossing_tracts_labels[tract] for tract in tracts))
#)
return FiberQueryInfo(new_tracts, query_info.labels, query_info.tracts_endpoints)
elif (node.func.id.lower() == 'both_endpoints_in'):
query_info = self.visit(node.args[0])
new_tracts = (
query_info.tracts_endpoints[0].intersection(query_info.tracts_endpoints[1])
)
return FiberQueryInfo(new_tracts, query_info.labels, query_info.tracts_endpoints)
            elif (node.func.id.lower() == 'save' and isinstance(node.args[0], ast.Str)):
self.queries_to_save.add(node.args[0].s)
return
elif node.func.id.lower() in self.relative_terms:
return self.process_relative_term(node)
raise TractQuerierSyntaxError("Invalid query in line %d" % node.lineno)
def process_relative_term(self, node):
r"""
Processes the relative terms
* anterior_of
* posterior_of
* superior_of
* inferior_of
* medial_of
* lateral_of
Parameters
----------
node : :py:class:`ast.Node`
Parsed tree
Returns
-------
tracts, labels
tracts : set
            Numbers of the tracts that result from this
query
labels : set
Numbers of the labels that are traversed by
the tracts resulting from this query
"""
if len(self.tractography_spatial_indexing.label_bounding_boxes) == 0:
return FiberQueryInfo()
arg = node.args[0]
if isinstance(arg, ast.Name):
query_info = self.visit(arg)
elif isinstance(arg, ast.Attribute):
if arg.attr.lower() in ('left', 'right'):
side = arg.attr.lower()
query_info = self.visit(arg)
else:
raise TractQuerierSyntaxError(
"Attribute not recognized for relative specification."
"Line %d" % node.lineno
)
labels = query_info.labels
labels_generator = (l for l in labels)
bounding_box = self.tractography_spatial_indexing.label_bounding_boxes[labels_generator.next()]
for label in labels_generator:
bounding_box = bounding_box.union(self.tractography_spatial_indexing.label_bounding_boxes[label])
function_name = node.func.id.lower()
name = function_name.replace('_of', '')
if (
name in ('anterior', 'inferior') or
name == 'medial' and side == 'left' or
name == 'lateral' and side == 'right'
):
operator = gt
else:
operator = lt
if name == 'medial':
if side == 'left':
name = 'right'
else:
name = 'left'
elif name == 'lateral':
if side == 'left':
name = 'left'
else:
name = 'right'
tract_bounding_box_coordinate =\
self.tractography_spatial_indexing.tract_bounding_boxes[name]
tract_endpoints_pos = self.tractography_spatial_indexing.tract_endpoints_pos
bounding_box_coordinate = getattr(bounding_box, name)
if name in ('left', 'right'):
column = 0
elif name in ('anterior', 'posterior'):
column = 1
elif name in ('superior', 'inferior'):
column = 2
tracts = set(
operator(tract_bounding_box_coordinate, bounding_box_coordinate).nonzero()[0]
)
endpoints = tuple((
set(
operator(
tract_endpoints_pos[:, i, column],
bounding_box_coordinate
).nonzero()[0]
)
for i in (0, 1)
))
labels = set().union(*tuple((
self.tractography_spatial_indexing.crossing_tracts_labels[tract]
for tract in tracts
)))
return FiberQueryInfo(tracts, labels, endpoints)
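    # Editorial note (illustrative only): in the method above, a hypothetical
    # query such as 'frontal_tracts = anterior_of(14)' first unions the
    # bounding boxes of every label matched by the argument, then keeps the
    # tracts (and the per-endpoint sets) whose stored bounding-box coordinate
    # compares, via gt/lt, against that reference coordinate along the axis
    # selected by the relative term.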
def visit_Assign(self, node):
if len(node.targets) > 1:
raise TractQuerierSyntaxError(
"Invalid assignment in line %d" % node.lineno)
queries_to_evaluate = self.process_assignment(node)
for query_name, value_node in queries_to_evaluate.items():
self.queries_to_save.add(query_name)
self.evaluated_queries_info[query_name] = self.visit(value_node)
def visit_AugAssign(self, node):
if not isinstance(node.op, ast.BitOr):
raise TractQuerierSyntaxError(
"Invalid assignment in line %d" % node.lineno)
queries_to_evaluate = self.process_assignment(node)
for query_name, value_node in queries_to_evaluate.items():
query_info = self.visit(value_node)
self.evaluated_queries_info[query_name] = query_info
def process_assignment(self, node):
r"""
Processes the assignment operations
Parameters
----------
node : :py:class:`ast.Node`
Parsed tree
Returns
-------
queries_to_evaluate: dict
            A dictionary of pairs '<name of the query>': <node to evaluate>
"""
queries_to_evaluate = {}
if 'target' in node._fields:
target = node.target
if 'targets' in node._fields:
target = node.targets[0]
if isinstance(target, ast.Name):
queries_to_evaluate[target.id] = node.value
elif (
isinstance(target, ast.Attribute) and
target.attr == 'side'
):
node_left, node_right = self.rewrite_side_query(node)
self.visit(node_left)
self.visit(node_right)
elif (
isinstance(target, ast.Attribute) and
isinstance(target.value, ast.Name)
):
queries_to_evaluate[
target.value.id.lower() + '.' + target.attr.lower()] = node.value
else:
raise TractQuerierSyntaxError(
"Invalid assignment in line %d" % node.lineno)
return queries_to_evaluate
def rewrite_side_query(self, node):
r"""
Processes the side suffixes in a query
Parameters
----------
node : :py:class:`ast.Node`
Parsed tree
Returns
-------
node_left, node_right: nodes
two AST nodes, one for the query instantiated on the left hemisphere
one for the query instantiated on the right hemisphere
"""
node_left = deepcopy(node)
node_right = deepcopy(node)
for node_ in ast.walk(node_left):
if isinstance(node_, ast.Attribute):
if node_.attr == 'side':
node_.attr = 'left'
elif node_.attr == 'opposite':
node_.attr = 'right'
for node_ in ast.walk(node_right):
if isinstance(node_, ast.Attribute):
if node_.attr == 'side':
node_.attr = 'right'
elif node_.attr == 'opposite':
node_.attr = 'left'
return node_left, node_right
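    # Editorial sketch of the rewrite above: a hypothetical source line such as
    #     uncinate.side = temporal.side and insula.opposite
    # is duplicated into
    #     uncinate.left = temporal.left and insula.right
    # for the left hemisphere and
    #     uncinate.right = temporal.right and insula.left
    # for the right hemisphere; both trees are then visited normally.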
def visit_Name(self, node):
if node.id in self.evaluated_queries_info:
return self.evaluated_queries_info[node.id]
else:
raise TractQuerierSyntaxError(
"Invalid query name in line %d: %s" % (node.lineno, node.id))
def visit_Attribute(self, node):
if not isinstance(node.value, ast.Name):
raise TractQuerierSyntaxError(
"Invalid query in line %d: %s" % node.lineno)
query_name = node.value.id + '.' + node.attr
if query_name in self.evaluated_queries_info:
return self.evaluated_queries_info[query_name]
else:
raise TractQuerierSyntaxError(
"Invalid query name in line %d: %s" % (node.lineno, query_name))
def visit_Num(self, node):
if node.n in self.tractography_spatial_indexing.crossing_labels_tracts:
tracts = self.tractography_spatial_indexing.crossing_labels_tracts[node.n]
else:
tracts = set()
endpoints = (set(), set())
for i in (0, 1):
elt = self.tractography_spatial_indexing.ending_labels_tracts[i]
if node.n in elt:
endpoints[i].update(elt[node.n])
labelset = set((node.n,))
tract_info = FiberQueryInfo(
tracts, labelset,
endpoints
)
return tract_info
def visit_Expr(self, node):
if isinstance(node.value, ast.Name):
if node.value.id in self.evaluated_queries_info.keys():
self.queries_to_save.add(node.value.id)
else:
raise TractQuerierSyntaxError(
"Query %s not known line: %d" % (node.value.id, node.lineno))
elif isinstance(node.value, ast.Module):
self.visit(node.value)
else:
raise TractQuerierSyntaxError(
"Invalid expression at line: %d" % (node.lineno))
def generic_visit(self, node):
raise TractQuerierSyntaxError(
"Invalid Operation %s line: %d" % (type(node), node.lineno))
def visit_For(self, node):
id_to_replace = node.target.id.lower()
iter_ = node.iter
if isinstance(iter_, ast.Str):
list_items = fnmatch.filter(
self.evaluated_queries_info.keys(), iter_.s.lower())
elif isinstance(iter_, ast.List):
list_items = []
for item in iter_.elts:
if isinstance(item, ast.Name):
list_items.append(item.id.lower())
else:
raise TractQuerierSyntaxError(
'Error in FOR statement in line %d, elements in the list must be query names' % node.lineno)
original_body = ast.Module(body=node.body)
for item in list_items:
aux_body = deepcopy(original_body)
for node_ in ast.walk(aux_body):
if isinstance(node_, ast.Name) and node_.id.lower() == id_to_replace:
node_.id = item
self.visit(aux_body)
class TractQuerierSyntaxError(ValueError):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RewriteChangeNotInPrescedence(ast.NodeTransformer):
def visit_BoolOp(self, node):
predicate = lambda value: not (
isinstance(value, ast.Compare) and
isinstance(value.ops[0], ast.NotIn)
)
values_which_are_not_in_op = [value for value in takewhile(
predicate,
node.values[1:]
)]
if (len(values_which_are_not_in_op) == len(node.values) - 1):
return node
old_CompareNode = node.values[len(values_which_are_not_in_op) + 1]
new_CompareNodeLeft = ast.copy_location(
ast.BoolOp(
op=node.op,
values=(
[node.values[0]] +
values_which_are_not_in_op +
[old_CompareNode.left]
)
),
node
)
new_CompareNode = ast.copy_location(
ast.Compare(
left=new_CompareNodeLeft,
ops=old_CompareNode.ops,
comparators=old_CompareNode.comparators
),
node
)
rest_of_the_values = node.values[len(values_which_are_not_in_op) + 2:]
if len(rest_of_the_values) == 0:
return self.visit(new_CompareNode)
else:
return self.visit(ast.copy_location(
ast.BoolOp(
op=node.op,
values=(
[new_CompareNode] +
rest_of_the_values
)
),
node
))
class RewritePreprocess(ast.NodeTransformer):
def __init__(self, *args, **kwargs):
if 'include_folders' in kwargs:
self.include_folders = kwargs['include_folders']
kwargs['include_folders'] = None
del kwargs['include_folders']
else:
self.include_folders = ['.']
super(RewritePreprocess, self).__init__(*args, **kwargs)
def visit_Attribute(self, node):
return ast.copy_location(
ast.Attribute(
value=self.visit(node.value),
attr=node.attr.lower()
),
node
)
def visit_Name(self, node):
return ast.copy_location(
ast.Name(id=node.id.lower()),
node
)
def visit_Str(self, node):
return ast.copy_location(
ast.Str(s=node.s.lower()),
node
)
def visit_Import(self, node):
try:
module_names = []
for module_name in node.names:
file_name = module_name.name
found = False
for folder in self.include_folders:
file_ = path.join(folder, file_name)
if path.exists(file_) and path.isfile(file_):
module_names.append(file_)
found = True
break
if not found:
raise TractQuerierSyntaxError(
'Imported file not found: %s' % file_name
)
imported_modules = [
ast.parse(file(module_name).read(), filename=module_name)
for module_name in module_names
]
except SyntaxError:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
formatted_lines = traceback.format_exc().splitlines()
raise TractQuerierSyntaxError(
'syntax error in line %s line %d: \n%s\n%s' %
(
module_name,
exc_value[1][1],
formatted_lines[-3],
formatted_lines[-2]
)
)
new_node = ast.Module(imported_modules)
return ast.copy_location(
self.visit(new_node),
node
)
def queries_preprocess(query_file, filename='<unknown>', include_folders=[]):
try:
        query_file_module = ast.parse(query_file, filename=filename)
except SyntaxError:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
formatted_lines = traceback.format_exc().splitlines()
raise TractQuerierSyntaxError(
'syntax error in line %s line %d: \n%s\n%s' %
(
filename,
exc_value[1][1],
formatted_lines[-3],
formatted_lines[-2]
)
)
rewrite_preprocess = RewritePreprocess(include_folders=include_folders)
rewrite_precedence_not_in = RewriteChangeNotInPrescedence()
preprocessed_module = rewrite_precedence_not_in.visit(
rewrite_preprocess.visit(query_file_module)
)
return preprocessed_module.body
def eval_queries(
query_file_body,
tractography_spatial_indexing
):
eq = EvaluateQueries(tractography_spatial_indexing)
if isinstance(query_file_body, list):
eq.visit(ast.Module(query_file_body))
else:
eq.visit(query_file_body)
return dict([(key, eq.evaluated_queries_info[key].tracts) for key in eq.queries_to_save])
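# Editorial usage sketch (query text and variable names are hypothetical):
#     body = queries_preprocess('cst.side = 16 and 8\n', filename='example.qry')
#     queries_syntax_check(body)
#     tracts_per_query = eval_queries(body, tractography_spatial_indexing)
# eval_queries returns a dict mapping each saved query name to its set of
# tract indices.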
def queries_syntax_check(query_file_body):
class DummySpatialIndexing:
def __init__(self):
self.crossing_tracts_labels = {}
self.crossing_labels_tracts = {}
self.ending_tracts_labels = ({}, {})
self.ending_labels_tracts = ({}, {})
self.label_bounding_boxes = {}
self.tract_bounding_boxes = {}
eval_queries(query_file_body, DummySpatialIndexing())
def labels_for_tracts(crossing_tracts_labels):
crossing_labels_tracts = {}
for i, f in crossing_tracts_labels.items():
for l in f:
if l in crossing_labels_tracts:
crossing_labels_tracts[l].add(i)
else:
crossing_labels_tracts[l] = set((i,))
return crossing_labels_tracts
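# Editorial sketch: labels_for_tracts inverts a tract -> labels mapping into a
# label -> tracts mapping; the numbers below are made up for illustration.
def _example_labels_for_tracts():
    crossing_tracts_labels = {0: set([10, 11]), 1: set([11])}
    inverted = labels_for_tracts(crossing_tracts_labels)
    assert inverted == {10: set([0]), 11: set([0, 1])}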
|
|
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
multi_class='ovr', fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=None, max_iter=1000):
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss
)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
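# Editorial usage sketch for the LinearSVC class above (mirrors the doctest
# Examples of the libsvm-based classes below; the data is made up and the
# exact repr of the fitted estimator is deliberately omitted):
#     >>> import numpy as np
#     >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
#     >>> y = np.array([1, 1, 2, 2])
#     >>> clf = LinearSVC().fit(X, y)
#     >>> print(clf.predict([[-0.8, -1]]))
#     [1]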
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the epsilon-insensitive loss
(standard SVR) while 'l2' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0, loss='l1', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0, random_state=None,
max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
loss = {'l1': 'ei', 'l2': 'se'}.get(self.loss)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, 'l2', self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Index of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
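# Editorial usage sketch for OneClassSVM (the data is made up for
# illustration; +1 marks inliers and -1 marks outliers):
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(100, 2)
#     >>> detector = OneClassSVM(nu=0.1).fit(X)
#     >>> labels = detector.predict(X)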
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Miscellaneous utility functions."""
import contextlib
import datetime
import errno
import json
import os
import shutil
import subprocess
import sys
import tempfile
import time
# UTC datetime corresponding to zero Unix timestamp.
EPOCH = datetime.datetime.utcfromtimestamp(0)
def parse_rfc3339_epoch(value):
"""Parses RFC 3339 datetime string as epoch
(as used in Timestamp proto JSON encoding).
Keeps only second precision (dropping micro- and nanoseconds).
Examples of the input:
2017-08-17T04:21:32.722952943Z
1972-01-01T10:00:20.021-05:00
Returns:
epoch timestamp
Raises:
ValueError on errors.
"""
# Adapted from protobuf/internal/well_known_types.py Timestamp.FromJsonString.
# We can't use the original, since it's marked as internal. Also instantiating
  # proto messages here to parse a string would be odd.
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ValueError('Failed to parse timestamp: missing valid timezone offset')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.datetime.strptime(second_value, '%Y-%m-%dT%H:%M:%S')
td = date_object - EPOCH
seconds = td.seconds + td.days * 86400
if len(nano_value) > 9:
raise ValueError(
'Failed to parse timestamp: nanos %r more than 9 fractional digits'
% nano_value)
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ValueError('Failed to parse timestamp: invalid trailing data %r'
% value)
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ValueError('Invalid timezone offset value: %r' % timezone)
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
return seconds
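# Editorial sketch (not part of the original module): how the two docstring
# inputs above map to integer epochs once fractional seconds are dropped.
def _example_parse_rfc3339_epoch():
  # 2017-08-17T04:21:32 UTC is 1502943692 seconds after the Unix epoch.
  assert parse_rfc3339_epoch('2017-08-17T04:21:32.722952943Z') == 1502943692
  # The -05:00 offset is added back, so local 10:00:20 becomes 15:00:20 UTC.
  assert parse_rfc3339_epoch('1972-01-01T10:00:20.021-05:00') == 63126020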
def read_json_as_utf8(filename=None, text=None):
"""Read and deserialize a json file or string.
This function is different from json.load and json.loads in that it
returns utf8-encoded string for keys and values instead of unicode.
Args:
filename (str): path of a file to parse
text (str): json string to parse
``filename`` and ``text`` are mutually exclusive. ValueError is raised if
both are provided.
"""
if filename is not None and text is not None:
raise ValueError('Only one of "filename" and "text" can be provided at '
'the same time')
if filename is None and text is None:
raise ValueError('One of "filename" and "text" must be provided')
def to_utf8(obj):
if isinstance(obj, dict):
return {to_utf8(key): to_utf8(value) for key, value in obj.iteritems()}
if isinstance(obj, list):
return [to_utf8(item) for item in obj]
if isinstance(obj, unicode):
return obj.encode('utf-8')
return obj
if filename:
with open(filename, 'rb') as f:
obj = json.load(f)
else:
obj = json.loads(text)
return to_utf8(obj)
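# Editorial sketch: unlike plain json.loads, the helper above returns
# utf-8 encoded str objects (not unicode) for both keys and values.
def _example_read_json_as_utf8():
  parsed = read_json_as_utf8(text='{"name": "value"}')
  assert parsed == {'name': 'value'}
  assert isinstance(parsed.keys()[0], str)
  assert isinstance(parsed['name'], str)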
# TODO(hinoka): Add tests crbug.com/500781
def rmtree(file_path): # pragma: no cover
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at file_path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
if not os.path.exists(file_path):
return
if os.path.isfile(file_path):
for i in xrange(3):
try:
os.remove(file_path)
return
except OSError:
if i == 2:
raise
time.sleep(3)
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for i in xrange(3):
try:
subprocess.check_call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path])
return
except subprocess.CalledProcessError:
if i == 2:
raise
time.sleep(3)
def remove_with_retry(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
else:
return rmfunc(path)
def rmtree_on_error(function, _, excinfo):
"""This works around a problem whereby python 2.x on Windows has no ability
to check for symbolic links. os.path.islink always returns False. But
shutil.rmtree will fail if invoked on a symbolic link whose target was
deleted before the link. E.g., reproduce like this:
> mkdir test
> mkdir test\1
> mklink /D test\current test\1
> python -c "import infra_libs; infra_libs.rmtree('test')"
To avoid this issue, we pass this error-handling function to rmtree. If
we see the exact sort of failure, we ignore it. All other failures we re-
raise.
"""
exception_type = excinfo[0]
exception_value = excinfo[1]
# If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
# fail with a WindowsError exception with an ENOENT errno (i.e., file not
# found). We'll ignore that error. Note that WindowsError is not defined
# for non-Windows platforms, so we use OSError (of which it is a subclass)
# to avoid lint complaints about an undefined global on non-Windows
# platforms.
if (function is os.listdir) and issubclass(exception_type, OSError):
if exception_value.errno != errno.ENOENT:
raise
else:
raise
for root, dirs, files in os.walk(file_path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(lambda p: shutil.rmtree(p, onerror=rmtree_on_error),
os.path.join(root, name))
remove_with_retry(os.rmdir, file_path)
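# Editorial sketch (POSIX-flavoured, hypothetical paths): rmtree removes a
# tree even when it contains files that were made read-only.
def _example_rmtree():
  d = tempfile.mkdtemp()
  read_only = os.path.join(d, 'readonly.txt')
  with open(read_only, 'w') as f:
    f.write('x')
  os.chmod(read_only, 0444)  # simulate read-only metadata such as .svn files
  rmtree(d)
  assert not os.path.exists(d)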
# We're trying to be compatible with Python3 tempfile.TemporaryDirectory
# context manager here. And they used 'dir' as a keyword argument.
# pylint: disable=redefined-builtin
@contextlib.contextmanager
def temporary_directory(suffix="", prefix="tmp", dir=None,
keep_directory=False):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with temporary_directory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
Args:
suffix, prefix, dir: same arguments as for tempfile.mkdtemp.
keep_directory (bool): if True, do not delete the temporary directory
when exiting. Useful for debugging.
Returns:
tempdir (str): full path to the temporary directory.
"""
tempdir = None # Handle mkdtemp raising an exception
try:
tempdir = tempfile.mkdtemp(suffix, prefix, dir)
yield tempdir
finally:
if tempdir and not keep_directory: # pragma: no branch
try:
# TODO(pgervais,496347) Make this work reliably on Windows.
shutil.rmtree(tempdir, ignore_errors=True)
except OSError as ex: # pragma: no cover
print >> sys.stderr, (
"ERROR: {!r} while cleaning up {!r}".format(ex, tempdir))
|
|
# -*- coding: utf-8 -*-
#py.test --cov remdups --cov-report term-missing
#This succeeds on Windows with c:\msys64\usr\bin in %PATH% and
#mv C:\Windows\System32\find.exe C:\Windows\System32\findw.exe
#for .sh, .bat, .py
#On linux .bat cannot be executed
import os
import sys
import tempfile
import subprocess
import shutil
from glob import glob
from itertools import product
import PIL
from PIL import ImageDraw
try: # pragma: no cover
from StringIO import StringIO # pragma: no cover
except ImportError: # pragma: no cover
from io import StringIO # pragma: no cover
import pytest
#requirements
import piexif
#pytest-cov
#pytest-toolbox
from remdups import *
def run(s):
if '.sh' in s:
return subprocess.run('sh '+s,shell=True)
elif '.bat' in s and sys.platform=='win32':
return subprocess.run('cmd /C '+s)
elif '.py' in s:
return subprocess.run('python '+s,shell=True)
##fixtures
#https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/EXIF.html
someexif={33434: (1, 382), 33437: (200, 100), 34850: 2, 34855: 50, 36864: b'0210',
36867: b'2017:11:02 14:03:36', 36868: b'2017:11:02 14:03:36', 37121:
b'\x01\x02\x03\x00', 37377: (85774, 10000), 37378: (200, 100), 37379: (0,
1), 37380: (0, 1000000), 37383: 1, 37384: 1, 37385: 0, 37386: (3790,
1000), 37500: b'M[16] [80,1] [d0,ca]\x00', 37510:
b'ASCII\x00\x00\x00Hisilicon K3\x00', 37520: b'017511', 37521: b'017511',
37522: b'017511', 40960: b'0100', 40961: 1, 40962: 2336, 40963: 4160,
40965: 942, 41495: 2, 41728: b'\x03', 41729: b'\x01', 41985: 1, 41986: 0,
41987: 0, 41988: (100, 100), 41989: 28, 41990: 0, 41991: 0, 41992: 0,
41993: 0, 41994: 0, 41996: 0}
@pytest.fixture
def dirwithfiles(tmpworkdir):
img = PIL.Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(img)
draw.text((10, 10), "img", fill=(255, 0, 0))
del draw
exifdata= piexif.dump({"0th":{},
"Exif":someexif,
"GPS":{},
"Interop":{},
"1st":{},
"thumbnail":None})
img.save('img.jpg','jpeg',exif=exifdata)
img.save('newimg.jpg','jpeg')
with open('some.html','w') as f: f.write("""
<!DOCTYPE html>
<html>
<head>
<title>Page Title</title>
</head>
<body>
<img src="some_files/img.jpg">
</body>
</html>
""")
shutil.copy2('some.html','sometxt.txt')
os.mkdir('sub')
[shutil.copy2(x,'sub') for x in glob('*.jpg')]
os.mkdir('some_files')
shutil.copy2('img.jpg','some_files')
return str(tmpworkdir)
@pytest.fixture
def emptyhashfiles(dirwithfiles):
ehf = ['.remdups_{}.sha256'.format(x) for x in 'c b e'.split()]
for hf in ehf:
with open(hf,'w'):pass
return ehf
@pytest.fixture
def dups(emptyhashfiles):
rd = Command()
rd.hasher.hashall()
with pytest.raises(AttributeError):
rd.with_same_tail
with pytest.raises(AttributeError):
rd.no_same_tail
for hf in emptyhashfiles:
with open(hf,'r') as f:
lns = f.readlines()
assert len(lns)==7 #.remdups_* ignored
return rd
@pytest.fixture
def othertmpdir(request):
tempdir = tempfile.mkdtemp()
request.addfinalizer(lambda :shutil.rmtree(tempdir))
return tempdir
@pytest.yield_fixture
def here_otherdir(request,dirwithfiles,othertmpdir):
os.chdir(othertmpdir)
ehf = ['.remdups_{}.sha256'.format(x) for x in Hasher.sources]
for hf in ehf:
with open(hf,'w'):pass
yield othertmpdir,dirwithfiles
os.chdir(dirwithfiles)
##command line
def test_help_global(capfd):
with pytest.raises(SystemExit) as e:
pa = parse_args(['remdups','-h'])
out, err = capfd.readouterr()
assert ','.join(['update','rm','mv','cp','dupsof','dupsoftail']) in out
@pytest.mark.parametrize('x',['update','rm','mv','cp','dupsof','dupsoftail'])
def test_help_command(capfd,x):
with pytest.raises(SystemExit) as e:
pa = parse_args(['remdups',x,'-h'])
out, err = capfd.readouterr()
assert "remdups "+x in out
def test_defaults_update(tmpworkdir):
pa=parse_args(['remdups'])
assert ('update', [], [], '.') == \
(pa.cmd,pa.filter,pa.exclude,pa.fromdir)
@pytest.mark.parametrize('x',['rm','mv','cp'])
def test_defaults(tmpworkdir,x):
pa=parse_args(['remdups',x,'-s','s.sh'])
assert (x, 's.sh', [], [], [], False, False) == \
(pa.cmd,pa.script.name,pa.comment_out,pa.keep_in,pa.keep_out,pa.only_same_name,pa.safe)
assert os.path.exists('s.sh')
del pa
os.remove('s.sh')
@pytest.mark.parametrize('x',['dupsof','dupsoftail'])
def test_defaults_query(tmpworkdir,x):
pa=parse_args(['remdups',x,'anystring'])
assert (x, 'anystring') == (pa.cmd,pa.substr)
@pytest.mark.parametrize('a',['--keep-in','--keep-out','--comment-out'])#-i,-o,-c
def test_append(request,a):
pa=parse_args(['remdups','cp','-s','s.sh',a, 'a',a, 'b'])
assert vars(pa)[a.strip('-').replace('-','_')]==['a','b']
del pa
os.remove('s.sh')
@pytest.mark.parametrize('a',['--filter','--exclude'])#-f,-e
def test_append_update(request,a):
pa=parse_args(['remdups','update',a, 'a',a, 'b'])
assert vars(pa)[a.strip('-')]==['a','b']
##Hasher
def test_hashlist_default(tmpworkdir):
h=Hasher()
assert h.hashfiles==['.remdups_c.sha256']
def test_hashlist_more(tmpworkdir):
lenah = len(Hasher.hashfilenames)
for hf in Hasher.hashfilenames:
with open(hf,'w'):pass
assert len(os.listdir()) == lenah
for hf in Hasher.hashfilenames:
assert os.path.exists(hf)
h=Hasher()
h.load_hashes()
h.hashall()
for hf in Hasher.hashfilenames:
with open(hf,'r') as f:
lns = f.readlines()
assert len(lns)==0 #.remdups_* ignored
def test_hash_and_write(emptyhashfiles,othertmpdir):
allduplicates = []
assert othertmpdir != os.getcwd()
hshr = Hasher()
for f,duplicates,content in hshr.foreachcontent('.'):
if duplicates:
allduplicates.append(f)
else:
_,nfd,nff = fn2dirfn(f,"%y%m/%d%H%M%S")
nfnf = joinp(othertmpdir,nff)
nfnd= normp(joinp(othertmpdir,nfd))
try:
os.makedirs(nfnd)
except: pass
assert content!=[] #because some .remdups_ with (c)ontent
n = len(glob(nfnf+'*'))
if n:
nfnf = nfnf+'_'+str(n)
with open(nfnf,'wb') as nf:
for buf in content:
nf.write(buf)
shutil.copystat(f, nfnf)
same = filecmp.cmp(f, nfnf, False)
assert same
if sys.platform == 'win32':
assert allduplicates==['.\\sometxt.txt','.\\some_files\\img.jpg','.\\sub\\img.jpg','.\\sub\\newimg.jpg']
else:
assert allduplicates==['./sometxt.txt','./some_files/img.jpg','./sub/img.jpg','./sub/newimg.jpg']
hshr.clear()
assert len(hshr.path_hash)==0
allduplicates = []
for f in hshr.scandir(fromdir=othertmpdir):
if hshr.duplicates(f):
allduplicates.append(f)
with pytest.raises(ValueError):
hshr.duplicates(f+'no')
assert len(hshr.path_hash)==3
assert allduplicates==[]
#reload with no .remdups_c, but .remdups_e for non-exif files becomes _c
for hf in glob('.remdups_*'):
if '_c.' in hf:
os.remove(hf)
assert len(glob('.remdups_c.*'))==0
hshr = Hasher()
withcontent = 0
for f,duplicates,content in hshr.foreachcontent('.'):
withcontent += (content!=[])
assert withcontent == 4
#reload with no .remdups_c* and no .remdups_e*
for hf in glob('.remdups_*'):
if '_e.' in hf:
os.remove(hf)
assert len(glob('.remdups_e.*'))==0
hshr = Hasher()
for f,duplicates,content in hshr.foreachcontent('.'):
assert content==[] #because no (c)ontent
def test_resort(emptyhashfiles,othertmpdir):
resort(othertmpdir,"%y%m/%d_%H%M%S")
hshr = Hasher()
hshr.load_hashes()
assert len(hshr.hash_paths)>0
cd = os.getcwd()
os.chdir(othertmpdir)
hf = remdupsfile('e','md5')
with open(hf,'w'): pass
hshr = Hasher()
assert len(glob(othertmpdir+'/**/*')) == 3
assert len(glob(othertmpdir+'/**/*_1.jpg')) == 1
assert len([f for f in hshr.scandir(othertmpdir,filter=['*.jpg'])]) == 2
assert len([f for f in hshr.scandir(othertmpdir,filter=['*.no'])]) == 0
os.chdir(cd)
def test_resort_no_content(emptyhashfiles,othertmpdir):
for hf in glob('.remdups_*'):
os.remove(hf)
hf = remdupsfile('b','md5')
with open(hf,'w'): pass
with pytest.raises(ValueError):
resort(othertmpdir,"%y%m/%d_%H%M%S")
##Command
@pytest.mark.parametrize('cmd,script',zip(['rm','cp','mv'],['script.sh','script.bat']))
def test_find_dups(dups,cmd,script):
if cmd!='rm':
with pytest.raises(ValueError):#see fn2dirfn
cmds=getattr(dups,cmd)(script=argparse.FileType('w',encoding='utf-8')(script))
cmds=getattr(dups,cmd)(script=argparse.FileType('w',encoding='utf-8')(script),sort="%y%m/%d%H%M%S")
assert len(dups.with_same_tail)==2
assert len(dups.no_same_tail)==1 #script.sh has no duplicate
tails = [tail for tail, paths in dups.no_same_tail]
assert not any(tails)
assert len(dups.with_same_tail[0][1]) == 3
assert len(cmds) > 1
anybackslashes = any(['\\' in x for x in cmds])
if script.endswith('.bat') and sys.platform=='win32':
assert anybackslashes
else:#.sh
assert not anybackslashes
if cmd=='rm':#remove all but one
assert sum([re.match('^'+dups.comment+'>#',x) and 1 or 0 for x in cmds]) == 4
else:#copy one and leave the rest
assert sum([re.match('^'+dups.comment+'>#',x) and 1 or 0 for x in cmds]) == 3
assert sum([re.search('_\d\.jpg',x) and 1 or 0 for x in cmds]) == 1
dups.args.script.close()
assert os.path.exists(script)
lns = []
with open(script,'r') as f:
lns = f.readlines()
assert len(lns) > 1
@pytest.fixture
def updated(here_otherdir):
here,other = here_otherdir
main(parse_args(['remdups','update',other]))
for hf in glob('.remdups_*'):
with open(hf,'r') as hfh:
lns = [convunix(x) for x in hfh.readlines()]
assert len(lns)>0
for e in lns:
assert '//' in e
return here,other
def test_dupsoftail(updated,capfd):
main(parse_args(['remdups','dupsoftail','img.jpg']))
out, err = capfd.readouterr()
assert 'img.jpg' in out
assert 'newimg.jpg' in out
def test_dupsoftail_notail(updated,capfd):
main(parse_args(['remdups','dupsoftail','img']))
out, err = capfd.readouterr()
assert 'img.jpg' not in out
def test_dupsof(updated,capfd):
main(parse_args(['remdups','dupsof','sub/new']))
out, err = capfd.readouterr()
assert 'img.jpg' in out
assert 'newimg.jpg' in out
def test_dupsof_nofile(updated,capfd):
with pytest.raises(ValueError):
main(parse_args(['remdups','dupsof','img']))
@pytest.mark.parametrize('script',['s.sh','s.bat','s.py'])
def test_cp(updated,script):
here,other = updated
main(parse_args(['remdups','cp','-s',script,'-o','sub','--safe']))
ld = os.listdir('.')
assert 'img.jpg' not in ld #first script must run
assert script in ld
with open(script,'r') as s:
lns = s.readlines()
assert len(lns)>1
ru = run(script)
    if ru is not None:
assert ru.returncode == 0
ld = os.listdir('.')
assert 'img.jpg' in ld
assert 'newimg.jpg' in ld
assert 'some.html' in ld
assert 'sub' not in ld
@pytest.mark.parametrize('script',['s.sh','s.bat','s.py'])
def test_mv(updated,script):
here,other = updated
main(parse_args(['remdups','mv','-s',script,'-i','sub','-c','some']))
ld = os.listdir('.')
assert 'img.jpg' not in ld #first script must run
assert script in ld
with open(script,'r') as s:
lns = s.readlines()
assert len(lns)>1
ru = run(script)
    if ru is not None:
assert ru.returncode == 0
ld = os.listdir('.')
assert 'img.jpg' not in ld
assert 'newimg.jpg' not in ld
assert 'some.html' not in ld
assert 'sub' in ld
ldsub = os.listdir('sub')
assert 'img.jpg' in ldsub
assert 'newimg.jpg' in ldsub
oldldsub = os.listdir(joinp(other,'sub'))
assert 'img.jpg' not in oldldsub
assert 'newimg.jpg' not in oldldsub
@pytest.fixture
def updatedhere(emptyhashfiles):
main(parse_args(['remdups','update']))
for hf in glob('.remdups_*'):
with open(hf,'r') as hfh:
lns = [convunix(x) for x in hfh.readlines()]
assert len(lns)>0
for e in lns:
assert '//' not in e
return os.getcwd()
@pytest.mark.parametrize('script',['s.sh','s.bat','s.py'])
def test_rm(updatedhere,script):
main(parse_args(['remdups','rm','-s',script,'-o','.txt']))
ld = os.listdir('.')
assert 'img.jpg' in ld #first script must run
assert script in ld
with open(script,'r') as s:
lns = s.readlines()
assert len(lns)>1
ru = run(script)
    if ru is not None:
assert ru.returncode == 0
ld = os.listdir('.')
assert 'img.jpg' in ld
assert 'newimg.jpg' in ld
assert 'some.html' in ld
assert 'sub' not in ld #empty folders are deleted
some_files = os.listdir('some_files')
assert 'img.jpg' in some_files
##other
def test_convunix(request):
fn=r"U:\w&k(2)\wf g.txt"
assert convunix(fn) == "'/U/w&k(2)/wf g.txt'"
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.compute import utils as compute_utils
from nova import context
from nova.network import linux_net
from nova.objects import instance as instance_obj
from nova.objects import security_group as security_group_obj
from nova.objects import security_group_rule as security_group_rule_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import netutils
LOG = logging.getLogger(__name__)
firewall_opts = [
cfg.StrOpt('firewall_driver',
help='Firewall driver '
'(defaults to hypervisor specific iptables driver)'),
cfg.BoolOpt('allow_same_net_traffic',
default=True,
help='Whether to allow network traffic from same network'),
]
CONF = cfg.CONF
CONF.register_opts(firewall_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
def load_driver(default, *args, **kwargs):
fw_class = importutils.import_class(CONF.firewall_driver or default)
return fw_class(*args, **kwargs)
class FirewallDriver(object):
"""Firewall Driver base class.
Defines methods that any driver providing security groups
and provider firewall functionality should implement.
"""
def __init__(self, virtapi):
self._virtapi = virtapi
def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
At this point, the instance isn't running yet.
"""
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
"""Apply instance filter.
Once this method returns, the instance should be firewalled
appropriately. This method should as far as possible be a
no-op. It's vastly preferred to get everything set up in
prepare_instance_filter.
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store
Gets called when a rule has been added to or removed from
the security group.
"""
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store
Gets called when an instance gets added to or removed from
the security group.
"""
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
        group gains or loses a rule.
"""
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""Refresh common rules for all hosts/instances from data store.
Gets called when a rule has been added to or removed from
the list of rules (via admin api).
"""
raise NotImplementedError()
def setup_basic_filtering(self, instance, network_info):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
:py:meth:`prepare_instance_filter`.
"""
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
class IptablesFirewallDriver(FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
def __init__(self, virtapi, **kwargs):
super(IptablesFirewallDriver, self).__init__(virtapi)
self.iptables = linux_net.iptables_manager
self.instances = {}
self.network_infos = {}
self.basically_filtered = False
# Flags for DHCP request rule
self.dhcp_create = False
self.dhcp_created = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info):
pass
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def filter_defer_apply_on(self):
self.iptables.defer_apply_on()
def filter_defer_apply_off(self):
self.iptables.defer_apply_off()
def unfilter_instance(self, instance, network_info):
if self.instances.pop(instance['id'], None):
# NOTE(vish): use the passed info instead of the stored info
self.network_infos.pop(instance['id'])
self.remove_filters_for_instance(instance)
self.iptables.apply()
else:
LOG.info(_('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
def prepare_instance_filter(self, instance, network_info):
self.instances[instance['id']] = instance
self.network_infos[instance['id']] = network_info
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
LOG.debug(_('Filters added to instance'), instance=instance)
self.refresh_provider_fw_rules()
LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance)
# Ensure that DHCP request rule is updated if necessary
if (self.dhcp_create and not self.dhcp_created):
self.iptables.ipv4['filter'].add_rule(
'INPUT',
'-s 0.0.0.0/32 -d 255.255.255.255/32 '
'-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
self.iptables.ipv4['filter'].add_rule(
'FORWARD',
'-s 0.0.0.0/32 -d 255.255.255.255/32 '
'-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
self.dhcp_created = True
self.iptables.apply()
def _create_filter(self, ips, chain_name):
return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
def _get_subnets(self, network_info, version):
subnets = []
for vif in network_info:
if 'network' in vif and 'subnets' in vif['network']:
for subnet in vif['network']['subnets']:
if subnet['version'] == version:
subnets.append(subnet)
return subnets
def _filters_for_instance(self, chain_name, network_info):
"""Creates a rule corresponding to each ip that defines a
jump to the corresponding instance - chain for all the traffic
destined to that ip.
"""
v4_subnets = self._get_subnets(network_info, 4)
v6_subnets = self._get_subnets(network_info, 6)
ips_v4 = [ip['address'] for subnet in v4_subnets
for ip in subnet['ips']]
ipv4_rules = self._create_filter(ips_v4, chain_name)
ipv6_rules = ips_v6 = []
if CONF.use_ipv6:
if v6_subnets:
ips_v6 = [ip['address'] for subnet in v6_subnets
for ip in subnet['ips']]
ipv6_rules = self._create_filter(ips_v6, chain_name)
return ipv4_rules, ipv6_rules
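    # Illustration (not executed): for an instance with id 42 whose subnets
    # yield the fixed IPs 10.0.0.5 and 10.0.0.6, chain_name is 'inst-42' and
    # the generated IPv4 jump rules are:
    #   ['-d 10.0.0.5 -j $inst-42', '-d 10.0.0.6 -j $inst-42']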
def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
def add_filters_for_instance(self, instance, inst_ipv4_rules,
inst_ipv6_rules):
network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
network_info)
self._add_filters('local', ipv4_rules, ipv6_rules)
self._add_filters(chain_name, inst_ipv4_rules, inst_ipv6_rules)
def remove_filters_for_instance(self, instance):
chain_name = self._instance_chain_name(instance)
self.iptables.ipv4['filter'].remove_chain(chain_name)
if CONF.use_ipv6:
self.iptables.ipv6['filter'].remove_chain(chain_name)
def _instance_chain_name(self, instance):
return 'inst-%s' % (instance['id'],)
def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info):
# Always drop invalid packets
        ipv4_rules += ['-m state --state INVALID -j DROP']
        ipv6_rules += ['-m state --state INVALID -j DROP']
# Allow established connections
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
# Pass through provider-wide drops
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
def _do_dhcp_rules(self, ipv4_rules, network_info):
v4_subnets = self._get_subnets(network_info, 4)
dhcp_servers = [subnet.get_meta('dhcp_server')
for subnet in v4_subnets if subnet.get_meta('dhcp_server')]
for dhcp_server in dhcp_servers:
if dhcp_server:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
'-j ACCEPT' % (dhcp_server,))
self.dhcp_create = True
def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info):
v4_subnets = self._get_subnets(network_info, 4)
v6_subnets = self._get_subnets(network_info, 6)
cidrs = [subnet['cidr'] for subnet in v4_subnets]
for cidr in cidrs:
ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
if CONF.use_ipv6:
cidrv6s = [subnet['cidr'] for subnet in v6_subnets]
for cidrv6 in cidrv6s:
ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
def _do_ra_rules(self, ipv6_rules, network_info):
v6_subnets = self._get_subnets(network_info, 6)
gateways_v6 = [subnet['gateway']['address'] for subnet in v6_subnets]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
def _build_icmp_rule(self, rule, version):
icmp_type = rule['from_port']
icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
if version == 4:
return ['-m', 'icmp', '--icmp-type', icmp_type_arg]
elif version == 6:
return ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg]
# return empty list if icmp_type == -1
return []
def _build_tcp_udp_rule(self, rule, version):
if rule['from_port'] == rule['to_port']:
return ['--dport', '%s' % (rule['from_port'],)]
else:
return ['-m', 'multiport',
'--dports', '%s:%s' % (rule['from_port'],
rule['to_port'])]
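    # Illustration (not executed): a tcp/udp rule with from_port=80 and
    # to_port=80 yields ['--dport', '80'], while 8000..8080 yields
    # ['-m', 'multiport', '--dports', '8000:8080']. For icmp, from_port=8
    # (type) with to_port=-1 (any code) yields
    # ['-m', 'icmp', '--icmp-type', '8'] on IPv4; from_port=-1 and
    # to_port=-1 return [] so only the protocol match applies.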
def instance_rules(self, instance, network_info):
ctxt = context.get_admin_context()
if isinstance(instance, dict):
# NOTE(danms): allow old-world instance objects from
# unconverted callers; all we need is instance.uuid below
instance = instance_obj.Instance._from_db_object(
ctxt, instance_obj.Instance(), instance, [])
ipv4_rules = []
ipv6_rules = []
# Initialize with basic rules
self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
# Set up rules to allow traffic to/from DHCP server
self._do_dhcp_rules(ipv4_rules, network_info)
        # Allow project network traffic
if CONF.allow_same_net_traffic:
self._do_project_network_rules(ipv4_rules, ipv6_rules,
network_info)
# We wrap these in CONF.use_ipv6 because they might cause
# a DB lookup. The other ones are just list operations, so
# they're not worth the clutter.
if CONF.use_ipv6:
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
security_groups = security_group_obj.SecurityGroupList.get_by_instance(
ctxt, instance)
# then, security group chains and rules
for security_group in security_groups:
rules_cls = security_group_rule_obj.SecurityGroupRuleList
rules = rules_cls.get_by_security_group(ctxt, security_group)
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
instance=instance)
if not rule['cidr']:
version = 4
else:
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
protocol = rule['protocol']
if protocol:
protocol = rule['protocol'].lower()
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-j ACCEPT']
if protocol:
args += ['-p', protocol]
if protocol in ['udp', 'tcp']:
args += self._build_tcp_udp_rule(rule, version)
elif protocol == 'icmp':
args += self._build_icmp_rule(rule, version)
if rule['cidr']:
LOG.debug('Using cidr %r', rule['cidr'], instance=instance)
args += ['-s', rule['cidr']]
fw_rules += [' '.join(args)]
else:
if rule['grantee_group']:
insts = (
instance_obj.InstanceList.get_by_security_group(
ctxt, rule['grantee_group']))
for instance in insts:
if instance['info_cache']['deleted']:
LOG.debug('ignoring deleted cache')
continue
nw_info = compute_utils.get_nw_info_for_instance(
instance)
ips = [ip['address']
for ip in nw_info.fixed_ips()
if ip['version'] == version]
LOG.debug('ips: %r', ips, instance=instance)
for ip in ips:
subrule = args + ['-s %s' % ip]
fw_rules += [' '.join(subrule)]
LOG.debug('Using fw_rules: %r', fw_rules, instance=instance)
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
return ipv4_rules, ipv6_rules
def instance_filter_exists(self, instance, network_info):
pass
def refresh_security_group_members(self, security_group):
self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
def refresh_security_group_rules(self, security_group):
self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
def refresh_instance_security_rules(self, instance):
self.do_refresh_instance_rules(instance)
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def _inner_do_refresh_rules(self, instance, ipv4_rules,
ipv6_rules):
self.remove_filters_for_instance(instance)
self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
def do_refresh_security_group_rules(self, security_group):
for instance in self.instances.values():
network_info = self.network_infos[instance['id']]
ipv4_rules, ipv6_rules = self.instance_rules(instance,
network_info)
self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
def do_refresh_instance_rules(self, instance):
network_info = self.network_infos[instance['id']]
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
def refresh_provider_fw_rules(self):
"""See :class:`FirewallDriver` docs."""
self._do_refresh_provider_fw_rules()
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def _do_refresh_provider_fw_rules(self):
"""Internal, synchronized version of refresh_provider_fw_rules."""
self._purge_provider_fw_rules()
self._build_provider_fw_rules()
def _purge_provider_fw_rules(self):
"""Remove all rules from the provider chains."""
self.iptables.ipv4['filter'].empty_chain('provider')
if CONF.use_ipv6:
self.iptables.ipv6['filter'].empty_chain('provider')
def _build_provider_fw_rules(self):
"""Create all rules for the provider IP DROPs."""
self.iptables.ipv4['filter'].add_chain('provider')
if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain('provider')
ipv4_rules, ipv6_rules = self._provider_rules()
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule('provider', rule)
if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6."""
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
protocol = rule['protocol']
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-p', protocol, '-s', rule['cidr']]
if protocol in ['udp', 'tcp']:
if rule['from_port'] == rule['to_port']:
args += ['--dport', '%s' % (rule['from_port'],)]
else:
args += ['-m', 'multiport',
'--dports', '%s:%s' % (rule['from_port'],
rule['to_port'])]
elif protocol == 'icmp':
icmp_type = rule['from_port']
icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j DROP']
fw_rules += [' '.join(args)]
return ipv4_rules, ipv6_rules
class NoopFirewallDriver(object):
"""Firewall driver which just provides No-op methods."""
def __init__(self, *args, **kwargs):
pass
def _noop(self, *args, **kwargs):
pass
def __getattr__(self, key):
return self._noop
def instance_filter_exists(self, instance, network_info):
return True
|
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
class StoryExpectations(object):
"""An object that contains disabling expectations for benchmarks and stories.
Example Usage:
class FooBenchmarkExpectations(expectations.StoryExpectations):
def SetExpectations(self):
self.PermanentlyDisableBenchmark(
[expectations.ALL_MOBILE], 'Desktop Benchmark')
self.DisableStory('story_name1', [expectations.ALL_MAC], 'crbug.com/456')
self.DisableStory('story_name2', [expectations.ALL], 'crbug.com/789')
...
"""
def __init__(self):
self._disabled_platforms = []
self._expectations = {}
self._frozen = False
self.SetExpectations()
self._Freeze()
def GetBrokenExpectations(self, story_set):
story_set_story_names = [s.name for s in story_set.stories]
invalid_story_names = []
for story_name in self._expectations:
if story_name not in story_set_story_names:
invalid_story_names.append(story_name)
logging.error('Story %s is not in the story set.' % story_name)
return invalid_story_names
def SetExpectations(self):
"""Sets the Expectations for test disabling
Override in subclasses to disable tests."""
pass
def _Freeze(self):
self._frozen = True
def PermanentlyDisableBenchmark(self, conditions, reason):
"""Permanently Disable benchmark under the given conditions.
This means that even if --also-run-disabled-tests is passed, the benchmark
will not run. Some benchmarks (such as system_health.mobile_* benchmarks)
contain android specific commands and as such, cannot run on desktop
platforms under any condition.
Example:
PermanentlyDisableBenchmark(
[expectations.ALL_MOBILE], 'Desktop benchmark')
Args:
conditions: List of _TestCondition subclasses.
reason: Reason for disabling the benchmark.
"""
assert reason, 'A reason for disabling must be given.'
assert not self._frozen, ('Cannot disable benchmark on a frozen '
'StoryExpectation object.')
for condition in conditions:
assert isinstance(condition, _TestCondition)
self._disabled_platforms.append((conditions, reason))
def IsBenchmarkDisabled(self, platform, finder_options):
"""Returns the reason the benchmark was disabled, or None if not disabled.
Args:
      platform: A platform object.
      finder_options: Options passed through to each condition's ShouldDisable.
"""
for conditions, reason in self._disabled_platforms:
for condition in conditions:
if condition.ShouldDisable(platform, finder_options):
logging.info('Benchmark permanently disabled on %s due to %s.',
condition, reason)
return reason
return None
def DisableStory(self, story_name, conditions, reason):
"""Disable the story under the given conditions.
Example:
DisableStory('story_name', [expectations.ALL_WIN], 'crbug.com/123')
Args:
story_name: Name of the story to disable passed as a string.
conditions: List of _TestCondition subclasses.
reason: Reason for disabling the story.
"""
assert reason, 'A reason for disabling must be given.'
# TODO(rnephew): Remove http check when old stories that use urls as names
# are removed.
if not story_name.startswith('http'):
# Decrease to 50 after we shorten names of existing tests.
assert len(story_name) < 75, (
"Story name exceeds limit of 75 characters. This limit is in place to"
" encourage Telemetry benchmark owners to use short, simple story "
"names (e.g. 'google_search_images', not "
"'http://www.google.com/images/1234/abc')."
)
assert not self._frozen, ('Cannot disable stories on a frozen '
'StoryExpectation object.')
for condition in conditions:
assert isinstance(condition, _TestCondition)
if not self._expectations.get(story_name):
self._expectations[story_name] = []
self._expectations[story_name].append((conditions, reason))
def IsStoryDisabled(self, story, platform, finder_options):
"""Returns the reason the story was disabled, or None if not disabled.
Args:
story: Story object that contains a name property.
      platform: A platform object.
      finder_options: Options passed through to each condition's ShouldDisable.
Returns:
Reason if disabled, None otherwise.
"""
for conditions, reason in self._expectations.get(story.name, []):
for condition in conditions:
if condition.ShouldDisable(platform, finder_options):
logging.info('%s is disabled on %s due to %s.',
story.name, condition, reason)
return reason
return None
class _TestCondition(object):
def ShouldDisable(self, platform, finder_options):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class _TestConditionByPlatformList(_TestCondition):
def __init__(self, platforms, name):
self._platforms = platforms
self._name = name
def ShouldDisable(self, platform, finder_options):
del finder_options # Unused.
return platform.GetOSName() in self._platforms
def __str__(self):
return self._name
class _AllTestCondition(_TestCondition):
def ShouldDisable(self, platform, finder_options):
del platform, finder_options # Unused.
return True
def __str__(self):
return 'All Platforms'
class _TestConditionAndroidSvelte(_TestCondition):
"""Matches android devices with a svelte (low-memory) build."""
def ShouldDisable(self, platform, finder_options):
del finder_options # Unused.
return platform.GetOSName() == 'android' and platform.IsSvelte()
def __str__(self):
return 'Android Svelte'
class _TestConditionByAndroidModel(_TestCondition):
def __init__(self, model, name=None):
self._model = model
self._name = name if name else model
def ShouldDisable(self, platform, finder_options):
return (platform.GetOSName() == 'android' and
self._model in platform.GetDeviceTypeName())
def __str__(self):
return self._name
class _TestConditionAndroidWebview(_TestCondition):
def ShouldDisable(self, platform, finder_options):
return (platform.GetOSName() == 'android' and
finder_options.browser_type == 'android-webview')
def __str__(self):
return 'Android Webview'
class _TestConditionByMacVersion(_TestCondition):
def __init__(self, version, name=None):
self._version = version
self._name = name
def __str__(self):
return self._name
def ShouldDisable(self, platform, finder_options):
if platform.GetOSName() != 'mac':
return False
return platform.GetOSVersionDetailString().startswith(self._version)
ALL = _AllTestCondition()
ALL_MAC = _TestConditionByPlatformList(['mac'], 'Mac Platforms')
ALL_WIN = _TestConditionByPlatformList(['win'], 'Win Platforms')
ALL_LINUX = _TestConditionByPlatformList(['linux'], 'Linux Platforms')
ALL_ANDROID = _TestConditionByPlatformList(['android'], 'Android Platforms')
ALL_DESKTOP = _TestConditionByPlatformList(
['mac', 'linux', 'win'], 'Desktop Platforms')
ALL_MOBILE = _TestConditionByPlatformList(['android'], 'Mobile Platforms')
ANDROID_NEXUS5 = _TestConditionByAndroidModel('Nexus 5')
ANDROID_NEXUS5X = _TestConditionByAndroidModel('Nexus 5X')
ANDROID_NEXUS6 = _TestConditionByAndroidModel('Nexus 6')
ANDROID_NEXUS6P = _TestConditionByAndroidModel('Nexus 6P')
ANDROID_NEXUS7 = _TestConditionByAndroidModel('Nexus 7')
ANDROID_ONE = _TestConditionByAndroidModel(
'W6210', 'Cherry Mobile Android One')
ANDROID_SVELTE = _TestConditionAndroidSvelte()
ANDROID_WEBVIEW = _TestConditionAndroidWebview()
# MAC_10_11 Includes:
# Mac 10.11 Perf, Mac Retina Perf, Mac Pro 10.11 Perf, Mac Air 10.11 Perf
MAC_10_11 = _TestConditionByMacVersion('10.11', 'Mac 10.11')
# Mac 10_12 Includes:
# Mac 10.12 Perf, Mac Mini 8GB 10.12 Perf
MAC_10_12 = _TestConditionByMacVersion('10.12', 'Mac 10.12')
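# A minimal sketch of a concrete expectations class using the conditions
# defined above. Illustration only: the class name, story name and bug URL
# are placeholders, not a real Telemetry benchmark.
class _ExampleStoryExpectations(StoryExpectations):
  def SetExpectations(self):
    self.PermanentlyDisableBenchmark(
        [ALL_MOBILE], 'Benchmark exercises a desktop-only UI.')
    self.DisableStory('example_story', [MAC_10_12, ANDROID_SVELTE],
                      'crbug.com/000000')
# A runtime would then consult IsBenchmarkDisabled(platform, finder_options)
# and IsStoryDisabled(story, platform, finder_options) before running.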
|
|
from netbox_api.api.protocol import RequestHandler
from netbox_api.model import *
class NetboxClient(object):
def __init__(self, host, port, token, scheme, verify=None):
# Request handling
self._request_handler = RequestHandler(host, port, token, scheme, verify)
# Client parts
self.ipam = IPAMClient(self._request_handler)
self.dcim = DCIMClient(self._request_handler)
self.tenancy = TenancyClient(self._request_handler)
class NetboxClientPart(object):
def __init__(self, request_handler):
self._request_handler = request_handler
def _format_url(self, path_fmt, *parts):
return self._request_handler.format_url(path_fmt, *parts)
def _request(self, method, url, **kwargs):
return self._request_handler.request(
method=method,
url=url,
**kwargs)
def _paginate(self, cls, method, url, **kwargs):
return self._request_handler.paginate(
cls=cls,
method=method,
url=url,
**kwargs)
def _list(self, cls, uri, query_params):
itr = self._paginate(
cls=cls,
method='get',
url=self._format_url(uri),
params=query_params)
# TODO: Expose this as an optimization for other components later
return [r for r in itr]
class DCIMClient(NetboxClientPart):
def interface(self, interface_id):
"""
Get a device interface by the interface's ID.
:param interface_id:
:return:
"""
resp = self._request(
method='get',
url=self._format_url('/dcim/interfaces/{}', interface_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Interface)[0]
def list_interfaces(self, **query):
return self._list(Interface, '/dcim/interfaces', query)
def create_interface(self, name, form_factor, device_id, mac_address=None, management_only=False, parent_lag=None):
"""
Create a new device interface. The ID of the new interface is returned upon success.
:param name:
:param form_factor:
:param device_id:
:param mac_address:
:param management_only:
:param parent_lag:
:return:
"""
# Map constants
if isinstance(form_factor, FormFactorConstant):
form_factor = form_factor.value
resp = self._request(
method='post',
url=self._format_url('/dcim/interfaces'),
json={
'name': name,
'form_factor': form_factor,
'mac_address': mac_address,
'lag': parent_lag,
'device': device_id,
'mgmt_only': management_only
})
# Raise on bad status codes
resp.raise_on_status()
# Return the ID of the new interface
return resp.results[0]['id']
def delete_interface(self, interface_id):
"""
Deletes a device interface by the interface's ID.
:param interface_id:
:return:
"""
resp = self._request(
method='delete',
url=self._format_url('/dcim/interfaces/{}', interface_id))
# Raise on bad status codes
resp.raise_on_status()
def connect_interfaces(self, status, interface_a_id, interface_b_id):
"""
Connect two device interfaces and assign the connection a status.
:param status:
:param interface_a_id:
:param interface_b_id:
:return:
"""
resp = self._request(
method='post',
url=self._format_url('/dcim/interface-connections'),
json={
'connection_status': status,
'interface_a': interface_a_id,
'interface_b': interface_b_id
})
# Raise on bad status codes
resp.raise_on_status()
def region(self, region_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/regions/{}', region_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Region)[0]
    def list_regions(self, **query):
        return self._list(Region, '/dcim/regions', query)
def create_region(self, name, slug, parent_region_id=None):
resp = self._request(
method='post',
url=self._format_url('/dcim/regions'),
json={
'name': name,
'slug': slug,
'parent': parent_region_id
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new region
return resp.results[0]['id']
def delete_region(self, region_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/regions/{}', region_id))
# Raise on bad status codes
resp.raise_on_status()
def site(self, site_id):
"""
Get a site by the site's ID.
:param site_id:
:return:
"""
resp = self._request(
method='get',
url=self._format_url('/dcim/sites/{}', site_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Site)[0]
def list_sites(self, **query):
return self._list(Site, '/dcim/sites', query)
def create_site(self, name, slug, tenant_id, region_id, contact_email=None, physical_address=None,
shipping_address=None, contact_name=None, contact_phone=None, asn=None, comments=None,
facility=None, custom_fields=None):
resp = self._request(
method='post',
url=self._format_url('/dcim/sites'),
json={
'name': name,
'slug': slug,
'facility': facility,
'tenant': tenant_id,
'region': region_id,
'contact_name': contact_name,
'contact_phone': contact_phone,
'contact_email': contact_email,
'physical_address': physical_address,
'shipping_address': shipping_address,
'comments': comments,
'asn': asn,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new site
return resp.results[0]['id']
def delete_site(self, site_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/sites/{}', site_id))
# Raise on bad status codes
resp.raise_on_status()
def rack_group(self, rack_group_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/rack-groups/{}', rack_group_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(RackGroup)[0]
def list_rack_groups(self, **query):
return self._list(RackGroup, '/dcim/rack-groups', query)
def create_rack_group(self, name, slug, site_id):
resp = self._request(
method='post',
url=self._format_url('/dcim/rack-groups'),
json={
'name': name,
'slug': slug,
'site': site_id
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new rack group
return resp.results[0]['id']
def delete_rack_group(self, rack_group_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/rack-groups/{}', rack_group_id))
# Raise on bad status codes
resp.raise_on_status()
def rack_role(self, rack_role_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/rack-roles/{}', rack_role_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(RackRole)[0]
def list_rack_roles(self, **query):
return self._list(RackRole, '/dcim/rack-roles', query)
def create_rack_role(self, name, slug, color='000000'):
resp = self._request(
method='post',
url=self._format_url('/dcim/rack-roles'),
json={
'name': name,
'slug': slug,
'color': color
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new rack role
return resp.results[0]['id']
def delete_rack_role(self, rack_role_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/rack-roles/{}', rack_role_id))
# Raise on bad status codes
resp.raise_on_status()
def rack(self, rack_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/racks/{}', rack_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Rack)[0]
def list_racks(self, **query):
return self._list(Rack, '/dcim/racks', query)
def create_rack(self, name, rack_group_id, site_id, tenant_id, u_height, width, descending_units, rack_type,
rack_role_id=None, facility=None, comments='', custom_fields=None):
# Map constants to their values for the API
if isinstance(rack_type, RackTypeConstant):
rack_type = rack_type.value
if isinstance(width, RackWidthConstant):
width = width.value
resp = self._request(
method='post',
url=self._format_url('/dcim/racks'),
json={
'name': name,
'u_height': u_height,
'width': width,
'group': rack_group_id,
'site': site_id,
'facility_id': facility,
'role': rack_role_id,
'desc_units': descending_units,
'type': rack_type,
'tenant': tenant_id,
'comments': comments,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new rack
return resp.results[0]['id']
def delete_rack(self, rack_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/racks/{}', rack_id))
# Raise on bad status codes
resp.raise_on_status()
def platform(self, platform_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/platforms/{}', platform_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Platform)[0]
def create_platform(self, name, slug, rpc_client=''):
resp = self._request(
method='post',
url=self._format_url('/dcim/platforms'),
json={
'name': name,
'slug': slug,
'rpc_client': rpc_client
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new platform
return resp.results[0]['id']
def delete_platform(self, platform_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/platforms/{}', platform_id))
# Raise on bad status codes
resp.raise_on_status()
def manufacturer(self, manufacturer_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/manufacturers/{}', manufacturer_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Manufacturer)[0]
def create_manufacturer(self, name, slug):
resp = self._request(
method='post',
url=self._format_url('/dcim/manufacturers'),
json={
'name': name,
'slug': slug,
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new manufacturer
return resp.results[0]['id']
def delete_manufacturer(self, manufacturer_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/manufacturers/{}', manufacturer_id))
# Raise on bad status codes
resp.raise_on_status()
def device_type(self, device_type_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/device-types/{}', device_type_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(DeviceType)[0]
def create_device_type(self, model, slug, u_height, manufacturer_id, part_number=None,
interface_ordering=InterfaceOrderConstant.BY_RACK_POSITION, is_console_server=False,
is_network_device=False, subdevice_role=SubdeviceTypeConstant.NONE, is_full_depth=False,
is_pdu=False, comments='', custom_fields=None):
# Map constants
if isinstance(subdevice_role, SubdeviceTypeConstant):
subdevice_role = subdevice_role.value
if isinstance(interface_ordering, InterfaceOrderConstant):
interface_ordering = interface_ordering.value
resp = self._request(
method='post',
url=self._format_url('/dcim/device-types'),
json={
'model': model,
'slug': slug,
'u_height': u_height,
'is_pdu': is_pdu,
'is_full_depth': is_full_depth,
'subdevice_role': subdevice_role,
'is_console_server': is_console_server,
'is_network_device': is_network_device,
'part_number': part_number,
'interface_ordering': interface_ordering,
'manufacturer': manufacturer_id,
'comments': comments,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new device type
return resp.results[0]['id']
def delete_device_type(self, device_type_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/device-types/{}', device_type_id))
# Raise on bad status codes
resp.raise_on_status()
def device_role(self, device_role_id):
resp = self._request(
method='get',
url=self._format_url('/dcim/device-roles/{}', device_role_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(DeviceRole)[0]
def create_device_role(self, name, slug, color='000000'):
resp = self._request(
method='post',
url=self._format_url('/dcim/device-roles'),
json={
'name': name,
'slug': slug,
'color': color
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new device role
return resp.results[0]['id']
def delete_device_role(self, device_role_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/device-roles/{}', device_role_id))
# Raise on bad status codes
resp.raise_on_status()
def device(self, device_id):
"""
Get a device by the device's ID.
:param device_id:
:return:
"""
resp = self._request(
method='get',
url=self._format_url('/dcim/devices/{}', device_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Device)[0]
def list_devices(self, **query):
return self._list(Device, '/dcim/devices', query)
def create_device(self, name, device_role_id, site_id, status=DeviceStatusConstant.ACTIVE, custom_fields=None,
comments='', rack_face=RackFaceConstant.FRONT, asset_tag=None, platform_id=None,
primary_ip4_id=None, primary_ip6_id=None, position=0, device_type_id=None, serial=None,
rack_id=None, tenant_id=None):
# Map constants
if isinstance(status, DeviceStatusConstant):
status = status.value
if isinstance(rack_face, RackFaceConstant):
rack_face = rack_face.value
resp = self._request(
method='post',
url=self._format_url('/dcim/devices'),
json={
'status': status,
'device_role': device_role_id,
'name': name,
'site': site_id,
'comments': comments,
'face': rack_face,
'asset_tag': asset_tag,
'platform': platform_id,
'device_type': device_type_id,
'primary_ip4': primary_ip4_id,
'primary_ip6': primary_ip6_id,
'position': position,
'serial': serial,
'rack': rack_id,
'tenant': tenant_id,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new device
return resp.results[0]['id']
def update_device(self, device_id, **fields):
resp = self._request(
method='patch',
url=self._format_url('/dcim/devices/{}', device_id),
json=fields)
# Raise on bad status codes
resp.raise_on_status()
def delete_device(self, device_id):
resp = self._request(
method='delete',
url=self._format_url('/dcim/devices/{}', device_id))
# Raise on bad status codes
resp.raise_on_status()
class IPAMClient(NetboxClientPart):
def vrf(self, vrf_id):
resp = self._request(
method='get',
url=self._format_url('/ipam/vrfs/{}', vrf_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(VRF)[0]
def list_vrfs(self, **query):
return self._list(VRF, '/ipam/vrfs', query)
def create_vrf(self, name, route_distinguisher, tenant_id, enforce_unique=False, description=None,
custom_fields=None):
"""
:return:
"""
resp = self._request(
method='post',
url=self._format_url('/ipam/vrfs'),
json={
'name': name,
'rd': route_distinguisher,
'tenant': tenant_id,
'enforce_unique': enforce_unique,
'description': description,
'custom_fields': custom_fields if custom_fields is not None else dict(),
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new VRF
return resp.results[0]['id']
def delete_vrf(self, vrf_id):
resp = self._request(
method='delete',
url=self._format_url('/ipam/vrfs/{}', vrf_id))
# Raise on bad status codes
resp.raise_on_status()
def prefix_role(self, prefix_role_id):
resp = self._request(
method='get',
url=self._format_url('/ipam/roles/{}', prefix_role_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(PrefixRole)[0]
def list_prefix_roles(self, **query):
return self._list(PrefixRole, '/ipam/roles', query)
def create_prefix_role(self, name, slug, weight=0):
"""
:param name:
:param slug:
:param weight:
:return:
"""
resp = self._request(
method='post',
url=self._format_url('/ipam/roles'),
json={
'name': name,
'slug': slug,
'weight': weight
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new prefix role
return resp.results[0]['id']
def delete_prefix_role(self, ipam_role_id):
resp = self._request(
method='delete',
url=self._format_url('/ipam/roles/{}', ipam_role_id))
# Raise on bad status codes
resp.raise_on_status()
def ip_address(self, ip_address_id):
resp = self._request(
method='get',
url=self._format_url('/ipam/ip-addresses/{}', ip_address_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(IPAddress)[0]
def list_ip_addresses(self, **query):
return self._list(IPAddress, '/ipam/ip-addresses', query)
def create_ip_address(self, address, status, tenant_id, role=None, interface_id=None, vrf_id=None,
nat_inside=None, description=None, custom_fields=None):
# Map constants
if isinstance(status, IPAddressStatusConstant):
status = status.value
if isinstance(role, IPAddressRoleConstant):
role = role.value
resp = self._request(
method='post',
url=self._format_url('/ipam/ip-addresses'),
json={
'description': description,
'tenant': tenant_id,
'interface': interface_id,
'vrf': vrf_id,
'role': role,
'status': status,
'address': address,
'nat_inside': nat_inside,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new IP address
return resp.results[0]['id']
def delete_ip_address(self, ip_address_id):
resp = self._request(
method='delete',
url=self._format_url('/ipam/ip-addresses/{}', ip_address_id))
# Raise on bad status codes
resp.raise_on_status()
def assign_ip(self, address, interface_id, tenant_id, is_primary=False):
"""
Assign an IP address to an interface.
:param address:
:param interface_id:
:param tenant_id:
:param is_primary:
:return:
"""
resp = self._request(
method='post',
url=self._format_url('/ipam/ip-addresses'),
json={
'is_primary': is_primary,
'address': address,
'interface': interface_id,
'tenant': tenant_id
})
# Raise on bad status codes
resp.raise_on_status()
class TenancyClient(NetboxClientPart):
def tenant_group(self, tenant_group_id):
resp = self._request(
method='get',
url=self._format_url('/tenancy/tenant-groups/{}', tenant_group_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(TenantGroup)[0]
def list_tenant_groups(self, **query):
return self._list(TenantGroup, '/tenancy/tenant-groups', query)
def create_tenant_group(self, name, slug):
resp = self._request(
method='post',
url=self._format_url('/tenancy/tenant-groups'),
json={
'name': name,
'slug': slug
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new tenant group
return resp.results[0]['id']
def delete_tenant_group(self, tenant_group_id):
resp = self._request(
method='delete',
url=self._format_url('/tenancy/tenant-groups/{}', tenant_group_id))
# Raise on bad status codes
resp.raise_on_status()
def tenant(self, tenant_id):
"""
Get a tenant by the tenant's ID.
:param tenant_id:
:return:
"""
resp = self._request(
method='get',
url=self._format_url('/tenancy/tenants/{}', tenant_id))
# Raise on bad status codes
resp.raise_on_status()
# If there are results, return them
return resp.wrap_results(Tenant)[0]
def list_tenants(self, **query):
return self._list(Tenant, '/tenancy/tenants', query)
def create_tenant(self, name, slug, tenant_group_id, description=None, comments=None, custom_fields=None):
resp = self._request(
method='post',
url=self._format_url('/tenancy/tenants'),
json={
'name': name,
'slug': slug,
'group': tenant_group_id,
'description': description,
'comments': comments,
'custom_fields': custom_fields if custom_fields is not None else dict()
})
# Raise on bad status codes
resp.raise_on_status()
        # Return the ID of the new tenant
return resp.results[0]['id']
def delete_tenant(self, tenant_id):
resp = self._request(
method='delete',
url=self._format_url('/tenancy/tenants/{}', tenant_id))
# Raise on bad status codes
resp.raise_on_status()
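# A minimal usage sketch (illustration only). The host, token and numeric IDs
# are placeholders, and using device_id as a list_interfaces() filter is an
# assumption about the NetBox API's supported query parameters.
def _example_usage():
    client = NetboxClient(host='netbox.example.com', port=443,
                          token='0123456789abcdef', scheme='https')
    device = client.dcim.device(1)
    interfaces = client.dcim.list_interfaces(device_id=1)
    tenant_id = client.tenancy.create_tenant(
        name='Example Tenant', slug='example-tenant', tenant_group_id=1)
    return device, interfaces, tenant_id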
|
|
import json
import copy
import logging
import os
import sys
from lxml import etree
from xblock.core import XML_NAMESPACES
from xblock.fields import Dict, Scope, ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.x_module import XModuleDescriptor, DEPRECATION_VSCOMPAT_EVENT
from xmodule.modulestore.inheritance import own_metadata, InheritanceKeyValueStore
from xmodule.modulestore import EdxJSONEncoder
import platform_core.lib.dogstats_wrapper as dog_stats_api
from lxml.etree import (
Element, ElementTree, XMLParser,
)
log = logging.getLogger(__name__)
# assume all XML files are persisted as utf-8.
EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True,
encoding='utf-8')
def name_to_pathname(name):
"""
Convert a location name for use in a path: replace ':' with '/'.
This allows users of the xml format to organize content into directories
"""
return name.replace(':', '/')
def is_pointer_tag(xml_obj):
"""
Check if xml_obj is a pointer tag: <blah url_name="something" />.
No children, one attribute named url_name, no text.
Special case for course roots: the pointer is
<course url_name="something" org="myorg" course="course">
xml_obj: an etree Element
Returns a bool.
"""
if xml_obj.tag != "course":
expected_attr = set(['url_name'])
else:
expected_attr = set(['url_name', 'course', 'org'])
actual_attr = set(xml_obj.attrib.keys())
has_text = xml_obj.text is not None and len(xml_obj.text.strip()) > 0
return len(xml_obj) == 0 and actual_attr == expected_attr and not has_text
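# For illustration (not part of the original module):
#   is_pointer_tag(etree.fromstring('<video url_name="intro"/>'))            -> True
#   is_pointer_tag(etree.fromstring('<video url_name="intro">hi</video>'))   -> False (has text)
#   is_pointer_tag(etree.fromstring('<course url_name="x" org="y" course="z"/>'))  -> True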
def serialize_field(value):
"""
Return a string version of the value (where value is the JSON-formatted, internally stored value).
If the value is a string, then we simply return what was passed in.
Otherwise, we return json.dumps on the input value.
"""
if isinstance(value, basestring):
return value
return json.dumps(value, cls=EdxJSONEncoder)
def deserialize_field(field, value):
"""
Deserialize the string version to the value stored internally.
Note that this is not the same as the value returned by from_json, as model types typically store
their value internally as JSON. By default, this method will return the result of calling json.loads
on the supplied value, unless json.loads throws a TypeError, or the type of the value returned by json.loads
is not supported for this class (from_json throws an Error). In either of those cases, this method returns
the input value.
"""
try:
deserialized = json.loads(value)
if deserialized is None:
return deserialized
try:
field.from_json(deserialized)
return deserialized
except (ValueError, TypeError):
# Support older serialized version, which was just a string, not result of json.dumps.
# If the deserialized version cannot be converted to the type (via from_json),
# just return the original value. For example, if a string value of '3.4' was
# stored for a String field (before we started storing the result of json.dumps),
# then it would be deserialized as 3.4, but 3.4 is not supported for a String
# field. Therefore field.from_json(3.4) will throw an Error, and we should
# actually return the original value of '3.4'.
return value
except (ValueError, TypeError):
# Support older serialized version.
return value
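# Illustrative sketch of the round trip described above, assuming xblock's Float
# and String field types; the helper below is a hypothetical illustration and is
# not called anywhere.
def _example_serialize_deserialize():
    from xblock.fields import Float, String
    assert serialize_field(3.4) == '3.4'       # json.dumps of a non-string value
    assert serialize_field('3.4') == '3.4'     # strings pass through unchanged
    assert deserialize_field(Float(), '3.4') == 3.4
    # from_json(3.4) fails for a String field, so the original value is kept
    assert deserialize_field(String(), '3.4') == '3.4'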
class XmlParserMixin(object):
"""
Class containing XML parsing functionality shared between XBlock and XModuleDescriptor.
"""
# Extension to append to filename paths
filename_extension = 'xml'
xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export",
default={}, scope=Scope.settings)
# VS[compat]. Backwards compatibility code that can go away after
# importing 2012 courses.
# A set of metadata key conversions that we want to make
metadata_translations = {
'slug': 'url_name',
'name': 'display_name',
}
@classmethod
def _translate(cls, key):
"""
VS[compat]
"""
return cls.metadata_translations.get(key, key)
# The attributes will be removed from the definition xml passed
# to definition_from_xml, and from the xml returned by definition_to_xml
# Note -- url_name isn't in this list because it's handled specially on
# import and export.
metadata_to_strip = ('data_dir',
'tabs', 'grading_policy',
'discussion_blackouts',
# VS[compat] -- remove the below attrs once everything is in the CMS
'course', 'org', 'url_name', 'filename',
# Used for storing xml attributes between import and export, for roundtrips
'xml_attributes')
metadata_to_export_to_policy = ('discussion_topics',)
@staticmethod
def _get_metadata_from_xml(xml_object, remove=True):
"""
Extract the metadata from the XML.
"""
meta = xml_object.find('meta')
if meta is None:
return ''
dmdata = meta.text
if remove:
xml_object.remove(meta)
return dmdata
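    # Illustrative sketch (assuming the lxml etree imported above): for
    #     obj = etree.fromstring('<video><meta>{"display_name": "Intro"}</meta></video>')
    # XmlParserMixin._get_metadata_from_xml(obj) returns the raw string
    # '{"display_name": "Intro"}' and, because remove defaults to True, strips
    # the <meta> node from obj so it does not leak into the definition.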
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Return the definition to be passed to the newly created descriptor
during from_xml
xml_object: An etree Element
"""
raise NotImplementedError("%s does not implement definition_from_xml" % cls.__name__)
@classmethod
def clean_metadata_from_xml(cls, xml_object):
"""
Remove any attribute named for a field with scope Scope.settings from the supplied
xml_object
"""
for field_name, field in cls.fields.items():
if field.scope == Scope.settings and xml_object.get(field_name) is not None:
del xml_object.attrib[field_name]
@classmethod
def file_to_xml(cls, file_object):
"""
Used when this module wants to parse a file object to xml
that will be converted to the definition.
Returns an lxml Element
"""
return etree.parse(file_object, parser=EDX_XML_PARSER).getroot()
@classmethod
def load_file(cls, filepath, fs, def_id): # pylint: disable=invalid-name
"""
Open the specified file in fs, and call cls.file_to_xml on it,
returning the lxml object.
Add details and reraise on error.
"""
try:
with fs.open(filepath) as xml_file:
return cls.file_to_xml(xml_file)
except Exception as err:
# Add info about where we are, but keep the traceback
msg = 'Unable to load file contents at path %s for item %s: %s ' % (
filepath, def_id, err)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def load_definition(cls, xml_object, system, def_id, id_generator):
"""
Load a descriptor definition from the specified xml_object.
Subclasses should not need to override this except in special
cases (e.g. html module)
Args:
xml_object: an lxml.etree._Element containing the definition to load
system: the modulestore system (aka, runtime) which accesses data and provides access to services
def_id: the definition id for the block--used to compute the usage id and asides ids
id_generator: used to generate the usage_id
"""
# VS[compat] -- the filename attr should go away once everything is
# converted. (note: make sure html files still work once this goes away)
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
filepath = ''
aside_children = []
else:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_filename"]
)
filepath = cls._format_filepath(xml_object.tag, filename)
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
)
candidates = cls.backcompat_paths(filepath)
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
usage_id = id_generator.create_usage(def_id)
aside_children = system.parse_asides(definition_xml, def_id, usage_id, id_generator)
# Add the attributes from the pointer node
definition_xml.attrib.update(xml_object.attrib)
definition_metadata = cls._get_metadata_from_xml(definition_xml)
cls.clean_metadata_from_xml(definition_xml)
definition, children = cls.definition_from_xml(definition_xml, system)
if definition_metadata:
definition['definition_metadata'] = definition_metadata
definition['filename'] = [filepath, filename]
if aside_children:
definition['aside_children'] = aside_children
return definition, children
@classmethod
def load_metadata(cls, xml_object):
"""
Read the metadata attributes from this xml_object.
Returns a dictionary {key: value}.
"""
metadata = {'xml_attributes': {}}
for attr, val in xml_object.attrib.iteritems():
# VS[compat]. Remove after all key translations done
attr = cls._translate(attr)
if attr in cls.metadata_to_strip:
if attr in ('course', 'org', 'url_name', 'filename'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xmlparser_util_mixin_load_metadata",
"metadata:{}".format(attr),
)
)
# don't load these
continue
if attr not in cls.fields:
metadata['xml_attributes'][attr] = val
else:
metadata[attr] = deserialize_field(cls.fields[attr], val)
return metadata
@classmethod
def apply_policy(cls, metadata, policy):
"""
Add the keys in policy to metadata, after processing them
through the attrmap. Updates the metadata dict in place.
"""
for attr, value in policy.iteritems():
attr = cls._translate(attr)
if attr not in cls.fields:
# Store unknown attributes coming from policy.json
# in such a way that they will export to xml unchanged
metadata['xml_attributes'][attr] = value
else:
metadata[attr] = value
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator): # pylint: disable=unused-argument
"""
Use `node` to construct a new block.
Arguments:
node (etree.Element): The xml node to parse into an xblock.
runtime (:class:`.Runtime`): The runtime to use while parsing.
keys (:class:`.ScopeIds`): The keys identifying where this block
will store its data.
id_generator (:class:`.IdGenerator`): An object that will allow the
runtime to generate correct definition and usage ids for
children of this block.
Returns (XBlock): The newly parsed XBlock
"""
# VS[compat] -- just have the url_name lookup, once translation is done
url_name = cls._get_url_name(node)
def_id = id_generator.create_definition(node.tag, url_name)
usage_id = id_generator.create_usage(def_id)
aside_children = []
# VS[compat] -- detect new-style each-in-a-file mode
if is_pointer_tag(node):
# new style:
# read the actual definition file--named using url_name.replace(':','/')
definition_xml, filepath = cls.load_definition_xml(node, runtime, def_id)
aside_children = runtime.parse_asides(definition_xml, def_id, usage_id, id_generator)
else:
filepath = None
definition_xml = node
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_parse_xml"]
)
# Note: removes metadata.
definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)
# VS[compat] -- make Ike's github preview links work in both old and
# new file layouts
if is_pointer_tag(node):
# new style -- contents actually at filepath
definition['filename'] = [filepath, filepath]
metadata = cls.load_metadata(definition_xml)
# move definition metadata into dict
dmdata = definition.get('definition_metadata', '')
if dmdata:
metadata['definition_metadata_raw'] = dmdata
try:
metadata.update(json.loads(dmdata))
except Exception as err:
log.debug('Error in loading metadata %r', dmdata, exc_info=True)
metadata['definition_metadata_err'] = str(err)
definition_aside_children = definition.pop('aside_children', None)
if definition_aside_children:
aside_children.extend(definition_aside_children)
# Set/override any metadata specified by policy
cls.apply_policy(metadata, runtime.get_policy(usage_id))
field_data = {}
field_data.update(metadata)
field_data.update(definition)
field_data['children'] = children
field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
xblock = runtime.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
ScopeIds(None, node.tag, def_id, usage_id),
field_data,
)
if aside_children:
asides_tags = [x.tag for x in aside_children]
asides = runtime.get_asides(xblock)
for asd in asides:
if asd.scope_ids.block_type in asides_tags:
xblock.add_aside(asd)
return xblock
@classmethod
def _get_url_name(cls, node):
"""
Reads url_name attribute from the node
"""
return node.get('url_name', node.get('slug'))
@classmethod
def load_definition_xml(cls, node, runtime, def_id):
"""
Loads definition_xml stored in a dedicated file
"""
url_name = cls._get_url_name(node)
filepath = cls._format_filepath(node.tag, name_to_pathname(url_name))
definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id)
return definition_xml, filepath
@classmethod
def _format_filepath(cls, category, name):
return u'{category}/{name}.{ext}'.format(category=category,
name=name,
ext=cls.filename_extension)
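    # Illustrative sketch: with the default filename_extension of 'xml', a block
    # of category 'html' whose url_name is 'chapter:intro' resolves to
    #     cls._format_filepath('html', name_to_pathname('chapter:intro'))
    # which yields u'html/chapter/intro.xml'.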
def export_to_file(self):
"""If this returns True, write the definition of this descriptor to a separate
file.
NOTE: Do not override this without a good reason. It is here
specifically for customtag...
"""
return True
def add_xml_to_node(self, node):
"""
For exporting, set data on `node` from ourselves.
"""
# Get the definition
xml_object = self.definition_to_xml(self.runtime.export_fs)
for aside in self.runtime.get_asides(self):
if aside.needs_serialization():
aside_node = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
aside.add_xml_to_node(aside_node)
xml_object.append(aside_node)
self.clean_metadata_from_xml(xml_object)
# Set the tag on both nodes so we get the file path right.
xml_object.tag = self.category
node.tag = self.category
# Add the non-inherited metadata
for attr in sorted(own_metadata(self)):
# don't want e.g. data_dir
if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:
val = serialize_field(self._field_data.get(self, attr))
try:
xml_object.set(attr, val)
except Exception:
logging.exception(
u'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!',
attr, val, self.url_name
)
for key, value in self.xml_attributes.items():
if key not in self.metadata_to_strip:
xml_object.set(key, serialize_field(value))
if self.export_to_file():
# Write the definition to a file
url_path = name_to_pathname(self.url_name)
filepath = self._format_filepath(self.category, url_path)
self.runtime.export_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
with self.runtime.export_fs.open(filepath, 'w') as fileobj:
ElementTree(xml_object).write(fileobj, pretty_print=True, encoding='utf-8')
else:
# Write all attributes from xml_object onto node
node.clear()
node.tag = xml_object.tag
node.text = xml_object.text
node.tail = xml_object.tail
node.attrib.update(xml_object.attrib)
node.extend(xml_object)
node.set('url_name', self.url_name)
# Special case for course pointers:
if self.category == 'course':
# add org and course attributes on the pointer tag
node.set('org', self.location.org)
node.set('course', self.location.course)
def definition_to_xml(self, resource_fs):
"""
Return a new etree Element object created from this modules definition.
"""
raise NotImplementedError(
"%s does not implement definition_to_xml" % self.__class__.__name__)
@property
def non_editable_metadata_fields(self):
"""
Return a list of all metadata fields that cannot be edited.
"""
non_editable_fields = super(XmlParserMixin, self).non_editable_metadata_fields
non_editable_fields.append(XmlParserMixin.xml_attributes)
return non_editable_fields
class XmlDescriptor(XmlParserMixin, XModuleDescriptor): # pylint: disable=abstract-method
"""
Mixin class for standardized parsing of XModule xml.
"""
resources_dir = None
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses.
Args:
xml_data (str): A string of xml that will be translated into data and children
for this module
system (:class:`.XMLParsingSystem):
id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
usage_ids and definition_ids when loading this xml
"""
# Shim from from_xml to the parse_xml defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define from_xml themselves
# b) call super(..).from_xml(..)
return super(XmlDescriptor, cls).parse_xml(
etree.fromstring(xml_data),
system,
None, # This is ignored by XmlParserMixin
id_generator,
)
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Interpret the parsed XML in `node`, creating an XModuleDescriptor.
"""
if cls.from_xml != XmlDescriptor.from_xml:
# Skip the parse_xml from XmlParserMixin to get the shim parse_xml
# from XModuleDescriptor, which actually calls `from_xml`.
return super(XmlParserMixin, cls).parse_xml(node, runtime, keys, id_generator) # pylint: disable=bad-super-call
else:
return super(XmlDescriptor, cls).parse_xml(node, runtime, keys, id_generator)
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs.
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
"""
# Shim from export_to_xml to the add_xml_to_node defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define export_to_xml themselves
# b) call super(..).export_to_xml(..)
node = Element(self.category)
super(XmlDescriptor, self).add_xml_to_node(node)
return etree.tostring(node)
def add_xml_to_node(self, node):
"""
Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
`node`.
"""
if self.export_to_xml != XmlDescriptor.export_to_xml:
# Skip the add_xml_to_node from XmlParserMixin to get the shim add_xml_to_node
# from XModuleDescriptor, which actually calls `export_to_xml`.
super(XmlParserMixin, self).add_xml_to_node(node) # pylint: disable=bad-super-call
else:
super(XmlDescriptor, self).add_xml_to_node(node)
|
|
import numpy as np
import os
import warnings
import random
import sys
import logging
import scipy
from itertools import product, izip, chain, cycle
from collections import defaultdict
from functools import partial
from pymaptools.iter import izip_with_cycles, isiterable, take
from pymaptools.containers import labels_to_clusters, clusters_to_labels
from pymaptools.sample import discrete_sample, freqs2probas, randround
from pymaptools.io import GzipFileType, PathArgumentParser, write_json_line, read_json_lines, ndjson2col
from pymaptools.benchmark import PMTimer
from lsh_hdc.monte_carlo import utils
from lsh_hdc.fent import minmaxr
from lsh_hdc.utils import _div
from lsh_hdc.metrics import ClusteringMetrics, ConfusionMatrix2
from lsh_hdc.ranking import dist_auc
from sklearn.metrics.ranking import auc
def parse_args(args=None):
parser = PathArgumentParser()
parser.add_argument(
'--logging', type=str, default='WARN', help="Logging level",
choices=[key for key in logging._levelNames.keys() if isinstance(key, str)])
subparsers = parser.add_subparsers()
p_mapper = subparsers.add_parser('mapper')
p_mapper.add_argument('--h0_err', type=float, default=1.0,
help='H0 error rate')
p_mapper.add_argument('--h1_err', type=float, default=0.5,
help='H1 error rate')
p_mapper.add_argument('--population_size', type=int, default=2000,
help='population size')
p_mapper.add_argument('--sim_size', type=int, default=1000,
help='Simulation size')
p_mapper.add_argument('--nclusters', type=int, default=20,
help='number of clusters to generate')
p_mapper.add_argument('--join_negatives', type=int, default=0,
help='whether to join negatives (if split_join<0)')
p_mapper.add_argument('--split_join', type=int, default=0,
help='number of splits (if positive) or joins (if negative) to perform')
p_mapper.add_argument('--sampling_warnings', type=int, default=0,
help='if true, show sampling warnings')
p_mapper.add_argument('--output', type=GzipFileType('w'),
default=sys.stdout, help='Output file')
p_mapper.add_argument('--metrics', type=str, required=True, nargs='*',
help='Which metrics to compute')
p_mapper.set_defaults(func=do_mapper)
p_reducer = subparsers.add_parser('reducer')
p_reducer.add_argument(
'--group_by', type=str, default=None,
help='Field to group by')
p_reducer.add_argument(
'--x_axis', type=str, default=None,
help='Which column to plot as X axis')
p_reducer.add_argument(
'--metrics', type=str, required=True, nargs='*',
help='Which metrics to compute')
p_reducer.add_argument(
'--input', type=GzipFileType('r'), default=sys.stdin, help='File input')
p_reducer.add_argument(
'--output', type=str, metavar='DIR', help='Output directory')
p_reducer.add_argument(
'--fig_title', type=str, default=None, help='Title (for figures generated)')
p_reducer.add_argument(
'--fig_format', type=str, default='svg', help='Figure format')
p_reducer.add_argument(
'--legend_loc', type=str, default='lower left',
help='legend location')
p_reducer.set_defaults(func=do_reducer)
namespace = parser.parse_args(args)
return namespace
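# Illustrative usage sketch (hypothetical script name, paths, and metric names;
# the actual metric identifiers are whatever ClusteringMetrics/ConfusionMatrix2
# expose through get_score):
#
#     python this_script.py mapper --metrics METRIC_A METRIC_B \
#         --output mapper_out.json.gz
#     python this_script.py reducer --metrics METRIC_A METRIC_B \
#         --group_by h1_err --x_axis split_join \
#         --input mapper_out.json.gz --output report_dir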
def do_mapper(args):
params = dict(
n=args.sim_size,
nclusters=args.nclusters,
split_join=args.split_join,
join_negatives=bool(args.join_negatives),
population_size=args.population_size,
with_warnings=args.sampling_warnings,
)
h0 = Grid.with_sim_clusters(p_err=args.h0_err, **params)
h1 = Grid.with_sim_clusters(p_err=args.h1_err, **params)
with PMTimer() as timer:
results = h0.compare(h1, args.metrics)
for result in results:
result.update(timer.to_dict())
result.update(utils.serialize_args(args))
write_json_line(args.output, result)
def auc_xscaled(xs, ys):
"""AUC score scaled to fill x interval
"""
xmin, xmax = minmaxr(xs)
denom = float(xmax - xmin)
xs_corr = [(x - xmin) / denom for x in xs]
return auc(xs_corr, ys)
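# Illustrative sketch: the x values are rescaled onto [0, 1] before computing
# AUC, so scores stay comparable across x axes with different spans. The helper
# below is a hypothetical illustration and is not called anywhere.
def _example_auc_xscaled():
    # xs spans [2, 4]; after rescaling the x coordinates become [0.0, 0.5, 1.0]
    assert abs(auc_xscaled([2.0, 3.0, 4.0], [0.0, 1.0, 1.0]) - 0.75) < 1e-9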
def create_plots(args, df):
import jinja2
import matplotlib.pyplot as plt
from palettable import colorbrewer
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('xx-small')
#groups = df.set_index(args.x_axis).groupby([args.group_by])
groups = df.groupby([args.group_by])
metrics = list(set(args.metrics) & set(df.keys()))
colors = take(len(metrics), cycle(chain(
colorbrewer.qualitative.Dark2_8.mpl_colors,
colorbrewer.qualitative.Set2_8.mpl_colors,
)))
template_loader = jinja2.FileSystemLoader(os.path.join(args.output, '..'))
template_env = jinja2.Environment(loader=template_loader)
template_interactive = template_env.get_template('template_fig_interactive.html')
template_static = template_env.get_template('template_fig_static.html')
table_interactive = []
table_static = []
for group_name, group in groups:
# always sort by X values
group = group.sort([args.x_axis])
if args.fig_title is None:
fig_title = '%s=%s' % (args.group_by, group_name)
else:
fig_title = args.fig_title
# compute AUC scores
ys = []
for metric, color in zip(metrics, colors):
series = group[metric]
score = auc_xscaled(group[args.x_axis].values, series.values)
label = "%s (%.4f)" % (metric, score)
ys.append((score, metric, label, color))
ys.sort(reverse=True)
lbls_old, lbls_new, colors = zip(*ys)[1:4]
group = group[[args.x_axis] + list(lbls_old)] \
.set_index(args.x_axis) \
.rename(columns=dict(zip(lbls_old, lbls_new)))
# create plots
fig, ax = plt.subplots()
group.plot(ax=ax, title=fig_title, color=list(colors))
ax.set_xlim(*minmaxr(group.index.values))
ax.set_ylim(0.4, 1.0)
ax.legend(loc=args.legend_loc, prop=fontP)
fig_name = 'fig-%s.%s' % (group_name, args.fig_format)
fig_path = os.path.join(args.output, fig_name)
csv_name = 'fig-%s.csv' % group_name
csv_path = os.path.join(args.output, csv_name)
group.to_csv(csv_path)
table_interactive.append((
csv_name,
args.x_axis,
"%s=%s" % (args.group_by, group_name),
))
table_static.append(fig_name)
fig.savefig(fig_path, format=args.fig_format)
plt.close(fig)
with open(os.path.join(args.output, 'fig_interactive.html'), 'w') as fh:
fh.write(template_interactive.render(table=table_interactive))
with open(os.path.join(args.output, 'fig_static.html'), 'w') as fh:
fh.write(template_static.render(table=table_static))
def do_reducer(args):
import pandas as pd
obj = ndjson2col(read_json_lines(args.input))
df = pd.DataFrame.from_dict(obj)
csv_path = os.path.join(args.output, "summary.csv")
logging.info("Writing brief summary to %s", csv_path)
df.to_csv(csv_path)
create_plots(args, df)
def run(args):
logging.basicConfig(level=getattr(logging, args.logging))
args.func(args)
def get_conf(obj):
try:
return obj.pairwise
except AttributeError:
return obj
def sample_with_error(label, error_distribution, null_distribution):
"""Return label given error probability and null distributions
error_distribution must be of form {False: 1.0 - p_err, True: p_err}
"""
if discrete_sample(error_distribution):
# to generate error properly, draw from null distribution
return discrete_sample(null_distribution)
else:
# no error: append actual class label
return label
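# Illustrative sketch (hypothetical numbers): with p_err = 0.05 the error
# distribution is {False: 0.95, True: 0.05}, so roughly 95% of calls return the
# true label unchanged and 5% draw a replacement label from the null
# distribution (which includes the negative class 0), simulating labeling noise:
#
#     error_dist = {False: 0.95, True: 0.05}
#     null_dist = freqs2probas([num_neg] + csizes)  # as built in simulate_clustering
#     noisy_label = sample_with_error(3, error_dist, null_dist)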
def relabel_negatives(clusters):
"""Place each negative label in its own class
"""
idx = -1
relabeled = []
for cluster in clusters:
relabeled_cluster = []
for class_label in cluster:
if class_label <= 0:
class_label = idx
relabeled_cluster.append(class_label)
idx -= 1
relabeled.append(relabeled_cluster)
return relabeled
def join_clusters(clusters):
"""Reduce number of clusters 2x by joining
"""
even = clusters[0::2]
odd = clusters[1::2]
if len(even) < len(odd):
even.append([])
elif len(even) > len(odd):
odd.append([])
assert len(even) == len(odd)
result = []
for c1, c2 in izip(even, odd):
result.append(c1 + c2)
return result
def split_clusters(clusters):
"""Increase number of clusters 2x by splitting
"""
result = []
for cluster in clusters:
even = cluster[0::2]
odd = cluster[1::2]
assert len(even) + len(odd) == len(cluster)
if even:
result.append(even)
if odd:
result.append(odd)
return result
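# Illustrative sketch: split_clusters separates the even- and odd-indexed
# members of each cluster, while join_clusters merges clusters pairwise, so the
# two operations roughly double or halve the number of clusters. The helper
# below is a hypothetical illustration and is not called anywhere.
def _example_split_and_join():
    assert split_clusters([[1, 2, 3, 4], [5, 6]]) == [[1, 3], [2, 4], [5], [6]]
    assert join_clusters([[1, 3], [2, 4], [5], [6]]) == [[1, 3, 2, 4], [5, 6]]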
def simulate_clustering(galpha=2, gbeta=10, nclusters=20, pos_ratio=0.2,
p_err=0.05, population_size=2000, split_join=0,
join_negatives=False, with_warnings=True):
if not 0.0 <= p_err <= 1.0:
raise ValueError(p_err)
csizes = map(randround, np.random.gamma(galpha, gbeta, nclusters))
# make sure at least one cluster is generated
num_pos = sum(csizes)
if num_pos == 0:
csizes.append(1)
num_pos += 1
num_neg = max(0, population_size - num_pos)
if with_warnings:
if not 0.0 <= pos_ratio <= 1.0:
raise ValueError(pos_ratio)
expected_num_neg = num_pos * _div(1.0 - pos_ratio, pos_ratio)
actual_neg_ratio = _div(num_neg - expected_num_neg, expected_num_neg)
if abs(actual_neg_ratio) > 0.2:
warnings.warn(
"{:.1%} {} negatives than expected. Got: {} "
"(expected: {}. Recommended population_size: {})"
.format(abs(actual_neg_ratio), ("fewer" if actual_neg_ratio < 0.0 else "more"), num_neg,
int(expected_num_neg), int(expected_num_neg + num_pos)))
# set up probability distributions we will use
null_dist = freqs2probas([num_neg] + csizes)
error_dist = {False: 1.0 - p_err, True: p_err}
# negative case first
negatives = []
for _ in xrange(num_neg):
class_label = sample_with_error(0, error_dist, null_dist)
negatives.append([class_label])
# positive cases
positives = []
for idx, csize in enumerate(csizes, start=1):
if csize < 1:
continue
cluster = []
for _ in xrange(csize):
class_label = sample_with_error(idx, error_dist, null_dist)
cluster.append(class_label)
positives.append(cluster)
if split_join > 0:
for _ in xrange(split_join):
positives = split_clusters(positives)
elif split_join < 0:
for _ in xrange(-split_join):
positives = join_clusters(positives)
if join_negatives:
for _ in xrange(-split_join):
negatives = join_clusters(negatives)
return relabel_negatives(positives + negatives)
def simulate_labeling(sample_size=2000, **kwargs):
clusters = simulate_clustering(**kwargs)
tuples = zip(*clusters_to_labels(clusters))
random.shuffle(tuples)
tuples = tuples[:sample_size]
ltrue, lpred = zip(*tuples) or ([], [])
return ltrue, lpred
class Grid(object):
def __init__(self, seed=None):
if seed is not None:
np.random.seed(seed)
self.max_classes = None
self.max_counts = None
self.n = None
self.size = None
self.grid = None
self.grid_type = None
self.get_matrix = None
self.show_record = None
@classmethod
def with_sim_clusters(cls, n=1000, size=200, seed=None, **kwargs):
obj = cls(seed=seed)
obj.grid = obj.fill_sim_clusters(size=size, n=n, **kwargs)
obj.grid_type = 'sim_clusters'
obj.get_matrix = obj.matrix_from_labels
obj.show_record = obj.show_cluster
return obj
@classmethod
def with_clusters(cls, n=1000, size=200, max_classes=5, seed=None):
obj = cls(seed=seed)
obj.grid = obj.fill_clusters(max_classes=max_classes, size=size, n=n)
obj.grid_type = 'clusters'
obj.get_matrix = obj.matrix_from_labels
obj.show_record = obj.show_cluster
return obj
@classmethod
def with_matrices(cls, n=1000, max_counts=100, seed=None):
obj = cls(seed=seed)
obj.grid = obj.fill_matrices(max_counts=max_counts, n=n)
obj.grid_type = 'matrices'
obj.get_matrix = obj.matrix_from_matrices
obj.show_record = obj.show_matrix
return obj
def show_matrix(self, idx, inverse=False):
grid = self.grid
return grid[0][idx]
def show_cluster(self, idx, inverse=False):
grid = self.grid
a, b = (1, 0) if inverse else (0, 1)
return labels_to_clusters(grid[a][idx], grid[b][idx])
def best_clustering_by_score(self, score, flip_sign=False):
idx, val = self.find_highest(score, flip_sign)
return {"idx": idx,
"found": "%s = %.4f" % (score, val),
"result": self.show_cluster(idx),
"inverse": self.show_cluster(idx, inverse=True)}
@staticmethod
def matrix_from_labels(*args):
ltrue, lpred = args
return ClusteringMetrics.from_labels(ltrue, lpred)
@staticmethod
def matrix_from_matrices(*args):
arr = args[0]
return ConfusionMatrix2.from_ccw(*arr)
def iter_grid(self):
return enumerate(izip(*self.grid))
iter_clusters = iter_grid
def iter_matrices(self):
if self.grid_type in ['matrices']:
for idx, tup in self.iter_grid():
yield idx, self.matrix_from_matrices(*tup)
elif self.grid_type in ['clusters', 'sim_clusters']:
for idx, labels in self.iter_grid():
yield idx, self.matrix_from_labels(*labels)
def describe_matrices(self):
for idx, matrix in self.iter_matrices():
tup = tuple(get_conf(matrix).to_ccw())
max_idx = tup.index(max(tup))
if max_idx != 2:
print idx, tup
def fill_clusters(self, n=None, size=None, max_classes=None):
if n is None:
n = self.n
else:
self.n = n
if size is None:
size = self.size
else:
self.size = size
if max_classes is None:
max_classes = self.max_classes
else:
self.max_classes = max_classes
classes = np.random.randint(
low=0, high=max_classes, size=(n, size))
clusters = np.random.randint(
low=0, high=max_classes, size=(n, size))
return classes, clusters
def fill_sim_clusters(self, n=None, size=None, **kwargs):
if n is None:
n = self.n
else:
self.n = n
if size is None:
size = self.size
else:
self.size = size
classes = np.empty((n, size), dtype=np.int64)
clusters = np.empty((n, size), dtype=np.int64)
for idx in xrange(n):
ltrue, lpred = simulate_labeling(sample_size=size, **kwargs)
classes[idx, :] = ltrue
clusters[idx, :] = lpred
return classes, clusters
def fill_matrices(self, max_counts=None, n=None):
if max_counts is None:
max_counts = self.max_counts
else:
self.max_counts = max_counts
if n is None:
n = self.n
else:
self.n = n
matrices = np.random.randint(
low=0, high=max_counts, size=(n, 4))
return (matrices,)
def find_highest(self, score, flip_sign=False):
best_index = -1
if flip_sign:
direction = 1
curr_score = float('inf')
else:
direction = -1
curr_score = float('-inf')
for idx, conf in self.iter_matrices():
new_score = conf.get_score(score)
if cmp(curr_score, new_score) == direction:
best_index = idx
curr_score = new_score
return (best_index, curr_score)
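    # Illustrative note: with flip_sign=False the scan keeps the matrix with the
    # highest score (cmp(curr_score, new_score) == -1 whenever the new score is
    # larger); with flip_sign=True the comparison direction is inverted, so the
    # index of the lowest-scoring matrix is returned instead.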
def find_matching_matrix(self, matches):
for idx, mx in self.iter_matrices():
mx = get_conf(mx)
if matches(mx):
return idx, mx
def compute(self, scores, show_progress=False, dtype=np.float16):
result = defaultdict(partial(np.empty, (self.n,), dtype=dtype))
if not isiterable(scores):
scores = [scores]
for idx, conf in self.iter_matrices():
if show_progress:
pct_done = 100 * idx / float(self.n)
if pct_done % 5 == 0:
sys.stderr.write("%d%% done\n" % pct_done)
for score in scores:
score_arr = conf.get_score(score)
if isiterable(score_arr):
for j, val in enumerate(score_arr):
result["%s-%d" % (score, j)][idx] = val
else:
result[score][idx] = score_arr
return result
def compare(self, others, scores, dtype=np.float16, plot=False):
result0 = self.compute(scores, dtype=dtype)
if not isiterable(others):
others = [others]
result_grid = []
for other in others:
result1 = other.compute(scores, dtype=dtype)
if plot:
from matplotlib import pyplot as plt
from palettable import colorbrewer
colors = colorbrewer.get_map('Set1', 'qualitative', 9).mpl_colors
result_row = {}
for score_name, scores0 in result0.iteritems():
scores1 = result1[score_name]
auc_score = dist_auc(scores0, scores1)
result_row[score_name] = auc_score
if plot:
scores0p = [x for x in scores0 if not np.isnan(x)]
scores1p = [x for x in scores1 if not np.isnan(x)]
hmin0, hmax0 = minmaxr(scores0p)
hmin1, hmax1 = minmaxr(scores1p)
bins = np.linspace(min(hmin0, hmin1), max(hmax0, hmax1), 50)
plt.hist(scores0p, bins, alpha=0.5, label='0', color=colors[0], edgecolor="none")
plt.hist(scores1p, bins, alpha=0.5, label='1', color=colors[1], edgecolor="none")
plt.legend(loc='upper right')
plt.title("%s: AUC=%.4f" % (score_name, auc_score))
plt.show()
result_grid.append(result_row)
return result_grid
def corrplot(self, compute_result, save_to, symmetric=False, **kwargs):
items = compute_result.items()
if not os.path.exists(save_to):
os.mkdir(save_to)
elif not os.path.isdir(save_to):
raise IOError("save_to already exists and is a file")
seen_pairs = set()
for (lbl1, arr1), (lbl2, arr2) in product(items, items):
if lbl1 == lbl2:
continue
elif (not symmetric) and (lbl2, lbl1) in seen_pairs:
continue
elif (not symmetric) and (lbl1, lbl2) in seen_pairs:
continue
figtitle = "%s vs. %s" % (lbl1, lbl2)
filename = "%s_vs_%s.png" % (lbl1, lbl2)
filepath = os.path.join(save_to, filename)
if os.path.exists(filepath):
warnings.warn("File exists: not overwriting %s" % filepath)
seen_pairs.add((lbl1, lbl2))
seen_pairs.add((lbl2, lbl1))
continue
self.plot([(arr1, arr2)], save_to=filepath, title=figtitle,
xlabel=lbl1, ylabel=lbl2, **kwargs)
seen_pairs.add((lbl1, lbl2))
seen_pairs.add((lbl2, lbl1))
@staticmethod
def plot(pairs, xlim=None, ylim=None, title=None, dither=0.0002,
marker='.', s=0.01, color='black', alpha=1.0, save_to=None,
label=None, xlabel=None, ylabel=None, **kwargs):
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
for (xs, ys), dither_, marker_, s_, color_, label_, alpha_ in \
izip_with_cycles(pairs, dither, marker, s, color, label, alpha):
rho0 = scipy.stats.spearmanr(xs, ys)[0]
rho1 = scipy.stats.spearmanr(ys, xs)[0]
if not np.isclose(rho0, rho1):
# should never happen
raise RuntimeError("Error calculating Spearman's rho")
ax.annotate('$\\rho=%.3f$' % rho0, (0.05, 0.9), xycoords='axes fraction')
if dither_ is not None:
xs = np.random.normal(xs, dither_)
ys = np.random.normal(ys, dither_)
ax.scatter(xs, ys, marker=marker_, s=s_, color=color_,
alpha=alpha_, label=label_, **kwargs)
if label:
legend = ax.legend(loc='upper left', markerscale=80, scatterpoints=1)
for lbl in legend.get_texts():
lbl.set_fontsize('small')
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if save_to is None:
fig.show()
else:
fig.savefig(save_to)
plt.close(fig)
if __name__ == "__main__":
run(parse_args())
|
|
from unittest import mock
import pytest
from opentrons.protocols.parse import parse
from opentrons.protocols.execution.execute_json_v4 import (
dispatch_json, _engage_magnet, _disengage_magnet, load_modules_from_json,
_temperature_module_set_temp,
_temperature_module_deactivate,
_temperature_module_await_temp,
_thermocycler_close_lid,
_thermocycler_open_lid,
_thermocycler_deactivate_block,
_thermocycler_deactivate_lid,
_thermocycler_set_block_temperature,
_thermocycler_set_lid_temperature,
_thermocycler_run_profile,
assert_no_async_tc_behavior,
assert_tc_commands_do_not_use_unimplemented_params,
TC_SPANNING_SLOT)
import opentrons.protocols.execution.execute_json_v4 as v4
from opentrons.protocol_api import (
MagneticModuleContext, TemperatureModuleContext, ThermocyclerContext,
ProtocolContext)
from opentrons.protocols.execution import execute
from opentrons_shared_data.protocol.constants import (
JsonPipetteCommand as JPC,
JsonMagneticModuleCommand as JMMC,
JsonTemperatureModuleCommand as JTMC,
JsonThermocyclerCommand as JTHC
)
# autouse set to True to setup/teardown mock after each run
@pytest.fixture(autouse=True)
def mockObj():
m = mock.Mock()
yield m
    m.reset_mock()
@pytest.fixture
def pipette_command_map(mockObj):
mock_pipette_command_map = {
JPC.blowout.value: mockObj._blowout,
JPC.pickUpTip.value: mockObj._pick_up_tip,
JPC.dropTip.value: mockObj._drop_tip,
JPC.aspirate.value: mockObj._aspirate,
JPC.dispense.value: mockObj._dispense,
JPC.touchTip.value: mockObj._touch_tip,
}
return mock_pipette_command_map
@pytest.fixture
def magnetic_module_command_map(mockObj):
mock_magnetic_module_command_map = {
JMMC.magneticModuleEngageMagnet.value:
mockObj._engage_magnet,
JMMC.magneticModuleDisengageMagnet.value:
mockObj._disengage_magnet,
}
return mock_magnetic_module_command_map
@pytest.fixture
def temperature_module_command_map(mockObj):
mock_temperature_module_command_map = {
JTMC.temperatureModuleSetTargetTemperature.value:
mockObj._temperature_module_set_temp,
JTMC.temperatureModuleDeactivate.value:
mockObj._temperature_module_deactivate,
JTMC.temperatureModuleAwaitTemperature.value:
mockObj._temperature_module_await_temp
}
return mock_temperature_module_command_map
@pytest.fixture
def thermocycler_module_command_map(mockObj):
mock_thermocycler_module_command_map = {
JTHC.thermocyclerCloseLid.value:
mockObj._thermocycler_close_lid,
JTHC.thermocyclerOpenLid.value:
mockObj._thermocycler_open_lid,
JTHC.thermocyclerDeactivateBlock.value:
mockObj._thermocycler_deactivate_block,
JTHC.thermocyclerDeactivateLid.value:
mockObj._thermocycler_deactivate_lid,
JTHC.thermocyclerSetTargetBlockTemperature.value:
mockObj._thermocycler_set_block_temperature,
JTHC.thermocyclerSetTargetLidTemperature.value:
mockObj._thermocycler_set_lid_temperature,
JTHC.thermocyclerRunProfile.value:
mockObj._thermocycler_run_profile,
# NOTE: the thermocyclerAwaitX commands are expected to always
# follow a corresponding SetX command, which is implemented as
        # blocking, so nothing needs to be done for the awaitX commands.
        JTHC.thermocyclerAwaitBlockTemperature.value:
            mockObj.tc_do_nothing,
        JTHC.thermocyclerAwaitLidTemperature.value:
            mockObj.tc_do_nothing,
        JTHC.thermocyclerAwaitProfileComplete.value:
            mockObj.tc_do_nothing
}
return mock_thermocycler_module_command_map
def test_load_modules_from_json():
def fake_module(model, slot=None):
return 'mock_module_ctx_' + model
ctx = mock.create_autospec(ProtocolContext)
ctx.load_module.side_effect = fake_module
protocol = {'modules': {
'aID': {'slot': '1', 'model': 'magneticModuleV1'},
'bID': {'slot': '4', 'model': 'temperatureModuleV2'},
'cID': {
'slot': TC_SPANNING_SLOT,
'model': 'thermocyclerModuleV1'}
}}
result = load_modules_from_json(ctx, protocol)
# load_module should be called in a deterministic order
assert ctx.mock_calls == [
mock.call.load_module('magneticModuleV1', '1'),
mock.call.load_module('temperatureModuleV2', '4'),
mock.call.load_module('thermocyclerModuleV1')
]
# resulting dict should have ModuleContext objects (from calling
# load_module) as its values
assert result == {'aID': 'mock_module_ctx_magneticModuleV1',
'bID': 'mock_module_ctx_temperatureModuleV2',
'cID': 'mock_module_ctx_thermocyclerModuleV1'}
def test_engage_magnet():
module_mock = mock.create_autospec(MagneticModuleContext)
params = {'module': 'someModuleId', 'engageHeight': 4.2, }
_engage_magnet(module_mock, params)
assert module_mock.mock_calls == [
mock.call.engage(height_from_base=4.2)
]
def test_disengage_magnet():
module_mock = mock.create_autospec(MagneticModuleContext)
params = {'module': 'someModuleId'}
_disengage_magnet(module_mock, params)
assert module_mock.mock_calls == [
mock.call.disengage()
]
def test_temperature_module_set_temp():
module_mock = mock.create_autospec(TemperatureModuleContext)
params = {'module': 'someModuleId', 'temperature': 42.5}
_temperature_module_set_temp(module_mock, params)
assert module_mock.mock_calls == [
mock.call.start_set_temperature(42.5)
]
def test_temperature_module_deactivate():
module_mock = mock.create_autospec(TemperatureModuleContext)
params = {'module': 'someModuleId'}
_temperature_module_deactivate(module_mock, params)
assert module_mock.mock_calls == [
mock.call.deactivate()
]
def test_temperature_module_await_temp():
module_mock = mock.create_autospec(TemperatureModuleContext)
params = {'module': 'someModuleId', 'temperature': 12.3}
_temperature_module_await_temp(module_mock, params)
assert module_mock.mock_calls == [
mock.call.await_temperature(12.3)
]
def test_thermocycler_close_lid():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"module": "someModuleId"}
_thermocycler_close_lid(module_mock, params)
assert module_mock.mock_calls == [
mock.call.close_lid()
]
def test_thermocycler_open_lid():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"module": "someModuleId"}
_thermocycler_open_lid(module_mock, params)
assert module_mock.mock_calls == [
mock.call.open_lid()
]
def test_thermocycler_deactivate_block():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"module": "someModuleId"}
_thermocycler_deactivate_block(module_mock, params)
assert module_mock.mock_calls == [
mock.call.deactivate_block()
]
def test_thermocycler_deactivate_lid():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"module": "someModuleId"}
_thermocycler_deactivate_lid(module_mock, params)
assert module_mock.mock_calls == [
mock.call.deactivate_lid()
]
def test_thermocycler_set_block_temperature():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"temperature": 42, "module": "someModuleId"}
_thermocycler_set_block_temperature(module_mock, params)
assert module_mock.mock_calls == [
mock.call.set_block_temperature(42)
]
def test_thermocycler_set_lid_temperature():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {"module": "someModuleId", "temperature": 42}
_thermocycler_set_lid_temperature(module_mock, params)
assert module_mock.mock_calls == [
mock.call.set_lid_temperature(42)
]
def test_thermocycler_run_profile():
module_mock = mock.create_autospec(ThermocyclerContext)
params = {
"profile": [
{'temperature': 55, 'holdTime': 90},
{'temperature': 65, 'holdTime': 30}
],
"module": "someModuleId",
"volume": 98
}
steps = [
{'temperature': 55, 'hold_time_seconds': 90},
{'temperature': 65, 'hold_time_seconds': 30}
]
_thermocycler_run_profile(module_mock, params)
assert module_mock.mock_calls == [
mock.call.execute_profile(
steps=steps, block_max_volume=98, repetitions=1)
]
def test_dispatch_json(
monkeypatch,
pipette_command_map,
magnetic_module_command_map,
temperature_module_command_map,
thermocycler_module_command_map,
mockObj
):
monkeypatch.setattr(v4, '_delay', mockObj)
monkeypatch.setattr(v4, '_move_to_slot', mockObj)
magnetic_module_id = 'magnetic_module_id'
temperature_module_id = 'temperature_module_id'
thermocycler_module_id = 'thermocycler_module_id'
mock_magnetic_module = mock.create_autospec(MagneticModuleContext)
mock_temperature_module = mock.create_autospec(TemperatureModuleContext)
mock_thermocycler_module = mock.create_autospec(ThermocyclerContext)
protocol_data = {'commands': [
# Pipette
{'command': 'delay', 'params': 'delay_params'},
{'command': 'blowout', 'params': 'blowout_params'},
{'command': 'pickUpTip', 'params': 'pickUpTip_params'},
{'command': 'dropTip', 'params': 'dropTip_params'},
{'command': 'aspirate', 'params': 'aspirate_params'},
{'command': 'dispense', 'params': 'dispense_params'},
{'command': 'touchTip', 'params': 'touchTip_params'},
{'command': 'moveToSlot', 'params': 'moveToSlot_params'},
# Magnetic Module
{'command': 'magneticModule/engageMagnet',
'params': {'module': magnetic_module_id}},
{'command': 'magneticModule/disengageMagnet',
'params': {'module': magnetic_module_id}},
# Temperature Module
{'command': 'temperatureModule/setTargetTemperature',
'params': {'module': temperature_module_id}},
{'command': 'temperatureModule/deactivate',
'params': {'module': temperature_module_id}},
{'command': 'temperatureModule/awaitTemperature',
'params': {'module': temperature_module_id}},
# Thermocycler
{
"command": "thermocycler/setTargetBlockTemperature",
"params": {
"module": thermocycler_module_id,
"temperature": 55
}
},
{
"command": "thermocycler/awaitBlockTemperature",
"params": {
"module": thermocycler_module_id,
"temperature": 55
}
},
{
"command": "thermocycler/setTargetLidTemperature",
"params": {
"module": thermocycler_module_id,
"temperature": 60
}
},
{
"command": "thermocycler/awaitLidTemperature",
"params": {
"module": thermocycler_module_id,
"temperature": 60
}
},
{
"command": "thermocycler/closeLid",
"params": {"module": thermocycler_module_id}
},
{
"command": "thermocycler/openLid",
"params": {"module": thermocycler_module_id}
},
{
"command": "thermocycler/deactivateBlock",
"params": {"module": thermocycler_module_id}
},
{
"command": "thermocycler/deactivateLid",
"params": {"module": thermocycler_module_id}
},
{
"command": "thermocycler/closeLid",
"params": {"module": thermocycler_module_id}
},
{
"command": "thermocycler/runProfile",
"params": {
"module": thermocycler_module_id,
"profile": [
{"temperature": 70, "holdTime": 60},
{"temperature": 40, "holdTime": 30},
{"temperature": 72, "holdTime": 60},
{"temperature": 38, "holdTime": 30}
],
"volume": 123
}
},
{
"command": "thermocycler/awaitProfileComplete",
"params": {"module": thermocycler_module_id}
}
]}
context = mock.sentinel.context
instruments = mock.sentinel.instruments
loaded_labware = mock.sentinel.loaded_labware
modules = {
magnetic_module_id: mock_magnetic_module,
temperature_module_id: mock_temperature_module,
thermocycler_module_id: mock_thermocycler_module
}
dispatch_json(
context,
protocol_data,
instruments,
loaded_labware,
modules,
pipette_command_map,
magnetic_module_command_map,
temperature_module_command_map,
thermocycler_module_command_map
)
assert mockObj.mock_calls == [
# Pipette
mock.call._delay(context, 'delay_params'),
mock.call._blowout(instruments, loaded_labware, 'blowout_params'),
mock.call._pick_up_tip(
instruments, loaded_labware, 'pickUpTip_params'),
mock.call._drop_tip(instruments, loaded_labware, 'dropTip_params'),
mock.call._aspirate(
instruments, loaded_labware, 'aspirate_params'),
mock.call._dispense(
instruments, loaded_labware, 'dispense_params'),
mock.call._touch_tip(
instruments, loaded_labware, 'touchTip_params'),
mock.call._move_to_slot(context, instruments, 'moveToSlot_params'),
# Magnetic module
mock.call._engage_magnet(
mock_magnetic_module, {'module': magnetic_module_id}
),
mock.call._disengage_magnet(
mock_magnetic_module, {'module': magnetic_module_id}
),
# Temperature module
mock.call._temperature_module_set_temp(
mock_temperature_module, {'module': temperature_module_id}
),
mock.call._temperature_module_deactivate(
mock_temperature_module, {'module': temperature_module_id}
),
mock.call._temperature_module_await_temp(
mock_temperature_module, {'module': temperature_module_id}
),
# Thermocycler
mock.call._thermocycler_set_block_temperature(
mock_thermocycler_module,
{'module': thermocycler_module_id, 'temperature': 55}
),
# await block temp = do nothing (corresponding set is blocking)
mock.call.tc_do_nothing(
mock_thermocycler_module,
{'module': thermocycler_module_id, 'temperature': 55}
),
mock.call._thermocycler_set_lid_temperature(
mock_thermocycler_module,
{'module': thermocycler_module_id, 'temperature': 60}
),
# await lid temp = do nothing (corresponding set is blocking)
mock.call.tc_do_nothing(
mock_thermocycler_module,
{'module': thermocycler_module_id, 'temperature': 60}
),
mock.call._thermocycler_close_lid(
mock_thermocycler_module, {'module': thermocycler_module_id}
),
mock.call._thermocycler_open_lid(
mock_thermocycler_module, {'module': thermocycler_module_id}
),
mock.call._thermocycler_deactivate_block(
mock_thermocycler_module, {'module': thermocycler_module_id}
),
mock.call._thermocycler_deactivate_lid(
mock_thermocycler_module, {'module': thermocycler_module_id}
),
mock.call._thermocycler_close_lid(
mock_thermocycler_module, {'module': thermocycler_module_id}
),
mock.call._thermocycler_run_profile(
mock_thermocycler_module, {
"module": thermocycler_module_id,
"profile": [
{"temperature": 70, "holdTime": 60},
{"temperature": 40, "holdTime": 30},
{"temperature": 72, "holdTime": 60},
{"temperature": 38, "holdTime": 30}
],
"volume": 123
}
),
# await profile complete = do nothing
# (corresponding set is blocking)
mock.call.tc_do_nothing(
mock_thermocycler_module,
{'module': thermocycler_module_id}
)
]
def test_dispatch_json_invalid_command(
pipette_command_map,
magnetic_module_command_map,
temperature_module_command_map,
thermocycler_module_command_map
):
protocol_data = {'commands': [
{'command': 'no_such_command', 'params': 'foo'},
]}
with pytest.raises(RuntimeError):
dispatch_json(
context=None,
protocol_data=protocol_data,
instruments=None,
loaded_labware=None,
modules=None,
pipette_command_map=pipette_command_map,
magnetic_module_command_map=magnetic_module_command_map,
temperature_module_command_map=temperature_module_command_map,
thermocycler_module_command_map=thermocycler_module_command_map
)
def test_papi_execute_json_v4(monkeypatch, loop, get_json_protocol_fixture):
protocol_data = get_json_protocol_fixture(
'4', 'testModulesProtocol', False)
protocol = parse(protocol_data, None)
ctx = ProtocolContext(loop=loop)
ctx.home()
# Check that we end up executing the protocol ok
execute.run_protocol(protocol, ctx)
def test_assert_no_async_tc_behavior():
setBlock = {
"command": "thermocycler/setTargetBlockTemperature"}
awaitBlock = {
"command": "thermocycler/awaitBlockTemperature"}
setLid = {
"command": "thermocycler/setTargetLidTemperature"}
awaitLid = {
"command": "thermocycler/awaitLidTemperature"}
runProfile = {
"command": "thermocycler/runProfile"}
awaitProfile = {
"command": "thermocycler/awaitProfileComplete"}
anything = {"command": "foo"} # stand-in for some other command
# no error
assert_no_async_tc_behavior([
anything,
setBlock, awaitBlock,
anything,
setLid, awaitLid,
anything,
runProfile, awaitProfile,
anything
])
assert_no_async_tc_behavior([anything, anything])
setters = [setBlock, setLid, runProfile]
for setter in setters:
# Should raise if anything except the corresponding await
# follows a "setter" command
with pytest.raises(RuntimeError):
assert_no_async_tc_behavior([
setter, anything
])
# Should raise if setter is the last command in the array
with pytest.raises(RuntimeError):
assert_no_async_tc_behavior([
anything, setter
])
awaiters = [awaitBlock, awaitLid, awaitProfile]
for awaiter in awaiters:
# Should raise if anything except the corresponding set
# precedes the await
with pytest.raises(RuntimeError):
assert_no_async_tc_behavior([
anything, awaiter
])
# Should raise if awaiter is the first command in the array
with pytest.raises(RuntimeError):
assert_no_async_tc_behavior([
awaiter, anything
])
def test_assert_tc_commands_do_not_use_unimplemented_params():
# should not throw for arbitrary commands
assert_tc_commands_do_not_use_unimplemented_params([
{'command': 'foo', 'params': {}}])
fail_cases = [
[{
'command': 'thermocycler/setTargetBlockTemperature',
'params': {'volume': 0}}]
]
for cmds in fail_cases:
with pytest.raises(RuntimeError):
assert_tc_commands_do_not_use_unimplemented_params(cmds)
|
|
"""
Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
usage.
"""
from __future__ import annotations
import importlib
import os
import sys
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pip._vendor.typing_extensions import Literal # pragma: no cover
from .api import PlatformDirsABC
from .version import __version__, __version_info__
def _set_platform_dir_class() -> type[PlatformDirsABC]:
if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
module, name = "pip._vendor.platformdirs.android", "Android"
elif sys.platform == "win32":
module, name = "pip._vendor.platformdirs.windows", "Windows"
elif sys.platform == "darwin":
module, name = "pip._vendor.platformdirs.macos", "MacOS"
else:
module, name = "pip._vendor.platformdirs.unix", "Unix"
result: type[PlatformDirsABC] = getattr(importlib.import_module(module), name)
return result
PlatformDirs = _set_platform_dir_class() #: Currently active platform
AppDirs = PlatformDirs #: Backwards compatibility with appdirs
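# Illustrative sketch (hypothetical application name): each module-level helper
# below just builds a PlatformDirs instance and returns the matching property,
# so these two expressions are equivalent:
#
#     user_cache_dir("MyApp", appauthor="MyOrg")
#     PlatformDirs(appname="MyApp", appauthor="MyOrg").user_cache_dir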
def user_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir
def site_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data directory shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir
def user_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir
def site_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config directory shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir
def user_cache_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir
def user_state_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir
def user_log_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir
def user_documents_dir() -> str:
"""
:returns: documents directory tied to the user
"""
return PlatformDirs().user_documents_dir
def user_runtime_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir
def user_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path
def site_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data path shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path
def user_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path
def site_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config path shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path
def user_cache_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path
def user_state_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path
def user_log_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path
def user_documents_path() -> Path:
"""
:returns: documents path tied to the user
"""
return PlatformDirs().user_documents_path
def user_runtime_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path
__all__ = [
"__version__",
"__version_info__",
"PlatformDirs",
"AppDirs",
"PlatformDirsABC",
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
"user_data_path",
"user_config_path",
"user_cache_path",
"user_state_path",
"user_log_path",
"user_documents_path",
"user_runtime_path",
"site_data_path",
"site_config_path",
]
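# Illustrative usage sketch (not part of the original module): the convenience
# wrappers above simply delegate to a PlatformDirs instance. "MyApp" and
# "MyCompany" are placeholder appname/appauthor values.
if __name__ == "__main__":
    # Per-user log directory, versioned.
    print(user_log_dir("MyApp", "MyCompany", version="1.0"))
    # Per-user cache location as a pathlib.Path.
    print(user_cache_path("MyApp", "MyCompany"))
    # Site-wide config path shared by all users.
    print(site_config_path("MyApp", "MyCompany"))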
|
|
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def format_for_options(name, value):
name = name.strip()
if isinstance(value, str):
value = value.strip()
LOG.debug('name = %s value %s', name, value)
if name not in OPTIONS:
LOG.warning("Unrecognized DHCP options: %s", name)
return
code, datatype = OPTIONS[name]
try:
value = _format_value(datatype, value)
except Exception:
LOG.warning("Failed to parse DHCP option: %s", name)
return
value = ':'.join(value[i:i + 2] for i in range(0, len(value), 2))
LOG.debug('name = %s value %s', name, value)
return value
def _format_value(datatype, value):
datatype = datatype.strip()
if ',' in datatype:
t1, _, t2 = datatype.partition(',')
v1, _, v2 = value.partition(' ')
return _format_value(t1, v1) + _format_value(t2, v2)
elif datatype.endswith('-list'):
t = datatype[:-5]
return ''.join([_format_value(t, v) for v in value.split(',')])
elif datatype == 'none':
return ''
elif datatype == 'bool':
if value in set([True, 'True', 'yes', 'on', '1']):
return '01'
else:
return '00'
elif datatype.startswith('int'):
length = int(datatype[3:]) // 4
return '{:0{}x}'.format(int(value), length)
elif datatype == 'string':
return (binascii.hexlify(value.encode('utf-8'))).decode('utf-8')
elif datatype == 'ip':
return ''.join(['{:02x}'.format(int(o)) for o in value.split('.')])
elif datatype == 'route':
dest, _, nexthop = value.partition(' ')
return _format_value('ip', dest) + _format_value('ip', nexthop)
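# Illustrative example (not part of the original module): composite datatypes
# such as 'int8,ip-list' are formatted left-to-right by the recursion above,
# e.g. _format_value('int8,ip-list', '0 10.0.0.1') -> '000a000001'.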
def format_for_pnr(name, value):
name = name.strip()
value = value.strip()
if name not in OPTIONS:
LOG.warning("Unrecognized DHCP options: %s", name)
return None
code, datatype = OPTIONS[name]
return {'number': str(code), 'value': value}
OPTIONS = {
'subnet-mask': (1, 'ip'),
'time-offset': (2, 'int32'),
'routers': (3, 'ip-list'),
'time-servers': (4, 'ip-list'),
'name-servers': (5, 'ip-list'),
'domain-name-servers': (6, 'ip-list'),
'log-servers': (7, 'ip-list'),
'quote-servers': (8, 'ip-list'),
'lpr-servers': (9, 'ip-list'),
'impress-servers': (10, 'ip-list'),
'resource-location-servers': (11, 'ip-list'),
'host-name': (12, 'string'),
'boot-size': (13, 'int16'),
'merit-dump': (14, 'string'),
'domain-name': (15, 'string'),
'swap-server': (16, 'ip'),
'root-path': (17, 'string'),
'extension-path': (18, 'string'),
'ip-forwarding': (19, 'bool'),
'non-local-source-routing': (20, 'bool'),
'policy-filter': (21, 'ip-list'),
'max-dgram-reassembly': (22, 'int16'),
'default-ip-ttl': (23, 'int8'),
'path-mtu-aging-timeout': (24, 'int32'),
'path-mtu-plateau-table': (25, 'int16-list'),
'interface-mtu': (26, 'int16'),
'all-subnets-local': (27, 'bool'),
'broadcast-address': (28, 'ip'),
'perform-mask-discovery': (29, 'bool'),
'mask-supplier': (30, 'bool'),
'router-discovery': (31, 'bool'),
'router-solicitation-address': (32, 'ip'),
'static-routes': (33, 'ip-list'),
'trailer-encapsulation': (34, 'bool'),
'arp-cache-timeout': (35, 'int32'),
'ieee802-3-encapsulation': (36, 'bool'),
'default-tcp-ttl': (37, 'int8'),
'tcp-keepalive-interval': (38, 'int32'),
'tcp-keepalive-garbage': (39, 'bool'),
'nis-domain': (40, 'string'),
'nis-servers': (41, 'ip-list'),
'ntp-servers': (42, 'ip-list'),
'vendor-encapsulated-options': (43, 'string'),
'netbios-name-servers': (44, 'ip-list'),
'netbios-dd-server': (45, 'ip-list'),
'netbios-node-type': (46, 'int8'),
'netbios-scope': (47, 'string'),
'font-servers': (48, 'ip-list'),
'x-display-manager': (49, 'ip-list'),
'dhcp-requested-address': (50, 'ip'),
'dhcp-lease-time': (51, 'int32'),
'dhcp-option-overload': (52, 'int8'),
'dhcp-message-type': (53, 'int8'),
'dhcp-server-identifier': (54, 'ip'),
'dhcp-parameter-request-list': (55, 'string'),
'dhcp-message': (56, 'string'),
'dhcp-max-message-size': (57, 'int16'),
'dhcp-renewal-time': (58, 'int32'),
'dhcp-rebinding-time': (59, 'int32'),
'class-id': (60, 'string'),
'dhcp-client-identifier': (61, 'string'),
'nwip-domain': (62, 'string'),
'nwip-suboptions': (63, 'string'),
'nisplus-domain': (64, 'string'),
'nisplus-servers': (65, 'ip-list'),
'tftp-server-name': (66, 'string'),
'bootfile-name': (67, 'string'),
'mobile-ip-home-agent': (68, 'ip-list'),
'smtp-server': (69, 'ip-list'),
'pop-server': (70, 'ip-list'),
'nntp-server': (71, 'ip-list'),
'www-server': (72, 'ip-list'),
'finger-server': (73, 'ip-list'),
'irc-server': (74, 'ip-list'),
'streettalk-server': (75, 'ip-list'),
'streettalk-directory-assistance-server': (76, 'ip-list'),
'user-class': (77, 'string'),
'slp-directory-agent': (78, 'int8,ip-list'),
'slp-service-scope': (79, 'int8,string'),
'rapid-commit': (80, 'none'),
'client-fqdn': (81, 'string'),
'storage-ns': (83, 'string'),
'nds-servers': (85, 'ip-list'),
'nds-tree-name': (86, 'string'),
'nds-context': (87, 'string'),
'bcms-controller-names': (88, 'string'),
'bcms-controller-address': (89, 'string'),
'dhcp-auth': (90, 'string'),
'dhcp-client-last-time': (91, 'int32'),
'associated-ip': (92, 'ip-list'),
'system-architecture': (93, 'int16'),
'interface-id': (94, 'string'),
'ldap-servers': (95, 'ip-list'),
'machine-id': (97, 'string'),
'user-auth': (98, 'string'),
'geoconf-civic': (99, 'string'),
'ieee-1003-1-tz': (100, 'string'),
'ref-tz-db': (101, 'string'),
'netinfo-server-address': (112, 'string'),
'netinfo-server-tag': (113, 'ip-list'),
'default-url': (114, 'string'),
'auto-configure': (116, 'bool'),
'name-search': (117, 'int16-list'),
'subnet-selection': (118, 'ip'),
'domain-search': (119, 'string-list'),
'sip-servers': (120, 'string'),
'classless-static-routes': (121, 'route-list'),
'dhcp-ccc': (122, 'string'),
'dhcp-geoconf': (123, 'string'),
'vendor-class-identifier': (124, 'string'),
'vivso': (125, 'string'),
'tftp-server': (128, 'ip-list'),
'pxe-vendor-specific-129': (129, 'string'),
'pxe-vendor-specific-130': (130, 'string'),
'pxe-vendor-specific-131': (131, 'string'),
'pxe-vendor-specific-132': (132, 'string'),
'pxe-vendor-specific-133': (133, 'string'),
'pxe-vendor-specific-134': (134, 'string'),
'pxe-vendor-specific-135': (135, 'string'),
'pana-agent': (136, 'ip-list'),
'lost-server': (137, 'string'),
'capwap-ac-v4': (138, 'ip-list'),
'dhcp-mos': (139, 'string'),
'dhcp-fqdn-mos': (140, 'string'),
'sip-ua-config-domain': (141, 'string'),
'andsf-servers': (142, 'ip-list'),
'dhcp-geoloc': (144, 'string'),
'force-renew-nonce-cap': (145, 'string'),
'rdnss-selection': (146, 'string'),
'tftp-server-address': (150, 'ip-list'),
'status-code': (151, 'int8,string'),
'dhcp-base-time': (152, 'int32'),
'dhcp-state-start-time': (153, 'int32'),
'dhcp-query-start-time': (154, 'int32'),
'dhcp-query-end-time': (155, 'int32'),
'dhcp-state': (156, 'int8'),
'data-source': (157, 'int8'),
'pcp-server': (158, 'string'),
'dhcp-pxe-magic': (208, 'int32'),
'config-file': (209, 'string'),
'path-prefix': (210, 'string'),
'reboot-time': (211, 'int32'),
'dhcp-6rd': (212, 'string'),
'dhcp-access-domain': (213, 'string'),
'subnet-allocation': (220, 'string'),
'dhcp-vss': (221, 'int8,string')}
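# Illustrative examples (not part of the original module), based on the table
# above: 'routers' is option 3 with datatype 'ip-list'.
#
#   format_for_options('routers', '10.0.0.1,10.0.0.2')
#   # -> '0a:00:00:01:0a:00:00:02'   (hex-encoded, grouped into octets)
#
#   format_for_pnr('routers', '10.0.0.1,10.0.0.2')
#   # -> {'number': '3', 'value': '10.0.0.1,10.0.0.2'}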
|
|
import ctypes
import pymem.ressources.structure
dll = ctypes.WinDLL('kernel32.dll')
#: Opens an existing local process object.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684320%28v=vs.85%29.aspx
OpenProcess = dll.OpenProcess
OpenProcess.restype = ctypes.c_ulonglong
#: Terminates the specified process and all of its threads.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686714%28v=vs.85%29.aspx
TerminateProcess = dll.TerminateProcess
TerminateProcess.restype = ctypes.c_ulong
#: Closes an open object handle.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx
CloseHandle = dll.CloseHandle
CloseHandle.restype = ctypes.c_long
CloseHandle.argtypes = [
ctypes.c_void_p
]
#: Retrieves the calling thread's last-error code value. The last-error code is maintained on a per-thread basis.
#: Multiple threads do not overwrite each other's last-error code.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms679360%28v=vs.85%29.aspx
GetLastError = dll.GetLastError
GetLastError.restype = ctypes.c_ulong
#: Retrieves a pseudo handle for the current process.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx
GetCurrentProcess = dll.GetCurrentProcess
GetCurrentProcess.argtypes = []
GetCurrentProcess.restype = ctypes.c_ulong
#: Reads data from an area of memory in a specified process. The entire area to be read must be accessible or the operation fails.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms680553%28v=vs.85%29.aspx
ReadProcessMemory = dll.ReadProcessMemory
ReadProcessMemory.argtypes = (
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_size_t)
)
ReadProcessMemory.restype = ctypes.c_long
#: Writes data to an area of memory in a specified process.
#: The entire area to be written to must be accessible or the operation fails.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684320%28v=vs.85%29.aspx
WriteProcessMemory = dll.WriteProcessMemory
WriteProcessMemory.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_size_t)
]
WriteProcessMemory.restype = ctypes.c_long
#: Enables a debugger to attach to an active process and debug it.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms679295%28v=vs.85%29.aspx
DebugActiveProcess = dll.DebugActiveProcess
DebugActiveProcess.restype = ctypes.c_long
#: Reserves or commits a region of memory within the virtual address space of a specified process.
#: The function initializes the memory it allocates to zero, unless MEM_RESET is used.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366890%28v=vs.85%29.aspx
VirtualAllocEx = dll.VirtualAllocEx
VirtualAllocEx.restype = ctypes.c_void_p
VirtualAllocEx.argtypes = (
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_ulong,
ctypes.c_ulong,
ctypes.c_ulong
)
#: Changes the protection on a region of committed pages in the virtual address space of a specified process.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366899%28v=vs.85%29.aspx
VirtualProtectEx = dll.VirtualProtectEx
VirtualProtectEx.restype = ctypes.c_long
#: Takes a snapshot of the specified processes, as well as the heaps, modules, and threads used by these processes.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms682489%28v=vs.85%29.aspx
CreateToolhelp32Snapshot = dll.CreateToolhelp32Snapshot
CreateToolhelp32Snapshot.restype = ctypes.c_void_p
CreateToolhelp32Snapshot.argtypes = (ctypes.c_ulong, ctypes.c_ulong)
#: Retrieves information about the first module associated with a process.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684218%28v=vs.85%29.aspx
Module32First = dll.Module32First
Module32First.restype = ctypes.c_ulonglong
Module32First.argtypes = (ctypes.c_void_p, pymem.ressources.structure.LPMODULEENTRY32)
#: Retrieves information about the next module associated with a process or thread.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684221%28v=vs.85%29.aspx
Module32Next = dll.Module32Next
Module32Next.restype = ctypes.c_ulonglong
Module32Next.argtypes = (ctypes.c_void_p, pymem.ressources.structure.LPMODULEENTRY32)
#: Retrieves information about the first process encountered in a system snapshot.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684834%28v=vs.85%29.aspx
Process32First = dll.Process32First
Process32First.restype = ctypes.c_long
#: Retrieves information about the next process recorded in a system snapshot.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684836%28v=vs.85%29.aspx
Process32Next = dll.Process32Next
Process32Next.restype = ctypes.c_long
#: Retrieves information about the first thread of any process encountered in a system snapshot.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686728%28v=vs.85%29.aspx
Thread32First = dll.Thread32First
Thread32First.restype = ctypes.c_long
Thread32First.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(pymem.ressources.structure.ThreadEntry32)
]
#: Retrieves information about the next thread of any process encountered in the system memory snapshot.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686731%28v=vs.85%29.aspx
Thread32Next = dll.Thread32Next
Thread32Next.restype = ctypes.c_long
Thread32Next.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(pymem.ressources.structure.ThreadEntry32)
]
#: Opens an existing thread object.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684335%28v=vs.85%29.aspx
OpenThread = dll.OpenThread
OpenThread.restype = ctypes.c_void_p
OpenThread.argtypes = [
ctypes.c_ulong,
ctypes.c_long,
ctypes.c_ulong
]
#: Suspends the specified thread.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686345%28v=vs.85%29.aspx
SuspendThread = dll.SuspendThread
SuspendThread.restype = ctypes.c_ulong
#: Decrements a thread's suspend count. When the suspend count is decremented to zero, the execution of the thread is resumed.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms685086%28v=vs.85%29.aspx
ResumeThread = dll.ResumeThread
ResumeThread.restype = ctypes.c_ulong
#: Retrieves the context of the specified thread.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms679362%28v=vs.85%29.aspx
GetThreadContext = dll.GetThreadContext
GetThreadContext.restype = ctypes.c_long
#: Sets the context for the specified thread.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms680632%28v=vs.85%29.aspx
SetThreadContext = dll.SetThreadContext
SetThreadContext.restype = ctypes.c_long
#: Releases, decommits, or releases and decommits a region of memory within the virtual address space of a specified process.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366894%28v=vs.85%29.aspx
VirtualFreeEx = dll.VirtualFreeEx
VirtualFreeEx.restype = ctypes.c_long
#: Retrieves information about a range of pages in the virtual address space of the calling process.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907(v=vs.85).aspx
VirtualQueryEx = dll.VirtualQueryEx
VirtualQueryEx.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t
]
VirtualQueryEx.restype = ctypes.c_ulong
#: Determines whether the specified process is running under WOW64.
#:
#: https://msdn.microsoft.com/en-us/library/ms684139(v=vs.85).aspx
IsWow64Process = dll.IsWow64Process
IsWow64Process.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_long)
]
IsWow64Process.restype = ctypes.c_long
#: Retrieves information about the current system.
#:
#: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724381(v=vs.85).aspx
GetSystemInfo = dll.GetSystemInfo
GetSystemInfo.restype = ctypes.c_void_p
GetModuleHandleW = dll.GetModuleHandleW
GetModuleHandleW.restype = ctypes.c_void_p
GetModuleHandleW.argtypes = [ctypes.c_wchar_p]
GetProcAddress = dll.GetProcAddress
GetProcAddress.restype = ctypes.c_void_p
GetProcAddress.argtypes = (ctypes.c_void_p, ctypes.c_char_p)
CreateRemoteThread = dll.CreateRemoteThread
CreateRemoteThread.restype = ctypes.c_void_p
CreateRemoteThread.argtypes = (
ctypes.c_void_p,
pymem.ressources.structure.LPSECURITY_ATTRIBUTES,
ctypes.c_size_t,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_ulong)
)
GetFullPathNameA = dll.GetFullPathNameA
GetFullPathNameA.restype = ctypes.c_ulong
GetFullPathNameA.argtypes = [
ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)
]
WaitForSingleObject = dll.WaitForSingleObject
WaitForSingleObject.restype = ctypes.c_ulong
WaitForSingleObject.argtypes = [
ctypes.c_void_p, ctypes.c_ulong
]
GetExitCodeThread = dll.GetExitCodeThread
GetExitCodeThread.restype = ctypes.c_long
GetExitCodeThread.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_ulong)
]
VirtualFreeEx = dll.VirtualFreeEx
VirtualFreeEx.restype = ctypes.c_long
VirtualFreeEx.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
ctypes.c_ulong
]
GetThreadTimes = dll.GetThreadTimes
GetThreadTimes.restype = ctypes.c_long
GetThreadTimes.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(pymem.ressources.structure.FILETIME),
ctypes.POINTER(pymem.ressources.structure.FILETIME),
ctypes.POINTER(pymem.ressources.structure.FILETIME),
ctypes.POINTER(pymem.ressources.structure.FILETIME)
]
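# Illustrative sketch (not part of the original bindings): reading a few bytes
# from another process with the prototypes declared above. PROCESS_VM_READ is
# the documented access-right value 0x0010; `pid` and `address` are
# placeholders that would come from the caller.
if __name__ == '__main__':
    PROCESS_VM_READ = 0x0010
    pid = 0        # placeholder target process id
    address = 0    # placeholder address inside the target process
    handle = OpenProcess(PROCESS_VM_READ, False, pid)
    if handle:
        buf = ctypes.create_string_buffer(8)
        read = ctypes.c_size_t(0)
        ReadProcessMemory(handle, ctypes.c_void_p(address), buf,
                          ctypes.sizeof(buf), ctypes.byref(read))
        print(read.value, buf.raw)
        CloseHandle(handle)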
|
|
# encoding: utf-8
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = [
("pages", "0001_initial"),
]
def forwards(self, orm):
# Adding model 'ProductOption'
db.create_table('shop_productoption', (
('type', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('cartridge.shop.fields.OptionField')(max_length=50, null=True)),
))
db.send_create_signal('shop', ['ProductOption'])
# Adding model 'Category'
db.create_table('shop_category', (
('content', self.gf('mezzanine.core.fields.HtmlField')()),
('page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
))
db.send_create_signal('shop', ['Category'])
# Adding model 'Product'
db.create_table('shop_product', (
('status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('sale_to', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('available', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('description', self.gf('mezzanine.core.fields.HtmlField')(blank=True)),
('_keywords', self.gf('django.db.models.fields.CharField')(max_length=500)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('short_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('image', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('sale_id', self.gf('django.db.models.fields.IntegerField')(null=True)),
('unit_price', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('mezzanine.core.fields.HtmlField')()),
('expiry_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('publish_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('sale_price', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('sale_from', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('shop', ['Product'])
# Adding M2M table for field keywords on 'Product'
db.create_table('shop_product_keywords', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('product', models.ForeignKey(orm['shop.product'], null=False)),
('keyword', models.ForeignKey(orm['core.keyword'], null=False))
))
db.create_unique('shop_product_keywords', ['product_id', 'keyword_id'])
# Adding M2M table for field categories on 'Product'
db.create_table('shop_product_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('product', models.ForeignKey(orm['shop.product'], null=False)),
('category', models.ForeignKey(orm['shop.category'], null=False))
))
db.create_unique('shop_product_categories', ['product_id', 'category_id'])
# Adding model 'ProductImage'
db.create_table('shop_productimage', (
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='images', to=orm['shop.Product'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
))
db.send_create_signal('shop', ['ProductImage'])
# Adding model 'ProductVariation'
db.create_table('shop_productvariation', (
('sale_to', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('sku', self.gf('cartridge.shop.fields.SKUField')(unique=True, max_length=20)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='variations', to=orm['shop.Product'])),
('sale_from', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('default', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shop.ProductImage'], null=True, blank=True)),
('sale_id', self.gf('django.db.models.fields.IntegerField')(null=True)),
('unit_price', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('num_in_stock', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('sale_price', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('option2', self.gf('cartridge.shop.fields.OptionField')(max_length=50, null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('option1', self.gf('cartridge.shop.fields.OptionField')(max_length=50, null=True)),
))
db.send_create_signal('shop', ['ProductVariation'])
# Adding model 'Order'
db.create_table('shop_order', (
('status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('shipping_detail_country', self.gf('django.db.models.fields.CharField')(max_length=100)),
('additional_instructions', self.gf('django.db.models.fields.TextField')(blank=True)),
('billing_detail_city', self.gf('django.db.models.fields.CharField')(max_length=100)),
('shipping_type', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('billing_detail_country', self.gf('django.db.models.fields.CharField')(max_length=100)),
('shipping_detail_phone', self.gf('django.db.models.fields.CharField')(max_length=20)),
('shipping_detail_city', self.gf('django.db.models.fields.CharField')(max_length=100)),
('total', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('shipping_detail_postcode', self.gf('django.db.models.fields.CharField')(max_length=10)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('billing_detail_phone', self.gf('django.db.models.fields.CharField')(max_length=20)),
('shipping_detail_last_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('billing_detail_street', self.gf('django.db.models.fields.CharField')(max_length=100)),
('shipping_detail_first_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('billing_detail_last_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('discount_code', self.gf('cartridge.shop.fields.DiscountCodeField')(max_length=20, blank=True)),
('billing_detail_postcode', self.gf('django.db.models.fields.CharField')(max_length=10)),
('shipping_total', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('shipping_detail_state', self.gf('django.db.models.fields.CharField')(max_length=100)),
('item_total', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('billing_detail_state', self.gf('django.db.models.fields.CharField')(max_length=100)),
('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('user_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('billing_detail_first_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('shipping_detail_street', self.gf('django.db.models.fields.CharField')(max_length=100)),
('billing_detail_email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('discount_total', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('shop', ['Order'])
# Adding model 'Cart'
db.create_table('shop_cart', (
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('shop', ['Cart'])
# Adding model 'CartItem'
db.create_table('shop_cartitem', (
('sku', self.gf('cartridge.shop.fields.SKUField')(max_length=20)),
('total_price', self.gf('cartridge.shop.fields.MoneyField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200)),
('url', self.gf('django.db.models.fields.CharField')(max_length=200)),
('image', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)),
('unit_price', self.gf('cartridge.shop.fields.MoneyField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
('cart', self.gf('django.db.models.fields.related.ForeignKey')(related_name='items', to=orm['shop.Cart'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quantity', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('shop', ['CartItem'])
# Adding model 'OrderItem'
db.create_table('shop_orderitem', (
('sku', self.gf('cartridge.shop.fields.SKUField')(max_length=20)),
('total_price', self.gf('cartridge.shop.fields.MoneyField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200)),
('unit_price', self.gf('cartridge.shop.fields.MoneyField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='items', to=orm['shop.Order'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quantity', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('shop', ['OrderItem'])
# Adding model 'ProductAction'
db.create_table('shop_productaction', (
('timestamp', self.gf('django.db.models.fields.IntegerField')()),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='actions', to=orm['shop.Product'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('total_purchase', self.gf('django.db.models.fields.IntegerField')(default=0)),
('total_cart', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('shop', ['ProductAction'])
# Adding unique constraint on 'ProductAction', fields ['product', 'timestamp']
db.create_unique('shop_productaction', ['product_id', 'timestamp'])
# Adding model 'Sale'
db.create_table('shop_sale', (
('valid_from', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('valid_to', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('discount_percent', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=4, decimal_places=2, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('discount_exact', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('discount_deduct', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('shop', ['Sale'])
# Adding M2M table for field products on 'Sale'
db.create_table('shop_sale_products', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('sale', models.ForeignKey(orm['shop.sale'], null=False)),
('product', models.ForeignKey(orm['shop.product'], null=False))
))
db.create_unique('shop_sale_products', ['sale_id', 'product_id'])
# Adding M2M table for field categories on 'Sale'
db.create_table('shop_sale_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('sale', models.ForeignKey(orm['shop.sale'], null=False)),
('category', models.ForeignKey(orm['shop.category'], null=False))
))
db.create_unique('shop_sale_categories', ['sale_id', 'category_id'])
# Adding model 'DiscountCode'
db.create_table('shop_discountcode', (
('free_shipping', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('valid_from', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('valid_to', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('code', self.gf('cartridge.shop.fields.DiscountCodeField')(unique=True, max_length=20)),
('min_purchase', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('discount_percent', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=4, decimal_places=2, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('discount_exact', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('discount_deduct', self.gf('cartridge.shop.fields.MoneyField')(null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('shop', ['DiscountCode'])
# Adding M2M table for field products on 'DiscountCode'
db.create_table('shop_discountcode_products', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('discountcode', models.ForeignKey(orm['shop.discountcode'], null=False)),
('product', models.ForeignKey(orm['shop.product'], null=False))
))
db.create_unique('shop_discountcode_products', ['discountcode_id', 'product_id'])
# Adding M2M table for field categories on 'DiscountCode'
db.create_table('shop_discountcode_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('discountcode', models.ForeignKey(orm['shop.discountcode'], null=False)),
('category', models.ForeignKey(orm['shop.category'], null=False))
))
db.create_unique('shop_discountcode_categories', ['discountcode_id', 'category_id'])
def backwards(self, orm):
# Deleting model 'ProductOption'
db.delete_table('shop_productoption')
# Deleting model 'Category'
db.delete_table('shop_category')
# Deleting model 'Product'
db.delete_table('shop_product')
# Removing M2M table for field keywords on 'Product'
db.delete_table('shop_product_keywords')
# Removing M2M table for field categories on 'Product'
db.delete_table('shop_product_categories')
# Deleting model 'ProductImage'
db.delete_table('shop_productimage')
# Deleting model 'ProductVariation'
db.delete_table('shop_productvariation')
# Deleting model 'Order'
db.delete_table('shop_order')
# Deleting model 'Cart'
db.delete_table('shop_cart')
# Deleting model 'CartItem'
db.delete_table('shop_cartitem')
# Deleting model 'OrderItem'
db.delete_table('shop_orderitem')
# Deleting model 'ProductAction'
db.delete_table('shop_productaction')
# Removing unique constraint on 'ProductAction', fields ['product', 'timestamp']
db.delete_unique('shop_productaction', ['product_id', 'timestamp'])
# Deleting model 'Sale'
db.delete_table('shop_sale')
# Removing M2M table for field products on 'Sale'
db.delete_table('shop_sale_products')
# Removing M2M table for field categories on 'Sale'
db.delete_table('shop_sale_categories')
# Deleting model 'DiscountCode'
db.delete_table('shop_discountcode')
# Removing M2M table for field products on 'DiscountCode'
db.delete_table('shop_discountcode_products')
# Removing M2M table for field categories on 'DiscountCode'
db.delete_table('shop_discountcode_categories')
models = {
'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pages.page': {
'Meta': {'object_name': 'Page'},
'_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('mezzanine.core.fields.HtmlField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'object_name': 'Category', '_ormbases': ['pages.Page']},
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('mezzanine.core.fields.HtmlField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'object_name': 'ProductImage'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'unique': 'True', 'max_length': '20'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['shop']
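# Note (illustrative, not part of the generated migration): with South this
# initial schema migration would typically be applied with
#     python manage.py migrate shop
# after adding 'south' and the shop app to INSTALLED_APPS.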
|
|
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Test tool."""
import math
import os
import csv
import pickle
import shutil
from dex.builder import run_external_build_script
from dex.command.ParseCommand import get_command_infos
from dex.debugger.Debuggers import run_debugger_subprocess
from dex.debugger.DebuggerControllers.DefaultController import DefaultController
from dex.dextIR.DextIR import DextIR
from dex.heuristic import Heuristic
from dex.tools import TestToolBase
from dex.utils.Exceptions import DebuggerException
from dex.utils.Exceptions import BuildScriptException, HeuristicException
from dex.utils.PrettyOutputBase import Stream
from dex.utils.ReturnCode import ReturnCode
from dex.dextIR import BuilderIR
class TestCase(object):
def __init__(self, context, name, heuristic, error):
self.context = context
self.name = name
self.heuristic = heuristic
self.error = error
@property
def penalty(self):
try:
return self.heuristic.penalty
except AttributeError:
return float('nan')
@property
def max_penalty(self):
try:
return self.heuristic.max_penalty
except AttributeError:
return float('nan')
@property
def score(self):
try:
return self.heuristic.score
except AttributeError:
return float('nan')
def __str__(self):
if self.error and self.context.options.verbose:
verbose_error = str(self.error)
else:
verbose_error = ''
if self.error:
script_error = (' : {}'.format(
self.error.script_error.splitlines()[0]) if getattr(
self.error, 'script_error', None) else '')
error = ' [{}{}]'.format(
str(self.error).splitlines()[0], script_error)
else:
error = ''
try:
summary = self.heuristic.summary_string
except AttributeError:
summary = '<r>nan/nan (nan)</>'
return '{}: {}{}\n{}'.format(self.name, summary, error, verbose_error)
class Tool(TestToolBase):
"""Run the specified DExTer test(s) with the specified compiler and linker
options and produce a dextIR file as well as printing out the debugging
experience score calculated by the DExTer heuristic.
"""
def __init__(self, *args, **kwargs):
super(Tool, self).__init__(*args, **kwargs)
self._test_cases = []
@property
def name(self):
return 'DExTer test'
def add_tool_arguments(self, parser, defaults):
parser.add_argument('--fail-lt',
type=float,
default=0.0, # By default TEST always succeeds.
help='exit with status FAIL(2) if the test result'
' is less than this value.',
metavar='<float>')
parser.add_argument('--calculate-average',
action="store_true",
help='calculate the average score of every test run')
super(Tool, self).add_tool_arguments(parser, defaults)
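# Illustrative invocation (only --fail-lt and --calculate-average are taken
# from the parser above; the remaining arguments come from the base tool and
# are placeholders):
#     dexter.py test --fail-lt 0.25 --calculate-average [builder/debugger options] <tests>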
def _build_test_case(self):
"""Build an executable from the test source with the given --builder
script and flags (--cflags, --ldflags) in the working directory.
Or, if the --binary option has been given, copy the executable provided
into the working directory and rename it to match the --builder output.
"""
options = self.context.options
if options.binary:
# Copy user's binary into the tmp working directory
shutil.copy(options.binary, options.executable)
builderIR = BuilderIR(
name='binary',
cflags=[options.binary],
ldflags='')
else:
options = self.context.options
compiler_options = [options.cflags for _ in options.source_files]
linker_options = options.ldflags
_, _, builderIR = run_external_build_script(
self.context,
script_path=self.build_script,
source_files=options.source_files,
compiler_options=compiler_options,
linker_options=linker_options,
executable_file=options.executable)
return builderIR
def _init_debugger_controller(self):
step_collection = DextIR(
executable_path=self.context.options.executable,
source_paths=self.context.options.source_files,
dexter_version=self.context.version)
step_collection.commands = get_command_infos(
self.context.options.source_files)
debugger_controller = DefaultController(self.context, step_collection)
return debugger_controller
def _get_steps(self, builderIR):
"""Generate a list of debugger steps from a test case.
"""
debugger_controller = self._init_debugger_controller()
debugger_controller = run_debugger_subprocess(
debugger_controller, self.context.working_directory.path)
steps = debugger_controller.step_collection
steps.builder = builderIR
return steps
def _get_results_basename(self, test_name):
def splitall(x):
while len(x) > 0:
x, y = os.path.split(x)
yield y
all_components = reversed([x for x in splitall(test_name)])
return '_'.join(all_components)
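# Illustrative note (not part of the original): for test_name 'nested/dir/test.cpp'
# the helper above yields 'nested_dir_test.cpp', so the per-test results land in
# '<results_directory>/nested_dir_test.cpp.txt' and 'nested_dir_test.cpp.dextIR'.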
def _get_results_path(self, test_name):
"""Returns the path to the test results directory for the test denoted
by test_name.
"""
return os.path.join(self.context.options.results_directory,
self._get_results_basename(test_name))
def _get_results_text_path(self, test_name):
"""Returns path results .txt file for test denoted by test_name.
"""
test_results_path = self._get_results_path(test_name)
return '{}.txt'.format(test_results_path)
def _get_results_pickle_path(self, test_name):
"""Returns path results .dextIR file for test denoted by test_name.
"""
test_results_path = self._get_results_path(test_name)
return '{}.dextIR'.format(test_results_path)
def _record_steps(self, test_name, steps):
"""Write out the set of steps out to the test's .txt and .json
results file.
"""
output_text_path = self._get_results_text_path(test_name)
with open(output_text_path, 'w') as fp:
self.context.o.auto(str(steps), stream=Stream(fp))
output_dextIR_path = self._get_results_pickle_path(test_name)
with open(output_dextIR_path, 'wb') as fp:
pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
def _record_score(self, test_name, heuristic):
"""Write out the test's heuristic score to the results .txt file.
"""
output_text_path = self._get_results_text_path(test_name)
with open(output_text_path, 'a') as fp:
self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))
def _record_test_and_display(self, test_case):
"""Output test case to o stream and record test case internally for
handling later.
"""
self.context.o.auto(test_case)
self._test_cases.append(test_case)
def _record_failed_test(self, test_name, exception):
"""Instantiate a failed test case with failure exception and
store internally.
"""
test_case = TestCase(self.context, test_name, None, exception)
self._record_test_and_display(test_case)
def _record_successful_test(self, test_name, steps, heuristic):
"""Instantiate a successful test run, store test for handling later.
Display verbose output for test case if required.
"""
test_case = TestCase(self.context, test_name, heuristic, None)
self._record_test_and_display(test_case)
if self.context.options.verbose:
self.context.o.auto('\n{}\n'.format(steps))
self.context.o.auto(heuristic.verbose_output)
def _run_test(self, test_name):
"""Attempt to run test files specified in options.source_files. Store
result internally in self._test_cases.
"""
try:
builderIR = self._build_test_case()
steps = self._get_steps(builderIR)
self._record_steps(test_name, steps)
heuristic_score = Heuristic(self.context, steps)
self._record_score(test_name, heuristic_score)
except (BuildScriptException, DebuggerException,
HeuristicException) as e:
self._record_failed_test(test_name, e)
return
self._record_successful_test(test_name, steps, heuristic_score)
return
def _handle_results(self) -> ReturnCode:
return_code = ReturnCode.OK
options = self.context.options
if not options.verbose:
self.context.o.auto('\n')
if options.calculate_average:
# Calculate and print the average score
score_sum = 0.0
num_tests = 0
for test_case in self._test_cases:
score = test_case.score
if not test_case.error and not math.isnan(score):
score_sum += test_case.score
num_tests += 1
if num_tests != 0:
print("@avg: ({:.4f})".format(score_sum/num_tests))
summary_path = os.path.join(options.results_directory, 'summary.csv')
with open(summary_path, mode='w', newline='') as fp:
writer = csv.writer(fp, delimiter=',')
writer.writerow(['Test Case', 'Score', 'Error'])
for test_case in self._test_cases:
if (test_case.score < options.fail_lt or
test_case.error is not None):
return_code = ReturnCode.FAIL
writer.writerow([
test_case.name, '{:.4f}'.format(test_case.score),
test_case.error
])
return return_code
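    # Illustrative sketch (hypothetical test name and score): the summary.csv
    # written above contains one row per recorded test case, e.g.
    #
    #   Test Case,Score,Error
    #   nested_dir_test.cpp,0.8750,
    #
    # and ReturnCode.FAIL is returned when any score falls below --fail-lt or a
    # test case carries an error.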
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################################################################
# module for the eigenvalue problem
# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
#
# todo:
# - implement balancing
# - aggressive early deflation
#
##################################################################################################
"""
The eigenvalue problem
----------------------
This file contains routines for the eigenvalue problem.
high level routines:
hessenberg : reduction of a real or complex square matrix to upper Hessenberg form
schur : reduction of a real or complex square matrix to upper Schur form
eig : eigenvalues and eigenvectors of a real or complex square matrix
low level routines:
hessenberg_reduce_0 : reduction of a real or complex square matrix to upper Hessenberg form
hessenberg_reduce_1 : auxiliary routine to hessenberg_reduce_0
qr_step : a single implicitly shifted QR step for an upper Hessenberg matrix
hessenberg_qr : Schur decomposition of an upper Hessenberg matrix
eig_tr_r : right eigenvectors of an upper triangular matrix
eig_tr_l : left eigenvectors of an upper triangular matrix
"""
from ..libmp.backend import xrange
class Eigen(object):
pass
def defun(f):
setattr(Eigen, f.__name__, f)
return f
def hessenberg_reduce_0(ctx, A, T):
"""
This routine computes the (upper) Hessenberg decomposition of a square matrix A.
    Given A, a unitary matrix Q is calculated such that
Q' A Q = H and Q' Q = Q Q' = 1
where H is an upper Hessenberg matrix, meaning that it only contains zeros
below the first subdiagonal. Here ' denotes the hermitian transpose (i.e.
transposition and conjugation).
parameters:
A (input/output) On input, A contains the square matrix A of
dimension (n,n). On output, A contains a compressed representation
of Q and H.
T (output) An array of length n containing the first elements of
the Householder reflectors.
"""
# internally we work with householder reflections from the right.
# let u be a row vector (i.e. u[i]=A[i,:i]). then
    # Q is built up by reflectors of the type (1-v'v) where v is a suitable
    # modification of u. these reflectors are applied to A from the right.
# because we work with reflectors from the right we have to start with
# the bottom row of A and work then upwards (this corresponds to
# some kind of RQ decomposition).
# the first part of the vectors v (i.e. A[i,:(i-1)]) are stored as row vectors
# in the lower left part of A (excluding the diagonal and subdiagonal).
# the last entry of v is stored in T.
# the upper right part of A (including diagonal and subdiagonal) becomes H.
n = A.rows
if n <= 2: return
for i in xrange(n-1, 1, -1):
# scale the vector
scale = 0
for k in xrange(0, i):
scale += abs(ctx.re(A[i,k])) + abs(ctx.im(A[i,k]))
scale_inv = 0
if scale != 0:
scale_inv = 1 / scale
if scale == 0 or ctx.isinf(scale_inv):
# sadly there are floating point numbers not equal to zero whose reciprocal is infinity
T[i] = 0
A[i,i-1] = 0
continue
        # calculate parameters for householder transformation
H = 0
for k in xrange(0, i):
A[i,k] *= scale_inv
rr = ctx.re(A[i,k])
ii = ctx.im(A[i,k])
H += rr * rr + ii * ii
F = A[i,i-1]
f = abs(F)
G = ctx.sqrt(H)
A[i,i-1] = - G * scale
if f == 0:
T[i] = G
else:
ff = F / f
T[i] = F + G * ff
A[i,i-1] *= ff
H += G * f
H = 1 / ctx.sqrt(H)
T[i] *= H
for k in xrange(0, i - 1):
A[i,k] *= H
for j in xrange(0, i):
            # apply householder transformation (from right)
G = ctx.conj(T[i]) * A[j,i-1]
for k in xrange(0, i-1):
G += ctx.conj(A[i,k]) * A[j,k]
A[j,i-1] -= G * T[i]
for k in xrange(0, i-1):
A[j,k] -= G * A[i,k]
for j in xrange(0, n):
            # apply householder transformation (from left)
G = T[i] * A[i-1,j]
for k in xrange(0, i-1):
G += A[i,k] * A[k,j]
A[i-1,j] -= G * ctx.conj(T[i])
for k in xrange(0, i-1):
A[k,j] -= G * ctx.conj(A[i,k])
def hessenberg_reduce_1(ctx, A, T):
"""
This routine forms the unitary matrix Q described in hessenberg_reduce_0.
parameters:
A (input/output) On input, A is the same matrix as delivered by
hessenberg_reduce_0. On output, A is set to Q.
T (input) On input, T is the same array as delivered by hessenberg_reduce_0.
"""
n = A.rows
if n == 1:
A[0,0] = 1
return
A[0,0] = A[1,1] = 1
A[0,1] = A[1,0] = 0
for i in xrange(2, n):
if T[i] != 0:
for j in xrange(0, i):
G = T[i] * A[i-1,j]
for k in xrange(0, i-1):
G += A[i,k] * A[k,j]
A[i-1,j] -= G * ctx.conj(T[i])
for k in xrange(0, i-1):
A[k,j] -= G * ctx.conj(A[i,k])
A[i,i] = 1
for j in xrange(0, i):
A[j,i] = A[i,j] = 0
@defun
def hessenberg(ctx, A, overwrite_a = False):
"""
This routine computes the Hessenberg decomposition of a square matrix A.
    Given A, a unitary matrix Q is determined such that
Q' A Q = H and Q' Q = Q Q' = 1
where H is an upper right Hessenberg matrix. Here ' denotes the hermitian
transpose (i.e. transposition and conjugation).
input:
A : a real or complex square matrix
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
      Q : a unitary matrix
H : an upper right Hessenberg matrix
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
>>> Q, H = mp.hessenberg(A)
>>> mp.nprint(H, 3) # doctest:+SKIP
[ 3.15 2.23 4.44]
[-0.769 4.85 3.05]
[ 0.0 3.61 7.0]
>>> print(mp.chop(A - Q * H * Q.transpose_conj()))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
return value: (Q, H)
"""
n = A.rows
if n == 1:
return (ctx.matrix([[1]]), A)
if not overwrite_a:
A = A.copy()
T = ctx.matrix(n, 1)
hessenberg_reduce_0(ctx, A, T)
Q = A.copy()
hessenberg_reduce_1(ctx, Q, T)
for x in xrange(n):
for y in xrange(x+2, n):
A[y,x] = 0
return Q, A
###########################################################################
def qr_step(ctx, n0, n1, A, Q, shift):
"""
This subroutine executes a single implicitly shifted QR step applied to an
    upper Hessenberg matrix A. Given A and shift as input, first a QR
decomposition is calculated:
Q R = A - shift * 1 .
    The output is then the following matrix:
R Q + shift * 1
parameters:
n0, n1 (input) Two integers which specify the submatrix A[n0:n1,n0:n1]
        on which this subroutine operates. The subdiagonal elements
to the left and below this submatrix must be deflated (i.e. zero).
        the following restriction is imposed: n1 >= n0+2
A (input/output) On input, A is an upper Hessenberg matrix.
On output, A is replaced by "R Q + shift * 1"
Q (input/output) The parameter Q is multiplied by the unitary matrix
Q arising from the QR decomposition. Q can also be false, in which
        case the unitary matrix Q is not computed.
      shift (input) a complex number specifying the shift, ideally close to an
        eigenvalue of the bottommost part of the submatrix A[n0:n1,n0:n1].
references:
Stoer, Bulirsch - Introduction to Numerical Analysis.
      Kressner : Numerical Methods for General and Structured Eigenvalue Problems
"""
    # the implicitly shifted QR step and bulge chasing are explained at p.398/399 in "Stoer, Bulirsch - Introduction to Numerical Analysis"
# for bulge chasing see also "Watkins - The Matrix Eigenvalue Problem" sec.4.5,p.173
# the Givens rotation we used is determined as follows: let c,s be two complex
    # numbers. then we have the following relation:
#
# v = sqrt(|c|^2 + |s|^2)
#
# 1/v [ c~ s~] [c] = [v]
# [-s c ] [s] [0]
#
# the matrix on the left is our Givens rotation.
n = A.rows
# first step
# calculate givens rotation
c = A[n0 ,n0] - shift
s = A[n0+1,n0]
v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s)))
if v == 0:
v = 1
c = 1
s = 0
else:
c /= v
s /= v
cc = ctx.conj(c)
cs = ctx.conj(s)
for k in xrange(n0, n):
# apply givens rotation from the left
x = A[n0 ,k]
y = A[n0+1,k]
A[n0 ,k] = cc * x + cs * y
A[n0+1,k] = c * y - s * x
for k in xrange(min(n1, n0+3)):
# apply givens rotation from the right
x = A[k,n0 ]
y = A[k,n0+1]
A[k,n0 ] = c * x + s * y
A[k,n0+1] = cc * y - cs * x
if not isinstance(Q, bool):
for k in xrange(n):
# eigenvectors
x = Q[k,n0 ]
y = Q[k,n0+1]
Q[k,n0 ] = c * x + s * y
Q[k,n0+1] = cc * y - cs * x
# chase the bulge
for j in xrange(n0, n1 - 2):
# calculate givens rotation
c = A[j+1,j]
s = A[j+2,j]
v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s)))
if v == 0:
A[j+1,j] = 0
v = 1
c = 1
s = 0
else:
A[j+1,j] = v
c /= v
s /= v
A[j+2,j] = 0
cc = ctx.conj(c)
cs = ctx.conj(s)
for k in xrange(j+1, n):
# apply givens rotation from the left
x = A[j+1,k]
y = A[j+2,k]
A[j+1,k] = cc * x + cs * y
A[j+2,k] = c * y - s * x
for k in xrange(0, min(n1, j+4)):
# apply givens rotation from the right
x = A[k,j+1]
y = A[k,j+2]
A[k,j+1] = c * x + s * y
A[k,j+2] = cc * y - cs * x
if not isinstance(Q, bool):
for k in xrange(0, n):
# eigenvectors
x = Q[k,j+1]
y = Q[k,j+2]
Q[k,j+1] = c * x + s * y
Q[k,j+2] = cc * y - cs * x
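# A minimal, commented-out sketch of the Givens rotation determined above (real
# values for readability; the same identities hold for complex c and s up to
# rounding):
#
#   from mpmath import mp
#   c, s = mp.mpf(3), mp.mpf(4)
#   v = mp.hypot(c, s)                                # v = 5 = sqrt(|c|^2 + |s|^2)
#   cn, sn = c / v, s / v
#   mp.chop(mp.conj(cn) * c + mp.conj(sn) * s - v)    # ~0: first component becomes v
#   mp.chop(cn * s - sn * c)                          # ~0: second component is annihilated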
def hessenberg_qr(ctx, A, Q):
"""
This routine computes the Schur decomposition of an upper Hessenberg matrix A.
    Given A, a unitary matrix Q is determined such that
Q' A Q = R and Q' Q = Q Q' = 1
where R is an upper right triangular matrix. Here ' denotes the hermitian
transpose (i.e. transposition and conjugation).
parameters:
A (input/output) On input, A contains an upper Hessenberg matrix.
        On output, A is replaced by the upper right triangular matrix R.
Q (input/output) The parameter Q is multiplied by the unitary
matrix Q arising from the Schur decomposition. Q can also be
        false, in which case the unitary matrix Q is not computed.
"""
n = A.rows
norm = 0
for x in xrange(n):
for y in xrange(min(x+2, n)):
norm += ctx.re(A[y,x]) ** 2 + ctx.im(A[y,x]) ** 2
norm = ctx.sqrt(norm) / n
if norm == 0:
return
n0 = 0
n1 = n
eps = ctx.eps / (100 * n)
maxits = ctx.dps * 4
its = totalits = 0
while 1:
# kressner p.32 algo 3
# the active submatrix is A[n0:n1,n0:n1]
k = n0
while k + 1 < n1:
s = abs(ctx.re(A[k,k])) + abs(ctx.im(A[k,k])) + abs(ctx.re(A[k+1,k+1])) + abs(ctx.im(A[k+1,k+1]))
if s < eps * norm:
s = norm
if abs(A[k+1,k]) < eps * s:
break
k += 1
if k + 1 < n1:
# deflation found at position (k+1, k)
A[k+1,k] = 0
n0 = k + 1
its = 0
if n0 + 1 >= n1:
# block of size at most two has converged
n0 = 0
n1 = k + 1
if n1 < 2:
# QR algorithm has converged
return
else:
if (its % 30) == 10:
# exceptional shift
shift = A[n1-1,n1-2]
elif (its % 30) == 20:
# exceptional shift
shift = abs(A[n1-1,n1-2])
elif (its % 30) == 29:
# exceptional shift
shift = norm
else:
# A = [ a b ] det(x-A)=x*x-x*tr(A)+det(A)
# [ c d ]
#
# eigenvalues bad: (tr(A)+sqrt((tr(A))**2-4*det(A)))/2
# bad because of cancellation if |c| is small and |a-d| is small, too.
#
# eigenvalues good: (a+d+sqrt((a-d)**2+4*b*c))/2
t = A[n1-2,n1-2] + A[n1-1,n1-1]
s = (A[n1-1,n1-1] - A[n1-2,n1-2]) ** 2 + 4 * A[n1-1,n1-2] * A[n1-2,n1-1]
if ctx.re(s) > 0:
s = ctx.sqrt(s)
else:
s = ctx.sqrt(-s) * 1j
a = (t + s) / 2
b = (t - s) / 2
if abs(A[n1-1,n1-1] - a) > abs(A[n1-1,n1-1] - b):
shift = b
else:
shift = a
its += 1
totalits += 1
qr_step(ctx, n0, n1, A, Q, shift)
if its > maxits:
raise RuntimeError("qr: failed to converge after %d steps" % its)
@defun
def schur(ctx, A, overwrite_a = False):
"""
This routine computes the Schur decomposition of a square matrix A.
    Given A, a unitary matrix Q is determined such that
Q' A Q = R and Q' Q = Q Q' = 1
where R is an upper right triangular matrix. Here ' denotes the
hermitian transpose (i.e. transposition and conjugation).
input:
A : a real or complex square matrix
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
      Q : a unitary matrix
R : an upper right triangular matrix
return value: (Q, R)
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
>>> Q, R = mp.schur(A)
>>> mp.nprint(R, 3) # doctest:+SKIP
[2.0 0.417 -2.53]
[0.0 4.0 -4.74]
[0.0 0.0 9.0]
>>> print(mp.chop(A - Q * R * Q.transpose_conj()))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
warning: The Schur decomposition is not unique.
"""
n = A.rows
if n == 1:
return (ctx.matrix([[1]]), A)
if not overwrite_a:
A = A.copy()
T = ctx.matrix(n, 1)
hessenberg_reduce_0(ctx, A, T)
Q = A.copy()
hessenberg_reduce_1(ctx, Q, T)
for x in xrange(n):
for y in xrange(x + 2, n):
A[y,x] = 0
hessenberg_qr(ctx, A, Q)
return Q, A
def eig_tr_r(ctx, A):
"""
This routine calculates the right eigenvectors of an upper right triangular matrix.
input:
A an upper right triangular matrix
output:
ER a matrix whose columns form the right eigenvectors of A
return value: ER
"""
# this subroutine is inspired by the lapack routines ctrevc.f,clatrs.f
n = A.rows
ER = ctx.eye(n)
eps = ctx.eps
unfl = ctx.ldexp(ctx.one, -ctx.prec * 30)
# since mpmath effectively has no limits on the exponent, we simply scale doubles up
# original double has prec*20
smlnum = unfl * (n / eps)
simin = 1 / ctx.sqrt(eps)
rmax = 1
for i in xrange(1, n):
s = A[i,i]
smin = max(eps * abs(s), smlnum)
for j in xrange(i - 1, -1, -1):
r = 0
for k in xrange(j + 1, i + 1):
r += A[j,k] * ER[k,i]
t = A[j,j] - s
if abs(t) < smin:
t = smin
r = -r / t
ER[j,i] = r
rmax = max(rmax, abs(r))
if rmax > simin:
for k in xrange(j, i+1):
ER[k,i] /= rmax
rmax = 1
if rmax != 1:
for k in xrange(0, i + 1):
ER[k,i] /= rmax
return ER
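# Commented-out usage sketch (assuming mpmath is importable as mp): for an upper
# triangular R, column i of ER is an eigenvector for the eigenvalue R[i,i].
#
#   from mpmath import mp
#   R = mp.matrix([[2, 1], [0, 3]])
#   ER = eig_tr_r(mp, R)
#   mp.chop(R * ER[:,1] - R[1,1] * ER[:,1])           # -> zero column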
def eig_tr_l(ctx, A):
"""
This routine calculates the left eigenvectors of an upper right triangular matrix.
input:
A an upper right triangular matrix
output:
EL a matrix whose rows form the left eigenvectors of A
return value: EL
"""
n = A.rows
EL = ctx.eye(n)
eps = ctx.eps
unfl = ctx.ldexp(ctx.one, -ctx.prec * 30)
# since mpmath effectively has no limits on the exponent, we simply scale doubles up
# original double has prec*20
smlnum = unfl * (n / eps)
simin = 1 / ctx.sqrt(eps)
rmax = 1
for i in xrange(0, n - 1):
s = A[i,i]
smin = max(eps * abs(s), smlnum)
for j in xrange(i + 1, n):
r = 0
for k in xrange(i, j):
r += EL[i,k] * A[k,j]
t = A[j,j] - s
if abs(t) < smin:
t = smin
r = -r / t
EL[i,j] = r
rmax = max(rmax, abs(r))
if rmax > simin:
for k in xrange(i, j + 1):
EL[i,k] /= rmax
rmax = 1
if rmax != 1:
for k in xrange(i, n):
EL[i,k] /= rmax
return EL
@defun
def eig(ctx, A, left = False, right = True, overwrite_a = False):
"""
This routine computes the eigenvalues and optionally the left and right
eigenvectors of a square matrix A. Given A, a vector E and matrices ER
and EL are calculated such that
A ER[:,i] = E[i] ER[:,i]
EL[i,:] A = EL[i,:] E[i]
E contains the eigenvalues of A. The columns of ER contain the right eigenvectors
of A whereas the rows of EL contain the left eigenvectors.
input:
A : a real or complex square matrix of shape (n, n)
left : if true, the left eigenvectors are calculated.
right : if true, the right eigenvectors are calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E : a list of length n containing the eigenvalues of A.
ER : a matrix whose columns contain the right eigenvectors of A.
EL : a matrix whose rows contain the left eigenvectors of A.
return values:
E if left and right are both false.
(E, ER) if right is true and left is false.
(E, EL) if left is true and right is false.
(E, EL, ER) if left and right are true.
examples:
>>> from mpmath import mp
>>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
>>> E, ER = mp.eig(A)
>>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
[0.0]
[0.0]
[0.0]
>>> E, EL, ER = mp.eig(A,left = True, right = True)
>>> E, EL, ER = mp.eig_sort(E, EL, ER)
>>> mp.nprint(E)
[2.0, 4.0, 9.0]
>>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
[0.0]
[0.0]
[0.0]
>>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0]))
[0.0 0.0 0.0]
warning:
     - If there are multiple eigenvalues, the eigenvectors do not necessarily
       span the whole vector space, i.e. ER and EL may not have full rank.
       Furthermore, in that case the eigenvectors are numerically ill-conditioned.
- In the general case the eigenvalues have no natural order.
see also:
- eigh (or eigsy, eighe) for the symmetric eigenvalue problem.
- eig_sort for sorting of eigenvalues and eigenvectors
"""
n = A.rows
if n == 1:
if left and (not right):
return ([A[0]], ctx.matrix([[1]]))
if right and (not left):
return ([A[0]], ctx.matrix([[1]]))
return ([A[0]], ctx.matrix([[1]]), ctx.matrix([[1]]))
if not overwrite_a:
A = A.copy()
T = ctx.zeros(n, 1)
hessenberg_reduce_0(ctx, A, T)
if left or right:
Q = A.copy()
hessenberg_reduce_1(ctx, Q, T)
else:
Q = False
for x in xrange(n):
for y in xrange(x + 2, n):
A[y,x] = 0
hessenberg_qr(ctx, A, Q)
E = [0 for i in xrange(n)]
for i in xrange(n):
E[i] = A[i,i]
if not (left or right):
return E
if left:
EL = eig_tr_l(ctx, A)
EL = EL * Q.transpose_conj()
if right:
ER = eig_tr_r(ctx, A)
ER = Q * ER
if left and (not right):
return (E, EL)
if right and (not left):
return (E, ER)
return (E, EL, ER)
@defun
def eig_sort(ctx, E, EL = False, ER = False, f = "real"):
"""
This routine sorts the eigenvalues and eigenvectors delivered by ``eig``.
parameters:
E : the eigenvalues as delivered by eig
EL : the left eigenvectors as delivered by eig, or false
ER : the right eigenvectors as delivered by eig, or false
f : either a string ("real" sort by increasing real part, "imag" sort by
increasing imag part, "abs" sort by absolute value) or a function
        mapping complex numbers to the reals, e.g. ``f = lambda x: -mp.re(x)``
would sort the eigenvalues by decreasing real part.
return values:
E if EL and ER are both false.
      (E, ER) if ER is not false and EL is false.
      (E, EL) if EL is not false and ER is false.
(E, EL, ER) if EL and ER are not false.
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
>>> E, EL, ER = mp.eig(A,left = True, right = True)
>>> E, EL, ER = mp.eig_sort(E, EL, ER)
>>> mp.nprint(E)
[2.0, 4.0, 9.0]
>>> E, EL, ER = mp.eig_sort(E, EL, ER,f = lambda x: -mp.re(x))
>>> mp.nprint(E)
[9.0, 4.0, 2.0]
>>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
[0.0]
[0.0]
[0.0]
>>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0]))
[0.0 0.0 0.0]
"""
if isinstance(f, str):
if f == "real":
f = ctx.re
elif f == "imag":
f = ctx.im
elif f == "abs":
f = abs
else:
raise RuntimeError("unknown function %s" % f)
n = len(E)
    # Sort eigenvalues (selection sort: repeatedly pick the smallest remaining key)
for i in xrange(n):
imax = i
s = f(E[i]) # s is the current maximal element
for j in xrange(i + 1, n):
c = f(E[j])
if c < s:
s = c
imax = j
if imax != i:
# swap eigenvalues
z = E[i]
E[i] = E[imax]
E[imax] = z
if not isinstance(EL, bool):
for j in xrange(n):
z = EL[i,j]
EL[i,j] = EL[imax,j]
EL[imax,j] = z
if not isinstance(ER, bool):
for j in xrange(n):
z = ER[j,i]
ER[j,i] = ER[j,imax]
ER[j,imax] = z
if isinstance(EL, bool) and isinstance(ER, bool):
return E
if isinstance(EL, bool) and not(isinstance(ER, bool)):
return (E, ER)
if isinstance(ER, bool) and not(isinstance(EL, bool)):
return (E, EL)
return (E, EL, ER)
|
|
# MySQL Connector/Python - MySQL driver written in Python.
"""Django database Backend using MySQL Connector/Python
This Django database backend is heavily based on the MySQL backend coming
with Django.
Changes include:
* Support for microseconds (MySQL 5.6.3 and later)
* Using INFORMATION_SCHEMA where possible
* Using new defaults, for example for SQL_AUTO_IS_NULL
Requires and comes with MySQL Connector/Python v1.1 and later:
    http://dev.mysql.com/downloads/connector/python/
"""
from __future__ import unicode_literals
import sys
import django
from django.utils.functional import cached_property
try:
import mysql.connector
from mysql.connector.conversion import MySQLConverter
except ImportError as err:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"Error loading )_mysql.connector module: {0}".format(err))
try:
version = mysql.connector.__version_info__[0:3]
except AttributeError:
from mysql.connector.version import VERSION
version = VERSION[0:3]
if version < (1, 1):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"MySQL Connector/Python v1.1.0 or newer "
"is required; you have %s" % mysql.connector.__version__)
from django.db import utils
if django.VERSION < (1, 7):
from django.db.backends import util
else:
from django.db.backends import utils as backend_utils
from django.db.backends import (BaseDatabaseFeatures, BaseDatabaseOperations,
BaseDatabaseWrapper)
from django.db.backends.signals import connection_created
from django.utils import (six, timezone, dateparse)
from django.conf import settings
from mysql.connector.django.client import DatabaseClient
from mysql.connector.django.creation import DatabaseCreation
from mysql.connector.django.introspection import DatabaseIntrospection
from mysql.connector.django.validation import DatabaseValidation
if django.VERSION >= (1, 7):
from mysql.connector.django.schema import DatabaseSchemaEditor
try:
import pytz
HAVE_PYTZ = True
except ImportError:
HAVE_PYTZ = False
DatabaseError = mysql.connector.DatabaseError
IntegrityError = mysql.connector.IntegrityError
NotSupportedError = mysql.connector.NotSupportedError
class DjangoMySQLConverter(MySQLConverter):
"""Custom converter for Django"""
def _TIME_to_python(self, value, dsc=None):
"""Return MySQL TIME data type as datetime.time()
Returns datetime.time()
"""
return dateparse.parse_time(value.decode('utf-8'))
def _DATETIME_to_python(self, value, dsc=None):
"""Connector/Python always returns naive datetime.datetime
Connector/Python always returns naive timestamps since MySQL has
no time zone support. Since Django needs non-naive, we need to add
the UTC time zone.
Returns datetime.datetime()
"""
if not value:
return None
dt = MySQLConverter._DATETIME_to_python(self, value)
if dt is None:
return None
if settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def _safetext_to_mysql(self, value):
return self._str_to_mysql(value)
def _safebytes_to_mysql(self, value):
return self._bytes_to_mysql(value)
class CursorWrapper(object):
"""Wrapper around MySQL Connector/Python's cursor class.
The cursor class is defined by the options passed to MySQL
Connector/Python. If buffered option is True in those options,
MySQLCursorBuffered will be used.
"""
def __init__(self, cursor):
self.cursor = cursor
def _execute_wrapper(self, method, query, args):
"""Wrapper around execute() and executemany()"""
try:
return method(query, args)
except (mysql.connector.ProgrammingError) as err:
six.reraise(utils.ProgrammingError,
utils.ProgrammingError(err.msg), sys.exc_info()[2])
except (mysql.connector.IntegrityError) as err:
six.reraise(utils.IntegrityError,
utils.IntegrityError(err.msg), sys.exc_info()[2])
except mysql.connector.OperationalError as err:
six.reraise(utils.DatabaseError,
utils.DatabaseError(err.msg), sys.exc_info()[2])
except mysql.connector.DatabaseError as err:
six.reraise(utils.DatabaseError,
utils.DatabaseError(err.msg), sys.exc_info()[2])
def execute(self, query, args=None):
"""Executes the given operation
This wrapper method around the execute()-method of the cursor is
mainly needed to re-raise using different exceptions.
"""
return self._execute_wrapper(self.cursor.execute, query, args)
def executemany(self, query, args):
"""Executes the given operation
This wrapper method around the executemany()-method of the cursor is
mainly needed to re-raise using different exceptions.
"""
return self._execute_wrapper(self.cursor.executemany, query, args)
def __getattr__(self, attr):
"""Return attribute of wrapped cursor"""
return getattr(self.cursor, attr)
def __iter__(self):
"""Returns iterator over wrapped cursor"""
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
class DatabaseFeatures(BaseDatabaseFeatures):
"""Features specific to MySQL
Microsecond precision is supported since MySQL 5.6.3 and turned on
by default.
"""
empty_fetchmany_value = []
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_long_model_names = False
supports_binary_field = six.PY2
supports_microsecond_precision = False # toggled in __init__()
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_binary_field = False
can_introspect_boolean_field = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
allows_primary_key_0 = False
uses_savepoints = True
atomic_transactions = False
supports_column_check_constraints = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
self.supports_microsecond_precision = self._microseconds_precision()
def _microseconds_precision(self):
if self.connection.mysql_version >= (5, 6, 3):
return True
return False
@cached_property
def _mysql_storage_engine(self):
"""Get default storage engine of MySQL
This method creates a table without ENGINE table option and inspects
which engine was used.
Used by Django tests.
"""
tblname = 'INTROSPECT_TEST'
droptable = 'DROP TABLE IF EXISTS {table}'.format(table=tblname)
with self.connection.cursor() as cursor:
cursor.execute(droptable)
cursor.execute('CREATE TABLE {table} (X INT)'.format(table=tblname))
if self.connection.mysql_version >= (5, 0, 0):
cursor.execute(
"SELECT ENGINE FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s",
(self.connection.settings_dict['NAME'], tblname))
engine = cursor.fetchone()[0]
else:
# Very old MySQL servers..
cursor.execute("SHOW TABLE STATUS WHERE Name='{table}'".format(
table=tblname))
engine = cursor.fetchone()[1]
cursor.execute(droptable)
self._cached_storage_engine = engine
return engine
@cached_property
def can_introspect_foreign_keys(self):
"""Confirm support for introspected foreign keys
        Only the InnoDB storage engine supports foreign keys (not taking
into account MySQL Cluster here).
"""
return self._mysql_storage_engine == 'InnoDB'
@cached_property
def has_zoneinfo_database(self):
"""Tests if the time zone definitions are installed
        MySQL accepts full time zone names (e.g. Africa/Nairobi) but rejects
        abbreviations (e.g. EAT). When pytz isn't installed and the current
time zone is LocalTimezone (the only sensible value in this context),
the current time zone name will be an abbreviation. As a consequence,
MySQL cannot perform time zone conversions reliably.
"""
# Django 1.6
if not HAVE_PYTZ:
return False
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1 FROM )_mysql.time_zone LIMIT 1")
return cursor.fetchall() != []
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = ")_mysql.connector.django.compiler"
# MySQL stores positive fields as UNSIGNED ints.
if django.VERSION >= (1, 7):
integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
PositiveSmallIntegerField=(0, 4294967295),
PositiveIntegerField=(
0, 18446744073709551615),)
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK({0})".format(field_name)
else:
return "EXTRACT({0} FROM {1})".format(
lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
"""Returns SQL simulating DATE_TRUNC
This function uses MySQL functions DATE_FORMAT and CAST to
simulate DATE_TRUNC.
The field_name is returned when lookup_type is not supported.
"""
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%Y-', '%m', '-%d', ' %H:', '%i', ':%S')
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
# Wrong lookup type, just return the value from MySQL as-is
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] +
[f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT({0}, '{1}') AS DATETIME)".format(
field_name, format_str)
return sql
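    # Worked example (hypothetical column name): date_trunc_sql('month', 'created')
    # builds "CAST(DATE_FORMAT(created, '%Y-%m-01 00:00:00') AS DATETIME)", i.e.
    # every component below the month is reset to the defaults above.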
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Django 1.6
if settings.USE_TZ:
field_name = "CONVERT_TZ({0}, 'UTC', %s)".format(field_name)
params = [tzname]
else:
params = []
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
sql = "DAYOFWEEK({0})".format(field_name)
else:
sql = "EXTRACT({0} FROM {1})".format(lookup_type.upper(),
field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Django 1.6
if settings.USE_TZ:
field_name = "CONVERT_TZ({0}, 'UTC', %s)".format(field_name)
params = [tzname]
else:
params = []
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format_ = ('%Y-', '%m', '-%d', ' %H:', '%i', ':%S')
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format_[:i]] +
[f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT({0}, '{1}') AS DATETIME)".format(
field_name, format_str)
return sql, params
def date_interval_sql(self, sql, connector, timedelta):
"""Returns SQL for calculating date/time intervals
"""
fmt = (
"({sql} {connector} INTERVAL '{days} "
"0:0:{secs}:{msecs}' DAY_MICROSECOND)"
)
return fmt.format(
sql=sql,
connector=connector,
days=timedelta.days,
secs=timedelta.seconds,
msecs=timedelta.microseconds
)
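    # Worked example (hypothetical arguments): with sql='due_date', connector='+'
    # and timedelta(days=1, seconds=30, microseconds=500), the method returns
    # "(due_date + INTERVAL '1 0:0:30:500' DAY_MICROSECOND)".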
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH ({0}) AGAINST (%s IN BOOLEAN MODE)'.format(field_name)
def last_executed_query(self, cursor, sql, params):
return cursor.statement
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`{0}`".format(name)
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('{keyword} {table};'.format(
keyword=style.SQL_KEYWORD('TRUNCATE'),
table=style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
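    # Illustrative output (hypothetical table names, assuming the style object
    # returns its input unchanged): for tables ['app_user', 'app_order'] the
    # statements are, in order:
    #   SET FOREIGN_KEY_CHECKS = 0;
    #   TRUNCATE `app_user`;
    #   TRUNCATE `app_order`;
    #   SET FOREIGN_KEY_CHECKS = 1;
    # followed by anything returned by sequence_reset_by_name_sql().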
def sequence_reset_by_name_sql(self, style, sequences):
# Truncate already resets the AUTO_INCREMENT field from
# MySQL version 5.0.13 onwards. Refs #16961.
res = []
if self.connection.mysql_version < (5, 0, 13):
fmt = "{alter} {table} {{tablename}} {auto_inc} {field};".format(
alter=style.SQL_KEYWORD('ALTER'),
table=style.SQL_KEYWORD('TABLE'),
auto_inc=style.SQL_KEYWORD('AUTO_INCREMENT'),
field=style.SQL_FIELD('= 1')
)
for sequence in sequences:
tablename = style.SQL_TABLE(self.quote_name(sequence['table']))
res.append(fmt.format(tablename=tablename))
return res
return res
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError(
"MySQL backend does not support timezone-aware times."
)
return self.connection.converter.to_mysql(value)
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware "
"times.")
return self.connection.converter.to_mysql(value)
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '{0}-01-01 00:00:00'
second = '{0}-12-31 23:59:59.999999'
return [first.format(value), second.format(value)]
def year_lookup_bounds_for_datetime_field(self, value):
# Django 1.6
# Again, no microseconds
first, second = super(DatabaseOperations,
self).year_lookup_bounds_for_datetime_field(value)
if self.connection.mysql_version >= (5, 6, 4):
return [first.replace(microsecond=0), second]
else:
return [first.replace(microsecond=0),
second.replace(microsecond=0)]
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "({0})".format(", ".join(["%s"] * len(fields)))
return "VALUES " + ", ".join([items_sql] * num_values)
def savepoint_create_sql(self, sid):
return "SAVEPOINT {0}".format(sid)
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT {0}".format(sid)
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT {0}".format(sid)
def combine_expression(self, connector, sub_expressions):
"""
MySQL requires special cases for ^ operators in query expressions
"""
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(
connector, sub_expressions)
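    # Worked example (hypothetical operands): combine_expression('^', ['col_a', '2'])
    # returns 'POW(col_a,2)'; every other connector falls through to the base
    # implementation.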
class DatabaseWrapper(BaseDatabaseWrapper):
    vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
Database = mysql.connector
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.converter = DjangoMySQLConverter()
self.ops = DatabaseOperations(self)
self.features = DatabaseFeatures(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def _valid_connection(self):
if self.connection:
return self.connection.is_connected()
return False
def get_connection_params(self):
# Django 1.6
kwargs = {
'charset': 'utf8',
'use_unicode': True,
'buffered': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['database'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# Raise exceptions for database warnings if DEBUG is on
kwargs['raise_on_warnings'] = settings.DEBUG
kwargs['client_flags'] = [
# Need potentially affected rows on UPDATE
mysql.connector.constants.ClientFlag.FOUND_ROWS,
]
try:
kwargs.update(settings_dict['OPTIONS'])
except KeyError:
# OPTIONS missing is OK
pass
return kwargs
def get_new_connection(self, conn_params):
# Django 1.6
cnx = mysql.connector.connect(**conn_params)
cnx.set_converter_class(DjangoMySQLConverter)
return cnx
def init_connection_state(self):
# Django 1.6
if self.mysql_version < (5, 5, 3):
# See sysvar_sql_auto_is_null in MySQL Reference manual
self.connection.cmd_query("SET SQL_AUTO_IS_NULL = 0")
if 'AUTOCOMMIT' in self.settings_dict:
try:
# Django 1.6
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
except AttributeError:
self._set_autocommit(self.settings_dict['AUTOCOMMIT'])
def create_cursor(self):
# Django 1.6
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _connect(self):
"""Setup the connection with MySQL"""
self.connection = self.get_new_connection(self.get_connection_params())
connection_created.send(sender=self.__class__, connection=self)
self.init_connection_state()
def _cursor(self):
"""Return a CursorWrapper object
Returns a CursorWrapper
"""
try:
# Django 1.6
return super(DatabaseWrapper, self)._cursor()
except AttributeError:
if not self.connection:
self._connect()
return self.create_cursor()
def get_server_version(self):
"""Returns the MySQL server version of current connection
Returns a tuple
"""
try:
# Django 1.6
self.ensure_connection()
except AttributeError:
if not self.connection:
self._connect()
return self.connection.get_server_version()
def disable_constraint_checking(self):
"""Disables foreign key checks
Disables foreign key checks, primarily for use in adding rows with
forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
Returns True
"""
self.cursor().execute('SET @@session.foreign_key_checks = 0')
return True
def enable_constraint_checking(self):
"""Re-enable foreign key checks
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
if django.VERSION >= (1, 6):
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET @@session.foreign_key_checks = 1')
finally:
if django.VERSION >= (1, 6):
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""Check rows in tables for invalid foreign key references
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while
constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
ref_query = """
SELECT REFERRING.`{0}`, REFERRING.`{1}` FROM `{2}` as REFERRING
LEFT JOIN `{3}` as REFERRED
ON (REFERRING.`{4}` = REFERRED.`{5}`)
WHERE REFERRING.`{6}` IS NOT NULL AND REFERRED.`{7}` IS NULL"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = \
self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor,
table_name)
for column_name, referenced_table_name, referenced_column_name \
in key_columns:
cursor.execute(ref_query.format(primary_key_column_name,
column_name, table_name,
referenced_table_name,
column_name,
referenced_column_name,
column_name,
referenced_column_name))
for bad_row in cursor.fetchall():
msg = ("The row in table '{0}' with primary key '{1}' has "
"an invalid foreign key: {2}.{3} contains a value "
"'{4}' that does not have a corresponding value in "
"{5}.{6}.".format(table_name, bad_row[0],
table_name, column_name,
bad_row[1], referenced_table_name,
referenced_column_name))
raise utils.IntegrityError(msg)
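    # Illustrative expansion of ref_query (hypothetical tables and columns): for a
    # table 'app_order' with primary key 'id' whose 'customer_id' column references
    # 'app_customer'.'id', the executed query becomes:
    #
    #   SELECT REFERRING.`id`, REFERRING.`customer_id` FROM `app_order` as REFERRING
    #   LEFT JOIN `app_customer` as REFERRED
    #   ON (REFERRING.`customer_id` = REFERRED.`id`)
    #   WHERE REFERRING.`customer_id` IS NOT NULL AND REFERRED.`id` IS NULL
    #
    # i.e. it selects rows whose foreign key points at a missing referenced row.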
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except NotSupportedError:
pass
def _set_autocommit(self, autocommit):
# Django 1.6
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def schema_editor(self, *args, **kwargs):
"""Returns a new instance of this backend's SchemaEditor"""
# Django 1.7
return DatabaseSchemaEditor(self, *args, **kwargs)
def is_usable(self):
# Django 1.6
return self.connection.is_connected()
@cached_property
def mysql_version(self):
config = self.get_connection_params()
temp_conn = mysql.connector.connect(**config)
server_version = temp_conn.get_server_version()
temp_conn.close()
return server_version
|
|
import logging
import unittest
from mock import MagicMock
from crutch.core.runner import Runner, Runners
from crutch.core.runtime import RuntimeEnvironment
from crutch.core.features.basics import Feature, FeatureCategory
def create_runtime(runner):
return RuntimeEnvironment(Runners({'runner': runner}))
def create_feature(*args, **kwargs):
return MagicMock(Feature, wraps=Feature(*args, **kwargs))
def create_category(*args, **kwargs):
return MagicMock(FeatureCategory, wraps=FeatureCategory(*args, **kwargs))
class FeatureCtrlReplProviderTest(unittest.TestCase):
def test_generate(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie'], mono=False)
self.register_feature_class('foxtrot', Feature)
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
renv.feature_ctrl.activate_features(['bravo', 'charlie', 'foxtrot'])
    # Collect all the replacements
renv.repl.fetch()
self.assertTrue(renv.repl.get('project_feature_category_alpha'))
self.assertTrue(renv.repl.get('project_feature_bravo'))
self.assertTrue(renv.repl.get('project_feature_charlie'))
self.assertTrue(renv.repl.get('project_feature_category_echo'))
self.assertTrue(renv.repl.get('project_feature_foxtrot'))
class FeatureCtrlTestCircularDependencies(unittest.TestCase):
@unittest.expectedFailure
def test_category_require_self(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], requires=['alpha'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_category_require_self_transitively(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], requires=['echo'])
self.register_feature_class('foxtrot', Feature)
self.register_feature_category_class(
'echo', features=['foxtrot'], requires=['alpha'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_feature_require_self(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature, requires=['bravo'])
self.register_feature_category_class('alpha', features=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_feature_require_self_transitively(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], requires=['foxtrot'])
self.register_feature_class('foxtrot', Feature)
self.register_feature_category_class(
'echo', features=['foxtrot'], requires=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_feature_require_own_category(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature, requires=['alpha'])
self.register_feature_category_class('alpha', features=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_feature_reattaching(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature, requires=['alpha'])
self.register_feature_category_class('alpha', features=['bravo'])
self.register_feature_category_class('echo', features=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
class FeatureCtrlTestNames(unittest.TestCase):
@unittest.expectedFailure
def test_activate_unknown_names(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class('alpha', features=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['oscar'])
@unittest.expectedFailure
def test_deactivate_unknown_names(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class('alpha', features=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.deactivate_features(['oscar'])
class FeatureCtrlTestMono(unittest.TestCase):
def setUp(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie', 'delta'], mono=True)
self.register_feature_class('foxtrot', Feature)
self.register_feature_class('golf', Feature)
self.register_feature_class('hotel', Feature)
self.register_feature_category_class(
'echo', features=['foxtrot', 'golf', 'hotel'], mono=False)
self.renv = create_runtime(RunnerBlah)
self.renv.create_runner('runner')
self.ctrl = self.renv.feature_ctrl
@unittest.expectedFailure
def test_fail_on_many_featuers_of_mono_category(self):
self.ctrl.activate_features(['bravo', 'charlie', 'delta'])
def test_no_fail_on_many_ftrs_multi_category(self):
self.ctrl.activate_features(['foxtrot', 'golf', 'hotel'])
class FeatureCtrlTestActivationOrder(unittest.TestCase):
def test_default_activates_all_defaults(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie', 'delta'],
mono=False)
self.register_feature_class('foxtrot', Feature)
self.register_feature_class('golf', Feature)
self.register_feature_class('hotel', Feature)
self.register_feature_category_class(
'echo',
features=['foxtrot', 'golf', 'hotel'],
defaults=['foxtrot', 'golf', 'hotel'],
mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = sorted(['foxtrot', 'golf', 'hotel', 'bravo', 'charlie', 'delta'])
total_order, _ = ctrl.get_activation_order('default')
self.assertEqual(expect, sorted(total_order))
def test_category_activates_all_defaults(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie'],
mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = sorted(['bravo', 'charlie'])
total_order, _ = ctrl.get_activation_order(['alpha'])
self.assertEqual(expect, sorted(total_order))
def test_category_and_its_feature(self):
"""
    If features and their category are both mentioned in the activation order,
    only the features stay
"""
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie'], mono=True)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])
self.assertEqual(['bravo'], total_order)
def test_category_and_its_feature_dep(self):
"""
    If features and their category are both mentioned in the requires order,
    only the features stay
"""
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], defaults=['bravo'])
self.register_feature_class(
'foxtrot', Feature, requires=['alpha', 'bravo'])
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
total_order, _ = ctrl.get_activation_order(['foxtrot'])
self.assertEqual(['bravo', 'foxtrot'], total_order)
def test_sibling_feature_dependency(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature, requires=['charlie'])
self.register_feature_class('bravo', Feature, requires=['delta'])
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie', 'delta'], mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = ['charlie', 'delta', 'bravo']
total_order, _ = ctrl.get_activation_order(['bravo'])
self.assertEqual(expect, total_order)
def test_category_depends_on_category_with_defaults(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('foxtrot', Feature)
self.register_feature_category_class(
'echo', features=['foxtrot'], defaults=['foxtrot'])
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], defaults=['bravo'], requires=['echo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = ['foxtrot', 'bravo']
total_order, _ = ctrl.get_activation_order(['alpha'])
self.assertEqual(expect, total_order)
def test_feature_depends_on_category_with_defaults(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('delta', Feature)
self.register_feature_class('charlie', Feature, requires=['delta'])
self.register_feature_class('bravo', Feature, requires=['charlie'])
self.register_feature_category_class(
'alpha',
defaults=['bravo', 'charlie', 'delta'], features=['bravo'])
self.register_feature_class('foxtrot', Feature, requires=['alpha'])
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = ['delta', 'charlie', 'bravo', 'foxtrot']
total_order, _ = ctrl.get_activation_order(['foxtrot'])
self.assertEqual(expect, total_order)
def test_feature_activate_transitive_dependencies(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('delta', Feature)
self.register_feature_class('charlie', Feature, requires=['delta'])
self.register_feature_class('bravo', Feature, requires=['charlie'])
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie', 'delta'])
self.register_feature_class('foxtrot', Feature, requires=['bravo'])
self.register_feature_category_class('echo', features=['foxtrot'])
self.register_feature_class('juliet', Feature, requires=['foxtrot'])
self.register_feature_category_class('india', features=['juliet'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = ['delta', 'charlie', 'bravo', 'foxtrot', 'juliet']
total_order, _ = ctrl.get_activation_order(['juliet'])
self.assertEqual(expect, total_order)
@unittest.expectedFailure
def test_category_depends_on_category_with_no_defaults(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('foxtrot', Feature)
self.register_feature_category_class('echo', features=['foxtrot'])
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], defaults=['bravo'], requires=['echo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.get_activation_order(['alpha'])
@unittest.expectedFailure
def test_fail_activate_feature_mono_twice(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['bravo'])
ctrl.activate_features(['charlie'])
@unittest.expectedFailure
def test_fail_activate_category_twice(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'], defaults=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['alpha'])
ctrl.activate_features(['alpha'])
class FeatureCtrlTestDeactivationOrder(unittest.TestCase):
def test_all_deactivates_all_active(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie', 'delta'],
mono=False)
self.register_feature_class('foxtrot', Feature)
self.register_feature_class('golf', Feature)
self.register_feature_class('hotel', Feature)
self.register_feature_category_class(
'echo',
features=['foxtrot', 'golf', 'hotel'],
defaults=['foxtrot', 'golf', 'hotel'],
mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
expect = sorted(['golf', 'bravo', 'charlie', 'delta'])
ctrl.activate_features(['alpha', 'golf'])
total_order, _ = ctrl.deactivate_features('all')
self.assertEqual(expect, sorted(total_order))
self.assertFalse(ctrl.get_active_categories())
self.assertFalse(ctrl.get_active_features())
def test_category_deactivates_all_its_active_features(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie', 'delta'],
mono=False)
self.register_feature_class('foxtrot', Feature)
self.register_feature_class('golf', Feature)
self.register_feature_class('hotel', Feature)
self.register_feature_category_class(
'echo',
features=['foxtrot', 'golf', 'hotel'],
defaults=['foxtrot', 'golf', 'hotel'],
mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
killed = sorted(['bravo', 'charlie', 'delta'])
ctrl.activate_features('default')
total_order, _ = ctrl.deactivate_features(['alpha'])
self.assertEqual(killed, sorted(total_order))
self.assertEqual(ctrl.get_active_categories_names(), ['echo'])
alive = sorted(['foxtrot', 'golf', 'hotel'])
self.assertEqual(sorted(ctrl.get_active_features_names()), alive)
def test_deactivate_implicit_dependencies(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie'])
self.register_feature_class(
'foxtrot', Feature, requires=['bravo', 'charlie'])
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['foxtrot'])
total_order, _ = ctrl.deactivate_features(['foxtrot'])
killed_ftrs = sorted(['foxtrot', 'bravo', 'charlie'])
self.assertEqual(sorted(total_order), killed_ftrs)
self.assertFalse(ctrl.get_active_categories_names())
self.assertFalse(ctrl.get_active_features_names())
def test_deactivate_implicit_dependencies_category(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_category_class(
'alpha', features=['bravo', 'charlie'],
defaults=['bravo', 'charlie'])
self.register_feature_class(
'foxtrot', Feature, requires=['alpha'])
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['foxtrot'])
total_order, _ = ctrl.deactivate_features(['foxtrot'])
killed_ftrs = sorted(['foxtrot', 'bravo', 'charlie'])
self.assertEqual(sorted(total_order), killed_ftrs)
self.assertFalse(ctrl.get_active_categories_names())
self.assertFalse(ctrl.get_active_features_names())
def test_deactivation_skip(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class(
'alpha', features=['bravo'])
self.register_feature_class(
'foxtrot', Feature, requires=['bravo'])
self.register_feature_category_class(
'echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['foxtrot', 'bravo'])
total_order, _ = ctrl.deactivate_features(['foxtrot'], skip=['bravo'])
killed_ftrs = ['foxtrot']
self.assertEqual(total_order, killed_ftrs)
active_cats = ['alpha']
self.assertEqual(ctrl.get_active_categories_names(), active_cats)
active_ftrs = ['bravo']
self.assertEqual(ctrl.get_active_features_names(), active_ftrs)
def test_deactivate_skip_if_referenced_by_other(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class('alpha', features=['bravo'])
self.register_feature_class('foxtrot', Feature, requires=['bravo'])
self.register_feature_class('golf', Feature, requires=['bravo'])
self.register_feature_category_class(
'echo', features=['foxtrot', 'golf'], mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['foxtrot', 'golf'])
total_order, _ = ctrl.deactivate_features(['foxtrot'])
killed_ftrs = ['foxtrot']
self.assertEqual(total_order, killed_ftrs)
active_cats = sorted(['alpha', 'echo'])
self.assertEqual(sorted(ctrl.get_active_categories_names()), active_cats)
active_ftrs = sorted(['golf', 'bravo'])
self.assertEqual(sorted(ctrl.get_active_features_names()), active_ftrs)
@unittest.expectedFailure
def test_deactivation_of_direct_dep(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_category_class('alpha', features=['bravo'])
self.register_feature_class('foxtrot', Feature, requires=['bravo'])
self.register_feature_category_class('echo', features=['foxtrot'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['foxtrot'])
ctrl.deactivate_features(['bravo'])
class FeatureCtrlTestFtrCatLifecycle(unittest.TestCase):
def test_lifecycle(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', create_feature)
self.register_feature_category_class(
'alpha', create_category, features=['bravo'], defaults=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
ctrl = renv.feature_ctrl
ctrl.activate_features(['alpha'], set_up=True)
alpha = ctrl.get_active_category('alpha')
alpha.set_up.assert_called_once()
alpha.activate.assert_called_once()
bravo = ctrl.get_active_feature('bravo')
bravo.set_up.assert_called_once()
bravo.activate.assert_called_once()
ctrl.deactivate_features(['alpha'], tear_down=True)
alpha.deactivate.assert_called_once()
alpha.tear_down.assert_called_once()
bravo.deactivate.assert_called_once()
bravo.tear_down.assert_called_once()
class FeatureCtrlTestAPI(unittest.TestCase):
@unittest.expectedFailure
def test_register_category_no_features(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_category_class(
'alpha', create_category, features=[])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
@unittest.expectedFailure
def test_register_category_same_feature(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', create_feature)
self.register_feature_category_class(
'alpha', create_category, features=['bravo'], defaults=['bravo'])
self.register_feature_category_class(
'echo', create_category, features=['bravo'], defaults=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
def test_activate_deactivate_project_features(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', Feature)
self.register_feature_class('charlie', Feature)
self.register_feature_class('delta', Feature)
self.register_feature_category_class(
'alpha',
features=['bravo', 'charlie', 'delta'],
defaults=['bravo', 'charlie'],
mono=False)
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
renv.set_prop('project_features', ['bravo', 'delta'])
ctrl = renv.feature_ctrl
expect = sorted(['bravo', 'delta'])
ctrl.activate()
self.assertEqual(expect, sorted(ctrl.get_active_features_names()))
ctrl.deactivate()
self.assertFalse(ctrl.get_active_features_names())
def test_invoke(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', create_feature)
self.register_feature_category_class(
'alpha', create_category, features=['bravo'], defaults=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
renv.set_prop('project_features', ['bravo'])
ctrl = renv.feature_ctrl
ctrl.activate()
alpha = ctrl.get_active_category('alpha')
bravo = ctrl.get_active_feature('bravo')
ctrl.invoke('alpha')
alpha.handle.assert_called_once()
bravo.handle.assert_called_once()
bravo.reset_mock()
ctrl.invoke('bravo')
alpha.handle_feature.assert_called_once()
bravo.handle.assert_called_once()
def test_get_mono_feature(self):
class RunnerBlah(Runner):
def __init__(self, renv):
super(RunnerBlah, self).__init__(renv)
self.register_feature_class('bravo', create_feature)
self.register_feature_category_class(
'alpha', create_category, features=['bravo'], defaults=['bravo'])
renv = create_runtime(RunnerBlah)
renv.create_runner('runner')
renv.set_prop('project_features', ['bravo'])
ctrl = renv.feature_ctrl
ctrl.activate()
self.assertEqual(ctrl.get_active_feature('bravo'),
ctrl.get_mono_feature('alpha'))
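# Illustrative sketch (not part of the test suite): the registration/activation
# flow the tests above exercise, using the same Runner/Feature/create_runtime
# names imported by this module.
#
#   class MyRunner(Runner):
#       def __init__(self, renv):
#           super(MyRunner, self).__init__(renv)
#           self.register_feature_class('bravo', Feature)
#           self.register_feature_category_class(
#               'alpha', features=['bravo'], defaults=['bravo'])
#
#   renv = create_runtime(MyRunner)
#   renv.create_runner('runner')
#   ctrl = renv.feature_ctrl
#   ctrl.activate_features(['alpha'])    # activates 'bravo' via the category defaults
#   ctrl.deactivate_features('all')      # tears everything back down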
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for Bokeh-specific testing tools.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import socket
import time
from contextlib import closing
from threading import Thread
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Protocol,
Tuple,
)
# External imports
import pytest
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
if TYPE_CHECKING:
from selenium.webdriver.common.keys import _KeySeq
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
# Bokeh imports
import bokeh.server.views.ws as ws
from bokeh._testing.util.selenium import (
INIT,
RESULTS,
find_matching_element,
get_events_el,
)
from bokeh.application.handlers.function import ModifyDoc
from bokeh.io import save
from bokeh.models import LayoutDOM, Plot
from bokeh.server.server import Server
if TYPE_CHECKING:
from bokeh._testing.plugins.file_server import SimpleWebServer
from bokeh.model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.file_server",
"bokeh._testing.plugins.selenium",
)
__all__ = (
'bokeh_app_info',
'bokeh_model_page',
'bokeh_server_page',
'find_free_port',
'output_file_url',
'single_plot_page',
'test_file_path_and_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def output_file_url(request: pytest.FixtureRequest, file_server: SimpleWebServer) -> str:
from bokeh.io import output_file
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tear_down() -> None:
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_server.where_is(url)
@pytest.fixture
def test_file_path_and_url(request: pytest.FixtureRequest, file_server: SimpleWebServer) -> Tuple[str, str]:
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
def tear_down() -> None:
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_path, file_server.where_is(url)
class _ExitHandler(RequestHandler):
def initialize(self, io_loop: IOLoop) -> None:
self.io_loop = io_loop
async def get(self, *args: Any, **kwargs: Any) -> None:
self.io_loop.stop()
def find_free_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
class BokehAppInfo(Protocol):
def __call__(self, modify_doc: ModifyDoc) -> Tuple[str, ws.MessageTestPort]: ...
class HasNoConsoleErrors(Protocol):
def __call__(self, webdriver: WebDriver) -> bool: ...
@pytest.fixture
def bokeh_app_info(request: pytest.FixtureRequest, driver: WebDriver) -> BokehAppInfo:
''' Start a Bokeh server app and return information needed to test it.
    Returns a tuple (url, message_test_port), where the latter is an instance of the
    ``MessageTestPort`` dataclass that collects all messages the Bokeh server
    sends and receives while the test is running.
'''
def func(modify_doc: ModifyDoc) -> Tuple[str, ws.MessageTestPort]:
ws._message_test_port = ws.MessageTestPort(sent=[], received=[])
port = find_free_port()
def worker() -> None:
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup() -> None:
driver.get(f"http://localhost:{port}/exit")
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return f"http://localhost:{port}/", ws._message_test_port
return func
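# Illustrative sketch (hypothetical test, not part of this plugin): how the
# ``bokeh_app_info`` fixture might be consumed to inspect the messages the
# server exchanged during a test.
#
#   def test_example(bokeh_app_info, driver):
#       def modify_doc(doc):
#           doc.add_root(Plot())
#       url, msg_port = bokeh_app_info(modify_doc)
#       driver.get(url)
#       assert isinstance(msg_port.sent, list)
#       assert isinstance(msg_port.received, list)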
class _ElementMixin:
_driver: WebDriver
def click_element_at_position(self, element: WebElement, x: int, y: int) -> None:
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.perform()
def double_click_element_at_position(self, element: WebElement, x: int, y: int) -> None:
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.click()
actions.perform()
def drag_element_at_position(self, element: WebElement, x: int, y: int, dx: int, dy: int, mod: _KeySeq | None = None) -> None:
actions = ActionChains(self._driver)
if mod:
actions.key_down(mod)
actions.move_to_element_with_offset(element, x, y)
actions.click_and_hold()
actions.move_by_offset(dx, dy)
actions.release()
if mod:
actions.key_up(mod)
actions.perform()
def send_keys(self, *keys: _KeySeq) -> None:
actions = ActionChains(self._driver)
actions.send_keys(*keys)
actions.perform()
class _CanvasMixin(_ElementMixin):
canvas: WebElement
def click_canvas_at_position(self, plot: Plot, x: int, y: int) -> None:
events_el = get_events_el(self._driver, plot)
self.click_element_at_position(events_el, x, y)
def double_click_canvas_at_position(self, plot: Plot, x: int, y: int) -> None:
events_el = get_events_el(self._driver, plot)
self.double_click_element_at_position(events_el, x, y)
def drag_canvas_at_position(self, plot: Plot, x: int, y: int, dx: int, dy: int, mod: _KeySeq | None = None) -> None:
events_el = get_events_el(self._driver, plot)
self.drag_element_at_position(events_el, x, y, dx, dy, mod)
def eval_custom_action(self) -> None:
return self._driver.execute_script('Bokeh.documents[0].get_model_by_name("custom-action").execute()')
def get_toolbar_button(self, name: str) -> WebElement:
return find_matching_element(self._driver, f".bk-tool-icon-{name}")
class _BokehPageMixin(_ElementMixin):
test_div: WebElement
_driver: WebDriver
_has_no_console_errors: HasNoConsoleErrors
@property
def results(self) -> Dict[str, Any]:
WebDriverWait(self._driver, 10).until(EC.staleness_of(self.test_div))
self.test_div = find_matching_element(self._driver, ".bokeh-test-div")
return self._driver.execute_script(RESULTS)
@property
def driver(self) -> WebDriver:
return self._driver
def init_results(self) -> None:
self._driver.execute_script(INIT)
self.test_div = find_matching_element(self._driver, ".bokeh-test-div")
def has_no_console_errors(self) -> bool:
return self._has_no_console_errors(self._driver)
class _BokehModelPage(_BokehPageMixin):
def __init__(self, model: LayoutDOM, driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> None:
self._driver = driver
self._model = model
self._has_no_console_errors = has_no_console_errors
save(self._model)
self._driver.get(output_file_url)
self.init_results()
await_ready(driver, model)
BokehModelPage = Callable[[LayoutDOM], _BokehModelPage]
@pytest.fixture()
def bokeh_model_page(driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> BokehModelPage:
def func(model: LayoutDOM) -> _BokehModelPage:
return _BokehModelPage(model, driver, output_file_url, has_no_console_errors)
return func
class _SinglePlotPage(_BokehModelPage, _CanvasMixin):
# model may be a layout, but should only contain a single plot
def __init__(self, model: LayoutDOM, driver: WebDriver, output_file_url: str, has_no_console_errors: HasNoConsoleErrors) -> None:
super().__init__(model, driver, output_file_url, has_no_console_errors)
SinglePlotPage = Callable[[LayoutDOM], _SinglePlotPage]
@pytest.fixture()
def single_plot_page(driver: WebDriver, output_file_url: str,
has_no_console_errors: HasNoConsoleErrors) -> SinglePlotPage:
def func(model: LayoutDOM) -> _SinglePlotPage:
return _SinglePlotPage(model, driver, output_file_url, has_no_console_errors)
return func
class _BokehServerPage(_BokehPageMixin, _CanvasMixin):
def __init__(self, modify_doc: ModifyDoc, driver: WebDriver, bokeh_app_info: BokehAppInfo, has_no_console_errors: HasNoConsoleErrors) -> None:
self._driver = driver
self._has_no_console_errors = has_no_console_errors
self._app_url, self.message_test_port = bokeh_app_info(modify_doc)
time.sleep(0.1)
self._driver.get(self._app_url)
self.init_results()
def ready(driver: WebDriver) -> bool:
try:
await_all_ready(driver)
return True
except RuntimeError:
return False
WebDriverWait(self._driver, 10).until(ready)
BokehServerPage = Callable[[ModifyDoc], _BokehServerPage]
@pytest.fixture()
def bokeh_server_page(driver: WebDriver, bokeh_app_info: BokehAppInfo,
has_no_console_errors: HasNoConsoleErrors) -> BokehServerPage:
def func(modify_doc: ModifyDoc) -> _BokehServerPage:
return _BokehServerPage(modify_doc, driver, bokeh_app_info, has_no_console_errors)
return func
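# Illustrative sketch (hypothetical test): ``bokeh_server_page`` takes a
# document-modifying function and returns a page object exposing the canvas and
# toolbar helpers defined above.
#
#   def test_server_roundtrip(bokeh_server_page):
#       def modify_doc(doc):
#           doc.add_root(Plot())
#       page = bokeh_server_page(modify_doc)
#       assert page.has_no_console_errors()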
def await_ready(driver: WebDriver, root: Model) -> None:
script = """
const [root_id, done] = [...arguments];
(async function() {
const view = Bokeh.index[root_id]
if (view == null)
done(false)
else {
await view.ready
done(true)
}
})()
"""
if not driver.execute_async_script(script, root.id):
raise RuntimeError(f"could not find a root view for {root}")
def await_all_ready(driver: WebDriver) -> None:
script = """
const [done] = [...arguments];
(async function() {
const views = [...Object.values(Bokeh.index)]
if (views.length == 0)
done(false)
else {
await Promise.all(views.map((view) => view.ready))
done(true)
}
})()
"""
if not driver.execute_async_script(script):
raise RuntimeError("could not find any root views")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
__author__ = 'matthi'
import operator
import copy
from collections import deque
from pyspeechgrammar import model
from pyspeechgrammar.fst import model as fst_model
class FSTPart:
def __init__(self):
self.input_arcs = []
self.output_arcs = []
self.is_optional = False
class FSTSerializer:
def __init__(self, grammar):
self.grammar = grammar
self.rule_parts = {}
def serialize(self):
part = self.__create_part_from_rule(self.grammar.root_rule)
fst = self.__create_fst_from_part(part)
# set label ids
labels = {}
i = 1
for arc in fst.arcs:
            if arc.label not in labels:
labels[arc.label] = i
i += 1
# FST
lines = []
for arc in fst.arcs:
lines.append("{} {} {} {} {}".format(arc.src_state.identifier, arc.dest_state.identifier, arc.label, arc.label, arc.weight))
lines.append("{} 1.0".format(fst.final.identifier))
lines.append("")
fst_string = "\n".join(lines)
# LABEL
lines = []
lines.append("<eps> 0")
for label, index in sorted(labels.items(), key=operator.itemgetter(1)):
lines.append("{} {}".format(label, index))
lines.append("")
label_string = "\n".join(lines)
return fst_string, label_string
def __create_fst_from_part(self, part):
fst = fst_model.FiniteStateTransducer()
start = fst_model.FSTState(0)
for input_arc in part.input_arcs:
input_arc.src_state = start
start.outputs.append(input_arc)
end = fst_model.FSTState(1)
for output_arc in part.output_arcs:
output_arc.dest_state = end
end.inputs.append(output_arc)
# set state ids
q = deque([start])
marked = set()
i = 0
while len(q) > 0:
current_state = q.popleft()
current_state.identifier = i
i += 1
fst.states.append(current_state)
weight = 1.0
if len(current_state.outputs) > 0:
weight = 1.0 / len(current_state.outputs)
for output_arc in current_state.outputs:
dest_state = output_arc.dest_state
output_arc.weight = weight
fst.arcs.append(output_arc)
                if dest_state not in marked:
marked.add(dest_state)
q.append(dest_state)
fst.start = start
fst.final = end
return fst
def __create_part_from_rule(self, rule):
return self.__create_part_from_element(rule.value)
def __create_part_from_element(self, element):
part = None
if isinstance(element, model.Token):
part = self.__create_part_from_token(element)
elif isinstance(element, model.RuleReference):
part = self.__create_part_from_rule_reference(element)
elif isinstance(element, model.Group):
part = self.__create_part_from_group(element)
elif isinstance(element, model.OptionalGroup):
part = self.__create_part_from_optional_group(element)
elif isinstance(element, model.Sequence):
part = self.__create_part_from_sequence(element)
elif isinstance(element, model.Alternatives):
part = self.__create_part_from_alternative(element)
return part
def __create_part_from_token(self, token):
arc = fst_model.FSTArc(None, None, token.value, 1.0)
part = FSTPart()
part.input_arcs.append(arc)
part.output_arcs.append(arc)
return part
def __create_part_from_rule_reference(self, rule_reference):
rule = self.grammar.get_rule_with_name(rule_reference.rule_name)
if rule is not None:
return self.__create_part_from_rule(rule)
return None
def __create_part_from_group(self, group):
return self.__create_part_from_element(group.value)
def __create_part_from_optional_group(self, optional_group):
part = self.__create_part_from_element(optional_group.value)
part.is_optional = True
return part
def __create_part_from_sequence(self, sequence):
subparts = []
for element in sequence.elements:
subpart = self.__create_part_from_element(element)
subparts.append(subpart)
part = FSTPart()
first_subpart = subparts[0]
        last_subpart = subparts[-1]
part.input_arcs.extend(first_subpart.input_arcs)
part.output_arcs.extend(last_subpart.output_arcs)
optional_output_arcs = []
for i in range(len(subparts) - 1):
left_part = subparts[i]
right_part = subparts[i+1]
state = fst_model.FSTState(i)
for input_arc in left_part.output_arcs:
input_arc.dest_state = state
state.inputs.append(input_arc)
for input_arc in optional_output_arcs:
new_arc = fst_model.FSTArc(input_arc.src_state, state, input_arc.label, 1.0)
state.inputs.append(new_arc)
if input_arc.src_state is not None:
input_arc.src_state.outputs.append(new_arc)
else:
part.input_arcs.append(new_arc)
for input_arc in right_part.input_arcs:
input_arc.src_state = state
state.outputs.append(input_arc)
if right_part.is_optional:
optional_output_arcs.extend(left_part.output_arcs)
else:
optional_output_arcs = []
for input_arc in optional_output_arcs:
new_arc = fst_model.FSTArc(input_arc.src_state, None, input_arc.label, input_arc.weight)
if input_arc.src_state is not None:
input_arc.src_state.outputs.append(new_arc)
else:
part.input_arcs.append(new_arc)
part.output_arcs.append(new_arc)
optional_input_arcs = []
for i in range(len(subparts)-2, -1, -1):
left_part = subparts[i]
right_part = subparts[i+1]
if left_part.is_optional:
optional_input_arcs.extend(right_part.input_arcs)
else:
optional_input_arcs = []
for output_arc in optional_input_arcs:
new_arc = fst_model.FSTArc(None, output_arc.dest_state, output_arc.label, output_arc.weight)
if output_arc.dest_state is not None:
output_arc.dest_state.inputs.append(new_arc)
else:
part.output_arcs.append(new_arc)
part.input_arcs.append(new_arc)
return part
def __create_part_from_alternative(self, alternative):
part = FSTPart()
for element in alternative.elements:
subpart = self.__create_part_from_element(element)
part.input_arcs.extend(subpart.input_arcs)
part.output_arcs.extend(subpart.output_arcs)
return part
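# Illustrative usage sketch (assumes a ``grammar`` already built with the
# pyspeechgrammar model API; the output file names are hypothetical):
#
#   serializer = FSTSerializer(grammar)
#   fst_text, label_text = serializer.serialize()
#   with open('grammar.fst.txt', 'w') as f:
#       f.write(fst_text)
#   with open('grammar.labels.txt', 'w') as f:
#       f.write(label_text)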
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as oslo_ctx
import six
from senlin.common import consts
from senlin.common import context as senlin_ctx
from senlin.common import exception
from senlin.common import schema
from senlin.common import utils as common_utils
from senlin.db import api as db_api
from senlin.engine import environment
from senlin.engine import parser
from senlin.policies import base as policy_base
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
sample_policy = """
type: senlin.policy.dummy
version: 1.0
properties:
key1: value1
key2: 2
"""
class DummyPolicy(policy_base.Policy):
VERSION = '1.0'
properties_schema = {
'key1': schema.String(
'first key',
default='value1'
),
'key2': schema.Integer(
'second key',
required=True,
),
}
def __init__(self, name, spec, **kwargs):
super(DummyPolicy, self).__init__(name, spec, **kwargs)
class TestPolicyBase(base.SenlinTestCase):
def setUp(self):
super(TestPolicyBase, self).setUp()
self.ctx = utils.dummy_context()
environment.global_env().register_policy('senlin.policy.dummy-1.0',
DummyPolicy)
self.spec = parser.simple_parse(sample_policy)
def _create_policy(self, policy_name, policy_id=None):
policy = policy_base.Policy(policy_name, self.spec,
user=self.ctx.user,
project=self.ctx.project,
domain=self.ctx.domain)
if policy_id:
policy.id = policy_id
return policy
def _create_db_policy(self, **kwargs):
values = {
'name': 'test-policy',
'type': 'senlin.policy.dummy-1.0',
'spec': self.spec,
'user': self.ctx.user,
'project': self.ctx.project,
'domain': self.ctx.domain,
'data': {}
}
values.update(kwargs)
return db_api.policy_create(self.ctx, values)
def test_init(self):
policy = self._create_policy('test-policy')
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual(self.spec, policy.spec)
self.assertEqual('senlin.policy.dummy-1.0', policy.type)
self.assertEqual(self.ctx.user, policy.user)
self.assertEqual(self.ctx.project, policy.project)
self.assertEqual(self.ctx.domain, policy.domain)
self.assertEqual({}, policy.data)
self.assertIsNone(policy.created_at)
self.assertIsNone(policy.updated_at)
self.assertTrue(policy.singleton)
spec_data = policy.spec_data
self.assertEqual('senlin.policy.dummy', spec_data['type'])
self.assertEqual('1.0', spec_data['version'])
self.assertEqual({'key1': 'value1', 'key2': 2},
spec_data['properties'])
self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties)
def test_policy_new_type_not_found(self):
bad_spec = {
'type': 'bad-type',
'version': '1.0',
'properties': '',
}
self.assertRaises(exception.PolicyTypeNotFound,
policy_base.Policy,
'test-policy', bad_spec)
def test_load(self):
policy = self._create_db_policy()
result = policy_base.Policy.load(self.ctx, policy.id)
self.assertEqual(policy.id, result.id)
self.assertEqual(policy.name, result.name)
self.assertEqual(policy.type, result.type)
self.assertEqual(policy.user, result.user)
self.assertEqual(policy.project, result.project)
self.assertEqual(policy.domain, result.domain)
self.assertEqual(policy.spec, result.spec)
self.assertEqual(policy.data, result.data)
self.assertEqual({'key1': 'value1', 'key2': 2}, result.properties)
self.assertEqual(policy.created_at, result.created_at)
self.assertEqual(policy.updated_at, result.updated_at)
def test_load_diff_project(self):
policy = self._create_db_policy()
new_ctx = utils.dummy_context(project='a-different-project')
self.assertRaises(exception.PolicyNotFound,
policy_base.Policy.load,
new_ctx, policy.id, None)
res = policy_base.Policy.load(new_ctx, policy.id, project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(policy.id, res.id)
def test_load_not_found(self):
ex = self.assertRaises(exception.PolicyNotFound,
policy_base.Policy.load,
self.ctx, 'fake-policy', None)
self.assertEqual('The policy (fake-policy) could not be found.',
six.text_type(ex))
ex = self.assertRaises(exception.PolicyNotFound,
policy_base.Policy.load,
self.ctx, None, None)
self.assertEqual('The policy (None) could not be found.',
six.text_type(ex))
def test_load_all(self):
result = policy_base.Policy.load_all(self.ctx)
self.assertEqual([], list(result))
policy1 = self._create_db_policy(name='policy-1', id='ID1')
policy2 = self._create_db_policy(name='policy-2', id='ID2')
result = policy_base.Policy.load_all(self.ctx)
policies = list(result)
self.assertEqual(2, len(policies))
self.assertEqual(policy1.id, policies[0].id)
self.assertEqual(policy2.id, policies[1].id)
def test_load_all_diff_project(self):
self._create_db_policy(name='policy-1', id='ID1')
self._create_db_policy(name='policy-2', id='ID2')
new_ctx = utils.dummy_context(project='a-different-project')
res = policy_base.Policy.load_all(new_ctx)
self.assertEqual(0, len(list(res)))
res = policy_base.Policy.load_all(new_ctx, project_safe=False)
self.assertEqual(2, len(list(res)))
@mock.patch.object(db_api, 'policy_get_all')
def test_load_all_with_params(self, mock_get):
mock_get.return_value = []
res = list(policy_base.Policy.load_all(self.ctx))
self.assertEqual([], res)
mock_get.assert_called_once_with(self.ctx, limit=None, marker=None,
sort=None, filters=None,
project_safe=True)
mock_get.reset_mock()
res = list(policy_base.Policy.load_all(
self.ctx, limit=1, marker='MARKER', sort='K1:asc'))
self.assertEqual([], res)
mock_get.assert_called_once_with(self.ctx, limit=1, marker='MARKER',
sort='K1:asc', filters=None,
project_safe=True)
def test_delete(self):
policy = self._create_db_policy()
policy_id = policy.id
res = policy_base.Policy.delete(self.ctx, policy_id)
self.assertIsNone(res)
self.assertRaises(exception.PolicyNotFound,
policy_base.Policy.load,
self.ctx, policy_id, None)
def test_delete_not_found(self):
result = policy_base.Policy.delete(self.ctx, 'bogus')
self.assertIsNone(result)
def test_store_for_create(self):
policy = self._create_policy('test-policy')
self.assertIsNone(policy.id)
policy_id = policy.store(self.ctx)
self.assertIsNotNone(policy_id)
self.assertEqual(policy_id, policy.id)
result = db_api.policy_get(self.ctx, policy_id)
self.assertIsNotNone(result)
self.assertEqual('test-policy', result.name)
self.assertEqual(policy_id, result.id)
self.assertEqual(policy.type, result.type)
self.assertEqual(policy.user, result.user)
self.assertEqual(policy.project, result.project)
self.assertEqual(policy.domain, result.domain)
self.assertEqual(policy.spec, result.spec)
self.assertEqual(policy.data, result.data)
self.assertIsNotNone(result.created_at)
self.assertIsNone(result.updated_at)
def test_store_for_update(self):
policy = self._create_policy('test-policy')
self.assertIsNone(policy.id)
policy_id = policy.store(self.ctx)
self.assertIsNotNone(policy_id)
self.assertEqual(policy_id, policy.id)
# do an update
policy.name = 'test-policy-1'
policy.data = {'kk': 'vv'}
new_id = policy.store(self.ctx)
self.assertEqual(policy_id, new_id)
result = db_api.policy_get(self.ctx, policy_id)
self.assertIsNotNone(result)
self.assertEqual('test-policy-1', result.name)
self.assertEqual({'kk': 'vv'}, policy.data)
self.assertIsNotNone(policy.created_at)
self.assertIsNotNone(policy.updated_at)
def test_to_dict(self):
policy = self._create_policy('test-policy')
policy_id = policy.store(self.ctx)
self.assertIsNotNone(policy_id)
expected = {
'id': policy_id,
'name': policy.name,
'type': policy.type,
'user': policy.user,
'project': policy.project,
'domain': policy.domain,
'spec': policy.spec,
'data': policy.data,
'created_at': common_utils.format_time(policy.created_at),
'updated_at': None,
}
result = policy_base.Policy.load(self.ctx, policy_id=policy.id)
self.assertEqual(expected, result.to_dict())
def test_get_schema(self):
expected = {
'key1': {
'default': 'value1',
'description': 'first key',
'readonly': False,
'required': False,
'updatable': False,
'type': 'String'
},
'key2': {
'description': 'second key',
'readonly': False,
'required': True,
'updatable': False,
'type': 'Integer'
},
}
res = DummyPolicy.get_schema()
self.assertEqual(expected, res)
def test_build_policy_data(self):
policy = self._create_policy('test-policy')
data = {'key1': 'value1'}
res = policy._build_policy_data(data)
expect_result = {
'DummyPolicy': {
'version': '1.0',
'data': data
}
}
self.assertEqual(expect_result, res)
def test_extract_policy_data(self):
policy = self._create_policy('test-policy')
# Extract data correctly
data = {'key1': 'value1'}
policy_data = {
'DummyPolicy': {
'version': '1.0',
'data': data
}
}
res = policy._extract_policy_data(policy_data)
self.assertEqual(data, res)
        # Policy class name does not match
data = {'key1': 'value1'}
policy_data = {
'FakePolicy': {
'version': '1.0',
'data': data
}
}
res = policy._extract_policy_data(policy_data)
self.assertIsNone(res)
        # Policy version does not match
data = {'key1': 'value1'}
policy_data = {
'DummyPolicy': {
'version': '2.0',
'data': data
}
}
res = policy._extract_policy_data(policy_data)
self.assertIsNone(res)
def test_default_need_check(self):
action = mock.Mock()
action.action = consts.CLUSTER_SCALE_IN
action.data = {}
policy = self._create_policy('test-policy')
res = policy.need_check('BEFORE', action)
self.assertTrue(res)
setattr(policy, 'TARGET', [('BEFORE', consts.CLUSTER_SCALE_IN)])
res = policy.need_check('BEFORE', action)
self.assertTrue(res)
res = policy.need_check('AFTER', action)
self.assertFalse(res)
def test_default_pre_op(self):
policy = self._create_policy('test-policy')
res = policy.pre_op('CLUSTER_ID', 'FOO')
self.assertIsNone(res)
def test_default_post_op(self):
policy = self._create_policy('test-policy')
res = policy.post_op('CLUSTER_ID', 'FOO')
self.assertIsNone(res)
def test_default_attach(self):
cluster = mock.Mock()
policy = self._create_policy('test-policy')
        # Policy targets ANY profile type
policy.PROFILE_TYPE = ['ANY']
res = policy.attach(cluster)
self.assertEqual((True, None), res)
# Profile type of cluster is not in policy's target scope
profile = mock.Mock()
profile.type = 'os.nova.server'
cluster.rt = {'profile': profile}
policy.PROFILE_TYPE = ['os.heat.resource']
msg = 'Policy not applicable on profile type: os.nova.server'
res = policy.attach(cluster)
self.assertEqual((False, msg), res)
        # Attaching succeeds
policy.PROFILE_TYPE = ['os.nova.server', 'os.heat.resource']
res = policy.attach(cluster)
self.assertEqual((True, None), res)
def test_default_detach(self):
cluster = mock.Mock()
policy = self._create_policy('test-policy')
res = policy.detach(cluster)
self.assertEqual((True, None), res)
@mock.patch.object(db_api, 'cred_get')
@mock.patch.object(senlin_ctx, 'get_service_context')
@mock.patch.object(oslo_ctx, 'get_current')
def test_build_conn_params(self, mock_get_current, mock_get_service_ctx,
mock_cred_get):
service_cred = {
'auth_url': 'AUTH_URL',
'username': 'senlin',
'user_domain_name': 'default',
'password': '123'
}
current_ctx = {
'auth_url': 'auth_url',
'user_name': 'user1',
'user_domain_name': 'default',
'password': '456'
}
cred_info = {
'openstack': {
'trust': 'TRUST_ID',
}
}
cluster = mock.Mock()
cluster.user = 'user1'
cluster.project = 'project1'
cred = mock.Mock()
cred.cred = cred_info
mock_get_service_ctx.return_value = service_cred
mock_get_current.return_value = current_ctx
mock_cred_get.return_value = cred
policy = self._create_policy('test-policy')
expected_result = {
'auth_url': 'AUTH_URL',
'username': 'senlin',
'user_domain_name': 'default',
'password': '123',
'trust_id': 'TRUST_ID'
}
res = policy._build_conn_params(cluster)
self.assertEqual(expected_result, res)
mock_get_service_ctx.assert_called_once_with()
mock_cred_get.assert_called_once_with(current_ctx, 'user1', 'project1')
@mock.patch.object(db_api, 'cred_get')
@mock.patch.object(senlin_ctx, 'get_service_context')
@mock.patch.object(oslo_ctx, 'get_current')
def test_build_conn_params_trust_not_found(
self, mock_get_current, mock_get_service_ctx, mock_cred_get):
service_cred = {
'auth_url': 'AUTH_URL',
'username': 'senlin',
'user_domain_name': 'default',
'password': '123'
}
mock_get_service_ctx.return_value = service_cred
mock_cred_get.return_value = None
cluster = mock.Mock()
cluster.user = 'user1'
cluster.project = 'project1'
policy = self._create_policy('test-policy')
ex = self.assertRaises(exception.TrustNotFound,
policy._build_conn_params, cluster)
msg = "The trust for trustor (user1) could not be found."
self.assertEqual(msg, six.text_type(ex))
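# Illustrative sketch (not part of the tests): a minimal concrete policy of the
# kind exercised above, showing the hooks the base class consults. Everything
# except the names already used in this module is hypothetical.
#
#   class ScaleInGuardPolicy(policy_base.Policy):
#       VERSION = '1.0'
#       TARGET = [('BEFORE', consts.CLUSTER_SCALE_IN)]
#       PROFILE_TYPE = ['os.nova.server']
#       properties_schema = {
#           'max_shrink': schema.Integer('maximum nodes removed per action'),
#       }
#
#       def pre_op(self, cluster_id, action):
#           # inspect or adjust action.data before the scale-in proceeds
#           return None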
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_put_request_initial(
subscription_id: str,
resource_group_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_patch_request_initial(
subscription_id: str,
resource_group_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_validate_request_initial(
subscription_id: str,
resource_group_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class ConfigServersOperations(object):
"""ConfigServersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2022_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> "_models.ConfigServerResource":
"""Get the config server and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigServerResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'} # type: ignore
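    # Illustrative call sketch (hypothetical client setup): this operations class
    # is reached through the generated management client rather than instantiated
    # directly, as noted in the class docstring.
    #
    #   config = client.config_servers.get(
    #       resource_group_name="my-rg", service_name="my-spring-service")
    #   print(config.properties)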
def _update_put_initial(
self,
resource_group_name: str,
service_name: str,
config_server_resource: "_models.ConfigServerResource",
**kwargs: Any
) -> "_models.ConfigServerResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(config_server_resource, 'ConfigServerResource')
request = build_update_put_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._update_put_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'} # type: ignore
@distributed_trace
def begin_update_put(
self,
resource_group_name: str,
service_name: str,
config_server_resource: "_models.ConfigServerResource",
**kwargs: Any
) -> LROPoller["_models.ConfigServerResource"]:
"""Update the config server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param config_server_resource: Parameters for the update operation.
:type config_server_resource:
~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ConfigServerResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_put_initial(
resource_group_name=resource_group_name,
service_name=service_name,
config_server_resource=config_server_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'} # type: ignore
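    # Illustrative LRO usage sketch (hypothetical resource values): callers poll
    # the returned LROPoller for the final ConfigServerResource.
    #
    #   poller = client.config_servers.begin_update_put(
    #       resource_group_name="my-rg",
    #       service_name="my-spring-service",
    #       config_server_resource=config_server_resource,
    #   )
    #   result = poller.result()   # blocks until the long-running update completes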
def _update_patch_initial(
self,
resource_group_name: str,
service_name: str,
config_server_resource: "_models.ConfigServerResource",
**kwargs: Any
) -> "_models.ConfigServerResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(config_server_resource, 'ConfigServerResource')
request = build_update_patch_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._update_patch_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'} # type: ignore
@distributed_trace
def begin_update_patch(
self,
resource_group_name: str,
service_name: str,
config_server_resource: "_models.ConfigServerResource",
**kwargs: Any
) -> LROPoller["_models.ConfigServerResource"]:
"""Update the config server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param config_server_resource: Parameters for the update operation.
:type config_server_resource:
~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ConfigServerResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_patch_initial(
resource_group_name=resource_group_name,
service_name=service_name,
config_server_resource=config_server_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ConfigServerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'} # type: ignore
def _validate_initial(
self,
resource_group_name: str,
service_name: str,
config_server_settings: "_models.ConfigServerSettings",
**kwargs: Any
) -> "_models.ConfigServerSettingsValidateResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerSettingsValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(config_server_settings, 'ConfigServerSettings')
request = build_validate_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._validate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_validate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate'} # type: ignore
@distributed_trace
def begin_validate(
self,
resource_group_name: str,
service_name: str,
config_server_settings: "_models.ConfigServerSettings",
**kwargs: Any
) -> LROPoller["_models.ConfigServerSettingsValidateResult"]:
"""Check if the config server settings are valid.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param config_server_settings: Config server settings to be validated.
:type config_server_settings:
~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerSettings
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ConfigServerSettingsValidateResult or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.ConfigServerSettingsValidateResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigServerSettingsValidateResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._validate_initial(
resource_group_name=resource_group_name,
service_name=service_name,
config_server_settings=config_server_settings,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_validate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate'} # type: ignore
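# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated operations class above): how
# the begin_validate / begin_update_patch pollers are typically driven through
# the management client. The AppPlatformManagementClient class, the
# `config_servers` operations-group attribute and the ConfigServerSettings /
# ConfigServerGitProperty models are assumptions based on the
# azure-mgmt-appplatform package layout; running this needs real Azure
# credentials and an existing Azure Spring Apps service.
def _validate_config_server_example(subscription_id, resource_group, service_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.appplatform import AppPlatformManagementClient
    from azure.mgmt.appplatform.v2022_01_01_preview import models

    client = AppPlatformManagementClient(DefaultAzureCredential(), subscription_id)
    settings = models.ConfigServerSettings(
        git_property=models.ConfigServerGitProperty(
            uri="https://github.com/Azure-Samples/piggymetrics-config"))
    # begin_validate returns an LROPoller; result() blocks until the
    # long-running operation reaches a terminal state.
    poller = client.config_servers.begin_validate(
        resource_group_name=resource_group,
        service_name=service_name,
        config_server_settings=settings)
    return poller.result()  # ConfigServerSettingsValidateResult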
|
|
# Copyright (c) 2005-2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from sheet.parser.fl_cell_reference_parse_node import FLCellReferenceParseNode
from sheet.parser.fl_cell_range_parse_node import FLCellRangeParseNode
from sheet.parser.fl_column_reference_parse_node import FLColumnReferenceParseNode
from sheet.parser.fl_row_reference_parse_node import FLRowReferenceParseNode
from sheet.parser.parse_node import ParseNode
from sheet.parser.parse_node_constructors import (
AndTest,
ArgList,
Argument,
ArithExpr,
Atom,
Comparison,
CompOperator,
ConcatExpr,
DictMaker,
Expr,
ExprList,
Factor,
FLCellRange,
FLCellReference,
FLColumnReference,
FLRowReference,
FLDDECall,
FLDeletedReference,
FLInvalidReference,
FLNakedWorksheetReference,
FLReference,
FLRoot,
FPDef,
FPList,
GenFor,
GenIf,
GenIter,
LambDef,
ListFor,
ListIf,
ListIter,
ListMaker,
Name,
NotTest,
Number,
Power,
ShiftExpr,
SliceOp,
StringLiteral,
Subscript,
SubscriptList,
Term,
Test,
TestList,
TestListGexp,
Trailer,
VarArgsList,
ArithExpr_Term,
Expr_ConcatExpr_ShiftExpr,
Factor_Power_FLReference_Atom,
Test_AndTest_NotTest_Comparison,
ExprFromNameChild,
ExprFromAtomChild,
TestFromAtomChild,
TestFromPowerChild,
)
class ParseNodeConstructorsTest(unittest.TestCase):
def testCombinedConvenienceConstructors(self):
randomChild = "hello"
self.assertEquals(ParseNode(ParseNode.ARITH_EXPR,
[ParseNode(ParseNode.TERM, [randomChild])]),
ArithExpr_Term(randomChild))
self.assertEquals(ParseNode(ParseNode.EXPR,
[ParseNode(ParseNode.CONCAT_EXPR,
[ParseNode(ParseNode.SHIFT_EXPR,
[randomChild])])]),
Expr_ConcatExpr_ShiftExpr(randomChild))
self.assertEquals(ParseNode(ParseNode.FACTOR,
[ParseNode(ParseNode.POWER,
[ParseNode(ParseNode.FL_REFERENCE,
[ParseNode(ParseNode.ATOM,
[randomChild])])])]),
Factor_Power_FLReference_Atom(randomChild))
self.assertEquals(ParseNode(ParseNode.TEST,
[ParseNode(ParseNode.AND_TEST,
[ParseNode(ParseNode.NOT_TEST,
[ParseNode(ParseNode.COMPARISON,
[randomChild])])])]),
Test_AndTest_NotTest_Comparison(randomChild))
self.assertEquals(ParseNode(ParseNode.EXPR,
[ParseNode(ParseNode.CONCAT_EXPR,
[ParseNode(ParseNode.SHIFT_EXPR,
[ParseNode(ParseNode.ARITH_EXPR,
[ParseNode(ParseNode.TERM,
[ParseNode(ParseNode.FACTOR,
[ParseNode(ParseNode.POWER,
[ParseNode(ParseNode.FL_REFERENCE,
[ParseNode(ParseNode.ATOM,
[ParseNode(ParseNode.NAME,
[randomChild])])])])])])])])])]),
ExprFromNameChild(randomChild))
self.assertEquals(ParseNode(ParseNode.EXPR,
[ParseNode(ParseNode.CONCAT_EXPR,
[ParseNode(ParseNode.SHIFT_EXPR,
[ParseNode(ParseNode.ARITH_EXPR,
[ParseNode(ParseNode.TERM,
[ParseNode(ParseNode.FACTOR,
[ParseNode(ParseNode.POWER,
[ParseNode(ParseNode.FL_REFERENCE,
[ParseNode(ParseNode.ATOM,
[randomChild])])])])])])])])]),
ExprFromAtomChild(randomChild))
self.assertEquals(ParseNode(ParseNode.TEST,
[ParseNode(ParseNode.AND_TEST,
[ParseNode(ParseNode.NOT_TEST,
[ParseNode(ParseNode.COMPARISON,
[ParseNode(ParseNode.EXPR,
[ParseNode(ParseNode.CONCAT_EXPR,
[ParseNode(ParseNode.SHIFT_EXPR,
[ParseNode(ParseNode.ARITH_EXPR,
[ParseNode(ParseNode.TERM,
[ParseNode(ParseNode.FACTOR,
[ParseNode(ParseNode.POWER,
[ParseNode(ParseNode.FL_REFERENCE,
[ParseNode(ParseNode.ATOM,
[randomChild])])])])])])])])])])])])]),
TestFromAtomChild(randomChild))
self.assertEquals(ParseNode(ParseNode.TEST,
[ParseNode(ParseNode.AND_TEST,
[ParseNode(ParseNode.NOT_TEST,
[ParseNode(ParseNode.COMPARISON,
[ParseNode(ParseNode.EXPR,
[ParseNode(ParseNode.CONCAT_EXPR,
[ParseNode(ParseNode.SHIFT_EXPR,
[ParseNode(ParseNode.ARITH_EXPR,
[ParseNode(ParseNode.TERM,
[ParseNode(ParseNode.FACTOR,
[ParseNode(ParseNode.POWER,
[randomChild])])])])])])])])])])]),
TestFromPowerChild(randomChild))
def testSimpleConvenienceConstructors(self):
randomList = ["foo", "bar", 27]
self.assertEquals(ParseNode(ParseNode.AND_TEST, randomList),
AndTest(randomList))
self.assertEquals(ParseNode(ParseNode.ARG_LIST, randomList),
ArgList(randomList))
self.assertEquals(ParseNode(ParseNode.ARGUMENT, randomList),
Argument(randomList))
self.assertEquals(ParseNode(ParseNode.ARITH_EXPR, randomList),
ArithExpr(randomList))
self.assertEquals(ParseNode(ParseNode.ATOM, randomList),
Atom(randomList))
self.assertEquals(ParseNode(ParseNode.COMPARISON, randomList),
Comparison(randomList))
self.assertEquals(ParseNode(ParseNode.COMP_OPERATOR, randomList),
CompOperator(randomList))
self.assertEquals(ParseNode(ParseNode.CONCAT_EXPR, randomList),
ConcatExpr(randomList))
self.assertEquals(ParseNode(ParseNode.DICT_MAKER, randomList),
DictMaker(randomList))
self.assertEquals(ParseNode(ParseNode.EXPR, randomList),
Expr(randomList))
self.assertEquals(ParseNode(ParseNode.EXPR_LIST, randomList),
ExprList(randomList))
self.assertEquals(ParseNode(ParseNode.FACTOR, randomList),
Factor(randomList))
self.assertEquals(FLCellRangeParseNode(randomList),
FLCellRange(randomList))
self.assertEquals(FLCellReferenceParseNode(randomList),
FLCellReference(randomList))
self.assertEquals(FLColumnReferenceParseNode(randomList),
FLColumnReference(randomList))
self.assertEquals(FLRowReferenceParseNode(randomList),
FLRowReference(randomList))
self.assertEquals(ParseNode(ParseNode.FL_DDE_CALL, randomList),
FLDDECall(randomList))
self.assertEquals(ParseNode(ParseNode.FL_DELETED_REFERENCE, randomList),
FLDeletedReference(randomList))
self.assertEquals(ParseNode(ParseNode.FL_INVALID_REFERENCE, randomList),
FLInvalidReference(randomList))
self.assertEquals(ParseNode(ParseNode.FL_NAKED_WORKSHEET_REFERENCE, randomList),
FLNakedWorksheetReference(randomList))
self.assertEquals(ParseNode(ParseNode.FL_REFERENCE, randomList),
FLReference(randomList))
self.assertEquals(ParseNode(ParseNode.FL_ROOT, randomList),
FLRoot(randomList))
self.assertEquals(ParseNode(ParseNode.FP_DEF, randomList),
FPDef(randomList))
self.assertEquals(ParseNode(ParseNode.FP_LIST, randomList),
FPList(randomList))
self.assertEquals(ParseNode(ParseNode.GEN_FOR, randomList),
GenFor(randomList))
self.assertEquals(ParseNode(ParseNode.GEN_IF, randomList),
GenIf(randomList))
self.assertEquals(ParseNode(ParseNode.GEN_ITER, randomList),
GenIter(randomList))
self.assertEquals(ParseNode(ParseNode.LAMBDEF, randomList),
LambDef(randomList))
self.assertEquals(ParseNode(ParseNode.LIST_FOR, randomList),
ListFor(randomList))
self.assertEquals(ParseNode(ParseNode.LIST_IF, randomList),
ListIf(randomList))
self.assertEquals(ParseNode(ParseNode.LIST_ITER, randomList),
ListIter(randomList))
self.assertEquals(ParseNode(ParseNode.LIST_MAKER, randomList),
ListMaker(randomList))
self.assertEquals(ParseNode(ParseNode.NAME, randomList),
Name(randomList))
self.assertEquals(ParseNode(ParseNode.NOT_TEST, randomList),
NotTest(randomList))
self.assertEquals(ParseNode(ParseNode.NUMBER, randomList),
Number(randomList))
self.assertEquals(ParseNode(ParseNode.POWER, randomList),
Power(randomList))
self.assertEquals(ParseNode(ParseNode.SHIFT_EXPR, randomList),
ShiftExpr(randomList))
self.assertEquals(ParseNode(ParseNode.SLICE_OP, randomList),
SliceOp(randomList))
self.assertEquals(ParseNode(ParseNode.STRINGLITERAL, randomList),
StringLiteral(randomList))
self.assertEquals(ParseNode(ParseNode.SUBSCRIPT, randomList),
Subscript(randomList))
self.assertEquals(ParseNode(ParseNode.SUBSCRIPT_LIST, randomList),
SubscriptList(randomList))
self.assertEquals(ParseNode(ParseNode.TERM, randomList),
Term(randomList))
self.assertEquals(ParseNode(ParseNode.TEST, randomList),
Test(randomList))
self.assertEquals(ParseNode(ParseNode.TEST_LIST, randomList),
TestList(randomList))
self.assertEquals(ParseNode(ParseNode.TEST_LIST_GEXP, randomList),
TestListGexp(randomList))
self.assertEquals(ParseNode(ParseNode.TRAILER, randomList),
Trailer(randomList))
self.assertEquals(ParseNode(ParseNode.VAR_ARGS_LIST, randomList),
VarArgsList(randomList))
def testSubclasses(self):
self.assertNotEqual(type(FLCellReferenceParseNode(["A1"])),
ParseNode,
"Subclass of ParseNode had incorrect type")
self.assertNotEqual(type(FLColumnReferenceParseNode(['A_'])),
ParseNode,
"Subclass of ParseNode had incorrect type")
self.assertNotEqual(type(FLRowReferenceParseNode([])),
ParseNode,
"Subclass of ParseNode had incorrect type")
def testFLCell(self):
"test FLCellReferenceParseNode registered"
self.assertIn(ParseNode.FL_COLUMN_REFERENCE, ParseNode.classRegistry,
"FL_COLUMN_REFERENCE not registered in ParseNode class registry")
self.assertIn(ParseNode.FL_ROW_REFERENCE, ParseNode.classRegistry,
"FL_ROW_REFERENCE registered in ParseNode class registry")
self.assertIn(ParseNode.FL_CELL_REFERENCE, ParseNode.classRegistry,
"FL_CELL_REFERENCE not registered in ParseNode class registry")
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides templates which allow variable sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
__all__ = ["make_template"]
def make_template(name_, func_, create_scope_now_=False, unique_name_=None,
custom_getter_=None, **kwargs):
"""Given an arbitrary function, wrap it so that it does variable sharing.
This wraps `func_` in a Template and partially evaluates it. Templates are
functions that create variables the first time they are called and reuse them
thereafter. In order for `func_` to be compatible with a `Template` it must
have the following properties:
* The function should create all trainable variables and any variables that
should be reused by calling `tf.get_variable`. If a trainable variable is
created using `tf.Variable`, then a ValueError will be thrown. Variables
that are intended to be locals can be created by specifying
    `tf.Variable(..., trainable=False)`.
* The function may use variable scopes and other templates internally to
create and reuse variables, but it shouldn't use `tf.global_variables` to
capture variables that are defined outside of the scope of the function.
* Internal scopes and variable names should not depend on any arguments that
are not supplied to `make_template`. In general you will get a ValueError
telling you that you are trying to reuse a variable that doesn't exist
if you make a mistake.
  In the following example, both `z` and `w` will be scaled by the same `y`. It
  is important to note that if we didn't assign `scalar_name` and instead used
  different variable names for `z` and `w`, a `ValueError` would be thrown
  because the template could not reuse the variable.
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
```
As a safe-guard, the returned function will raise a `ValueError` after the
first call if trainable variables are created by calling `tf.Variable`.
If all of these are true, then 2 properties are enforced by the template:
1. Calling the same template multiple times will share all non-local
variables.
2. Two different templates are guaranteed to be unique, unless you reenter the
same variable scope as the initial definition of a template and redefine
     it. An example of this exception:
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
with tf.variable_scope('scope') as vs:
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
# Creates a template that reuses the variables above.
with tf.variable_scope(vs, reuse=True):
scale_by_y2 = tf.make_template('scale_by_y', my_op, scalar_name='y')
z2 = scale_by_y2(input1)
w2 = scale_by_y2(input2)
```
  Depending on the value of `create_scope_now_`, the full variable scope may be
  captured either at the time of first call or at the time of construction. If
  this option is set to True, then all Tensors created by repeated calls to the
  template will have an extra trailing `_N+1` appended to their names, because
  the first time the scope is entered (in the Template constructor) no Tensors
  are created.
Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to
reduce the likelihood of collisions with kwargs.
Args:
name_: A name for the scope created by this template. If necessary, the name
will be made unique by appending `_N` to the name.
func_: The function to wrap.
create_scope_now_: Boolean controlling whether the scope should be created
when the template is constructed or when the template is called. Default
is False, meaning the scope is created when the template is called.
unique_name_: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None.
custom_getter_: Optional custom getter for variables used in `func_`. See
the @{tf.get_variable} `custom_getter` documentation for
more information.
**kwargs: Keyword arguments to apply to `func_`.
Returns:
A function to encapsulate a set of variables which should be created once
    and reused. An enclosing scope will be created, either where `make_template`
is called, or wherever the result is called, depending on the value of
`create_scope_now_`. Regardless of the value, the first time the template
is called it will enter the scope with no reuse, and call `func_` to create
variables, which are guaranteed to be unique. All subsequent calls will
re-enter the scope and reuse those variables.
Raises:
ValueError: if the name is None.
"""
if kwargs:
func_ = functools.partial(func_, **kwargs)
return Template(
name_, func_, create_scope_now=create_scope_now_,
unique_name=unique_name_, custom_getter=custom_getter_)
def _skip_common_stack_elements(stacktrace, base_case):
"""Skips items that the target stacktrace shares with the base stacktrace."""
for i, (trace, base) in enumerate(zip(stacktrace, base_case)):
if trace != base:
return stacktrace[i:]
return stacktrace[-1:]
class Template(object):
"""Wrap a function to aid in variable sharing.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
call. If `create_scope_now_` is passed as True to the constructor, the full
  scope will be captured there, but no variables will be created until the first
call.
"""
def __init__(self, name, func, create_scope_now=False, unique_name=None,
custom_getter=None):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The
        name will be made unique by appending `_N` to it (see how
`tf.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
        time, rather than first call. Defaults to False. Creating the scope at
        construction time may be more convenient if the template is to be passed
through much lower level code, and you want to be sure of the scope
name without knowing exactly where it will be first called. If set to
True, the scope will be created in the constructor, and all subsequent
times in __call__, leading to a trailing numeral being added to the
names of all created Tensors. If set to False, the scope will be created
at the first call location.
unique_name: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is
false, an error is raised. Defaults to None.
custom_getter: optional custom getter to pass to variable_scope()
Raises:
ValueError: if the name is None.
"""
self._func = func
self._stacktrace = traceback.format_stack()[:-2]
self._name = name
self._unique_name = unique_name
self._custom_getter = custom_getter
if name is None:
raise ValueError("name cannot be None.")
if create_scope_now:
with variable_scope._pure_variable_scope( # pylint:disable=protected-access
(self._unique_name or
variable_scope._get_unique_variable_scope(self._name)), # pylint:disable=protected-access
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
else:
self._variable_scope = None
# This variable keeps track of whether the template has been called yet,
# which is not the same as whether the scope has been created.
self._variables_created = False
def _call_func(self, args, kwargs, check_for_new_variables):
try:
vars_at_start = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
trainable_at_start = len(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
result = self._func(*args, **kwargs)
if check_for_new_variables:
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if trainable_at_start != len(trainable_variables):
raise ValueError("Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" %
(trainable_variables[trainable_at_start:],))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
if vars_at_start != len(variables):
logging.info("New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s",
variables[vars_at_start:])
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
if self._variable_scope:
if self._variables_created:
# This is not the first visit to __call__, so variables have already
# been created, and we want to reuse them.
with variable_scope.variable_scope(self._variable_scope, reuse=True):
return self._call_func(args, kwargs, check_for_new_variables=True)
else:
# This is the first visit to __call__, but the scope has already been
# created in the constructor. Set _variables_created so that subsequent
# calls take the if branch above.
self._variables_created = True
with variable_scope.variable_scope(self._variable_scope):
return self._call_func(args, kwargs, check_for_new_variables=False)
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
self._variables_created = True
with variable_scope.variable_scope(
self._unique_name, self._name,
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
return self._call_func(args, kwargs, check_for_new_variables=False)
@property
def variable_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
@property
@deprecated(
"2017-02-21", "The .var_scope property is deprecated. Please change your "
"code to use the .variable_scope property")
def var_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
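# Hedged sketch (not part of the original module): a tiny end-to-end check of
# the sharing semantics documented in make_template() above, assuming a TF 1.x
# graph-mode environment where tf.make_template, tf.get_variable and
# tf.trainable_variables are available.
def _template_sharing_example():
  import tensorflow as tf

  def _scale(x):
    w = tf.get_variable("w", shape=[], initializer=tf.constant_initializer(2.0))
    return x * w

  scale_a = tf.make_template("scale", _scale)
  y1 = scale_a(tf.constant(1.0))   # first call creates scale/w
  y2 = scale_a(tf.constant(3.0))   # second call reuses scale/w
  scale_b = tf.make_template("scale", _scale)
  y3 = scale_b(tf.constant(1.0))   # a new template gets its own uniquified scope
  # One trainable variable per template, shared across that template's calls.
  return len(tf.trainable_variables())  # expected: 2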
|
|
from sfepy.base.base import *
from sfepy.solvers.solvers import EigenvalueSolver
##
# c: 03.03.2008, r: 03.03.2008
class SymeigEigenvalueSolver( EigenvalueSolver ):
name = 'eig.symeig'
##
# c: 03.03.2008, r: 03.03.2008
def __init__( self, conf, **kwargs ):
EigenvalueSolver.__init__( self, conf, **kwargs )
try:
import symeig
self.symeig = symeig.symeig
        except ImportError:
self.symeig = None
output( 'cannot import symeig, required by %s solver' % self.name )
raise
##
# c: 03.03.2008, r: 08.04.2008
def __call__( self, mtx_a, mtx_b = None, n_eigs = None,
eigenvectors = None, status = None, conf = None ):
conf = get_default( conf, self.conf )
mtx_a = get_default( mtx_a, self.mtx_a )
mtx_b = get_default( mtx_b, self.mtx_b )
n_eigs = get_default( n_eigs, self.n_eigs )
eigenvectors = get_default( eigenvectors, self.eigenvectors )
status = get_default( status, self.status )
if n_eigs is None:
rng = None
else:
rng = (1, n_eigs)
tt = time.clock()
mtx_a, mtx_b = self._to_array( mtx_a, mtx_b )
out = self.symeig( mtx_a, mtx_b, range = rng, eigenvectors = eigenvectors )
if status is not None:
status['time'] = time.clock() - tt
return out
##
# c: 03.03.2008, r: 03.03.2008
class ScipyEigenvalueSolver( EigenvalueSolver ):
name = 'eig.scipy'
##
# c: 03.03.2008, r: 03.03.2008
def __init__( self, conf, **kwargs ):
EigenvalueSolver.__init__( self, conf, **kwargs )
##
# c: 03.03.2008, r: 08.04.2008
def __call__( self, mtx_a, mtx_b = None, n_eigs = None,
eigenvectors = None, status = None, conf = None ):
conf = get_default( conf, self.conf )
mtx_a = get_default( mtx_a, self.mtx_a )
mtx_b = get_default( mtx_b, self.mtx_b )
n_eigs = get_default( n_eigs, self.n_eigs )
eigenvectors = get_default( eigenvectors, self.eigenvectors )
status = get_default( status, self.status )
tt = time.clock()
if n_eigs is None:
mtx_a, mtx_b = self._to_array( mtx_a, mtx_b )
out = nla.eig( mtx_a, mtx_b, right = eigenvectors )
if eigenvectors:
eigs = out[0]
else:
eigs = out
ii = nm.argsort( eigs )
if eigenvectors:
mtx_ev = out[1][:,ii]
out = (eigs[ii], mtx_ev)
else:
out = (eigs,)
else:
out = sc.splinalg.eigen_symmetric( mtx_a, k = n_eigs, M = mtx_b )
if status is not None:
status['time'] = time.clock() - tt
return out
##
# c: 08.04.2008, r: 08.04.2008
class ScipySGEigenvalueSolver( ScipyEigenvalueSolver ):
"""Solver for symmetric problems."""
name = 'eig.sgscipy'
##
    # c: 08.04.2008, r: 08.04.2008
def __call__( self, mtx_a, mtx_b = None, n_eigs = None,
eigenvectors = None, status = None, conf = None ):
"""eigenvectors arg ignored, computes them always"""
import scipy.lib.lapack as ll
conf = get_default( conf, self.conf )
mtx_a = get_default( mtx_a, self.mtx_a )
mtx_b = get_default( mtx_b, self.mtx_b )
n_eigs = get_default( n_eigs, self.n_eigs )
eigenvectors = get_default( eigenvectors, self.eigenvectors )
status = get_default( status, self.status )
tt = time.clock()
if n_eigs is None:
mtx_a, mtx_b = self._to_array( mtx_a, mtx_b )
if nm.iscomplexobj( mtx_a ):
if mtx_b is None:
fun = ll.get_lapack_funcs( ['heev'], arrays = (mtx_a,) )[0]
else:
fun = ll.get_lapack_funcs( ['hegv'], arrays = (mtx_a,) )[0]
else:
if mtx_b is None:
fun = ll.get_lapack_funcs( ['syev'], arrays = (mtx_a,) )[0]
else:
fun = ll.get_lapack_funcs( ['sygv'], arrays = (mtx_a,) )[0]
## print fun
if mtx_b is None:
out = fun( mtx_a )
else:
out = fun( mtx_a, mtx_b )
if not eigenvectors:
out = out[0]
else:
out = out[:-1]
else:
out = ScipyEigenvalueSolver.__call__( self, mtx_a, mtx_b, n_eigs,
eigenvectors, status = None )
if status is not None:
status['time'] = time.clock() - tt
return out
##
# c: 03.03.2008, r: 03.03.2008
class PysparseEigenvalueSolver( EigenvalueSolver ):
name = 'eig.pysparse'
def process_conf( conf ):
"""
Missing items are set to default values.
Example configuration, all items:
solver_2 = {
'name' : 'eigen1',
'kind' : 'eig.pysparse',
'i_max' : 150,
'eps_a' : 1e-5,
'tau' : -10.0,
'method' : 'qmrs',
'verbosity' : 0,
'strategy' : 1,
}
"""
get = conf.get_default_attr
i_max = get( 'i_max', 100 )
eps_a = get( 'eps_a', 1e-5 )
tau = get( 'tau', 0.0 )
method = get( 'method', 'qmrs' )
verbosity = get( 'verbosity', 0 )
strategy = get( 'strategy', 1 )
common = EigenvalueSolver.process_conf( conf )
return Struct( **locals() ) + common
process_conf = staticmethod( process_conf )
def _convert_mat(mtx):
from pysparse import spmatrix
A = spmatrix.ll_mat(*mtx.shape)
for i in xrange( mtx.indptr.shape[0] - 1 ):
ii = slice( mtx.indptr[i], mtx.indptr[i+1] )
n_in_row = ii.stop - ii.start
A.update_add_at( mtx.data[ii], [i] * n_in_row, mtx.indices[ii] )
return A
_convert_mat = staticmethod( _convert_mat )
##
# c: 03.03.2008, r: 03.03.2008
def __init__( self, conf, **kwargs ):
EigenvalueSolver.__init__( self, conf, **kwargs )
##
# c: 03.03.2008, r: 03.03.2008
def __call__( self, mtx_a, mtx_b = None, n_eigs = None,
eigenvectors = None, status = None, conf = None ):
from pysparse import jdsym, itsolvers, precon
conf = get_default( conf, self.conf )
mtx_a = get_default( mtx_a, self.mtx_a )
mtx_b = get_default( mtx_b, self.mtx_b )
n_eigs = get_default( n_eigs, self.n_eigs )
eigenvectors = get_default( eigenvectors, self.eigenvectors )
status = get_default( status, self.status )
output( "loading..." )
A = self._convert_mat( mtx_a )
output( "...done" )
if mtx_b is not None:
M = self._convert_mat( mtx_b )
output( "solving..." )
tt = time.clock()
Atau=A.copy()
Atau.shift(-conf.tau,M)
K=precon.jacobi(Atau)
        A = A.to_sss()
        if mtx_b is not None:
            M = M.to_sss()
method = getattr( itsolvers, conf.method )
kconv, lmbd, Q, it, it_in = jdsym.jdsym( A, M, K, n_eigs, conf.tau,
conf.eps_a, conf.i_max,
method,
clvl = conf.verbosity,
strategy = conf.strategy )
output( "number of converged eigenvalues:", kconv )
ttt = time.clock() - tt
output( '...done in %.2f s' % ttt )
if status is not None:
status['time'] = ttt
status['q'] = Q
status['it'] = it
status['it_in'] = it_in
return lmbd, Q
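##
# Hedged, standalone sketch (not part of sfepy): the dense branch that
# ScipyEigenvalueSolver.__call__() takes when n_eigs is None amounts to a
# sorted generalized eigensolution A x = lambda B x; shown here with
# scipy.linalg directly on a tiny symmetric pencil.
def _dense_generalized_eig_example():
    import numpy as np
    import scipy.linalg as sla
    mtx_a = np.array([[2.0, 0.0], [0.0, 3.0]])
    mtx_b = np.eye(2)
    eigs, vecs = sla.eig(mtx_a, mtx_b, right=True)
    order = np.argsort(eigs)
    return eigs[order].real, vecs[:, order]   # eigenvalues -> [2., 3.]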
|
|
"""
DontCamp.com Administrative Support System v. 1.3
written by: butter@dontcamp.com
Revision: $Revision: 1.30 $
Changelog:
1.1.2:
* changed the ass class layout significantly to make better use of
a branched object structure
* broke logic into more functions to make extensions both easier to write
and more powerful
1.1.1 beta:
        * playernames with prefixes can now issue commands
1.1 beta:
* added userlevels via a kick points system a la PB power users
* added general public vote kicking
* using keyhash rather than PB GUID for auth
* moved to more OO codebase
* extensions can now act on anything in the dc_ass object
Acknowledgements:
Thanks to dst@dontcamp.com for significant coding contributions.
Thanks to superwabbit@dontcamp.com for his conceptual contributions.
Thanks to my brother for helping me with fundamental coding concepts.
Thanks to the guy that was willing to sell me a T40 ThinkPad on eBay for
that insanely low price!
Dear God, thanks be to O'Reilly!
And a REALLY BIG thanks to Kevin Lockitt of blackbagops.com. This software
is clearly a big stinkin' rip off of BFSM, a program that did as much
as anything to make DontCamp.com the successful gaming community that
it is.
Purpose:
To provide an in-game chat based admin system similar to Kevin Lockitt's
Battlefield Server Manager while also providing a kick vote system
similar to PunkBuster's Poweruser system.
Requirements:
1. dc_debug.py - a common debugging function for all DontCamp.com BF2
python level tools
2. dc_ass_extensions.py - this file is currently required to exist but
can be empty. See dc_ass_extensions Instructions.
3. dc_ass_cmds.ini - the configuration file for all in-game admin commands
    4. power.dat - a list of your admins' power levels and keyhashes
Known issues:
1. writeToLogfile() is not currently tested or up-to-date and is therefore
disabled.
2. In general be very careful what you code in an extension. The BF2
server may lag or crash when running poorly written extensions.
3. dc_ass currently requires punkbuster to be enabled.
4. There is almost no sanity checking for poorly configured INI files.
5. Non permanent bans are not maintained between server restarts.
Setup:
1. Drop dc_ass.py and dc_debug.py into the admin/standard_admin directory
of your BF2 installation.
2. Edit admin/standard_admin/__init__.py file to include these lines:
import dc_ass
dc_ass.init()
3. Create an empty dc_ass_extensions.py file or populate it as per the
dc_ass_extensions instructions below.
4. Create a power.dat file as per the instructions below.
5. Create / edit a dc_ass_cmds.ini file as per the instructions below.
power.dat Instructions:
This file is simply a space delimited file with two values on each line
that must exist in the admin/standard_admin directory. The first value is
the power level assignment and the second value is the keyhash.
    Currently dc_ass is lacking some flexibility in that 10 kick points always
causes a player to be kicked. Keep that in mind when assigning power
levels. The power level must be a positive integer but can otherwise be
whatever you like.
    The keyhash for a user is a 32-digit hexadecimal number that can be found when
    executing admin.listplayers in the console. I have written a dc_ass
    extension that prints the keyhash for a user while in-game and I will likely
    make that extension available in the future. Please note that BF2
    keyhashes are NOT the same thing as PB GUIDs.
An example power.dat file:
10 352a507c78da1dea6dd42a5867d3c2cc
4 2365c8ef755723efc10b425fffb65ebe
    This example shows that the player with the keyhash:
352a507c78da1dea6dd42a5867d3c2cc can apply 10 kick points to each player
and that the player with the keyhash: 2365c8ef755723efc10b425fffb65ebe can
apply 4 kick points to each player.
Since 10 kick points kicks a user, we can assume that any admin with a
kick power of 10 is a full admin and therefore they can run "admin"
commands. See the section on the dc_ass_cmds.ini file for more info.
dc_ass_cmds.ini Instructions:
This file follows standard INI file formatting. Each section name is the
name of a command that can be run from in-game text chat. Each section
must have a "type" option. Supported types are:
kick - a simple kick
ban - a permanent ban (or possibly temporary)
rcon - user defined rcon command execution
extension - user defined python function execution
Depending on the type, different options are required. Each type supports
the "level" option. The level option dictates what a user's minimum level
    must be in order to execute the command in question. The level option is
    optional and defaults to whatever the adminLevel var is set to.
(* = required)
Type: kick
Options:
        *reason = (a non-delimited string containing the reason for this kick)
        length = (an integer representing how many minutes before the
        offender will be allowed back into the game. If this option is
missing, the default value will be 2.)
Type: ban
Options:
*reason = (same as above)
length = (same as above) (see known issue #5)
Type: rcon
Options:
        *command = (a non-delimited string containing the rcon command you want
to issue)
Type: extension
Options:
*function = (the name of the function in dc_ass_extensions.py you want
this command to run WITHOUT any (). See dc_ass_extensions
Instructions.)
An example dc_ass_cmds.ini file might look like this:
[kfms]
type = kick
reason = for firing from a vehicle in/into an uncapturable spawn
length = 5
[brsc]
type = ban
reason = for a racist or slanderous comment
[restart]
type = rcon
command = admin.restartmap
[slap]
type = extension
function = slapPlayer
[nextmap]
type = extension
function = sayNextMap
level = 0
NOTE: Changes made to the dc_ass_cmds.ini file are in effect immediately
and do not require a server restart.
dc_ass_extensions.py Instructions:
The extensions provide a means to do more complex actions that cannot be
achieved through a simple rcon command. This file works similarly to other
python files inside the BF2 environment. It's simply a list of functions
as well as the proper imports to provide the facility the functions
require.
NOTE: Each function that is run as an extension must accept one and only
    one parameter. That parameter will be the dc_ass admin object most notably
containing these properties:
        victimID - a list of integers giving the victim IDs found via the
        parameter of the command executed
victimTracker - a dictionary of all the people currently being tracked
by dc_ass (in other words, people that have had kick points issued
        against them). This is a multidimensional dictionary where the top
        level key is the player_id / slot number and the value is another
        dictionary where the key is the keyhash of the issuer of the points and
        the value is a dictionary with the self-explanatory keys: points and
reason.
        issuer - an object representing the person that executed
the current command. The typical properties you'll see for this object
are index and keyhash
command - an object containing various properties for the command being
        issued. Command properties are: string, arguments, type, length,
reason, function, level, rconString, extension
config - this is the INI config file object for dc_ass
    See http://dontcamp.com/code/ for an example dc_ass_extensions.py file
    (a minimal sketch of one such extension also follows the imports below).
Note: Changes made to the dc_ass_extensions.py file are in effect
immediately and do not require a server restart.
How to use all this once you've got it setup:
All commands are issued with in-game chat, NOT console commands. To my
knowledge, these commands should work in any chat mode. Each command MUST
be prefixed with an ! and optionally suffixed with a space followed by a
case insensitive playername, partial playername, or a . followed by a
    player number. Additionally, you may prefix an @ in front of a case
insensitive string to execute an action against all players containing
that string in their playername. You may also use a % followed by a team
number to execute a command against an entire team.
Using our previous example INI file and given that the server has two
players on it: Butter (in slot 0) and MuttDog (in slot 1) here's how the
following commands would react...
!kfms
[ Whenever no parameter for a victim is given, dc_ass will
assume the player ID of the admin issuing the command. In this
case the admin will be kicked for 5 minutes for firing into
a main spawn. ]
!kfms .0
[ This will kick Butter for 5 minutes for firing into a main spawn
because butter is player number 0 / in slot 0. ]
!kfms butter
[ This will also kick Butter because butter matches Butter in our
case insensitive string comparison. ]
!kfms ter
[ This will also kick Butter because ter is a string match in
Butter's name and ONLY in Butter's name. ]
!kfms utt
[ This will kick no one but print a global message in green at the
top left of the screen that utt is ambiguous. ]
!kfms dst
[ This will kick no one and print a global message saying "Could
not find ID for dst". ]
!kfms @t
        [ This will kick both Butter and MuttDog as they both have a t in
          their playername. ]
    !kfms @T
        [ This will still kick both Butter and MuttDog because our text search
          is case insensitive. ]
!kfms %2
[ This will kick everyone on team 2. ]
"""
import bf2
import host
import re
import ConfigParser
import time
from dc_debug import decho
from dc_debug import showLevel
import dc_ass_extensions
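# Hedged sketch (an assumption, not shipped with dc_ass): a minimal extension of
# the kind described in the module docstring above. A function like this would
# normally live in dc_ass_extensions.py, take the ass object as its only
# parameter, and be wired to a chat command via "type = extension" /
# "function = sayVictims" in dc_ass_cmds.ini.
def _example_extension_sayVictims(admin):
    # resolve the victims from the command's arguments (as execKick/execBan do),
    # then announce every match in-game via decho()
    if admin.getVictimIDs(admin.command.arguments):
        for vID in admin.victimID:
            decho( 'dc_ass: %s matched player %s' % ( admin.command.string,
                   bf2.PlayerManager.Player(vID).getName() ), 1 )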
class container:
pass
class ass:
# some init vars
victimTracker = {}
adminLevel = 10
defaultLevel = 2
kickThreshold = 10
"""
getPlayerList(): returns a dictionary of players currently playing
    where the key is their player_id/index and the value is a list
    containing their playername, IP, outbound port, and CD-key hash.
Thanks to Woody for doing all the hard work on this one.
"""
def getPlayerList(self):
decho('dc_ass: entering getPlayerList() method', 5)
rawData = host.rcon_invoke( 'admin.listplayers' )
# this pattern is for line 0 of the output for each player
# pattern1 = re.compile(r'''^Id:\ +(\d+)\ -\ (\S+)\ is\ remote\ ip:\ (\d+\.\d+\.\d+\.\d+):(\d+)''', re.VERBOSE)
pattern1 = re.compile(r'''^Id:\ +(\d+)\ -\ (.*?)\ is\ remote\ ip:\ (\d+\.\d+\.\d+\.\d+):(\d+)''', re.VERBOSE)
# this pattern is for line 1 of the output for each player
pattern2 = re.compile(r'''(?:.*hash:\ (\w{32}))?''', re.VERBOSE)
players = {}
i = 0
for line in rawData.split("\n"):
# if we're looking at a "line 0"
if i == 0:
matches = pattern1.findall(line)
if len(matches) != 0:
p_id = int(matches[0][0])
players[p_id] = []
players[p_id].append(matches[0][1])
players[p_id].append(matches[0][2])
players[p_id].append(matches[0][3])
# if we're looking at a "line 1"
elif i == 1:
matches = pattern2.findall(line)
players[p_id].append(matches[0])
# flop the value of the iter
i ^= 1
decho( 'dc_ass: exiting getPlayerList() method', 5 )
return players
def getMapList(self):
rawData = host.rcon_invoke( 'maplist.list' )
pattern = re.compile('(\d+): "(\S*?)" (\S*) (\d+)')
# init the mapList
mapList = {}
for line in rawData.split("\n"):
matches = pattern.findall(line)
if len(matches) != 0:
mapID = int(matches[0][0])
mapList[mapID] = {'name':matches[0][1], 'gpm':matches[0][2], 'size':matches[0][3]}
return mapList
"""
getKeyhashFromIndex(): find the keyhash of the user at index
returns a 32-digit hex string that is the user's keyhash at index
returns False on failure
"""
def getKeyhashFromIndex(self, index):
decho( 'dc_ass: entering getKeyhashFromIndex() method', 5 )
# make False the default return value
returnValue = False
# get a list of players connected right now
players = self.getPlayerList()
# is our index in this list of players?
if players.has_key(index):
# grab their keyhash
returnValue = players[index][3]
decho( 'dc_ass: exiting getKeyhashFromIndex() method', 5 )
return returnValue
"""
getLevelFromIndex(): finds the admin level of the user at index
returns a 2 element tuple with the admin level and keyhash of the user at index
returns false on failure
"""
def getLevelFromIndex(self, index):
decho( 'dc_ass: entering getLevelFromIndex() method', 5 )
# make False the default return value
returnValue = False
# grab their keyhash
keyhash = self.getKeyhashFromIndex(index)
if keyhash:
decho( 'dc_ass: Attempting to authenticate keyhash: %s' % keyhash, 5 )
            # just in case there is no power.dat file
try:
# loop through each line looking for their keyhash
for line in open('admin/standard_admin/power.dat', 'r'):
if keyhash == line.split(' ')[1].strip():
# assign their level
userLevel = int(line.split(' ')[0].strip())
decho( 'dc_ass: %s is level %d in power.dat!' % (keyhash, userLevel), 5 )
break
except IOError:
                decho( 'dc_ass: unable to open admin/standard_admin/power.dat file', 5 )
userLevel = None
returnValue = (userLevel, keyhash)
decho( 'dc_ass: exiting getLevelFromIndex() method', 5 )
return returnValue
"""
getVictimIDs(): finds all victims from the in-game chat command parameter
    returns true on success and false on failure (the failure case is not being able to determine with certainty which victims the command is for)
assigns list self.victimID
"""
def getVictimIDs(self, string):
decho( 'dc_ass: entering getVictimIDs() method', 5 )
# init/destory the old victim list
self.victimID = []
# default value for result
result = False
decho( 'dc_ass: Attempting to find victim ID...', 5 )
# if the string is the player number
if string[0:1] == '.':
id = int(string[1:])
for p in bf2.playerManager.getPlayers():
if p.index == id:
self.victimID = [id]
decho( 'dc_ass: string was an int, and id = %d' % self.victimID[0], 5 )
result = True
break
if not result:
decho( 'dc_ass: (ERROR) no player found with ID: %d' % id, 1 )
# if the string is a wildcard / clantag
elif string[0:1] == '@':
decho( 'dc_ass: finding ALL matching players with string...', 5 )
decho( 'dc_ass: string was NOT an int', 5 )
# for all players connected...
for p in bf2.playerManager.getPlayers():
decho( 'dc_ass: checking player: %s' % p.getName(), 5 )
                # if we find a name that loosely matches...
                if p.getName().lower().find( string[1:].lower() ) != -1:
                    decho( 'dc_ass: %s loosely matches %s' % ( string[1:], p.getName() ), 5 )
self.victimID.append(p.index)
result = True
if result:
decho( 'dc_ass: Victim ID(s) found @ %r' % self.victimID, 5 )
else:
decho( 'dc_ass: (ERROR) no players found with %s in their name' % string[1:], 1 )
# if the string is a team number
elif string[0:1] == '%':
decho( 'dc_ass: finding ALL matching players on team %s' % string[1:], 5 )
# for all players connected...
for p in bf2.playerManager.getPlayers():
decho( 'dc_ass: checking player: %s' % p.getName(), 5 )
# if we find a player on the team supplied
decho( 'dc_ass: %s is on team %d' % ( p.getName(), p.getTeam() ), 5 )
if p.getTeam() == int(string[1:]):
self.victimID.append(p.index)
result = True
if result:
decho( 'dc_ass: Victim ID(s) found @ %r' % self.victimID, 5 )
else:
decho( 'dc_ass: (ERROR) there seems to be no one on team %s' % string[1:], 1 )
# if the string is a playername or partial playername
else:
            # set a sentinel value for id so we know if we've found anyone yet
id = -1
decho( 'dc_ass: string was NOT an int', 5)
# for all players connected...
for p in bf2.playerManager.getPlayers():
decho( 'dc_ass: checking player: %s' % p.getName(), 5 )
# if we find a name that loosely matches...
if p.getName().lower().find( string.lower() ) != -1:
decho( 'dc_ass: %s loosely matches %s' % ( string, p.getName() ), 5 )
# if this is the first victim we've found...
if id == -1:
decho( 'dc_ass: found %s in %s' % ( string, p.getName() ), 5)
id = [int(p.index)]
result = True
# if we've gotten another possible match...
else:
result = False
break
if id != -1:
if result:
self.victimID = id
decho( 'dc_ass: Victim ID found @ %d' % self.victimID[0], 5 )
else:
decho( 'dc_ass: %s is ambiguous.' % string, 1 )
else:
decho( "dc_ass: (ERROR) no players were found against: '%s'" % string, 1 )
decho( 'dc_ass: exiting getVictimIDs() method', 5 )
return result
"""
def writeLogFile(self, victimID, admin_id, type, reason, length):
v = bf2.PlayerManager.Player(victimID)
a = bf2.PlayerManager.Player(admin_id)
decho("\"%s\",%s,%d,%s,%d,%s,%s,%d,%s\n" % (v.getName(), v.getAddress(), v.getProfileId(), a.getName(), int(time.time()), bf2.gameLogic.getMapName(), type, reason, length * 60), 5)
fo = open('admin/standard_admin/adminlog.csv', 'a')
# v.name, v.ip, v.profileId, a.name, timestamp, map, type, length, reason
fo.write("\"%s\",%s,%d,%s,%d,%s,%s,%d,%s\n" % (v.getName(), v.getAddress(), v.getProfileId(), a.getName(), int(time.time()), bf2.gameLogic.getMapName(), type, reason, length * 60))
fo.close()
"""
"""
    stripPrefix(): removes context prefixes from a chat line and returns the trimmed chat line
"""
def stripPrefix(self, text):
decho('dc_ass: entering stripPrefix() method', 5)
text = text.replace( 'HUD_TEXT_CHAT_TEAM', '' )
text = text.replace( 'HUD_TEXT_CHAT_SQUAD', '' )
text = text.replace( 'HUD_CHAT_DEADPREFIX', '' )
text = text.replace( "*\xA71DEAD\xA70*", '' )
decho('dc_ass: exiting stripPrefix() method', 5)
# in later versions of BF2 we sometimes saw extra white space in the
# chat text
return text.strip()
"""
splitArguments(): splits a string into separate arguments
    returns a list of all space-, double-quote-, or single-quote-delimited strings
"""
def splitArguments(self, string):
# init the return list
returnValue = []
# string can == None if there are no arguments
if string != None:
# init the iterator
i = 0
# grab the length of the arguments string
length = len(string)
while i < length:
# handle strings inside ""
if string[i] == '"':
# advance the iter past the "
i += 1
# find the next "
nextMark = string[i:].find('"')
# if we didn't find another " grab the rest of the string
if nextMark == -1:
returnValue.append(string[i:])
break
else:
# grab the string inside the ""
returnValue.append(string[i:i + nextMark])
# advance the iter past the second "
i = i + nextMark + 1
# handle strings inside ''
elif string[i] == "'":
i += 1
nextMark = string[i:].find("'")
if nextMark == -1:
returnValue.append(string[i:])
break
else:
returnValue.append(string[i:i + nextMark])
i = i + nextMark + 1
# handle other space separated strings
else:
nextMark = string[i:].find(' ')
if nextMark == -1:
returnValue.append(string[i:])
i = length
else:
returnValue.append(string[i:i + nextMark])
i = i + nextMark + 1
return returnValue
"""
addPoints(): adds points to the tracker from the issuer to the players @ self.victimID
returns true if points were added and false if not
"""
def addPoints(self):
decho( 'dc_ass: entering addPoints() method', 5 )
# set the default returnValue
returnValue = False
# add entries in the victimTracker foreach victim
for vID in self.victimID:
# check to see if we're already tracking the joker
if self.victimTracker.has_key(vID):
decho( 'dc_ass: already tracking %d' % vID, 5 )
# check to see if this admin has already applied their level to the victim
if not self.victimTracker[vID].has_key(self.issuer.keyhash):
if self.manageTracker(vID, self.issuer.keyhash, self.issuer.level, self.command.reason):
decho( 'dc_ass: applied %d points from %s to player %d' % (self.issuer.level, self.issuer.keyhash, vID), 5 )
returnValue = True
# this elif was really added to deal with an extension used at DontCamp.com but it makes sense to have anyway
                # this elif allows a full admin to apply their kick points even if they have already applied points to this victim
elif self.issuer.level == self.adminLevel:
if self.manageTracker(vID, self.issuer.keyhash, self.issuer.level, self.command.reason):
returnValue = True
else:
decho( 'dc_ass: %s has already applied their points to %s' % (bf2.PlayerManager.Player(self.issuer.index).getName(), bf2.PlayerManager.Player(vID).getName()), 1 )
elif self.manageTracker(vID, self.issuer.keyhash, self.issuer.level, self.command.reason):
decho( 'dc_ass: applied %d points from %s to player %d' % (self.issuer.level, self.issuer.keyhash, vID), 5 )
returnValue = True
decho( 'dc_ass: exiting addPoints() method', 5 )
return returnValue
"""
manageTracker(): changes/adds/removes points given by 'keyhash' to 'victimID' via 'delta' and 'reason'
    returns true if any points were successfully applied
returns false if no points were applied
"""
def manageTracker(self, victimID, keyhash, delta, reason):
decho( 'dc_ass: entering manageTracker() method', 5)
returnValue = True
# if they're not already being tracked...
if not self.victimTracker.has_key(victimID):
self.victimTracker[victimID] = {}
decho( 'dc_ass: now tracking player: %d' % victimID, 5 )
# has this keyhash already applied their points?
if self.victimTracker[victimID].has_key(keyhash):
# apply the delta
self.victimTracker[victimID][keyhash]['points'] += delta
# if the resulting delta totally removes this keyhash points then delete the keyhash from their tracker
if self.victimTracker[victimID][keyhash]['points'] <= 0:
del(self.victimTracker[victimID][keyhash])
# if this keyhash HAS NOT already applied their points, only if the delta is > 0 should we even touch the tracker
elif delta > 0:
self.victimTracker[victimID][keyhash] = {'points': delta, 'reason': reason}
else:
decho( 'dc_ass: no action taken where issuer points <= 0', 2)
returnValue = False
decho( 'dc_ass: exiting manageTracker() method', 5)
return returnValue
"""
    getPointsFromIndex(): returns the total accumulated points against the player at index
returns 0 if player at index is not being tracked
"""
def getPointsFromIndex(self, index):
decho( 'dc_ass: entering getPointsFromIndex() method', 5 )
# set default returnValue
returnValue = 0
# is this player index being tracked?
if self.victimTracker.has_key(index):
# init value for totalPoints
totalPoints = 0
            for unusedKey, record in self.victimTracker[index].iteritems():
totalPoints += record['points']
returnValue = totalPoints
decho( 'dc_ass: exiting getPointsFromIndex() method', 5 )
return returnValue
"""
    checkPoints(): checks to see if anyone being tracked has accumulated enough points to be kicked
returns nothing! You son of a bitch!
"""
def checkPoints(self):
decho('dc_ass: entering checkPoints() method', 5)
for index, tracking in self.victimTracker.iteritems():
# reset totalPoints for this victim (as we loop through them)
            # this variable holds the cumulative value of all points so far applied to a player
totalPoints = self.getPointsFromIndex(index)
            # if we see any of our victims here let's print to the screen that they have new and shiny points
if index in self.victimID:
decho( 'dc_ass: %s now has %d of %d kick points' % (bf2.PlayerManager.Player(index).getName(), totalPoints, self.kickThreshold), 1 )
if totalPoints >= self.kickThreshold:
# issue the kick command
decho( "dc_ass: Kicking player '%s' (%d) %s" % (bf2.PlayerManager.Player(index).getName(), index, self.command.reason), 1 )
# need to change this to show them ALL the reasons they were kicked
unused = host.rcon_invoke('pb_sv_kick %d %d %s' % (index + 1, self.command.length, self.command.reason) )
decho('dc_ass: exiting checkPoints() method', 5)
"""
processPoints(): this function simply executes checkPoints() conditionally on executing addPoints()
    this can simplify extension code: where before you had to call both addPoints() and checkPoints(), you
    can now call only processPoints()
returns nothing
"""
def processPoints(self):
decho( 'dc_ass: entering processPoints() method', 5 )
if self.addPoints():
self.checkPoints()
decho( 'dc_ass: exiting processPoints() method', 5 )
"""
prepExec(): returns true on success and false on failure
finds the issuer's admin level and keyhash and ensures that the issuer has rights
    to execute the command being attempted
"""
def prepExec(self):
decho( 'dc_ass: entering prepExec() method', 5 )
# set default returnValue
returnValue = False
# find the issuer's level and keyhash
levelAndKeyhash = self.getLevelFromIndex(self.issuer.index)
if levelAndKeyhash:
# assign the level and keyhash as properties to the issuer object
# see if we need to assign the default level for this user
if levelAndKeyhash[0] == None:
self.issuer.level = self.defaultLevel
else:
self.issuer.level = levelAndKeyhash[0]
self.issuer.keyhash = levelAndKeyhash[1]
# is the issuer allowed to run this command?
if self.issuer.level >= self.command.level:
returnValue = True
else:
decho ( 'dc_ass: %s is not authorized for the requested action' % bf2.PlayerManager.Player(self.issuer.index).getName(), 1 )
else:
decho( 'dc_ass: ERROR failed to get keyhash for player: %s' % bf2.PlayerManager.Player(self.issuer.index).getName(), 1 )
decho( 'dc_ass: exiting prepExec() method', 5 )
return returnValue
def execKick(self):
decho('dc_ass: entering execKick() method', 5)
if self.prepExec():
# make a victim list
if self.getVictimIDs(self.command.arguments):
self.processPoints()
else:
decho( 'dc_ass: Victim ID not findable given the arguments', 2 )
def execBan(self):
decho( 'dc_ass: entering execBan() method', 5 )
if self.prepExec():
# make a victim list
if self.getVictimIDs(self.command.arguments):
for vID in self.victimID:
decho( "dc_ass: Banning player '%s' (%d) %s" % (bf2.PlayerManager.Player(vID).getName(), vID, self.command.reason), 1 )
unused = host.rcon_invoke( 'pb_sv_ban %d %s' % (vID + 1, self.command.reason) )
else:
decho( 'dc_ass: Victim ID not findable given the arguments', 2 )
def execRcon(self):
decho( 'dc_ass: entering execRcon() method', 5 )
if self.prepExec():
decho( 'dc_ass: Running rcon command: %s' % self.command.rconString, 2 )
if self.command.arguments == None:
unused = host.rcon_invoke( self.command.rconString)
else:
unused = host.rcon_invoke( self.command.rconString + ' ' + self.command.arguments )
def execExtension(self):
decho( 'dc_ass: entering execExtension() method', 5 )
if self.prepExec():
# reload the extensions file
reload( dc_ass_extensions )
if self.command.function in dc_ass_extensions.__dict__:
decho( 'dc_ass: executing %s() extension' % self.command.function, 2 )
# send the entire ass object to the extensions
exec( 'dc_ass_extensions.%s(%s)' % (self.command.function, 'self') )
else:
decho( 'dc_ass: %s() extension not found.' % self.command.function, 2 )
def getCommandData(self, command):
decho( 'dc_ass: entering getCommandData() method', 5 )
# set a default returnValue
returnValue = False
# read in the commands INI file
try:
self.config = ConfigParser.ConfigParser()
self.config.read('admin/standard_admin/dc_ass_cmds.ini')
# is this a defined command in the INI file
if command in self.config.sections():
decho( 'dc_ass: command: %s found in INI file' % command, 5 )
# define/declare some vars for this admin event
                # (I really wouldn't need to do this if I knew how to check whether a var is defined or even exists)
type = None
# the default length for a kick
length = 2
rconString = None
# default reason, is no reason at all
reason = 'No reason given'
# this makes all commands default to requiring full admin privs
level = self.adminLevel
function = None
# get all possible options
                # I could add sanity checks in here for poorly formatted lines in the INI file
for option in self.config.options(command):
if option == 'type':
type = self.config.get(command, option).strip()
elif option == 'reason':
reason = self.config.get(command, option).strip()
elif option == 'length':
length = int(self.config.get(command, option))
elif option == 'command':
rconString = self.config.get(command, option).strip()
elif option == 'level':
level = int(self.config.get(command, option))
elif option == 'function':
function = self.config.get(command, option).strip()
returnValue = (type, reason, length, rconString, level, function)
else: # end of is command in INI file conditional
decho( 'dc_ass: (ERROR) %s command not found!' % command, 1 )
except IOError:
decho( 'dc_ass: (FATAL ERROR) could not open dc_ass_cmds.ini', 1 )
return returnValue
def onChatMessage(self, player_id, text, channel, flags):
decho('dc_ass: entering onChatMessage() method', 5)
self.issuer = container()
self.issuer.index = player_id
# pull the potential prefix off the text line
self.chatString = self.stripPrefix(text)
# unless the first character is ! don't do anything
if self.chatString[0:1] == '!':
decho( 'dc_ass: the first character of %s was !' % self.chatString, 5 )
# grab the parts of the chatline I need with a REGEX
pattern = re.compile(r'!(\w*) ?(.*)')
matches = pattern.findall(text)
self.command = container()
self.command.string = matches[0][0]
decho( 'dc_ass: command.string = %s' % self.command.string, 5 )
# in case the command doesn't require a victim parameter...
if matches[0][1] != '':
self.command.arguments = matches[0][1]
decho( 'dc_ass: arguments = %s' % self.command.arguments, 5 )
else:
self.command.arguments = None
decho( 'dc_ass: no arguments given', 5 )
commandData = self.getCommandData(self.command.string)
if commandData:
self.command.type = commandData[0]
self.command.reason = commandData[1]
self.command.length = commandData[2]
self.command.rconString = commandData[3]
self.command.level = commandData[4]
self.command.function = commandData[5]
# "switch" for the various command types
if self.command.type == 'kick':
self.execKick()
elif self.command.type == 'ban':
self.execBan()
elif self.command.type == 'rcon':
self.execRcon()
elif self.command.type == 'extension':
self.execExtension()
else:
                    decho( 'dc_ass: (ERROR) No type value for %s in dc_ass_cmds.ini file!' % self.command.string, 1 )
def onPlayerDisconnect(self, p):
decho('dc_ass: entering onPlayerDisconnect() method', 5)
# if they disconnect, which might have happened if they were kicked, drop them from the tracker, if they're even in it
if self.victimTracker.has_key(p.index):
del(self.victimTracker[p.index])
decho( 'dc_ass: player %d disconnected and no longer tracked' % p.index, 2 )
admin = ass()
def init():
decho('dc_ass: initializing DontCamp.com Admin Support System', 2)
host.registerHandler('ChatMessage', admin.onChatMessage, 1)
host.registerHandler('PlayerDisconnect', admin.onPlayerDisconnect, 1)
showLevel()
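# A hypothetical sketch of what a dc_ass_cmds.ini section could look like, based only on
# the options parsed in getCommandData() above (type, reason, length, command, level,
# function). The command names and values below are illustrative, not the shipped config:
#
#   [fly]
#   type = kick
#   reason = No aircraft allowed on this server
#   length = 2
#   level = 50
#
#   [restart]
#   type = rcon
#   command = admin.restartMap
#   level = 100
#
# A chat line of "!fly <player>" would then add kick points via processPoints(), while
# "!restart" runs the rcon string directly; both are gated by the issuer's admin level.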
|
|
from calendar import timegm
from datetime import datetime, timedelta
from requests import Session
import pyfilemail as pm
from pyfilemail import logger, login_required, load_config, get_configfile
from urls import get_URL
from transfer import Transfer
from errors import hellraiser, FMBaseError
class User(object):
"""This is the entry point to filemail.
If you use a registered username you'll need to provide
a password to login. If no password is passed during init a search for
password is done in $HOME/.netrc
You may also login at a later time with the :func:`User.login` function.
:param username: your email/username
:param password: filemail password if registered username is used
:type username: str
:type password: str
::
#$HOME/.netrc example:
machine yourfilemailuser@email.com
login yourfilemailuser@email.com
password topsecretpassword
"""
def __init__(self, username, password=None):
self.username = username
self.transfers = []
self.session = Session()
self.session.cookies['source'] = 'Desktop'
self.config = load_config()
apikey = self.config.get('apikey')
self.session.cookies['apikey'] = apikey
if apikey.startswith('GET KEY AT:'):
msg = 'No API KEY set in {conf}.\n{apikey}\n'
logger.warning(msg.format(conf=get_configfile(), apikey=apikey))
if password is None and pm.NETRC:
machine = pm._netrc.authenticators(username)
if machine:
password = machine[2]
else:
password = None
if password is not None:
self.login(password)
else:
self.session.cookies['logintoken'] = None
@property
def is_registered(self):
"""If user is a registered user or not.
:rtype: bool
"""
return not self.session.cookies.get('logintoken')
@property
def logged_in(self):
"""If registered user is logged in or not.
:rtype: bool
"""
        return bool(self.session.cookies.get('logintoken'))
def login(self, password):
"""Login to filemail as the current user.
:param password:
:type password: ``str``
"""
method, url = get_URL('login')
payload = {
'apikey': self.config.get('apikey'),
'username': self.username,
'password': password,
'source': 'Desktop'
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def logout(self):
"""Logout of filemail and closing the session."""
# Check if all transfers are complete before logout
self.transfers_complete
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
method, url = get_URL('logout')
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
self.session.cookies['logintoken'] = None
return True
hellraiser(res)
@property
def transfers_complete(self):
"""Check if all transfers are completed."""
for transfer in self.transfers:
if not transfer.is_complete:
error = {
'errorcode': 4003,
'errormessage': 'You must complete transfer before logout.'
}
hellraiser(error)
@login_required
def get_sent(self, expired=False, for_all=False):
"""Retreve information on previously sent transfers.
:param expired: Whether or not to return expired transfers.
:param for_all: Get transfers for all users.
Requires a Filemail Business account.
:type for_all: bool
:type expired: bool
:rtype: ``list`` of :class:`pyfilemail.Transfer` objects
"""
method, url = get_URL('get_sent')
payload = {
'apikey': self.session.cookies.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'getexpired': expired,
'getforallusers': for_all
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return self._restore_transfers(res)
hellraiser(res.json())
@login_required
def get_user_info(self, save_to_config=True):
"""Get user info and settings from Filemail.
:param save_to_config: Whether or not to save settings to config file
:type save_to_config: ``bool``
        :rtype: ``dict`` containing user information and default settings.
"""
method, url = get_URL('user_get')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
settings = res.json()['user']
if save_to_config:
self.config.update(settings)
return settings
hellraiser(res)
@login_required
def update_user_info(self, **kwargs):
"""Update user info and settings.
:param \*\*kwargs: settings to be merged with
            :func:`User.get_configfile` settings and sent to Filemail.
:rtype: ``bool``
"""
if kwargs:
self.config.update(kwargs)
method, url = get_URL('user_update')
res = getattr(self.session, method)(url, params=self.config)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def get_received(self, age=None, for_all=True):
"""Retrieve a list of transfers sent to you or your company
from other people.
:param age: between 1 and 90 days.
:param for_all: If ``True`` will return received files for
all users in the same business. (Available for business account
members only).
:type age: ``int``
:type for_all: ``bool``
:rtype: ``list`` of :class:`Transfer` objects.
"""
method, url = get_URL('received_get')
if age:
if not isinstance(age, int) or age < 0 or age > 90:
raise FMBaseError('Age must be <int> between 0-90')
past = datetime.utcnow() - timedelta(days=age)
age = timegm(past.utctimetuple())
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'getForAllUsers': for_all,
'from': age
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return self._restore_transfers(res)
hellraiser(res)
def _restore_transfers(self, response):
"""Restore transfers from josn retreived Filemail
:param response: response object from request
:rtype: ``list`` with :class:`Transfer` objects
"""
transfers = []
for transfer_data in response.json()['transfers']:
transfer = Transfer(self, _restore=True)
transfer.transfer_info.update(transfer_data)
transfer.get_files()
transfers.append(transfer)
return transfers
@login_required
def get_contacts(self):
"""Get contacts from Filemail. Usually people you've sent files
to in the past.
:rtype: ``list`` of ``dict`` objects containing contact information
"""
method, url = get_URL('contacts_get')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['contacts']
hellraiser(res)
@login_required
def get_contact(self, email):
"""Get Filemail contact based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
"""
contacts = self.get_contacts()
for contact in contacts:
if contact['email'] == email:
return contact
msg = 'No contact with email: "{email}" found.'
raise FMBaseError(msg.format(email=email))
@login_required
def update_contact(self, contact):
"""Update name and/or email for contact.
:param contact: with updated info
:type contact: ``dict``
:rtype: ``bool``
"""
if not isinstance(contact, dict):
raise AttributeError('contact must be a <dict>')
method, url = get_URL('contacts_update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactid': contact.get('contactid'),
'name': contact.get('name'),
'email': contact.get('email')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def add_contact(self, name, email):
"""Add new contact.
:param name: name of contact
:param email: email of contact
:type name: ``str``, ``unicode``
:type email: ``str``, ``unicode``
        :returns: contact information for the newly added contact
:rtype: ``dict``
"""
method, url = get_URL('contacts_add')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'name': name,
'email': email
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['contact']
hellraiser(res)
@login_required
def delete_contact(self, contact):
"""Delete contact.
:param contact: with `contactid`
:type contact: ``dict``
:rtype: ``bool``
"""
if not isinstance(contact, dict):
raise AttributeError('contact must be a <dict>')
method, url = get_URL('contacts_delete')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactid': contact.get('contactid')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def get_groups(self):
"""Get contact groups
:rtype: ``list`` of ``dict`` with group data
"""
method, url = get_URL('groups_get')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['groups']
hellraiser(res)
@login_required
def get_group(self, name):
"""Get contact group by name
:param name: name of group
:type name: ``str``, ``unicode``
:rtype: ``dict`` with group data
"""
groups = self.get_groups()
for group in groups:
if group['contactgroupname'] == name:
return group
msg = 'No group named: "{name}" found.'
raise FMBaseError(msg.format(name=name))
@login_required
def add_group(self, name):
"""Add new contact group
:param name: name of new group
:type name: ``str``, ``unicode``
:rtype: ``dict`` with group data
"""
method, url = get_URL('group_add')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'name': name
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['groups']
hellraiser(res)
@login_required
def delete_group(self, name):
"""Delete contact group
:param name: of group
:type name: ``str``, ``unicode``
:rtype: ``bool``
"""
group = self.get_group(name)
method, url = get_URL('group_delete')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactgroupid': group['contactgroupid']
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def rename_group(self, group, newname):
"""Rename contact group
:param group: group data or name of group
:param newname: of group
:type group: ``str``, ``unicode``, ``dict``
:type newname: ``str``, ``unicode``
:rtype: ``bool``
"""
if isinstance(group, basestring):
            group = self.get_group(group)
method, url = get_URL('group_update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactgroupid': group['contactgroupid'],
'name': newname
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def add_contact_to_group(self, contact, group):
"""Add contact to group
:param contact: name or contact object
:param group: name or group object
:type contact: ``str``, ``unicode``, ``dict``
:type group: ``str``, ``unicode``, ``dict``
:rtype: ``bool``
"""
if isinstance(contact, basestring):
contact = self.get_contact(contact)
if isinstance(group, basestring):
group = self.get_group(group)
method, url = get_URL('contacts_add_to_group')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactid': contact['contactid'],
'contactgroupid': group['contactgroupid']
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def remove_contact_from_group(self, contact, group):
"""Remove contact from group
:param contact: name or contact object
:param group: name or group object
:type contact: ``str``, ``unicode``, ``dict``
:type group: ``str``, ``unicode``, ``dict``
:rtype: ``bool``
"""
if isinstance(contact, basestring):
contact = self.get_contact(contact)
if isinstance(group, basestring):
group = self.get_group(group)
method, url = get_URL('contacts_remove_from_group')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactid': contact['contactid'],
'contactgroupid': group['contactgroupid']
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def get_company_info(self):
"""Get company settings from Filemail
:rtype: ``dict`` with company data
"""
method, url = get_URL('company_get')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['company']
hellraiser(res)
@login_required
def update_company(self, company):
"""Update company settings
:param company: updated settings
:type company: ``dict``
:rtype: ``bool``
"""
if not isinstance(company, dict):
raise AttributeError('company must be a <dict>')
method, url = get_URL('company_update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
payload.update(company)
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def get_company_users(self):
"""Get company users from Filemail
:rtype: ``list`` of ``dict`` with user data
"""
method, url = get_URL('company_get_users')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return res.json()['users']
hellraiser(res)
@login_required
def get_company_user(self, email):
"""Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
"""
users = self.get_company_users()
for user in users:
if user['email'] == email:
return user
msg = 'No user with email: "{email}" associated with this company.'
raise FMBaseError(msg.format(email=email))
@login_required
def company_add_user(self, email, name, password, receiver, admin):
"""Add a user to the company account.
:param email:
:param name:
:param password: Pass without storing in plain text
:param receiver: Can user receive files
:param admin:
:type email: ``str`` or ``unicode``
:type name: ``str`` or ``unicode``
:type password: ``str`` or ``unicode``
:type receiver: ``bool``
:type admin: ``bool``
:rtype: ``bool``
"""
method, url = get_URL('company_add_user')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'email': email,
'name': name,
'password': password,
'canreceivefiles': receiver,
'admin': admin
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
@login_required
def update_company_user(self, email, userdata):
"""Update a company users settings
:param email: current email address of user
:param userdata: updated settings
:type email: ``str`` or ``unicode``
:type userdata: ``dict``
:rtype: ``bool``
"""
if not isinstance(userdata, dict):
raise AttributeError('userdata must be a <dict>')
method, url = get_URL('company_update_user')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'useremail': email
}
payload.update(userdata)
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res)
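# A minimal usage sketch for the User class above, assuming a registered Filemail account
# and a valid API key in the pyfilemail config file. The address and password below are
# placeholders, not real credentials.
if __name__ == '__main__':
    user = User('yourfilemailuser@email.com', password='topsecretpassword')
    if user.logged_in:
        # List previously sent transfers and stored contacts, then end the session.
        sent = user.get_sent(expired=False)
        contacts = user.get_contacts()
        print('%d sent transfers, %d contacts' % (len(sent), len(contacts)))
        user.logout()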
|
|
"""Module with main classes related to Interfaces."""
import json
import logging
from enum import IntEnum
from pyof.v0x01.common.phy_port import PortFeatures as PortFeatures01
from pyof.v0x04.common.port import PortFeatures as PortFeatures04
from kytos.core.common import GenericEntity
from kytos.core.helpers import now
__all__ = ('Interface',)
LOG = logging.getLogger(__name__)
class TAGType(IntEnum):
"""Class that represents a TAG Type."""
VLAN = 1
VLAN_QINQ = 2
MPLS = 3
class TAG:
"""Class that represents a TAG."""
def __init__(self, tag_type, value):
self.tag_type = tag_type
self.value = value
def __eq__(self, other):
return self.tag_type == other.tag_type and self.value == other.value
class Interface(GenericEntity): # pylint: disable=too-many-instance-attributes
"""Interface Class used to abstract the network interfaces."""
# pylint: disable=too-many-arguments
def __init__(self, name, port_number, switch, address=None, state=None,
features=None, speed=None):
"""Assign the parameters to instance attributes.
Args:
name (string): name from this interface.
port_number (int): port number from this interface.
switch (:class:`~.core.switch.Switch`): Switch with this interface.
address (|hw_address|): Port address from this interface.
state (|port_stats|): Port Stat from interface. It will be
deprecated.
features (|port_features|): Port feature used to calculate link
utilization from this interface. It will be deprecated.
speed (int, float): Interface speed in bytes per second. Defaults
to what is informed by the switch. Return ``None`` if not set
and switch does not inform the speed.
"""
self.name = name
self.port_number = int(port_number)
self.switch = switch
self.address = address
self.state = state
self.features = features
self.nni = False
self.endpoints = []
self.stats = None
self.link = None
self._custom_speed = speed
self.available_tags = []
for i in range(1, 4096):
vlan = TAGType.VLAN
tag = TAG(vlan, i)
self.available_tags.append(tag)
super().__init__()
def __eq__(self, other):
"""Compare Interface class with another instance."""
if isinstance(other, str):
return self.address == other
elif isinstance(other, Interface):
return self.port_number == other.port_number and \
self.name == other.name and \
self.address == other.address and \
self.switch.dpid == other.switch.dpid
return False
@property
def id(self): # pylint: disable=invalid-name
"""Return id from Interface intance.
Returns:
string: Interface id.
"""
return "{}:{}".format(self.switch.dpid, self.port_number)
@property
def uni(self):
"""Return if an interface is a user-to-network Interface."""
return not self.nni
def enable(self):
"""Enable this interface instance.
Also enable the switch instance this interface is attached to.
"""
self.switch.enable()
self.enabled = True
def disable(self):
"""Disable this interface instance.
Also disable the link related to this interface.
"""
if self.link:
self.link.disable()
self.enabled = False
def use_tag(self, tag):
"""Remove a specific tag from available_tags if it is there."""
for available_tag in self.available_tags:
if tag == available_tag:
self.available_tags.remove(available_tag)
return True
return False
def is_tag_available(self, tag):
"""Check if a tag is available."""
return tag in self.available_tags
def get_next_available_tag(self):
"""Return the next available tag if exists."""
try:
return self.available_tags.pop()
except IndexError:
return False
def make_tag_available(self, tag):
"""Add a specific tag in available_tags."""
if not self.is_tag_available(tag):
self.available_tags.append(tag)
else:
return False
def get_endpoint(self, endpoint):
"""Return a tuple with existent endpoint, None otherwise.
Args:
endpoint(|hw_address|, :class:`.Interface`): endpoint instance.
Returns:
tuple: A tuple with endpoint and time of last update.
"""
for item in self.endpoints:
if endpoint == item[0]:
return item
return None
def add_endpoint(self, endpoint):
"""Create a new endpoint to Interface instance.
Args:
endpoint(|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if not exists:
self.endpoints.append((endpoint, now()))
def delete_endpoint(self, endpoint):
"""Delete a existent endpoint in Interface instance.
Args:
endpoint (|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if exists:
self.endpoints.remove(exists)
def update_endpoint(self, endpoint):
"""Update or create new endpoint to Interface instance.
Args:
endpoint(|hw_address|, :class:`.Interface`): A target endpoint.
"""
exists = self.get_endpoint(endpoint)
if exists:
self.delete_endpoint(endpoint)
self.add_endpoint(endpoint)
def update_link(self, link):
"""Update link for this interface in a consistent way.
        Verify whether the other endpoint of the link has the same Link information
attached to it, and change it if necessary.
Warning: This method can potentially change information of other
Interface instances. Use it with caution.
"""
if (link.endpoint_a != self and
link.endpoint_b != self):
return False
if self.link is None or self.link != link:
self.link = link
if link.endpoint_a == self:
endpoint = link.endpoint_b
else:
endpoint = link.endpoint_a
if endpoint.link is None or endpoint.link != link:
endpoint.link = link
@property
def speed(self):
"""Return the link speed in bytes per second, None otherwise.
If the switch was disconnected, we have :attr:`features` and speed is
still returned for common values between v0x01 and v0x04. For specific
v0x04 values (40 Gbps, 100 Gbps and 1 Tbps), the connection must be
active so we can make sure the protocol version is v0x04.
Returns:
int, None: Link speed in bytes per second or ``None``.
"""
if self._custom_speed is not None:
return self._custom_speed
return self.get_of_features_speed()
def set_custom_speed(self, bytes_per_second):
"""Set a speed that overrides switch OpenFlow information.
If ``None`` is given, :attr:`speed` becomes the one given by the
switch.
"""
self._custom_speed = bytes_per_second
def get_custom_speed(self):
"""Return custom speed or ``None`` if not set."""
return self._custom_speed
def get_of_features_speed(self):
"""Return the link speed in bytes per second, None otherwise.
If the switch was disconnected, we have :attr:`features` and speed is
still returned for common values between v0x01 and v0x04. For specific
v0x04 values (40 Gbps, 100 Gbps and 1 Tbps), the connection must be
active so we can make sure the protocol version is v0x04.
Returns:
int, None: Link speed in bytes per second or ``None``.
"""
speed = self._get_v0x01_v0x04_speed()
# Don't use switch.is_connected() because we can have the protocol
if speed is None and self._is_v0x04():
speed = self._get_v0x04_speed()
if speed is not None:
return speed
# Warn unknown speed
# Use shorter switch ID with its beginning and end
if isinstance(self.switch.id, str) and len(self.switch.id) > 20:
switch_id = self.switch.id[:3] + '...' + self.switch.id[-3:]
else:
switch_id = self.switch.id
LOG.warning("Couldn't get port %s speed, sw %s, feats %s",
self.port_number, switch_id, self.features)
def _is_v0x04(self):
"""Whether the switch is connected using OpenFlow 1.3."""
return self.switch.is_connected() and \
self.switch.connection.protocol.version == 0x04
def _get_v0x01_v0x04_speed(self):
"""Check against all values of v0x01. They're part of v0x04."""
fts = self.features
pfts = PortFeatures01
if fts and fts & pfts.OFPPF_10GB_FD:
return 10 * 10**9 / 8
elif fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):
return 10**9 / 8
elif fts and fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):
return 100 * 10**6 / 8
elif fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):
return 10 * 10**6 / 8
def _get_v0x04_speed(self):
"""Check against higher enums of v0x04.
        Must be called after :meth:`_get_v0x01_v0x04_speed` returns ``None``.
"""
fts = self.features
pfts = PortFeatures04
if fts and fts & pfts.OFPPF_1TB_FD:
return 10**12 / 8
elif fts and fts & pfts.OFPPF_100GB_FD:
return 100 * 10**9 / 8
elif fts and fts & pfts.OFPPF_40GB_FD:
return 40 * 10**9 / 8
def get_hr_speed(self):
"""Return Human-Readable string for link speed.
Returns:
string: String with link speed. e.g: '350 Gbps' or '350 Mbps'.
"""
speed = self.speed
if speed is None:
return ''
speed *= 8
if speed == 10**12:
return '1 Tbps'
if speed >= 10**9:
return '{} Gbps'.format(round(speed / 10**9))
return '{} Mbps'.format(round(speed / 10**6))
def as_dict(self):
"""Return a dictionary with Interface attributes.
Speed is in bytes/sec. Example of output (100 Gbps):
.. code-block:: python3
{'id': '00:00:00:00:00:00:00:01:2',
'name': 'eth01',
'port_number': 2,
'mac': '00:7e:04:3b:c2:a6',
'switch': '00:00:00:00:00:00:00:01',
'type': 'interface',
'nni': False,
'uni': True,
'speed': 12500000000,
'metadata': {},
'active': True,
'enabled': False,
'link': ""
}
Returns:
dict: Dictionary filled with interface attributes.
"""
iface_dict = {'id': self.id,
'name': self.name,
'port_number': self.port_number,
'mac': self.address,
'switch': self.switch.dpid,
'type': 'interface',
'nni': self.nni,
'uni': self.uni,
'speed': self.speed,
'metadata': self.metadata,
'active': self.active,
'enabled': self.enabled,
'link': self.link.id if self.link else ""}
if self.stats:
iface_dict['stats'] = self.stats.as_dict()
return iface_dict
def as_json(self):
"""Return a json with Interfaces attributes.
Example of output:
.. code-block:: json
{"mac": "00:7e:04:3b:c2:a6",
"switch": "00:00:00:00:00:00:00:01",
"type": "interface",
"name": "eth01",
"id": "00:00:00:00:00:00:00:01:2",
"port_number": 2,
"speed": "350 Mbps"}
Returns:
string: Json filled with interface attributes.
"""
return json.dumps(self.as_dict())
class UNI:
"""Class that represents an User-to-Network Interface."""
def __init__(self, interface, user_tag=None):
self.user_tag = user_tag
self.interface = interface
def is_valid(self):
"""Check if TAG is possible for this interface TAG pool."""
if self.user_tag:
return self.interface.is_tag_available(self.user_tag)
return True
class NNI:
"""Class that represents an Network-to-Network Interface."""
def __init__(self, interface):
self.interface = interface
class VNNI(NNI):
"""Class that represents an Virtual Network-to-Network Interface."""
def __init__(self, service_tag, *args, **kwargs):
self.service_tag = service_tag
super().__init__(*args, **kwargs)
|
|
#!/usr/bin/python
# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: packet_sshkey
short_description: Create/delete an SSH key in Packet host.
description:
- Create/delete an SSH key in Packet host.
- API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
version_added: "2.3"
author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
auth_token:
description:
- Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
label:
description:
- Label for the key. If you keep it empty, it will be read from key string.
id:
description:
- UUID of the key which you want to remove.
fingerprint:
description:
- Fingerprint of the key which you want to remove.
key:
description:
- Public Key string ({type} {base64 encoded key} {description}).
key_file:
description:
- File with the public key.
requirements:
- "python >= 2.6"
- packet-python
'''
EXAMPLES = '''
# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
# You can also pass the api token in module param auth_token.
- name: create sshkey from string
hosts: localhost
tasks:
packet_sshkey:
key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
- name: create sshkey from file
hosts: localhost
tasks:
packet_sshkey:
label: key from file
key_file: ~/ff.pub
- name: remove sshkey by id
hosts: localhost
tasks:
packet_sshkey:
state: absent
id: eef49903-7a09-4ca1-af67-4087c29ab5b6
'''
RETURN = '''
changed:
description: True if a sshkey was created or removed.
type: bool
sample: True
returned: always
sshkeys:
    description: Information about sshkeys that were created/removed.
type: list
sample: [
{
"fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
"id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
"key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
"label": "mynewkey33"
}
]
returned: always
''' # NOQA
import os
import uuid
from ansible.module_utils.basic import AnsibleModule
HAS_PACKET_SDK = True
try:
import packet
except ImportError:
HAS_PACKET_SDK = False
PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
def serialize_sshkey(sshkey):
sshkey_data = {}
copy_keys = ['id', 'key', 'label','fingerprint']
for name in copy_keys:
sshkey_data[name] = getattr(sshkey, name)
return sshkey_data
def is_valid_uuid(myuuid):
try:
val = uuid.UUID(myuuid, version=4)
except ValueError:
return False
return str(val) == myuuid
def load_key_string(key_str):
ret_dict = {}
key_str = key_str.strip()
ret_dict['key'] = key_str
cut_key = key_str.split()
if len(cut_key) in [2,3]:
if len(cut_key) == 3:
ret_dict['label'] = cut_key[2]
else:
raise Exception("Public key %s is in wrong format" % key_str)
return ret_dict
def get_sshkey_selector(module):
key_id = module.params.get('id')
if key_id:
if not is_valid_uuid(key_id):
raise Exception("sshkey ID %s is not valid UUID" % key_id)
selecting_fields = ['label', 'fingerprint', 'id', 'key']
select_dict = {}
for f in selecting_fields:
if module.params.get(f) is not None:
select_dict[f] = module.params.get(f)
if module.params.get('key_file'):
with open(module.params.get('key_file')) as _file:
loaded_key = load_key_string(_file.read())
select_dict['key'] = loaded_key['key']
if module.params.get('label') is None:
if loaded_key.get('label'):
select_dict['label'] = loaded_key['label']
def selector(k):
if 'key' in select_dict:
# if key string is specified, compare only the key strings
return k.key == select_dict['key']
else:
# if key string not specified, all the fields must match
return all([select_dict[f] == getattr(k,f) for f in select_dict])
return selector
def act_on_sshkeys(target_state, module, packet_conn):
selector = get_sshkey_selector(module)
existing_sshkeys = packet_conn.list_ssh_keys()
matching_sshkeys = filter(selector, existing_sshkeys)
changed = False
if target_state == 'present':
if matching_sshkeys == []:
# there is no key matching the fields from module call
            # => create the key; both a label and a key string are required
newkey = {}
if module.params.get('key_file'):
with open(module.params.get('key_file')) as f:
newkey = load_key_string(f.read())
if module.params.get('key'):
newkey = load_key_string(module.params.get('key'))
if module.params.get('label'):
newkey['label'] = module.params.get('label')
for param in ('label', 'key'):
if param not in newkey:
_msg=("If you want to ensure a key is present, you must "
"supply both a label and a key string, either in "
"module params, or in a key file. %s is missing"
% param)
raise Exception(_msg)
matching_sshkeys = []
new_key_response = packet_conn.create_ssh_key(
newkey['label'], newkey['key'])
changed = True
matching_sshkeys.append(new_key_response)
else:
        # state is 'absent' => delete matching keys
for k in matching_sshkeys:
try:
k.delete()
changed = True
except Exception as e:
_msg = ("while trying to remove sshkey %s, id %s %s, "
"got error: %s" %
(k.label, k.id, target_state, e))
raise Exception(_msg)
return {
'changed': changed,
'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
}
def main():
module = AnsibleModule(
argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
no_log=True),
label=dict(type='str', aliases=['name'], default=None),
id=dict(type='str', default=None),
fingerprint=dict(type='str', default=None),
key=dict(type='str', default=None, no_log=True),
key_file=dict(type='path', default=None),
),
mutually_exclusive=[
('label', 'id'),
('label', 'fingerprint'),
('id', 'fingerprint'),
('key', 'fingerprint'),
('key', 'id'),
('key_file', 'key'),
]
)
if not HAS_PACKET_SDK:
module.fail_json(msg='packet required for this module')
if not module.params.get('auth_token'):
_fail_msg = ( "if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)
auth_token = module.params.get('auth_token')
packet_conn = packet.Manager(auth_token=auth_token)
state = module.params.get('state')
if state in ['present','absent']:
try:
module.exit_json(**act_on_sshkeys(state, module, packet_conn))
except Exception as e:
module.fail_json(msg='failed to set sshkey state: %s' % str(e))
else:
module.fail_json(msg='%s is not a valid state for this module' % state)
if __name__ == '__main__':
main()
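# A small, hypothetical illustration of the helpers above: load_key_string() keeps the
# whole key string and treats an optional third whitespace-separated field as the label,
# and is_valid_uuid() accepts only canonical UUIDs such as the id in the EXAMPLES block.
#
#   load_key_string('ssh-rsa AAAAB3NzaC1yc2E user@example')
#   # -> {'key': 'ssh-rsa AAAAB3NzaC1yc2E user@example', 'label': 'user@example'}
#   is_valid_uuid('eef49903-7a09-4ca1-af67-4087c29ab5b6')
#   # -> True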
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v1
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch_size, 1, 1, channels]))
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43],
[43, 84]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.to_float([[48, 37],
[37, 22]])
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = dict(tf.get_collection('end_points'))
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
bottleneck = resnet_v1.bottleneck
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
depth, depth_bottleneck, stride = unit
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net,
depth=depth,
depth_bottleneck=depth_bottleneck,
stride=stride,
rate=1)
return net
def _atrousValues(self, bottleneck):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
Args:
bottleneck: The bottleneck function.
"""
blocks = [
resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
def testAtrousValuesBottleneck(self):
self._atrousValues(resnet_v1.bottleneck)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
bottleneck = resnet_v1.bottleneck
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(32, 8, 1)] * 2)]
return resnet_v1.resnet_v1(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
init_app(set_backends=True, routes=False)
populate_conferences()
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
},
}
def populate_conferences():
for meeting, attrs in MEETING_DATA.iteritems():
meeting = meeting.strip()
admin_emails = attrs.pop('admins')
admin_objs = []
for email in admin_emails:
try:
user = User.find_one(Q('username', 'iexact', email))
admin_objs.append(user)
except ModularOdmException:
raise RuntimeError('Username {0!r} is not registered.'.format(email))
conf = Conference(
endpoint=meeting, admins=admin_objs, **attrs
)
try:
conf.save()
except ModularOdmException:
print('{0} Conference already exists. Updating existing record...'.format(meeting))
conf = Conference.find_one(Q('endpoint', 'eq', meeting))
for key, value in attrs.items():
setattr(conf, key, value)
conf.admins = admin_objs
changed_fields = conf.save()
if changed_fields:
print('Changed: {}'.format(changed_fields))
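# Illustrative sketch (not part of the original data set): registering an
# additional meeting only requires a new MEETING_DATA entry; on the first run
# populate_conferences() creates the Conference, and on later runs it falls
# into the except branch and updates the existing record in place. The
# endpoint key and admin address below are hypothetical placeholders.
#
#     MEETING_DATA['example2015'] = {
#         'name': 'Example Society Annual Meeting 2015',
#         'info_url': None,
#         'logo_url': None,
#         'active': True,
#         'admins': ['organizer@example.org'],  # must be a registered OSF username
#         'public_projects': True,
#     }
#     populate_conferences()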
if __name__ == '__main__':
main()
|
|
from sympy import Symbol, exp, Integer, Real, sin, cos, log, Poly, Lambda, \
Function, I, S, sqrt, srepr, Rational
from sympy.abc import x, y
from sympy.core.sympify import sympify, _sympify, SympifyError
from sympy.core.decorators import _sympifyit
from sympy.utilities.pytest import raises
def test_439():
v = sympify("exp(x)")
x = Symbol("x")
assert v == exp(x)
assert type(v) == type(exp(x))
assert str(type(v)) == str(type(exp(x)))
def test_sympify1():
assert sympify("x") == Symbol("x")
assert sympify(" x") == Symbol("x")
assert sympify(" x ") == Symbol("x")
# 1778
n1 = Rational(1, 2)
assert sympify('--.5') == n1
assert sympify('-1/2') == -n1
assert sympify('-+--.5') == -n1
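    # bracketed digits denote a repeating decimal, e.g. '.[3]' is 0.333... == 1/3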
assert sympify('-.[3]') == Rational(-1, 3)
assert sympify('.[3]') == Rational(1, 3)
assert sympify('+.[3]') == Rational(1, 3)
assert sympify('+0.[3]*10**-2') == Rational(1, 300)
# options to make reals into rationals
assert sympify('1.22[345]', rational=1) == \
1 + Rational(22, 100) + Rational(345, 99900)
assert sympify('2/2.6', rational=1) == Rational(10, 13)
assert sympify('2.6/2', rational=1) == Rational(13, 10)
assert sympify('2.6e2/17', rational=1) == Rational(260, 17)
assert sympify('2.6e+2/17', rational=1) == Rational(260, 17)
assert sympify('2.6e-2/17', rational=1) == Rational(26, 17000)
assert sympify('2.1+3/4', rational=1) == Rational(21, 10) + Rational(3, 4)
assert sympify('2.234456', rational=1) == Rational(279307, 125000)
assert sympify('2.234456e23', rational=1) == 223445600000000000000000
assert sympify('2.234456e-23', rational=1) == Rational(279307, 12500000000000000000000000000)
assert sympify('-2.234456e-23', rational=1) == Rational(-279307, 12500000000000000000000000000)
assert sympify('12345678901/17', rational=1) == Rational(12345678901, 17)
assert sympify('1/.3 + x', rational=1) == Rational(10, 3) + x
# make sure longs in fractions work
assert sympify('222222222222/11111111111') == Rational(222222222222, 11111111111)
# ... even if they come from repetend notation
assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967)
# ... or from high precision reals
assert sympify('.1234567890123456', rational=1) == Rational(19290123283179, 156250000000000)
# sympify fractions.Fraction instances
try:
import fractions
assert sympify(fractions.Fraction(1, 2)) == Rational(1, 2)
except ImportError:
pass
def test_sympify2():
class A:
def _sympy_(self):
return Symbol("x")**3
a = A()
    assert _sympify(a) == x**3
assert sympify(a) == x**3
assert a == x**3
def test_sympify3():
assert sympify("x**3") == x**3
assert sympify("x^3") == x**3
assert sympify("1/2") == Integer(1)/2
raises(SympifyError, "_sympify('x**3')")
raises(SympifyError, "_sympify('1/2')")
def test_sympify_bool():
"""Test that sympify accepts boolean values
and that output leaves them unchanged"""
assert sympify(True) == True
    assert sympify(False) == False
def test_sympyify_iterables():
ans = [Rational(3, 10), Rational(1, 5)]
assert sympify(['.3', '.2'], rational=1) == ans
assert sympify(set(['.3', '.2']), rational=1) == set(ans)
assert sympify(tuple(['.3', '.2']), rational=1) == tuple(ans)
def test_sympify4():
class A:
def _sympy_(self):
return Symbol("x")
a = A()
    assert _sympify(a)**3 == x**3
assert sympify(a)**3 == x**3
assert a == x
def test_sympify_text():
assert sympify('some') == Symbol('some')
assert sympify('core') == Symbol('core')
assert sympify('True') == True
assert sympify('False') == False
assert sympify('Poly') == Poly
assert sympify('sin') == sin
def test_sympify_function():
assert sympify('factor(x**2-1, x)') == -(1-x)*(x+1)
assert sympify('sin(pi/2)*cos(pi)') == -Integer(1)
def test_sympify_poly():
p = Poly(x**2+x+1, x)
assert _sympify(p) is p
assert sympify(p) is p
def test_sage():
    # how to effectively test for the _sage_() method without having SAGE
# installed?
assert hasattr(x, "_sage_")
assert hasattr(Integer(3), "_sage_")
assert hasattr(sin(x), "_sage_")
assert hasattr(cos(x), "_sage_")
assert hasattr(x**2, "_sage_")
assert hasattr(x+y, "_sage_")
assert hasattr(exp(x), "_sage_")
assert hasattr(log(x), "_sage_")
def test_bug496():
a_ = sympify("a_")
_a = sympify("_a")
def test_lambda():
x = Symbol('x')
assert sympify('lambda : 1') == Lambda(x, 1)
assert sympify('lambda x: 2*x') == Lambda(x, 2*x)
assert sympify('lambda x, y: 2*x+y') == Lambda([x, y], 2*x+y)
raises(SympifyError, "_sympify('lambda : 1')")
def test_sympify_raises():
raises(SympifyError, 'sympify("fx)")')
def test__sympify():
x = Symbol('x')
f = Function('f')
# positive _sympify
assert _sympify(x) is x
assert _sympify(f) is f
assert _sympify(1) == Integer(1)
assert _sympify(0.5) == Real("0.5")
assert _sympify(1+1j) == 1 + I
class A:
def _sympy_(self):
return Integer(5)
a = A()
assert _sympify(a) == Integer(5)
# negative _sympify
raises(SympifyError, "_sympify('1')")
raises(SympifyError, "_sympify([1,2,3])")
def test_sympifyit():
x = Symbol('x')
y = Symbol('y')
@_sympifyit('b', NotImplemented)
def add(a, b):
return a+b
assert add(x, 1) == x+1
assert add(x, 0.5) == x+Real('0.5')
assert add(x, y) == x+y
assert add(x, '1') == NotImplemented
@_sympifyit('b')
def add_raises(a, b):
return a+b
assert add_raises(x, 1) == x+1
assert add_raises(x, 0.5) == x+Real('0.5')
assert add_raises(x, y) == x+y
raises(SympifyError, "add_raises(x, '1')")
def test_int_float():
class F1_1(object):
def __float__(self):
return 1.1
class F1_1b(object):
"""
This class is still a float, even though it also implements __int__().
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
class F1_1c(object):
"""
        This class is still a float, because its _sympy_() returns Real(1.1).
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
def _sympy_(self):
return Real(1.1)
class I5(object):
def __int__(self):
return 5
class I5b(object):
"""
This class implements both __int__() and __float__(), so it will be
        treated as Real in SymPy. One could change this behavior by checking
        whether float(a) == int(a), but deciding that integer-valued floats represent
exact numbers is arbitrary and often not correct, so we do not do it.
If, in the future, we decide to do it anyway, the tests for I5b need to
be changed.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
class I5c(object):
"""
This class implements both __int__() and __float__(), but also
a _sympy_() method, so it will be Integer.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
def _sympy_(self):
return Integer(5)
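    # Conversion precedence exercised by the assertions below: _sympy_() wins
    # when present; otherwise __float__() yields a Real; a bare __int__()
    # yields an Integer.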
i5 = I5()
i5b = I5b()
i5c = I5c()
f1_1 = F1_1()
f1_1b = F1_1b()
f1_1c = F1_1c()
assert sympify(i5) == 5
assert isinstance(sympify(i5), Integer)
assert sympify(i5b) == 5
assert isinstance(sympify(i5b), Real)
assert sympify(i5c) == 5
assert isinstance(sympify(i5c), Integer)
assert abs(sympify(f1_1) - 1.1) < 1e-5
assert abs(sympify(f1_1b) - 1.1) < 1e-5
assert abs(sympify(f1_1c) - 1.1) < 1e-5
assert _sympify(i5) == 5
assert isinstance(_sympify(i5), Integer)
assert _sympify(i5b) == 5
assert isinstance(_sympify(i5b), Real)
assert _sympify(i5c) == 5
assert isinstance(_sympify(i5c), Integer)
assert abs(_sympify(f1_1) - 1.1) < 1e-5
assert abs(_sympify(f1_1b) - 1.1) < 1e-5
assert abs(_sympify(f1_1c) - 1.1) < 1e-5
def test_issue1034():
a = sympify('Integer(4)')
assert a == Integer(4)
assert a.is_Integer
def test_issue883():
    a = [3, 2.0]
assert sympify(a) == [Integer(3), Real(2.0)]
assert sympify(tuple(a)) == (Integer(3), Real(2.0))
assert sympify(set(a)) == set([Integer(3), Real(2.0)])
def test_S_sympify():
assert S(1)/2 == sympify(1)/2
assert (-2)**(S(1)/2) == sqrt(2)*I
def test_issue1689():
assert srepr(S(1.0+0J)) == srepr(S(1.0)) == srepr(Real(1.0))
assert srepr(Real(1)) != srepr(Real(1.0))
def test_issue1699_None():
assert S(None) == None
def test_issue1889_Builtins():
C = Symbol('C')
vars = {}
vars['C'] = C
exp1 = sympify('C')
    assert exp1 == C  # Make sure it did not get mixed up with sympy.C
exp2 = sympify('C', vars)
    assert exp2 == C  # Make sure it did not get mixed up with sympy.C
|
|
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import re
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import utils as test_utils
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \
as storops
from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc.vnx import adapter
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils
class TestCommonAdapter(test.TestCase):
def setUp(self):
super(TestCommonAdapter, self).setUp()
self.configuration = conf.Configuration(None)
vnx_utils.init_ops(self.configuration)
self.configuration.san_ip = '192.168.1.1'
self.configuration.storage_vnx_authentication_type = 'global'
self.ctxt = context.get_admin_context()
def tearDown(self):
super(TestCommonAdapter, self).tearDown()
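    # Note on the decorators used throughout this class (behavior inferred
    # from usage; see res_mock for the authoritative definitions):
    # patch_common_adapter injects a patched CommonAdapter together with its
    # mocked storops resources, and mock_driver_input additionally supplies a
    # dict of faked Cinder objects (volumes, snapshots, groups) keyed by name.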
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_volume(self, vnx_common, _ignore, mocked_input):
volume = mocked_input['volume']
volume.host.split('#')[1]
model_update = vnx_common.create_volume(volume)
self.assertEqual('False', model_update.get('metadata')['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_volume_error(self, vnx_common, _ignore, mocked_input):
self.assertRaises(storops_ex.VNXCreateLunError,
vnx_common.create_volume,
mocked_input['volume'])
@utils.patch_extra_specs({'provisioning:type': 'thick'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_thick_volume(self, vnx_common, _ignore, mocked_input):
volume = mocked_input['volume']
expected_pool = volume.host.split('#')[1]
vnx_common.create_volume(volume)
vnx_common.client.vnx.get_pool.assert_called_with(
name=expected_pool)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_migrate_volume(self, vnx_common, mocked, cinder_input):
volume = cinder_input['volume']
host = {'capabilities':
{'location_info': 'pool_name|fake_serial',
'storage_protocol': 'iscsi'},
'host': 'hostname@backend_name#pool_name'}
vnx_common.serial_number = 'fake_serial'
migrated, update = vnx_common.migrate_volume(None, volume, host)
self.assertTrue(migrated)
self.assertEqual('False', update['metadata']['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_migrate_volume_host_assisted(self, vnx_common, mocked,
cinder_input):
volume1 = cinder_input['volume']
host = {
'capabilities': {
'location_info': 'pool_name|fake_serial',
'storage_protocol': 'iscsi'},
'host': 'hostname@backend_name#pool_name'}
vnx_common.serial_number = 'new_serial'
migrated, update = vnx_common.migrate_volume(None, volume1, host)
self.assertFalse(migrated)
self.assertIsNone(update)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_cloned_volume(
self, vnx_common, mocked, cinder_input):
volume = cinder_input['volume']
src_vref = cinder_input['src_vref']
model_update = vnx_common.create_cloned_volume(volume, src_vref)
self.assertEqual('False', model_update['metadata']['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_cloned_volume_snapcopy(
self, vnx_common, mocked, cinder_input):
volume = cinder_input['volume']
volume.metadata = {'snapcopy': 'True'}
src_vref = cinder_input['src_vref']
model_update = vnx_common.create_cloned_volume(volume, src_vref)
self.assertEqual('True', model_update['metadata']['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_volume_from_snapshot(
self, vnx_common, mocked, cinder_input):
volume = cinder_input['volume']
volume['metadata'] = {'async_migrate': 'False'}
snapshot = cinder_input['snapshot']
snapshot.volume = volume
update = vnx_common.create_volume_from_snapshot(volume, snapshot)
self.assertEqual('False', update['metadata']['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_volume_from_snapshot_snapcopy(
self, vnx_common, mocked, cinder_input):
volume = cinder_input['volume']
volume.metadata = {'snapcopy': 'True'}
snapshot = cinder_input['snapshot']
snapshot.volume = volume
update = vnx_common.create_volume_from_snapshot(volume, snapshot)
self.assertEqual('True', update['metadata']['snapcopy'])
@res_mock.patch_common_adapter
def test_create_cg_from_cgsnapshot(self, common, _):
common.do_create_cg_from_cgsnap = mock.Mock(
return_value='fake_return')
new_cg = test_utils.create_consistencygroup(
self.ctxt,
id=fake_constants.CONSISTENCY_GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=fake_constants.VOLUME_TYPE_ID)
cg_snapshot = test_utils.create_cgsnapshot(
self.ctxt,
fake_constants.CONSISTENCY_GROUP2_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
vol_new = test_utils.create_volume(self.ctxt)
ret = common.create_cg_from_cgsnapshot(
None, new_cg, [vol_new], cg_snapshot, snaps)
self.assertEqual('fake_return', ret)
common.do_create_cg_from_cgsnap.assert_called_once_with(
new_cg.id, new_cg.host, [vol_new], cg_snapshot.id, snaps)
@res_mock.patch_common_adapter
def test_create_group_from_group_snapshot(self, common, _):
common.do_create_cg_from_cgsnap = mock.Mock(
return_value='fake_return')
group = test_utils.create_group(
self.ctxt,
id=fake_constants.CONSISTENCY_GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=fake_constants.VOLUME_TYPE_ID)
group_snapshot = test_utils.create_group_snapshot(
self.ctxt,
fake_constants.CGSNAPSHOT_ID,
group_type_id=fake_constants.VOLUME_TYPE_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
vol_new = test_utils.create_volume(self.ctxt)
ret = common.create_group_from_group_snapshot(
None, group, [vol_new], group_snapshot, snaps)
self.assertEqual('fake_return', ret)
common.do_create_cg_from_cgsnap.assert_called_once_with(
group.id, group.host, [vol_new], group_snapshot.id, snaps)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_do_create_cg_from_cgsnap(
self, vnx_common, mocked, cinder_input):
cg_id = fake_constants.CONSISTENCY_GROUP_ID
cg_host = 'host@backend#unit_test_pool'
volumes = [cinder_input['vol1']]
cgsnap_id = fake_constants.CGSNAPSHOT_ID
snaps = [cinder_input['snap1']]
model_update, volume_updates = (
vnx_common.do_create_cg_from_cgsnap(
cg_id, cg_host, volumes, cgsnap_id, snaps))
self.assertIsNone(model_update)
self.assertIsNotNone(
re.findall('id^12',
volume_updates[0]['provider_location']))
@res_mock.patch_common_adapter
def test_create_cloned_cg(self, common, _):
common.do_clone_cg = mock.Mock(
return_value='fake_return')
group = test_utils.create_consistencygroup(
self.ctxt,
id=fake_constants.CONSISTENCY_GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=fake_constants.VOLUME_TYPE_ID)
src_group = test_utils.create_consistencygroup(
self.ctxt,
id=fake_constants.CONSISTENCY_GROUP2_ID,
host='host@backend#unit_test_pool2',
group_type_id=fake_constants.VOLUME_TYPE_ID)
vol = test_utils.create_volume(self.ctxt)
src_vol = test_utils.create_volume(self.ctxt)
ret = common.create_cloned_group(
None, group, [vol], src_group, [src_vol])
self.assertEqual('fake_return', ret)
common.do_clone_cg.assert_called_once_with(
group.id, group.host, [vol], src_group.id, [src_vol])
@res_mock.patch_common_adapter
def test_create_cloned_group(self, common, _):
common.do_clone_cg = mock.Mock(
return_value='fake_return')
group = test_utils.create_group(
self.ctxt,
id=fake_constants.GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=fake_constants.VOLUME_TYPE_ID)
src_group = test_utils.create_group(
self.ctxt,
id=fake_constants.GROUP2_ID,
host='host@backend#unit_test_pool2',
group_type_id=fake_constants.VOLUME_TYPE_ID)
vol = test_utils.create_volume(self.ctxt)
src_vol = test_utils.create_volume(self.ctxt)
ret = common.create_cloned_group(
None, group, [vol], src_group, [src_vol])
self.assertEqual('fake_return', ret)
common.do_clone_cg.assert_called_once_with(
group.id, group.host, [vol], src_group.id, [src_vol])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_do_clone_cg(self, vnx_common, _, cinder_input):
cg_id = fake_constants.CONSISTENCY_GROUP_ID
cg_host = 'host@backend#unit_test_pool'
volumes = [cinder_input['vol1']]
src_cg_id = fake_constants.CONSISTENCY_GROUP2_ID
src_volumes = [cinder_input['src_vol1']]
model_update, volume_updates = vnx_common.do_clone_cg(
cg_id, cg_host, volumes, src_cg_id, src_volumes)
self.assertIsNone(model_update)
self.assertIsNotNone(
re.findall('id^12',
volume_updates[0]['provider_location']))
@res_mock.patch_common_adapter
def test_parse_pools(self, vnx_common, mocked):
vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool6']
parsed = vnx_common.parse_pools()
self.assertEqual(
len(vnx_common.config.storage_vnx_pool_names),
len(parsed))
pools = vnx_common.client.get_pools()
self.assertEqual(pools, parsed)
@res_mock.patch_common_adapter
def test_parse_pools_one_invalid_pool(self, vnx_common, mocked):
vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool7']
parsed = vnx_common.parse_pools()
pools = vnx_common.client.get_pools()
self.assertIn(parsed[0], pools)
@res_mock.patch_common_adapter
def test_parse_pools_all_invalid_pools(self, vnx_common, mocked):
vnx_common.config.storage_vnx_pool_names = ['pool7', 'pool8']
self.assertRaises(exception.VolumeBackendAPIException,
vnx_common.parse_pools)
@res_mock.patch_common_adapter
def test_get_enabler_stats(self, vnx_common, mocked):
stats = vnx_common.get_enabler_stats()
self.assertTrue(stats['compression_support'])
self.assertTrue(stats['fast_support'])
self.assertTrue(stats['deduplication_support'])
self.assertTrue(stats['thin_provisioning_support'])
self.assertTrue(stats['consistencygroup_support'])
@res_mock.patch_common_adapter
def test_get_pool_stats(self, vnx_common, mocked):
pools = vnx_common.client.vnx.get_pool()
vnx_common.config.storage_vnx_pool_names = [
pool.name for pool in pools]
stats = {
'compression_support': True,
'fast_support': True,
'deduplication_support': True,
'thin_provisioning_support': True,
'consistencygroup_support': True,
}
pool_stats = vnx_common.get_pool_stats(stats)
self.assertEqual(2, len(pool_stats))
for stat in pool_stats:
self.assertTrue(stat['fast_cache_enabled'])
self.assertIn(stat['pool_name'], [pools[0].name,
pools[1].name])
self.assertFalse(stat['replication_enabled'])
self.assertEqual([], stat['replication_targets'])
@res_mock.patch_common_adapter
def test_get_pool_stats_offline(self, vnx_common, mocked):
vnx_common.config.storage_vnx_pool_names = []
pool_stats = vnx_common.get_pool_stats()
for stat in pool_stats:
self.assertTrue(stat['fast_cache_enabled'])
self.assertEqual(0, stat['free_capacity_gb'])
@res_mock.patch_common_adapter
def test_get_pool_stats_max_luns_reached(self, vnx_common, mocked):
pools = vnx_common.client.vnx.get_pool()
vnx_common.config.storage_vnx_pool_names = [
pool.name for pool in pools]
stats = {
'compression_support': True,
'fast_support': True,
'deduplication_support': True,
'thin_provisioning_support': True,
'consistencygroup_support': True,
}
pool_stats = vnx_common.get_pool_stats(stats)
for stat in pool_stats:
self.assertTrue(stat['fast_cache_enabled'])
self.assertEqual(0, stat['free_capacity_gb'])
@res_mock.patch_common_adapter
def test_get_pool_stats_with_reserved(self, vnx_common, mocked):
pools = vnx_common.client.vnx.get_pool()
vnx_common.config.storage_vnx_pool_names = [
pool.name for pool in pools]
stats = {
'compression_support': True,
'fast_support': True,
'deduplication_support': True,
'thin_provisioning_support': True,
'consistencygroup_support': True,
}
vnx_common.reserved_percentage = 15
pool_stats = vnx_common.get_pool_stats(stats)
for stat in pool_stats:
self.assertTrue(stat['fast_cache_enabled'])
self.assertIsNot(0, stat['free_capacity_gb'])
self.assertEqual(15, stat['reserved_percentage'])
@res_mock.patch_common_adapter
def test_update_volume_stats(self, vnx_common, mocked):
with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'):
stats = vnx_common.update_volume_stats()
pools_stats = stats['pools']
for stat in pools_stats:
self.assertFalse(stat['replication_enabled'])
self.assertEqual([], stat['replication_targets'])
@res_mock.patch_common_adapter
def test_append_volume_stats(self, vnx_common, mocked):
device = utils.get_replication_device()
vnx_common.config.replication_device = [device]
vnx_common.mirror_view = utils.build_fake_mirror_view()
stats = {}
vnx_common.append_replication_stats(stats)
self.assertTrue(stats['replication_enabled'])
self.assertEqual(1, stats['replication_count'])
self.assertEqual(['sync'], stats['replication_type'])
self.assertEqual([device['backend_id']],
stats['replication_targets'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_volume_not_force(self, vnx_common, mocked, mocked_input):
vnx_common.force_delete_lun_in_sg = False
volume = mocked_input['volume']
volume['metadata'] = {'async_migrate': 'False'}
vnx_common.delete_volume(volume)
lun = vnx_common.client.vnx.get_lun()
lun.delete.assert_called_with(force_detach=True, detach_from_sg=False)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_volume_force(self, vnx_common, mocked, mocked_input):
vnx_common.force_delete_lun_in_sg = True
volume = mocked_input['volume']
volume['metadata'] = {'async_migrate': 'False'}
vnx_common.delete_volume(volume)
lun = vnx_common.client.vnx.get_lun()
lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_async_volume(self, vnx_common, mocked, mocked_input):
volume = mocked_input['volume']
volume.metadata = {'async_migrate': 'True'}
vnx_common.force_delete_lun_in_sg = True
vnx_common.delete_volume(volume)
lun = vnx_common.client.vnx.get_lun()
lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_async_volume_migrating(self, vnx_common, mocked,
mocked_input):
volume = mocked_input['volume']
volume.metadata = {'async_migrate': 'True'}
vnx_common.force_delete_lun_in_sg = True
vnx_common.client.cleanup_async_lun = mock.Mock()
vnx_common.delete_volume(volume)
lun = vnx_common.client.vnx.get_lun()
lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
@utils.patch_extra_specs_validate(side_effect=exception.InvalidVolumeType(
reason='fake_reason'))
@res_mock.patch_common_adapter
def test_retype_type_invalid(self, vnx_common, mocked):
self.assertRaises(exception.InvalidVolumeType,
vnx_common.retype,
None, None,
{'extra_specs': 'fake_spec'},
None, None)
@mock.patch.object(client.Client, 'get_vnx_enabler_status')
@utils.patch_extra_specs_validate(return_value=True)
@utils.patch_extra_specs({'storagetype:tiering': 'auto',
'provisioning:type': 'thin'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_retype_need_migration(
self, vnx_common, mocked, driver_in,
enabler_status):
new_type = {
'extra_specs': {'provisioning:type': 'deduplicated',
'storagetype:tiering': 'starthighthenauto'}}
volume = driver_in['volume']
host = driver_in['host']
fake_migrate_return = (True, ['fake_model_update'])
vnx_common._migrate_volume = mock.Mock(
return_value=fake_migrate_return)
ret = vnx_common.retype(None, volume, new_type, None, host)
self.assertEqual(fake_migrate_return, ret)
vnx_common._migrate_volume.assert_called_once_with(
volume, host, common.ExtraSpecs(new_type['extra_specs']))
@mock.patch.object(client.Client, 'get_vnx_enabler_status')
@utils.patch_extra_specs_validate(return_value=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_retype_turn_on_compression_change_tier(
self, vnx_common, mocked, driver_in,
enabler_status):
new_type = {
'extra_specs': {'provisioning:type': 'compressed',
'storagetype:tiering': 'starthighthenauto'}}
volume = driver_in['volume']
host = driver_in['host']
lun = mocked['lun']
vnx_common.client.get_lun = mock.Mock(return_value=lun)
ret = vnx_common.retype(None, volume, new_type, None, host)
self.assertTrue(ret)
lun.enable_compression.assert_called_once_with(ignore_thresholds=True)
self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, lun.tier)
@mock.patch.object(client.Client, 'get_vnx_enabler_status')
@utils.patch_extra_specs_validate(return_value=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_retype_lun_has_snap(
self, vnx_common, mocked, driver_in,
enabler_status):
new_type = {
'extra_specs': {'provisioning:type': 'thin',
'storagetype:tiering': 'auto'}}
volume = driver_in['volume']
host = driver_in['host']
new_type = {
'extra_specs': {'provisioning:type': 'thin',
'storagetype:tiering': 'auto'}}
ret = vnx_common.retype(None, volume, new_type, None, host)
self.assertFalse(ret)
new_type = {
'extra_specs': {'provisioning:type': 'compressed',
'storagetype:tiering': 'auto'}}
ret = vnx_common.retype(None, volume, new_type, None, host)
self.assertFalse(ret)
@mock.patch.object(client.Client, 'get_vnx_enabler_status')
@utils.patch_extra_specs_validate(return_value=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_retype_change_tier(
self, vnx_common, mocked, driver_in,
enabler_status):
new_type = {
'extra_specs': {'storagetype:tiering': 'auto'}}
volume = driver_in['volume']
host = driver_in['host']
lun = mocked['lun']
vnx_common.client.get_lun = mock.Mock(return_value=lun)
ret = vnx_common.retype(None, volume, new_type, None, host)
self.assertTrue(ret)
self.assertEqual(storops.VNXTieringEnum.AUTO, lun.tier)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_consistencygroup(self, vnx_common, mocked, mocked_input):
cg = mocked_input['cg']
model_update = vnx_common.create_consistencygroup(None, group=cg)
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
model_update['status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_consistencygroup(self, vnx_common, mocked, mocked_input):
cg = mocked_input['cg']
model_update, vol_update_list = vnx_common.delete_consistencygroup(
None, group=cg, volumes=[])
self.assertEqual(cg.status,
model_update['status'])
self.assertEqual([], vol_update_list)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_consistencygroup_with_volume(
self, vnx_common, mocked, mocked_input):
cg = mocked_input['cg']
vol1 = mocked_input['vol1']
vol2 = mocked_input['vol2']
model_update, vol_update_list = vnx_common.delete_consistencygroup(
None, group=cg, volumes=[vol1, vol2])
self.assertEqual(cg.status,
model_update['status'])
for update in vol_update_list:
self.assertEqual(fields.ConsistencyGroupStatus.DELETED,
update['status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_consistencygroup_error(self, vnx_common,
mocked, mocked_input):
cg = mocked_input['cg']
self.assertRaises(
storops_ex.VNXConsistencyGroupError,
vnx_common.delete_consistencygroup,
context=None, group=cg, volumes=[])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_consistencygroup_volume_error(self, vnx_common,
mocked, mocked_input):
cg = mocked_input['cg']
vol1 = mocked_input['vol1']
vol2 = mocked_input['vol2']
model_update, vol_update_list = vnx_common.delete_consistencygroup(
None, group=cg, volumes=[vol1, vol2])
self.assertEqual(cg.status,
model_update['status'])
for update in vol_update_list:
self.assertEqual(fields.ConsistencyGroupStatus.ERROR_DELETING,
update['status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_extend_volume(self, common_adapter, _ignore, mocked_input):
common_adapter.extend_volume(mocked_input['volume'], 10)
lun = common_adapter.client.vnx.get_lun()
lun.expand.assert_called_once_with(10, ignore_thresholds=True)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_snapshot_adapter(self, common_adapter, _ignore,
mocked_input):
common_adapter.create_snapshot(mocked_input['snapshot'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_delete_snapshot_adapter(self, common_adapter, _ignore,
mocked_input):
common_adapter.delete_snapshot(mocked_input['snapshot'])
@res_mock.patch_common_adapter
def test_create_cgsnapshot(self, common_adapter, _):
common_adapter.do_create_cgsnap = mock.Mock(
return_value='fake_return')
cg_snapshot = test_utils.create_cgsnapshot(
self.ctxt,
fake_constants.CONSISTENCY_GROUP_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
ret = common_adapter.create_cgsnapshot(
None, cg_snapshot, snaps)
self.assertEqual('fake_return', ret)
common_adapter.do_create_cgsnap.assert_called_once_with(
cg_snapshot.consistencygroup_id,
cg_snapshot.id,
snaps)
@res_mock.patch_common_adapter
def test_create_group_snap(self, common_adapter, _):
common_adapter.do_create_cgsnap = mock.Mock(
return_value='fake_return')
group_snapshot = test_utils.create_group_snapshot(
self.ctxt,
fake_constants.GROUP_ID,
group_type_id=fake_constants.VOLUME_TYPE_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
ret = common_adapter.create_group_snapshot(
None, group_snapshot, snaps)
self.assertEqual('fake_return', ret)
common_adapter.do_create_cgsnap.assert_called_once_with(
group_snapshot.group_id,
group_snapshot.id,
snaps)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_do_create_cgsnap(self, common_adapter, _, mocked_input):
group_name = fake_constants.CONSISTENCY_GROUP_ID
snap_name = fake_constants.CGSNAPSHOT_ID
snap1 = mocked_input['snap1']
snap2 = mocked_input['snap2']
model_update, snapshots_model_update = (
common_adapter.do_create_cgsnap(group_name, snap_name,
[snap1, snap2]))
self.assertEqual('available', model_update['status'])
for update in snapshots_model_update:
self.assertEqual(fields.SnapshotStatus.AVAILABLE, update['status'])
@res_mock.patch_common_adapter
def test_delete_group_snapshot(self, common_adapter, _):
common_adapter.do_delete_cgsnap = mock.Mock(
return_value='fake_return')
group_snapshot = test_utils.create_group_snapshot(
self.ctxt,
fake_constants.GROUP_ID,
group_type_id=fake_constants.VOLUME_TYPE_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
ret = common_adapter.delete_group_snapshot(
None, group_snapshot, snaps)
self.assertEqual('fake_return', ret)
common_adapter.do_delete_cgsnap.assert_called_once_with(
group_snapshot.group_id,
group_snapshot.id,
group_snapshot.status,
snaps)
@res_mock.patch_common_adapter
def test_delete_cgsnapshot(self, common_adapter, _):
common_adapter.do_delete_cgsnap = mock.Mock(
return_value='fake_return')
cg_snapshot = test_utils.create_cgsnapshot(
self.ctxt,
fake_constants.CONSISTENCY_GROUP_ID)
vol = test_utils.create_volume(self.ctxt)
snaps = [
test_utils.create_snapshot(self.ctxt, vol.id)]
ret = common_adapter.delete_cgsnapshot(None, cg_snapshot, snaps)
self.assertEqual('fake_return', ret)
common_adapter.do_delete_cgsnap.assert_called_once_with(
cg_snapshot.consistencygroup_id,
cg_snapshot.id,
cg_snapshot.status,
snaps)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_do_delete_cgsnap(self, common_adapter, _, mocked_input):
group_name = fake_constants.CGSNAPSHOT_ID
snap_name = fake_constants.CGSNAPSHOT_ID
model_update, snapshot_updates = (
common_adapter.do_delete_cgsnap(
group_name, snap_name, 'available',
[mocked_input['snap1'], mocked_input['snap2']]))
self.assertEqual('deleted', model_update['status'])
for snap in snapshot_updates:
self.assertEqual(fields.SnapshotStatus.DELETED, snap['status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing_lun_no_exist(
self, common_adapter, _ignore, mocked_input):
self.assertRaises(
exception.ManageExistingInvalidReference,
common_adapter.manage_existing_get_size,
mocked_input['volume'], {'source-name': 'fake'})
common_adapter.client.vnx.get_lun.assert_called_once_with(
name='fake', lun_id=None)
@res_mock.patch_common_adapter
def test_manage_existing_invalid_ref(
self, common_adapter, _ignore):
self.assertRaises(
exception.ManageExistingInvalidReference,
common_adapter.manage_existing_get_size,
None, {'invalidkey': 'fake'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing_invalid_pool(
self, common_adapter, _ignore, mocked_input):
self.assertRaises(
exception.ManageExistingInvalidReference,
common_adapter.manage_existing_get_size,
mocked_input['volume'], {'source-id': '6'})
common_adapter.client.vnx.get_lun.assert_called_once_with(
lun_id='6', name=None)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing_get_size(
self, common_adapter, mocked_res, mocked_input):
size = common_adapter.manage_existing_get_size(
mocked_input['volume'], {'source-name': 'test_lun'})
self.assertEqual(size, mocked_res['lun'].total_capacity_gb)
@utils.patch_extra_specs({'provisioning:type': 'thin',
'storagetype:tiering': 'auto'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing_type_mismatch(
self, common_adapter, mocked_res, mocked_input):
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
common_adapter.manage_existing,
mocked_input['volume'],
{'source-name': 'test_lun'})
@utils.patch_extra_specs({'provisioning:type': 'deduplicated'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing(
self, common_adapter, mocked_res, mocked_input):
test_lun = mocked_res['lun']
common_adapter.client.get_lun = mock.Mock(return_value=test_lun)
lun_name = mocked_input['volume'].name
common_adapter._build_provider_location = mock.Mock(
return_value="fake_pl")
pl = common_adapter.manage_existing(
mocked_input['volume'],
{'source-name': 'test_lun'})
common_adapter._build_provider_location.assert_called_with(
lun_type='lun',
lun_id=1,
base_lun_name=lun_name)
self.assertEqual('fake_pl', pl['provider_location'])
test_lun.rename.assert_called_once_with(
lun_name)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_manage_existing_smp(
self, common_adapter, mocked_res, mocked_input):
common_adapter._build_provider_location = mock.Mock(
return_value="fake_pl")
pl = common_adapter.manage_existing(
mocked_input['volume'], {'source-name': 'test_lun'})
common_adapter._build_provider_location.assert_called_with(
lun_id=2, lun_type='smp', base_lun_name='src_lun')
self.assertEqual('fake_pl', pl['provider_location'])
@res_mock.patch_common_adapter
def test_assure_storage_group(self, common_adapter, mocked_res):
host = common.Host('host', ['initiators'])
common_adapter.assure_storage_group(host)
@res_mock.patch_common_adapter
def test_assure_storage_group_create_new(self, common_adapter, mocked_res):
host = common.Host('host', ['initiators'])
common_adapter.assure_storage_group(host)
common_adapter.client.vnx.create_sg.assert_called_once_with(host.name)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_assure_host_access(self, common_adapter,
mocked_res, mocked_input):
common_adapter.config.initiator_auto_registration = True
common_adapter.max_retries = 3
common_adapter.auto_register_initiator = mock.Mock()
common_adapter.client.add_lun_to_sg = mock.Mock()
sg = mocked_res['sg']
host = common.Host('host', ['initiators'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
lun = common_adapter.client.get_lun()
common_adapter.assure_host_access(sg, host, volume, True)
common_adapter.auto_register_initiator.assert_called_once_with(
sg, host)
common_adapter.client.add_lun_to_sg.assert_called_once_with(
sg, lun, common_adapter.max_retries)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_assure_host_access_without_auto_register_new_sg(
self, common_adapter, mocked_res, mocked_input):
common_adapter.config.initiator_auto_registration = False
common_adapter.max_retries = 3
common_adapter.client.add_lun_to_sg = mock.Mock()
sg = mocked_res['sg']
host = common.Host('host', ['initiators'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
lun = common_adapter.client.get_lun()
common_adapter.assure_host_access(sg, host, volume, True)
sg.connect_host.assert_called_once_with(host.name)
common_adapter.client.add_lun_to_sg.assert_called_once_with(
sg, lun, common_adapter.max_retries)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_assure_host_access_without_auto_register(
self, common_adapter, mocked_res, mocked_input):
common_adapter.config.initiator_auto_registration = False
common_adapter.max_retries = 3
common_adapter.client.add_lun_to_sg = mock.Mock()
sg = mocked_res['sg']
host = common.Host('host', ['initiators'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
lun = common_adapter.client.get_lun()
common_adapter.assure_host_access(sg, host, volume, False)
sg.connect_host.assert_not_called()
common_adapter.client.add_lun_to_sg.assert_called_once_with(
sg, lun, common_adapter.max_retries)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_auto_register_initiator(
self, common_adapter, mocked_res, mocked_input):
common_adapter.client.register_initiator = mock.Mock()
common_adapter.config.io_port_list = ['a-0-0', 'a-0-1', 'a-1-0',
'b-0-1']
allowed_ports = mocked_res['allowed_ports']
common_adapter.allowed_ports = allowed_ports
reg_ports = mocked_res['reg_ports']
sg = mocked_res['sg']
host = common.Host('host', ['iqn-host-1', 'iqn-reg-2'])
common_adapter.auto_register_initiator(sg, host)
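        # Expected registration map for this fixture: a brand-new initiator
        # ('iqn-host-1') is registered on every allowed port, while an
        # already-registered one ('iqn-reg-2') only gets the ports it lacks.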
initiator_port_map = {'iqn-host-1': set(allowed_ports),
'iqn-reg-2': set(allowed_ports) - set(reg_ports)}
common_adapter.client.register_initiator.assert_called_once_with(
sg, host, initiator_port_map)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_auto_register_initiator_no_white_list(
self, common_adapter, mocked_res, mocked_input):
for io_port_list in (None, ):
common_adapter.client.register_initiator = mock.Mock()
common_adapter.config.io_port_list = io_port_list
allowed_ports = mocked_res['allowed_ports']
common_adapter.allowed_ports = allowed_ports
sg = mocked_res['sg']
host = common.Host('host', ['iqn-host-1', 'iqn-reg-2'])
common_adapter.auto_register_initiator(sg, host)
initiator_port_map = {'iqn-host-1': set(allowed_ports)}
common_adapter.client.register_initiator.assert_called_once_with(
sg, host, initiator_port_map)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_auto_register_initiator_no_port_to_reg(
self, common_adapter, mocked_res, mocked_input):
common_adapter.config.io_port_list = ['a-0-0']
allowed_ports = mocked_res['allowed_ports']
common_adapter.allowed_ports = allowed_ports
sg = mocked_res['sg']
host = common.Host('host', ['iqn-reg-1', 'iqn-reg-2'])
with mock.patch.object(common_adapter.client, 'register_initiator'):
common_adapter.auto_register_initiator(sg, host)
common_adapter.client.register_initiator.assert_called_once_with(
sg, host, {})
@res_mock.patch_common_adapter
def test_build_provider_location(self, common_adapter, mocked_res):
common_adapter.serial_number = 'vnx-serial'
pl = common_adapter._build_provider_location(
lun_id='fake_id', lun_type='smp', base_lun_name='fake_name')
expected_pl = vnx_utils.build_provider_location(
system='vnx-serial',
lun_type='smp',
lun_id='fake_id',
base_lun_name='fake_name',
version=common_adapter.VERSION)
self.assertEqual(expected_pl, pl)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_remove_host_access(
self, common_adapter, mocked_res, mocked_input):
host = common.Host('fake_host', ['fake_initiator'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
sg = mocked_res['sg']
common_adapter.remove_host_access(volume, host, sg)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_remove_host_access_sg_absent(
self, common_adapter, mocked_res, mocked_input):
host = common.Host('fake_host', ['fake_initiator'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
sg = mocked_res['sg']
common_adapter.remove_host_access(volume, host, sg)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_remove_host_access_volume_not_in_sg(
self, common_adapter, mocked_res, mocked_input):
host = common.Host('fake_host', ['fake_initiator'])
cinder_volume = mocked_input['volume']
volume = common.Volume(cinder_volume.name, cinder_volume.id,
common_adapter.client.get_lun_id(cinder_volume))
sg = mocked_res['sg']
common_adapter.remove_host_access(volume, host, sg)
@res_mock.patch_common_adapter
def test_terminate_connection_cleanup_sg_absent(
self, common_adapter, mocked_res):
common_adapter.destroy_empty_sg = True
common_adapter.itor_auto_dereg = True
host = common.Host('fake_host', ['fake_initiator'])
sg = mocked_res['sg']
common_adapter.terminate_connection_cleanup(host, sg)
@res_mock.patch_common_adapter
def test_terminate_connection_cleanup_remove_sg(
self, common_adapter, mocked_res):
common_adapter.destroy_empty_sg = True
common_adapter.itor_auto_dereg = False
host = common.Host('fake_host', ['fake_initiator'])
sg = mocked_res['sg']
common_adapter.terminate_connection_cleanup(host, sg)
@res_mock.patch_common_adapter
def test_terminate_connection_cleanup_deregister(
self, common_adapter, mocked_res):
common_adapter.destroy_empty_sg = True
common_adapter.itor_auto_dereg = True
host = common.Host('fake_host', ['fake_initiator1', 'fake_initiator2'])
sg = mocked_res['sg']
common_adapter.terminate_connection_cleanup(host, sg)
common_adapter.client.vnx.remove_hba.assert_any_call(
'fake_initiator1')
common_adapter.client.vnx.remove_hba.assert_any_call(
'fake_initiator2')
@res_mock.patch_common_adapter
def test_terminate_connection_cleanup_sg_is_not_empty(
self, common_adapter, mocked_res):
common_adapter.destroy_empty_sg = True
common_adapter.itor_auto_dereg = True
host = common.Host('fake_host', ['fake_initiator'])
sg = mocked_res['sg']
common_adapter.terminate_connection_cleanup(host, sg)
@res_mock.patch_common_adapter
def test_set_extra_spec_defaults(self, common_adapter, mocked_res):
common_adapter.set_extra_spec_defaults()
self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO,
common.ExtraSpecs.TIER_DEFAULT)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_do_update_cg(self, common_adapter, _, mocked_input):
common_adapter.client.update_consistencygroup = mock.Mock()
cg = mocked_input['cg']
common_adapter.client.get_cg = mock.Mock(return_value=cg)
common_adapter.do_update_cg(cg.id,
[mocked_input['volume_add']],
[mocked_input['volume_remove']])
common_adapter.client.update_consistencygroup.assert_called_once_with(
cg, [1], [2])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_create_export_snapshot(self, common_adapter, mocked_res,
mocked_input):
common_adapter.client.create_mount_point = mock.Mock()
snapshot = mocked_input['snapshot']
common_adapter.create_export_snapshot(None, snapshot, None)
common_adapter.client.create_mount_point.assert_called_once_with(
snapshot.volume_name, 'tmp-smp-' + snapshot.id)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_remove_export_snapshot(self, common_adapter, mocked_res,
mocked_input):
common_adapter.client.delete_lun = mock.Mock()
snapshot = mocked_input['snapshot']
common_adapter.remove_export_snapshot(None, snapshot)
common_adapter.client.delete_lun.assert_called_once_with(
'tmp-smp-' + snapshot.id)
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_initialize_connection_snapshot(self, common_adapter, mocked_res,
mocked_input):
common_adapter.client.attach_snapshot = mock.Mock()
common_adapter._initialize_connection = mock.Mock()
snapshot = mocked_input['snapshot']
smp_name = 'tmp-smp-' + snapshot.id
common_adapter.initialize_connection_snapshot(snapshot, None)
common_adapter.client.attach_snapshot.assert_called_once_with(
smp_name, snapshot.name)
lun = mocked_res['lun']
called_volume = common_adapter._initialize_connection.call_args[0][0]
self.assertEqual((smp_name, snapshot.id, lun.lun_id),
(called_volume.name, called_volume.id,
called_volume.vnx_lun_id))
self.assertIsNone(
common_adapter._initialize_connection.call_args[0][1])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_terminate_connection_snapshot(self, common_adapter, mocked_res,
mocked_input):
common_adapter.client.detach_snapshot = mock.Mock()
common_adapter._terminate_connection = mock.Mock()
snapshot = mocked_input['snapshot']
smp_name = 'tmp-smp-' + snapshot.id
common_adapter.terminate_connection_snapshot(snapshot, None)
lun = mocked_res['lun']
called_volume = common_adapter._terminate_connection.call_args[0][0]
self.assertEqual((smp_name, snapshot.id, lun.lun_id),
(called_volume.name, called_volume.id,
called_volume.vnx_lun_id))
self.assertIsNone(common_adapter._terminate_connection.call_args[0][1])
common_adapter.client.detach_snapshot.assert_called_once_with(
smp_name)
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_setup_lun_replication(self, common_adapter,
mocked_res, mocked_input):
vol1 = mocked_input['vol1']
fake_mirror = utils.build_fake_mirror_view()
fake_mirror.secondary_client.create_lun.return_value = (
mocked_res['lun'])
common_adapter.mirror_view = fake_mirror
rep_update = common_adapter.setup_lun_replication(
vol1, 111)
fake_mirror.create_mirror.assert_called_once_with(
'mirror_' + vol1.id, 111)
fake_mirror.add_image.assert_called_once_with(
'mirror_' + vol1.id, mocked_res['lun'].lun_id)
self.assertEqual(fields.ReplicationStatus.ENABLED,
rep_update['replication_status'])
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_cleanup_replication(self, common_adapter,
mocked_res, mocked_input):
fake_mirror = utils.build_fake_mirror_view()
vol1 = mocked_input['vol1']
with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
fake.return_value = fake_mirror
common_adapter.cleanup_lun_replication(vol1)
fake_mirror.destroy_mirror.assert_called_once_with(
'mirror_' + vol1.id, vol1.name)
@res_mock.patch_common_adapter
def test_build_mirror_view(self, common_adapter,
mocked_res):
common_adapter.config.replication_device = [
utils.get_replication_device()]
with utils.patch_vnxsystem:
mirror = common_adapter.build_mirror_view(
common_adapter.config)
self.assertIsNotNone(mirror)
@res_mock.patch_common_adapter
def test_build_mirror_view_no_device(
self, common_adapter, mocked_res):
common_adapter.config.replication_device = []
mirror = common_adapter.build_mirror_view(
common_adapter.config)
self.assertIsNone(mirror)
@res_mock.patch_common_adapter
def test_build_mirror_view_2_device(self, common_adapter, mocked_res):
device = utils.get_replication_device()
device1 = device.copy()
common_adapter.config.replication_device = [device, device1]
self.assertRaises(exception.InvalidInput,
common_adapter.build_mirror_view,
common_adapter.config)
@res_mock.patch_common_adapter
def test_build_mirror_view_no_enabler(self, common_adapter, mocked_res):
common_adapter.config.replication_device = [
utils.get_replication_device()]
self.assertRaises(exception.InvalidInput,
common_adapter.build_mirror_view,
common_adapter.config)
@res_mock.patch_common_adapter
def test_build_mirror_view_failover_false(self, common_adapter,
mocked_res):
common_adapter.config.replication_device = [
utils.get_replication_device()]
with utils.patch_vnxsystem:
failover_mirror = common_adapter.build_mirror_view(
common_adapter.config, failover=False)
self.assertIsNotNone(failover_mirror)
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_failover_host(self, common_adapter, mocked_res, mocked_input):
device = utils.get_replication_device()
common_adapter.config.replication_device = [device]
vol1 = mocked_input['vol1']
lun1 = mocked_res['lun1']
with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
fake_mirror = utils.build_fake_mirror_view()
fake_mirror.secondary_client.get_lun.return_value = lun1
fake_mirror.secondary_client.get_serial.return_value = (
device['backend_id'])
fake.return_value = fake_mirror
backend_id, updates = common_adapter.failover_host(
None, [vol1], device['backend_id'])
fake_mirror.promote_image.assert_called_once_with(
'mirror_' + vol1.id)
fake_mirror.secondary_client.get_serial.assert_called_with()
fake_mirror.secondary_client.get_lun.assert_called_with(
name=vol1.name)
self.assertEqual(device['backend_id'], backend_id)
for update in updates:
self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
update['updates']['replication_status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_failover_host_invalid_backend_id(self, common_adapter,
mocked_res, mocked_input):
common_adapter.config.replication_device = [
utils.get_replication_device()]
vol1 = mocked_input['vol1']
self.assertRaises(exception.InvalidInput,
common_adapter.failover_host,
None, [vol1], 'new_id')
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_failover_host_failback(self, common_adapter, mocked_res,
mocked_input):
device = utils.get_replication_device()
common_adapter.config.replication_device = [device]
vol1 = mocked_input['vol1']
lun1 = mocked_res['lun1']
with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
fake_mirror = utils.build_fake_mirror_view()
fake_mirror.secondary_client.get_lun.return_value = lun1
fake_mirror.secondary_client.get_serial.return_value = (
device['backend_id'])
fake.return_value = fake_mirror
backend_id, updates = common_adapter.failover_host(
None, [vol1], 'default')
fake_mirror.promote_image.assert_called_once_with(
'mirror_' + vol1.id)
fake_mirror.secondary_client.get_serial.assert_called_with()
fake_mirror.secondary_client.get_lun.assert_called_with(
name=vol1.name)
self.assertEqual('default', backend_id)
for update in updates:
self.assertEqual(fields.ReplicationStatus.ENABLED,
update['updates']['replication_status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_get_pool_name(self, common_adapter, mocked_res, mocked_input):
self.assertEqual(mocked_res['lun'].pool_name,
common_adapter.get_pool_name(mocked_input['volume']))
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_update_migrated_volume(self, common_adapter, mocked_res,
mocked_input):
data = common_adapter.update_migrated_volume(
None, mocked_input['volume'], mocked_input['new_volume'])
self.assertEqual(mocked_input['new_volume'].provider_location,
data['provider_location'])
self.assertEqual('False', data['metadata']['snapcopy'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_update_migrated_volume_smp(self, common_adapter, mocked_res,
mocked_input):
data = common_adapter.update_migrated_volume(
None, mocked_input['volume'], mocked_input['new_volume'])
self.assertEqual(mocked_input['new_volume'].provider_location,
data['provider_location'])
self.assertEqual('True', data['metadata']['snapcopy'])
@res_mock.patch_common_adapter
def test_normalize_config_naviseccli_path(self, common_adapter,
mocked_res):
old_value = common_adapter.config.naviseccli_path
common_adapter._normalize_config()
self.assertEqual(old_value, common_adapter.config.naviseccli_path)
@res_mock.patch_common_adapter
def test_normalize_config_naviseccli_path_none(self, common_adapter,
mocked_res):
common_adapter.config.naviseccli_path = ""
common_adapter._normalize_config()
self.assertIsNone(common_adapter.config.naviseccli_path)
common_adapter.config.naviseccli_path = " "
common_adapter._normalize_config()
self.assertIsNone(common_adapter.config.naviseccli_path)
common_adapter.config.naviseccli_path = None
common_adapter._normalize_config()
self.assertIsNone(common_adapter.config.naviseccli_path)
@res_mock.patch_common_adapter
def test_normalize_config_pool_names(self, common_adapter,
mocked_res):
common_adapter.config.storage_vnx_pool_names = [
'pool_1', ' pool_2 ', '', ' ']
common_adapter._normalize_config()
self.assertEqual(['pool_1', 'pool_2'],
common_adapter.config.storage_vnx_pool_names)
@res_mock.patch_common_adapter
def test_normalize_config_pool_names_none(self, common_adapter,
mocked_res):
common_adapter.config.storage_vnx_pool_names = None
common_adapter._normalize_config()
self.assertIsNone(common_adapter.config.storage_vnx_pool_names)
@res_mock.patch_common_adapter
def test_normalize_config_pool_names_empty_list(self, common_adapter,
mocked_res):
common_adapter.config.storage_vnx_pool_names = []
self.assertRaises(exception.InvalidConfigurationValue,
common_adapter._normalize_config)
common_adapter.config.storage_vnx_pool_names = [' ', '']
self.assertRaises(exception.InvalidConfigurationValue,
common_adapter._normalize_config)
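# The io_port_list tests below verify that _normalize_config() strips
# surrounding whitespace, upper-cases the port identifiers and drops blank
# entries.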
@res_mock.patch_common_adapter
def test_normalize_config_io_port_list(self, common_adapter,
mocked_res):
common_adapter.config.io_port_list = [
'a-0-1', ' b-1 ', '', ' ']
common_adapter._normalize_config()
self.assertEqual(['A-0-1', 'B-1'],
common_adapter.config.io_port_list)
@res_mock.patch_common_adapter
def test_normalize_config_io_port_list_none(self, common_adapter,
mocked_res):
common_adapter.config.io_port_list = None
common_adapter._normalize_config()
self.assertIsNone(common_adapter.config.io_port_list)
@res_mock.patch_common_adapter
def test_normalize_config_io_port_list_empty_list(self, common_adapter,
mocked_res):
common_adapter.config.io_port_list = []
self.assertRaises(exception.InvalidConfigurationValue,
common_adapter._normalize_config)
common_adapter.config.io_port_list = [' ', '']
self.assertRaises(exception.InvalidConfigurationValue,
common_adapter._normalize_config)
class TestISCSIAdapter(test.TestCase):
STORAGE_PROTOCOL = common.PROTOCOL_ISCSI
def setUp(self):
super(TestISCSIAdapter, self).setUp()
self.configuration = conf.Configuration(None)
vnx_utils.init_ops(self.configuration)
self.configuration.storage_protocol = self.STORAGE_PROTOCOL
def tearDown(self):
super(TestISCSIAdapter, self).tearDown()
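# validate_ports() takes whitelist entries of the form '<SP>-<bus>-<port>'
# (e.g. 'A-0-0'); malformed or nonexistent ports raise
# VolumeBackendAPIException, as the tests below verify.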
@res_mock.patch_iscsi_adapter
def test_validate_ports_iscsi(self, vnx_iscsi, mocked):
all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
valid_ports = vnx_iscsi.validate_ports(all_iscsi_ports, ['A-0-0'])
self.assertEqual([mocked['iscsi_port_a-0-0']], valid_ports)
@res_mock.patch_iscsi_adapter
def test_validate_ports_iscsi_invalid(self, vnx_iscsi, mocked):
invalid_white_list = ['A-0-0', 'A-B-0']
all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
self.assertRaisesRegex(
exception.VolumeBackendAPIException,
'Invalid iscsi ports %s specified for io_port_list.'
% 'A-B-0',
vnx_iscsi.validate_ports,
all_iscsi_ports,
invalid_white_list)
@res_mock.patch_iscsi_adapter
def test_validate_ports_iscsi_not_exist(self, vnx_iscsi, mocked):
nonexistent_ports = ['A-0-0', 'A-6-1']
all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
self.assertRaisesRegex(
exception.VolumeBackendAPIException,
'Invalid iscsi ports %s specified for io_port_list'
% 'A-6-1',
vnx_iscsi.validate_ports,
all_iscsi_ports,
nonexistent_ports)
@res_mock.patch_iscsi_adapter
def test_update_volume_stats_iscsi(self, vnx_iscsi, mocked):
with mock.patch.object(adapter.CommonAdapter, 'update_volume_stats',
return_value={'storage_protocol':
self.STORAGE_PROTOCOL}):
stats = vnx_iscsi.update_volume_stats()
self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol'])
self.assertEqual('VNXISCSIDriver', stats['volume_backend_name'])
@res_mock.patch_iscsi_adapter
def test_build_terminate_connection_return_data_iscsi(
self, vnx_iscsi, mocked):
re = vnx_iscsi.build_terminate_connection_return_data(None, None)
self.assertIsNone(re)
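# iscsi_initiators is configured as a JSON string mapping host names to
# lists of IP addresses; _normalize_config() parses it into a dict and
# rejects values that are empty or not a JSON object.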
@res_mock.patch_iscsi_adapter
def test_normalize_config_iscsi_initiators(
self, vnx_iscsi, mocked):
vnx_iscsi.config.iscsi_initiators = (
'{"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]}')
vnx_iscsi._normalize_config()
expected = {"host1": ["10.0.0.1", "10.0.0.2"],
"host2": ["10.0.0.3"]}
self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators)
vnx_iscsi.config.iscsi_initiators = '{}'
vnx_iscsi._normalize_config()
expected = {}
self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators)
@res_mock.patch_iscsi_adapter
def test_normalize_config_iscsi_initiators_none(
self, vnx_iscsi, mocked):
vnx_iscsi.config.iscsi_initiators = None
vnx_iscsi._normalize_config()
self.assertIsNone(vnx_iscsi.config.iscsi_initiators)
@res_mock.patch_iscsi_adapter
def test_normalize_config_iscsi_initiators_empty_str(
self, vnx_iscsi, mocked):
vnx_iscsi.config.iscsi_initiators = ''
self.assertRaises(exception.InvalidConfigurationValue,
vnx_iscsi._normalize_config)
vnx_iscsi.config.iscsi_initiators = ' '
self.assertRaises(exception.InvalidConfigurationValue,
vnx_iscsi._normalize_config)
@res_mock.patch_iscsi_adapter
def test_normalize_config_iscsi_initiators_not_dict(
self, vnx_iscsi, mocked):
vnx_iscsi.config.iscsi_initiators = '["a", "b"]'
self.assertRaises(exception.InvalidConfigurationValue,
vnx_iscsi._normalize_config)
class TestFCAdapter(test.TestCase):
STORAGE_PROTOCOL = common.PROTOCOL_FC
def setUp(self):
super(TestFCAdapter, self).setUp()
self.configuration = conf.Configuration(None)
vnx_utils.init_ops(self.configuration)
self.configuration.storage_protocol = self.STORAGE_PROTOCOL
def tearDown(self):
super(TestFCAdapter, self).tearDown()
@res_mock.patch_fc_adapter
def test_validate_ports_fc(self, vnx_fc, mocked):
all_fc_ports = vnx_fc.client.get_fc_targets()
valid_ports = vnx_fc.validate_ports(all_fc_ports, ['A-1'])
self.assertEqual([mocked['fc_port_a-1']], valid_ports)
@res_mock.patch_fc_adapter
def test_validate_ports_fc_invalid(self, vnx_fc, mocked):
invalid_white_list = ['A-1', 'A-B']
all_fc_ports = vnx_fc.client.get_fc_targets()
self.assertRaisesRegex(
exception.VolumeBackendAPIException,
'Invalid fc ports %s specified for io_port_list.'
% 'A-B',
vnx_fc.validate_ports,
all_fc_ports,
invalid_white_list)
@res_mock.patch_fc_adapter
def test_validate_ports_fc_not_exist(self, vnx_fc, mocked):
nonexistent_ports = ['A-1', 'A-6']
all_fc_ports = vnx_fc.client.get_fc_targets()
self.assertRaisesRegex(
exception.VolumeBackendAPIException,
'Invalid fc ports %s specified for io_port_list'
% 'A-6',
vnx_fc.validate_ports,
all_fc_ports,
nonexistent_ports)
@res_mock.patch_fc_adapter
def test_update_volume_stats(self, vnx_fc, mocked):
with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'):
stats = vnx_fc.update_volume_stats()
self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol'])
self.assertEqual('VNXFCDriver', stats['volume_backend_name'])
@mock.patch.object(vnx_utils, 'convert_to_tgt_list_and_itor_tgt_map')
@res_mock.patch_fc_adapter
def test_build_terminate_connection_return_data_auto_zone(
self, vnx_fc, mocked, converter):
vnx_fc.lookup_service = mock.Mock()
get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
itor_tgt_map = {
'wwn1': ['wwnt1', 'wwnt2', 'wwnt3'],
'wwn2': ['wwnt1', 'wwnt2']
}
converter.return_value = ([], itor_tgt_map)
host = common.Host('fake_host',
['fake_hba1'],
wwpns=['wwn1', 'wwn2'])
sg = mocked['sg']
re = vnx_fc.build_terminate_connection_return_data(host, sg)
get_mapping.assert_called_once_with(
['wwn1', 'wwn2'], ['5006016636E01CA1'])
self.assertEqual(itor_tgt_map,
re['data']['initiator_target_map'])
@res_mock.patch_fc_adapter
def test_build_terminate_connection_return_data_sg_absent(
self, vnx_fc, mocked):
sg = mocked['sg']
re = vnx_fc.build_terminate_connection_return_data(None, sg)
self.assertEqual('fibre_channel', re['driver_volume_type'])
self.assertEqual({}, re['data'])
@res_mock.patch_fc_adapter
def test_build_terminate_connection_return_data_without_autozone(
self, vnx_fc, mocked):
vnx_fc.lookup_service = None
re = vnx_fc.build_terminate_connection_return_data(None, None)
self.assertEqual('fibre_channel', re['driver_volume_type'])
self.assertEqual({}, re['data'])
@res_mock.patch_fc_adapter
def test_get_tgt_list_and_initiator_tgt_map_allow_port_only(
self, vnx_fc, mocked):
sg = mocked['sg']
host = common.Host('fake_host',
['fake_hba1'],
wwpns=['wwn1', 'wwn2'])
mapping = {
'san_1': {'initiator_port_wwn_list': ['wwn1'],
'target_port_wwn_list': ['5006016636E01CB2']}}
vnx_fc.lookup_service = mock.Mock()
vnx_fc.lookup_service.get_device_mapping_from_network = mock.Mock(
return_value=mapping)
get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
vnx_fc.allowed_ports = mocked['adapter'].allowed_ports
targets, tgt_map = vnx_fc._get_tgt_list_and_initiator_tgt_map(
sg, host, True)
self.assertEqual(['5006016636E01CB2'], targets)
self.assertEqual({'wwn1': ['5006016636E01CB2']}, tgt_map)
get_mapping.assert_called_once_with(
['wwn1', 'wwn2'], ['5006016636E01CB2'])
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
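"""Return a ConfigProto with Grappler graph rewrites disabled.
Model pruning and the arithmetic/dependency optimizers are turned off;
presumably this keeps node names stable so that the debug watches set on
the Python graph match the graph that actually runs.
"""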
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.Variable(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
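"""Return the input scaled by the variable w; the state passes through unchanged."""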
return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
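# With a GPU available, nodes are placed on both the CPU and the GPU
# device, so two partition graphs and two devices are expected; otherwise
# only the single CPU device is involved.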
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
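Example (a minimal usage sketch; the fetch and node name are illustrative):
result, dump = self._debug_run_and_get_dump(sess, some_tensor)
values = dump.get_tensors("some_node", 0, "DebugIdentity")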
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.Variable(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.Variable(2.1, name="u")
v = variables.Variable(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.Variable(str1_init, name=str1_name)
str2 = variables.Variable(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for str1.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for str2.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.Variable(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watches for u and s.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# Verify that the variable is properly initialized by the run() call.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.Variable(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.Variable(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
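# The loop enters with i == 10 and adds 2 per iteration while i < 16, so
# while/Enter dumps 10 and while/NextIteration dumps 12, 14 and 16.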
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.Variable(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/device:GPU:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variables.Variable(1.0, name="v1")
v2 = variables.Variable(2.0, name="v2")
v3 = variables.Variable(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variables.Variable(10.0, name="v")
delta = variables.Variable(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError; validation occurs
# only when partition_graphs are loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
# Next, set the two time stamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has multiple
# output slots (unique_with_counts emits three).
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on v: w = u * v, so dw / dv = u.
# Updated value of v should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.Variable(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.Variable(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.Variable(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
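# The DebugNumericSummary vector lists, in order: an is-initialized flag,
# the element count, counts of NaN, -inf, negative, zero, positive and +inf
# elements, the min, max, mean and variance of the finite elements, and
# finally the dtype enum, ndims and the dimension sizes.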
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.Variable(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.Variable("1", name="a")
b = variables.Variable("3", name="b")
c = variables.Variable("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
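# String addition concatenates: d = "1" + "3" = "13", e = d + "2" = "132",
# so n = 132.0 and m = n + n = 264.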
self.assertEqual(264, m_result)
# The numeric (non-string) Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.Variable(10.0, name="a")
b = variables.Variable(0.0, name="b")
c = variables.Variable(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.Variable(10.0, name="a")
b = variables.Variable(0.0, name="b")
c = variables.Variable(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
self.assertEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
shutil.rmtree(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.Variable([10.0, 10.0], name="a")
b = variables.Variable([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.Variable(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEquals(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increase monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
|
|
import time
import socket
import gevent
import numpy as np
import sys
import cv2
from load_config import LoadConfig
import cvlib
conf = LoadConfig("config.toml").config
"""
def match():
img = cv2.imread("box_in_scene2.png")#sys.argv[1])
temp = cv2.imread("box4.png")#sys.argv[2])
try:
dist = int(sys.argv[3])
except IndexError:
dist = 200
try:
num = int(sys.argv[4])
except IndexError:
num = -1
skp, tkp = findKeyPoints(img, temp, dist)
newimg = drawKeyPoints(img, temp, skp, tkp, num)
cv2.imshow("image", newimg)
cv2.waitKey(0)
"""
def supress(v, w):
#v[0],v[1],
print v
if v[2] < w/2 and v[2] > 20:# and v[0] - v[2] >0 and v[1] - v[2]>0 :
return True
def main():
print conf
target = cv2.imread(conf["app"]["target"])#sys.argv[2])
#target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
#print type(target)
#cv2.NamedWindow("camera", 1)
#capture = cv2.VideoCapture(0)
capture = cv2.VideoCapture(conf["app"]["camera_uri"])
i = 0
pt1 = (conf["app"]["crop_start"][0],conf["app"]["crop_start"][1])
w = conf["app"]["corp_width"]
pt2 = (pt1[0]+w,pt1[1]+w)
debug = 1# conf["app"]["debug"]
cp = [0,0]
while True:
#i = i +1
#if i > 200:
# i = 0
ret, img_read = capture.read() #cv.QueryFrame(capture)
#if i == 1:
# pass
if not ret:
print ret,
# No frame was read from the camera; wait briefly and retry instead of
# processing a stale or empty image.
time.sleep(0.1)
continue
#raise(Exception("can't connect camera"))
#mat=cv2.GetMat(img)
#img_p = np.asarray(mat)
#img_p = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
#print dir(img)
"""
im_gray = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
cv.CvtColor(img,im_gray,cv.CV_RGB2GRAY)
# Sobel operator
dstSobel = cv.CreateMat(im_gray.height, im_gray.width, cv.CV_32FC1)
# Sobel(src, dst, xorder, yorder, apertureSize = 3)
cv.Sobel(im_gray,dstSobel,1,1,3)
"""
#print ret
try:
# skp: source key points, tkp: target key points
t1 = time.time()
#img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
#im[y1:y2, x1:x2]
#
crop_img = img_read[pt1[1]:pt2[1], pt1[0]:pt2[0]]
#print(len(crop_img))
distance = conf["app"]["distance"]
#skp, tkp = cvlib.findKeyPoints(crop_img , target, distance)
skp = 1
if skp == None:
print("skp is none")
img_read = cv2.medianBlur(img_read,5)
img_read = cv2.cvtColor(img_read, cv2.COLOR_BGR2GRAY)
cv2.imshow("camera", img_read)
#continue
else:
print "==" * 20
print "time:[%.3f]" %(time.time() - t1)
#print "skp", len(skp)#, skp
#print "tkp",len(tkp)#, tkp
if debug:
crop_img = cv2.medianBlur(crop_img,5)
gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
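# cv2.HoughCircles(image, method, dp, minDist, ...): dp is the inverse ratio of
# the accumulator resolution to the image resolution, minDist the minimum
# distance between detected centres, param1 the upper Canny edge threshold and
# param2 the accumulator threshold (larger values mean fewer, more confident
# circles).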
circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT,
30, ## dp
200, ## minDist
param1=100,
param2=100, ##
minRadius=70,
maxRadius=200)
print circles
circles = np.uint16(np.around(circles))
j = 0
cv2.rectangle(img_read, pt1, pt2, (0,255,0))
for i in circles[0,:]:
if supress(i, w):
j = j + 1
"""if i[0] - cp[0] > 30 or i[1] - cp[1] > 30 :
pass
else:
"""
cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),i[2],(0,255,0),2)
cv2.circle(img_read,(pt1[0]+i[0],pt1[1]+i[1]),2,(0,0,255),3)
cp = [ i[0], i[1] ]
#newimg = cvlib.drawKeyPoints(img_read, target, skp, tkp, pt1, pt2, -1)
cv2.imshow("camera", img_read)
#gevent.sleep(1)
except Exception as ex:
print(ex)
#gevent.sleep(3)
continue
#cv.ShowImage('camera', newimg)
# image smoothing and subtraction
# imageBlur = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # filering the original image
# # Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)
# cv.Smooth(im_gray, imageBlur, cv.CV_BLUR, 11, 11)
# diff = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # subtraction (original - filtered)
# cv.AbsDiff(im_gray,imageBlur,diff)
# cv.ShowImage('camera', diff)
if cv2.waitKey(10) == 27:
break
#gevent.sleep(0.1)
# cv2.destroyWindow("camera")
if __name__ == "__main__":
main()
|
|
from typing import Optional, Union, Dict
import os
import os.path as osp
import shutil
from ogb.utils import smiles2graph
from ogb.utils.url import decide_download, download_url, extract_zip
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
class PCQM4MDataset(object):
def __init__(self, root = 'dataset', smiles2graph = smiles2graph, only_smiles=False):
'''
Library-agnostic PCQM4M dataset object
- root (str): the dataset folder will be located at root/pcqm4m_kddcup2021
- smiles2graph (callable): A callable function that converts a SMILES string into a graph object
* The default smiles2graph requires rdkit to be installed
- only_smiles (bool): If this is True, we directly return the SMILES string in __getitem__, without converting it into a graph.
'''
print('The PCQM4M dataset has been deprecated. The leaderboard is no longer maintained.')
print('Please use PCQM4Mv2 instead.')
self.original_root = root
self.smiles2graph = smiles2graph
self.only_smiles = only_smiles
self.folder = osp.join(root, 'pcqm4m_kddcup2021')
self.version = 1
# Old url hosted at Stanford
# md5sum: 5144ebaa7c67d24da1a2acbe41f57f6a
# self.url = f'http://ogb-data.stanford.edu/data/lsc/pcqm4m_kddcup2021.zip'
# New url hosted by DGL team at AWS--much faster to download
self.url = 'https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/pcqm4m_kddcup2021.zip'
# check version and update if necessary
if osp.isdir(self.folder) and (not osp.exists(osp.join(self.folder, f'RELEASE_v{self.version}.txt'))):
print('PCQM4M dataset has been updated.')
if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
shutil.rmtree(self.folder)
super(PCQM4MDataset, self).__init__()
# Prepare everything.
# download if there is no raw file
# preprocess if there is no processed file
# load data if processed file is found.
if self.only_smiles:
self.prepare_smiles()
else:
self.prepare_graph()
def download(self):
if decide_download(self.url):
path = download_url(self.url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
else:
print('Stop download.')
exit(-1)
def prepare_smiles(self):
raw_dir = osp.join(self.folder, 'raw')
if not osp.exists(osp.join(raw_dir, 'data.csv.gz')):
# if the raw file does not exist, then download it.
self.download()
data_df = pd.read_csv(osp.join(raw_dir, 'data.csv.gz'))
smiles_list = data_df['smiles'].values
homolumogap_list = data_df['homolumogap'].values
self.graphs = list(smiles_list)
self.labels = homolumogap_list
def prepare_graph(self):
processed_dir = osp.join(self.folder, 'processed')
raw_dir = osp.join(self.folder, 'raw')
pre_processed_file_path = osp.join(processed_dir, 'data_processed')
if osp.exists(pre_processed_file_path):
# if pre-processed file already exists
loaded_dict = torch.load(pre_processed_file_path)  # the second positional arg of torch.load is map_location, not a file mode
self.graphs, self.labels = loaded_dict['graphs'], loaded_dict['labels']
else:
# if pre-processed file does not exist
if not osp.exists(osp.join(raw_dir, 'data.csv.gz')):
# if the raw file does not exist, then download it.
self.download()
data_df = pd.read_csv(osp.join(raw_dir, 'data.csv.gz'))
smiles_list = data_df['smiles']
homolumogap_list = data_df['homolumogap']
print('Converting SMILES strings into graphs...')
self.graphs = []
self.labels = []
for i in tqdm(range(len(smiles_list))):
smiles = smiles_list[i]
homolumogap = homolumogap_list[i]
graph = self.smiles2graph(smiles)
assert(len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert(len(graph['node_feat']) == graph['num_nodes'])
self.graphs.append(graph)
self.labels.append(homolumogap)
self.labels = np.array(self.labels)
print(self.labels)
# double-check prediction target
split_dict = self.get_idx_split()
assert(all([not np.isnan(self.labels[i]) for i in split_dict['train']]))
assert(all([not np.isnan(self.labels[i]) for i in split_dict['valid']]))
assert(all([np.isnan(self.labels[i]) for i in split_dict['test']]))
print('Saving...')
torch.save({'graphs': self.graphs, 'labels': self.labels}, pre_processed_file_path, pickle_protocol=4)
def get_idx_split(self):
split_dict = torch.load(osp.join(self.folder, 'split_dict.pt'))
return split_dict
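# get_idx_split() above returns a dictionary mapping 'train', 'valid' and 'test'
# to index arrays (see the assertions in prepare_graph and the usage under
# __main__).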
def __getitem__(self, idx):
'''Get datapoint with index'''
if isinstance(idx, (int, np.integer)):
return self.graphs[idx], self.labels[idx]
raise IndexError(
'Only integer is valid index (got {}).'.format(type(idx).__name__))
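# In only_smiles mode the first element of the returned pair is the raw SMILES
# string; otherwise it is the dictionary produced by smiles2graph, with
# 'edge_index', 'edge_feat', 'node_feat' and 'num_nodes' entries.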
def __len__(self):
'''Length of the dataset
Returns
-------
int
Length of Dataset
'''
return len(self.graphs)
def __repr__(self): # pragma: no cover
return '{}({})'.format(self.__class__.__name__, len(self))
class PCQM4MEvaluator:
def __init__(self):
'''
Evaluator for the PCQM4M dataset
Metric is Mean Absolute Error
'''
pass
def eval(self, input_dict):
'''
y_true: numpy.ndarray or torch.Tensor of shape (num_graphs,)
y_pred: numpy.ndarray or torch.Tensor of shape (num_graphs,)
y_true and y_pred need to be of the same type (either numpy.ndarray or torch.Tensor)
'''
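# A small worked example (a sketch, not part of the evaluator):
#   evaluator.eval({'y_true': np.array([1.0, 2.0]),
#                   'y_pred': np.array([1.5, 1.5])})
#   returns {'mae': 0.5}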
assert('y_pred' in input_dict)
assert('y_true' in input_dict)
y_pred, y_true = input_dict['y_pred'], input_dict['y_true']
assert((isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray))
or
(isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor)))
assert(y_true.shape == y_pred.shape)
assert(len(y_true.shape) == 1)
if isinstance(y_true, torch.Tensor):
return {'mae': torch.mean(torch.abs(y_pred - y_true)).cpu().item()}
else:
return {'mae': float(np.mean(np.absolute(y_pred - y_true)))}
def save_test_submission(self, input_dict: Dict, dir_path: str):
'''
save test submission file at dir_path
'''
assert('y_pred' in input_dict)
y_pred = input_dict['y_pred']
filename = osp.join(dir_path, 'y_pred_pcqm4m')
assert(isinstance(filename, str))
assert(isinstance(y_pred, np.ndarray) or isinstance(y_pred, torch.Tensor))
if not osp.exists(dir_path):
os.makedirs(dir_path)
if isinstance(y_pred, torch.Tensor):
y_pred = y_pred.numpy()
y_pred = y_pred.astype(np.float32)
np.savez_compressed(filename, y_pred = y_pred)
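# np.savez_compressed appends the '.npz' suffix, so the submission ends up at
# dir_path/y_pred_pcqm4m.npz with y_pred stored as float32.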
if __name__ == '__main__':
dataset = PCQM4MDataset(only_smiles=True)
print(dataset)
print(dataset[1234])
split_dict = dataset.get_idx_split()
print(split_dict['train'].shape)
print(split_dict['valid'].shape)
print('-----------------')
print(split_dict['test'].shape)
evaluator = PCQM4MEvaluator()
y_true = torch.randn(100)
y_pred = torch.randn(100)
result = evaluator.eval({'y_true': y_true, 'y_pred': y_pred})
print(result)
y_pred = torch.randn(len(split_dict['test']))
evaluator.save_test_submission({'y_pred': y_pred}, 'results')
split_dict = dataset.get_idx_split()
print(dataset[split_dict['test'][0]])
print(dataset[split_dict['valid'][0]])
dataset = PCQM4MDataset()
print(dataset)
print(dataset[100])
print(dataset.get_idx_split())
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import sys
import traceback
import numpy as np
# TODO(scottzhu): Move to use all frozen_keras code when other deps are moved.
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.frozen_keras.engine import legacy_base_layer
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import core as legacy_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
class DynamicLayer(legacy_base_layer.LegacyBaseLayer):
def __init__(self, dynamic=False, **kwargs):
super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
samples = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
for idx, sample in enumerate(inputs):
samples = samples.write(idx, math_ops.square(sample))
return samples.stack()
def compute_output_shape(self, input_shape):
return input_shape
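# DynamicLayer (above) squares each sample by looping over the batch in Python
# and writing the results into a TensorArray -- control flow that only works
# when the layer runs eagerly (dynamic=True), which is what the tests below
# exercise.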
class InvalidLayer(legacy_base_layer.LegacyBaseLayer):
def call(self, inputs):
raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer(self):
if testing_utils.get_model_type() == 'sequential':
# TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
self.skipTest('Sequential model will check layer instance type and fail.')
model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
input_shape=(3,))
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer_error(self):
if testing_utils.get_model_type() == 'sequential':
# TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
self.skipTest('Sequential model will check layer instance type and fail.')
with self.assertRaisesRegexp(TypeError,
'attempting to use Python control flow'):
model = testing_utils.get_model_from_layers([DynamicLayer()],
input_shape=(3,))
model.compile(rmsprop.RMSprop(0.001), loss='mse')
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer_error_running_in_graph_mode(self):
if testing_utils.get_model_type() == 'sequential':
# TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
self.skipTest('Sequential model will check layer instance type and fail.')
with ops.get_default_graph().as_default():
model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
input_shape=(3,))
self.assertEqual(model.dynamic, True)
# But then you cannot run the model since you're in a graph scope.
with self.assertRaisesRegexp(
ValueError, 'You must enable eager execution'):
model.compile(rmsprop.RMSprop(0.001), loss='mse')
def test_manual_compute_output_shape(self):
class BuildCounter(keras.layers.Layer):
def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name
super(BuildCounter, self).__init__(*args, **kwargs)
self.build_counter = 0
def build(self, input_shape):
self.build_counter += 1
def call(self, inputs):
return inputs
with context.eager_mode():
layer = BuildCounter(dtype=dtypes.float64)
output_shape = layer.compute_output_shape((None, 10))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(output_shape.as_list(), [None, 10])
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(output_signature.dtype, dtypes.float64)
self.assertEqual(output_signature.shape.as_list(), [None, 10])
layer(np.ones((5, 10)))
self.assertEqual(layer.build_counter, 1)
def test_eager_switch_case_input(self):
with context.eager_mode():
task = keras.Input(shape=(), dtype=dtypes.int32)
control_flow_ops.switch_case(
task[0], [lambda: constant_op.constant(1.0) for _ in range(10)])
# TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
def DISABLED_test_dynamic_layer_with_deferred_sequential_model(self):
model = keras.Sequential(
[DynamicLayer(dynamic=True),
keras.layers.Dense(3)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_nested_dynamic_layers_in_eager_mode(self):
inputs = keras.Input((3,))
outputs = DynamicLayer(dynamic=True)(inputs) # pylint:disable=not-callable
inner_model = keras.Model(inputs, outputs)
self.assertEqual(inner_model.dynamic, True)
inputs = keras.Input((3,))
x = DynamicLayer(dynamic=True)(inputs) # pylint:disable=not-callable
outputs = inner_model(x)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_subclassed_model_no_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs, None)
def test_dynamic_subclassed_model_with_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
def compute_output_shape(self, input_shape):
return tuple(input_shape[:-1].as_list()) + (3,)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
x, y = np.random.random((2, 3)), np.random.random((2, 3))
model.train_on_batch(x, y)
outputs = model(x)
self.assertEqual(outputs.shape.as_list(), [2, 3])
def test_deepcopy(self):
with context.eager_mode():
bias_reg = lambda x: 1e-3 * math_ops.reduce_sum(x)
layer = keras.layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)
# Call the Layer on data to generate regularize losses.
layer(array_ops.ones((1, 10, 10, 3)))
self.assertLen(layer.losses, 1)
new_layer = copy.deepcopy(layer)
self.assertEqual(new_layer.bias_regularizer, bias_reg)
self.assertEqual(layer.get_config(), new_layer.get_config())
@test_util.run_in_graph_and_eager_modes
def test_invalid_forward_pass(self):
inputs = keras.Input((3,))
with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
_ = InvalidLayer()(inputs) # pylint:disable=not-callable
def test_no_legacy_model(self):
inputs = keras.Input((1,))
legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')
legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')
layer = legacy_dense_0(inputs)
layer = keras.layers.Dense(1)(layer)
layer = legacy_dense_1(layer)
expected_regex = (r'The following are legacy tf\.layers\.Layers:\n '
'{}\n {}'.format(legacy_dense_0, legacy_dense_1))
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Model(inputs=[inputs], outputs=[layer])
model = keras.models.Model(inputs=[inputs], outputs=[inputs])
with self.assertRaisesRegexp(TypeError, expected_regex):
model._insert_layers([legacy_dense_0, legacy_dense_1])
def test_no_legacy_sequential(self):
layers = [
keras.layers.Dense(1),
legacy_core.Dense(1, name='legacy_dense_0')
]
expected_regex = r'legacy tf\.layers\.Layers:\n {}'.format(layers[1])
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Sequential(layers)
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Sequential([keras.layers.Input(shape=(4,))] + layers)
model = keras.models.Sequential()
with self.assertRaisesRegexp(TypeError, expected_regex):
for l in layers:
model.add(l)
@keras_parameterized.run_with_all_model_types
@test_util.run_in_graph_and_eager_modes
def test_build_with_numpy_data(self):
model_layers = [
keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model(np.zeros((2, 4), dtype='float32'))
self.assertTrue(model.built)
@test_util.run_in_graph_and_eager_modes
def test_default_add_weight(self):
class TestLayer(keras.layers.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.default_weight = self.add_weight()
self.weight_without_name = self.add_weight(shape=(3, 4))
self.regularized_weight_without_name = self.add_weight(
shape=(3, 4), regularizer='l2')
layer = TestLayer()
self.assertEqual(layer.default_weight.shape.as_list(), [])
self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
self.assertEqual(layer.default_weight.dtype.name, 'float32')
self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
self.assertEqual(len(layer.losses), 1)
if not context.executing_eagerly():
# Cannot access tensor.name in eager execution.
self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_learning_phase_freezing_for_layers(self):
class LearningPhaseLayer(keras.layers.Layer):
def call(self, inputs):
return keras.backend.in_train_phase(
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
model._run_eagerly = testing_utils.should_run_eagerly()
return np.sum(model(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
# Test scope.
with keras.backend.learning_phase_scope(1):
self.assertEqual(get_learning_phase_value(), 1)
# The effects of the scope end after exiting it.
self.assertEqual(get_learning_phase_value(), 0)
# Test setting.
keras.backend.set_learning_phase(1)
self.assertEqual(get_learning_phase_value(), 1)
keras.backend.set_learning_phase(0)
self.assertEqual(get_learning_phase_value(), 0)
# Cannot be enabled with `run_eagerly=True`, see b/123904578
@test_util.run_all_in_graph_and_eager_modes
def test_layer_can_return_variable(self):
class ComputeSum(keras.layers.Layer):
def __init__(self):
super(ComputeSum, self).__init__()
self.total = variables.Variable(
initial_value=array_ops.zeros((1, 1)), trainable=False)
if not context.executing_eagerly():
keras.backend.get_session().run(self.total.initializer)
def call(self, inputs):
self.total.assign_add(inputs)
return self.total
inputs = keras.Input(shape=(1,))
model = keras.Model(inputs, ComputeSum()(inputs))
model.predict(np.ones((1, 1)))
def _get_layer_with_training_arg(self):
class TrainingLayer(keras.layers.Layer):
"""A layer with a `training` argument in a defuned `call`."""
@def_function.function
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
return tf_utils.smart_cond(training,
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
return TrainingLayer()
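# TrainingLayer outputs ones in training mode and zeros otherwise, so fitting it
# against zero targets with an MAE loss yields a training loss of 1.0 and an
# evaluation loss of 0.0 -- the assertions in the test below depend on this.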
@keras_parameterized.run_with_all_model_types
# b/124459427: can't test with `run_eagerly=True` for now.
@test_util.run_in_graph_and_eager_modes
def test_training_arg_in_defun(self):
layer = self._get_layer_with_training_arg()
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 1.)
loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(loss, 0.)
# Test that the argument injection performed in `call` is not active
# when the argument is passed explicitly.
layer = self._get_layer_with_training_arg()
inputs = keras.Input(shape=(1,))
# Pass `training` by name
outputs = layer(inputs, training=False)
model = keras.Model(inputs, outputs)
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 0.)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_raw_variable_assignment(self):
class RawVariableLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super(RawVariableLayer, self).__init__(**kwargs)
# Test variables in nested structure.
self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]
def call(self, inputs):
return inputs * self.var_list[0] * self.var_list[1]['a']
model = testing_utils.get_model_from_layers([RawVariableLayer()],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
@test_util.run_in_graph_and_eager_modes
def test_layer_names(self):
inputs = keras.layers.Input(shape=[2])
add1 = inputs + inputs
add2 = keras.layers.Add()([inputs, inputs])
add3 = inputs + inputs
add4 = keras.layers.Add()([inputs, inputs])
model = keras.models.Model(
inputs=[inputs], outputs=[add1, add2, add3, add4])
actual_names = [l.name for l in model.layers]
graph_names = [
'input_1', 'tf_op_layer_AddV2', 'add', 'tf_op_layer_AddV2_1', 'add_1'
]
eager_names = [
'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'
]
for actual, graph, eager in zip(actual_names, graph_names, eager_names):
self.assertIn(actual, {eager, graph})
def test_add_trainable_weight_on_frozen_layer(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.w = self.add_weight(shape=(), trainable=True)
def call(self, inputs):
return self.w * inputs
layer = TestLayer()
layer.trainable = False
layer.build(None)
layer.trainable = True
self.assertListEqual(layer.trainable_weights, [layer.w])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_passing_initial_weights_values(self):
kernel_value = np.random.random((10, 2))
layer_with_weights = keras.layers.Dense(
2, use_bias=False, weights=[kernel_value])
model = testing_utils.get_model_from_layers([layer_with_weights],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((3, 10))
out = model.predict(inputs)
self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
self.assertAllClose(out, np.dot(inputs, kernel_value))
@test_util.run_in_graph_and_eager_modes
def test_set_weights_and_get_weights(self):
layer = keras.layers.Dense(2)
layer.build((None, 10))
kernel = np.random.random((10, 2))
bias = np.random.random((2,))
layer.set_weights([kernel, bias])
weights = layer.get_weights()
self.assertEqual(len(weights), 2)
self.assertAllClose(weights[0], kernel)
self.assertAllClose(weights[1], bias)
with self.assertRaisesRegexp(
ValueError, 'but the layer was expecting 2 weights'):
layer.set_weights([1, 2, 3])
with self.assertRaisesRegexp(
ValueError, 'not compatible with provided weight shape'):
layer.set_weights([kernel.T, bias])
def test_get_config_error(self):
class MyLayer(keras.layers.Layer):
def __init__(self, my_kwarg='default', **kwargs):
super(MyLayer, self).__init__(**kwargs)
self.my_kwarg = my_kwarg
# `__init__` includes kwargs but `get_config` is not overridden, so
# an error should be thrown:
with self.assertRaisesRegexp(NotImplementedError, 'Layer MyLayer has'):
MyLayer('custom').get_config()
class MyLayerNew(keras.layers.Layer):
def __init__(self, my_kwarg='default', **kwargs):
super(MyLayerNew, self).__init__(**kwargs)
self.my_kwarg = my_kwarg
def get_config(self):
config = super(MyLayerNew, self).get_config()
config['my_kwarg'] = self.my_kwarg
return config
# Test to make sure that error is not raised if the method call is
# from an overridden `get_config`:
self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')
class MyLayerNew2(keras.layers.Layer):
def __init__(self, name='MyLayerName', dtype=None, **kwargs): # pylint:disable=redefined-outer-name
super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)
# Check that if the kwargs in `__init__` are base layer constructor
# arguments, no error is thrown:
self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = keras.layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = keras.layers.Dense(16)
with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
dense.count_params()
model = keras.Sequential(keras.layers.Dense(16))
with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
model.count_params()
dense = keras.layers.Dense(16, input_dim=4)
model = keras.Sequential(dense)
self.assertEqual(model.count_params(), 16 * 4 + 16)
def test_super_not_called(self):
class CustomLayerNotCallingSuper(keras.layers.Layer):
def __init__(self):
pass
layer = CustomLayerNotCallingSuper()
with self.assertRaisesRegexp(RuntimeError, 'You must call `super()'):
layer(np.random.random((10, 2)))
@test_util.run_in_graph_and_eager_modes
def test_first_arg_not_called_inputs(self):
x, y = array_ops.ones((10, 1)), array_ops.ones((10, 1))
class ArgLayer(keras.layers.Layer):
def call(self, x, y):
return x + y
layer = ArgLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
class KwargLayer(keras.layers.Layer):
def call(self, x=None, y=None):
return x + y
layer = KwargLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
with self.assertRaisesRegexp(ValueError, 'must always be passed'):
layer(y=y)
class TFFunctionLayer(keras.layers.Layer):
@def_function.function
def call(self, x, y=None):
if y is None:
return x
return x + y
layer = TFFunctionLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
def test_build_input_shape(self):
class CustomLayer(keras.layers.Layer):
def build(self, input_shape):
self.add_weight('w', shape=input_shape[1:])
super(CustomLayer, self).build(input_shape)
layer = CustomLayer()
self.assertFalse(layer.built)
layer.build([None, 1, 2, 3])
self.assertTrue(layer.built)
self.assertEqual([None, 1, 2, 3], layer._build_input_shape)
layer = CustomLayer()
layer(keras.Input((3,)))
self.assertTrue(layer.built)
self.assertEqual([None, 3], layer._build_input_shape.as_list())
class SymbolicSupportTest(test.TestCase):
def test_using_symbolic_tensors_with_tf_ops(self):
# Single-input.
x = keras.Input((3,))
y = math_ops.square(x)
self.assertEqual(y.graph, keras.backend.get_graph())
# Multi-inputs.
x1, x2 = keras.Input((3,)), keras.Input((3,))
y = array_ops.concat([x1, x2], axis=1)
self.assertEqual(y.graph, keras.backend.get_graph())
# Mixing Keras symbolic tensors and graph tensors from the same graph works.
with keras.backend.get_graph().as_default():
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
# Creating same op type (matmul) multiple times in the Keras graph works.
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
def test_mixing_eager_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = array_ops.ones((3, 3))
self.assertIsInstance(x2, ops.EagerTensor)
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
def test_mixing_numpy_arrays_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = np.ones((3, 3), dtype='float32')
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
x1 = keras.Input((3,))
x2 = array_ops.ones((3, 3))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
x1 = keras.Input((3,))
x2 = np.ones((3, 3), dtype='float32')
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_reraising_exception(self):
# When layer is not dynamic, we have some pattern matching during exception
# handling to detect when the user is trying to use python control flow.
# When an exception is thrown but the pattern doesn't match, we want to
# preserve the originating stack trace. An early implementation of this
# logic lost the stack trace. We test the correct behavior here.
class TypeErrorLayer(legacy_base_layer.LegacyBaseLayer):
def call(self, inputs):
def easily_identifiable_name():
raise TypeError('Non-matching TypeError message.')
easily_identifiable_name()
inputs = keras.Input((3,))
try:
_ = TypeErrorLayer()(inputs) # pylint:disable=not-callable
except TypeError as e:
if hasattr(e, 'ag_error_metadata'):
self.assertIn('easily_identifiable_name', str(e))
# See ErrorMetadataBase in autograph/pyct/errors.py
function_name = e.ag_error_metadata.translated_stack[-1].function_name
else:
tb = traceback.extract_tb(sys.exc_info()[2])
last_entry = tb[-1]
function_name = last_entry[2]
self.assertEqual(function_name, 'easily_identifiable_name')
@test_util.run_in_graph_and_eager_modes
def test_summaries_in_tf_function(self):
if not context.executing_eagerly():
return
class MyLayer(keras.layers.Layer):
def call(self, inputs):
summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))
return inputs
tmp_dir = self.get_temp_dir()
writer = summary_ops_v2.create_file_writer_v2(tmp_dir)
with writer.as_default(), summary_ops_v2.always_record_summaries():
my_layer = MyLayer()
x = array_ops.ones((10, 10))
def my_fn(x):
return my_layer(x)
_ = my_fn(x)
event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))
self.assertLen(event_file, 1)
event_file = event_file[0]
tags = set()
for e in summary_iterator.summary_iterator(event_file):
for val in e.summary.value:
tags.add(val.tag)
self.assertEqual(set(['my_layer/mean']), tags)
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
def test_nested_layer_variable_tracking(self):
# Test that variables from nested sublayers are
# being tracked by subclassed layers.
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.dense1 = keras.layers.Dense(1)
self.dense2 = keras.layers.BatchNormalization()
def build(self, input_shape):
self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
self.v2 = variables.Variable(
name='v2',
initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
trainable=False)
def call(self, inputs):
x = self.dense1(inputs) + self.dense2(inputs)
return x + self.v1 + self.v2
layer = MyLayer()
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 5)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.dense1.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 5)
layer.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 8)
self.assertEqual(
{id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},
{id(v) for _, v in layer._checkpoint_dependencies})
def test_nested_layer_updates_losses_tracking(self):
# Test that updates and losses from nested sublayers are
# being tracked by subclassed layers.
class UpdateAndLossLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
return inputs + 1
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def __init__(self):
super(MyLayer, self).__init__()
self.ul1 = UpdateAndLossLayer()
self.ul2 = UpdateAndLossLayer()
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
x = self.ul1(inputs)
return self.ul2(x)
layer = MyLayer()
if context.executing_eagerly():
inputs = array_ops.ones((3, 1))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
self.assertLen(layer.get_losses_for(None), 3)
else:
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.updates), 3)
self.assertLen(layer.get_losses_for(None), 3)
def test_attribute_reassignment(self):
l = keras.layers.Layer()
l.a = keras.layers.Layer()
l.a = []
l.a = variables.Variable(1.)
l.a = keras.layers.Layer()
last_assignment = keras.layers.Layer()
l.a = last_assignment
l.b = variables.Variable(1.)
del l.b
l.c = keras.layers.Layer()
del l.c
l.d = last_assignment
del l.d
self.assertEqual([last_assignment], l._layers)
self.assertEqual([], l.trainable_weights)
self.assertEqual([], l.non_trainable_weights)
self.assertEqual([], l.weights)
del l.a
self.assertEqual([], l._layers)
def test_assign_op_not_tracked_as_variable(self):
class LayerWithAssignAttr(keras.layers.Layer):
def build(self, input_shape):
self.v = variables.Variable(1.)
self.v_assign = self.v.assign_add(2.)
layer = LayerWithAssignAttr()
layer.build((10, 10))
self.assertEqual([layer.v], layer.variables)
def test_layer_class_not_tracked_as_sublayer(self):
# See https://github.com/tensorflow/tensorflow/issues/27431 for details.
class LayerWithClassAttribute(keras.layers.Layer):
def __init__(self):
super(LayerWithClassAttribute, self).__init__()
self.layer_fn = keras.layers.Dense
layer = LayerWithClassAttribute()
self.assertEmpty(layer.variables)
self.assertEmpty(layer.submodules)
def test_layer_call_fn_args(self):
class NonDefunLayer(keras.layers.Layer):
def call(self, inputs, a, mask, b=None, training=None):
return inputs
class DefunLayer(keras.layers.Layer):
@def_function.function
def call(self, x, mask, a, training=None, b=None):
return x
nondefun_layer = NonDefunLayer()
self.assertEqual(nondefun_layer._call_fn_args,
['inputs', 'a', 'mask', 'b', 'training'])
defun_layer = DefunLayer()
self.assertEqual(defun_layer._call_fn_args,
['x', 'mask', 'a', 'training', 'b'])
def test_sequential_model(self):
model = keras.Sequential([keras.layers.Dense(10, input_shape=(10,)),
keras.layers.Dense(5)])
self.assertLen(model.layers, 2)
self.assertLen(model.weights, 4)
# Make sure a subclass model also works when it is called 'Sequential'.
class Sequential(keras.Model):
def __init__(self):
super(Sequential, self).__init__()
self.dense_layers = [keras.layers.Dense(10),
keras.layers.Dense(5)]
def call(self, inputs):
x = inputs
for d in self.dense_layers:
x = d(x)
return x
s = Sequential()
self.assertLen(s.layers, 2)
self.assertLen(s.weights, 0)
s(keras.Input((10,)))
self.assertLen(s.weights, 4)
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
def test_name_scope_layer(self):
x = keras.backend.placeholder(shape=(10, 10))
layer = keras.layers.Dense(10, name='MyName')
layer(x)
self.assertEqual(layer.bias.name, 'MyName/bias:0')
self.assertEqual(layer.kernel.name, 'MyName/kernel:0')
def test_name_scope_sublayer(self):
class NameScopeTracker(keras.layers.Layer):
def call(self, inputs):
self.active_name_scope = ops.get_name_scope()
return inputs
x = keras.backend.placeholder(shape=(10, 10))
sublayer = NameScopeTracker(name='Sublayer')
layer = keras.layers.Dense(10, activation=sublayer, name='MyName2')
layer(x)
self.assertEqual(layer.bias.name, 'MyName2/bias:0')
self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')
def test_name_scope_tf_tensor(self):
x = ops.convert_to_tensor_v2(np.ones((10, 10)))
layer = keras.layers.Dense(
10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
layer(x)
self.assertEqual(layer.bias.name, 'MyName3/bias:0')
self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class AutographControlFlowTest(keras_parameterized.TestCase):
def test_disabling_in_context_is_matched(self):
test_obj = self
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
if constant_op.constant(False):
return inputs * 1.
return inputs * 0.
@def_function.function(autograph=False)
def test_fn():
return MyLayer()(constant_op.constant([[1., 2., 3.]]))
test_fn()
def test_if_training_pattern_output(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
if training:
return inputs * 1.
return inputs * 0.
inputs = keras.Input((3,))
outputs = MyLayer()(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 0.)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(test_loss, 1.)
def test_if_training_pattern_loss(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
if training:
loss = math_ops.reduce_sum(inputs)
else:
loss = 0.
self.add_loss(loss)
return inputs
inputs = keras.Input((3,))
outputs = MyLayer()(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 2 * 3)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(test_loss, 0)
def test_if_training_pattern_metric(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
if training:
metric = math_ops.reduce_sum(inputs)
else:
metric = 0.
self.add_metric(metric, name='my_metric', aggregation='mean')
return inputs
inputs = keras.Input((3,))
outputs = MyLayer()(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
for _ in range(3):
_, train_metric = model.train_on_batch(np.ones((2, 3)),
np.ones((2, 3)))
self.assertEqual(train_metric, 2 * 3)
_, test_metric = model.test_on_batch(np.ones((2, 3)),
np.ones((2, 3)))
self.assertEqual(test_metric, 0)
def test_if_training_pattern_update(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
shape=(), trainable=False, initializer='zeros')
def call(self, inputs, training=None):
if training:
increment = 1.
else:
increment = 0.
self.counter.assign_add(increment)
return inputs
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(keras.backend.get_value(layer.counter), 1.)
def test_conditional_updates_in_call(self):
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer,
self).__init__(dynamic=testing_utils.should_run_eagerly())
def build(self, input_shape):
self.counter = self.add_weight(
shape=(), trainable=False, initializer='zeros')
def call(self, inputs, training=None):
if training:
z = math_ops.reduce_sum(inputs)
self.add_update(lambda: self.counter.assign_add(z))
return inputs
def compute_output_shape(self, input_shape):
return input_shape
if testing_utils.should_run_eagerly():
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(keras.backend.get_value(layer.counter), 6.)
else:
# TODO(fchollet): support the same workflow in graph mode.
with self.assertRaisesRegexp(RuntimeError,
'`add_update` in a control flow branch'):
layer = MyLayer()
layer(keras.Input((3,)))
_ = layer.updates
def test_conditional_losses_in_call(self):
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer,
self).__init__(dynamic=testing_utils.should_run_eagerly())
def call(self, inputs, training=None):
if training:
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
def compute_output_shape(self, input_shape):
return input_shape
if testing_utils.should_run_eagerly():
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
else:
with self.assertRaisesRegexp(RuntimeError,
'`add_loss` in a control flow branch'):
layer = MyLayer()(keras.Input((3,)))
def test_conditional_callable_losses(self):
model = keras.Sequential([
keras.layers.Dense(
1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))
])
model._run_eagerly = testing_utils.should_run_eagerly()
def assert_graph(t):
if not context.executing_eagerly():
self.assertEqual(t.graph, ops.get_default_graph())
@def_function.function
def get_losses(t):
if t < 0:
return math_ops.reduce_sum(model.losses) * t
else:
return math_ops.reduce_sum(model.losses)
assert_graph(get_losses(constant_op.constant(2.)))
assert_graph(get_losses(constant_op.constant(0.5)))
def test_conditional_metrics_in_call(self):
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer,
self).__init__(dynamic=testing_utils.should_run_eagerly())
def call(self, inputs, training=None):
if training:
self.add_metric(math_ops.reduce_sum(inputs),
name='sum',
aggregation='mean')
return inputs
def compute_output_shape(self, input_shape):
return input_shape
if testing_utils.should_run_eagerly():
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(history.history['sum'][-1], 2 * 3)
else:
# TODO(fchollet): support the same workflow in graph mode.
with self.assertRaisesRegexp(RuntimeError,
'`add_metric` in a control flow branch'):
layer = MyLayer()(keras.Input((3,)))
def test_conditional_activity_regularizer_in_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(
name='test_model', dynamic=testing_utils.should_run_eagerly())
self.layer = keras.layers.Dense(2, activity_regularizer='l2')
def call(self, x, training=None):
if math_ops.greater(math_ops.reduce_sum(x), 0.0):
return self.layer(x)
else:
return self.layer(x)
model = TestModel()
model.compile(
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
if testing_utils.should_run_eagerly():
model.fit(x, y, epochs=2, batch_size=5)
else:
with self.assertRaisesRegexp(
RuntimeError, '`activity_regularizer` in a control flow branch'):
model.fit(x, y, epochs=2, batch_size=5)
def test_conditional_activity_regularizer_with_wrappers_in_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(
name='test_model', dynamic=testing_utils.should_run_eagerly())
self.layer = keras.layers.TimeDistributed(
keras.layers.Dense(2, activity_regularizer='l2'),
input_shape=(3, 4))
def call(self, x, training=None):
if math_ops.greater(math_ops.reduce_sum(x), 0.0):
return self.layer(x)
else:
return self.layer(x)
model = TestModel()
model.compile(
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 3, 4))
y = np.ones(shape=(10, 3, 2))
if testing_utils.should_run_eagerly():
model.fit(x, y, epochs=2, batch_size=5)
else:
with self.assertRaisesRegexp(
RuntimeError, '`activity_regularizer` in a control flow branch'):
model.fit(x, y, epochs=2, batch_size=5)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation and fitting for some common use cases.
"""
# pylint: disable=invalid-name
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling.models import (Polynomial1D, Polynomial2D, Legendre1D, Legendre2D,
Chebyshev2D, Chebyshev1D, Hermite1D, Hermite2D,
Linear1D, Planar2D)
from astropy.modeling.fitting import LinearLSQFitter, FittingWithOutlierRemoval
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter
from astropy.utils import NumpyRNGContext
from astropy.stats import sigma_clip
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
_RANDOM_SEED = 0x1337
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
# standard_broadcasting = False
n_inputs = 1
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
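# TParModel.evaluate above is just the affine map x*coeff + e (e.g. coeff=2,
# e=1 sends x=3 to 7); test_shapes below uses this model purely to check how
# parameter shapes are handled for model sets.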
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_1(model_class):
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
n_models = 2
model_axis = 1
c0 = [[2, 3]]
c1 = [[1, 2]]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
p1 = model_class(1, c0=c0, c1=c1, n_models=n_models, model_set_axis=model_axis)
with pytest.raises(ValueError):
p1(x)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = model_class(1, c0=1, c1=10)
t2 = model_class(1, c0=2, c1=20)
t3 = model_class(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_0(model_class):
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
@pytest.mark.parametrize('model_class', [Chebyshev2D, Legendre2D, Hermite2D])
def test_model2d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p2 = model_class(1, 1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
c1_0=[[[5, 6, 7]]], c1_1=[[[1,1,1]]], n_models=3, model_set_axis=2)
t1 = model_class(1, 1, c0_0=0, c0_1=3, c1_0=5, c1_1=1)
t2 = model_class(1, 1, c0_0=1, c0_1=4, c1_0=6, c1_1=1)
t3 = model_class(1, 1, c0_0=2, c0_1=5, c1_0=7, c1_1=1)
assert p2.c0_0.shape == (1, 1, 3)
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
t1 = Polynomial1D(1, c0=1, c1=3)
t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
xxt = xx.T
y = p1(xxt)
assert_allclose(y[:, 0], t1(xxt[:, 0]))
assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == (1, 1, 3)
assert p2.c1.shape == (1, 1, 3)
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == (1, 2)
assert p1.c1.shape == (1, 2)
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == (2,)
assert p1.c1.shape == (2,)
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (1, 2, 2, 2)
assert t.e.shape == (1, 2, 2)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2,)
def test_eval():
""" Tests evaluation of Linear1D and Planar2D with different model_set_axis."""
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
p = Polynomial1D(1, c0=[3, 4], c1=[1, 2], n_models=2)
assert_allclose(model(xx), p(xx))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(x)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
p = Polynomial1D(1, c0=[[3, 4]], c1=[[1, 2]], n_models=2, model_set_axis=1)
assert_allclose(model(xx.T), p(xx.T))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(xx)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
assert y.shape == (2, 4)
with pytest.raises(ValueError):
model(x)
# Test fitting
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_linearlsqfitter(model_class):
"""
Issue #7159
"""
p = model_class(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = model_class(1, c0=fit.c0[0][0], c1=fit.c1[0][0], domain=fit.domain)
m2 = model_class(1, c0=fit.c0[0][1], c1=fit.c1[0][1], domain=fit.domain)
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
p = model_class(1, n_models=2, model_set_axis=0)
fit = f(p, x, y.T)
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
y2, x2 = np.mgrid[: 5, : 5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
def test_fitting_shapes():
""" Test fitting model sets of Linear1D and Planar2D."""
fitter = LinearLSQFitter()
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx)
fit_model = fitter(model, x, y)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
fit_model = fitter(model, x, y.T)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
fit_model = fitter(model, x, x, y)
def test_compound_model_sets():
with pytest.raises(ValueError):
Polynomial1D(1, n_models=2, model_set_axis=1) | Polynomial1D(1, n_models=2, model_set_axis=0)
def test_linear_fit_model_set_errors():
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y = init_model(x, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y)
with pytest.raises(ValueError):
fitter(init_model, x, y[:, :5])
def test_linear_fit_model_set_common_weight():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
weights = np.ones(10)
weights[[0, -1]] = 0
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
# ValueError: On entry to DLASCL parameter number 4 had an illegal value
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in true_divide'):
fitted_model = fitter(init_model, x, y, weights=np.zeros(10))
def test_linear_fit_model_set_weights():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
weights = np.ones_like(y)
# Put a null weight for the min and max values
weights[[0, 1], weights.argmin(axis=1)] = 0
weights[[0, 1], weights.argmax(axis=1)] = 0
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
weights[0] = 0
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in true_divide'):
fitted_model = fitter(init_model, x, y, weights=weights)
# Now we mask the values where weight is 0
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in true_divide'):
fitted_model = fitter(init_model, x,
np.ma.array(y, mask=np.isclose(weights, 0)),
weights=weights)
# Parameters for the first model are all NaNs
assert np.all(np.isnan(fitted_model.param_sets[:, 0]))
assert np.all(np.isnan(fitted_model(x, model_set_axis=False)[0]))
# Second model is fitted correctly
assert_allclose(fitted_model(x, model_set_axis=False)[1], y_expected[1],
rtol=1e-1)
def test_linear_fit_2d_model_set_errors():
init_model = Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z = init_model(x, y, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y, z)
with pytest.raises(ValueError):
fitter(init_model, x, y, z[:, :5])
def test_linear_fit_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=np.ones((5, 5)))
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones(25)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
weights = [np.ones((5, 5)), np.ones((5, 5))]
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones((2, 25))
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
class Test1ModelSet:
"""
Check that fitting a single model works with a length-1 model set axis.
It's not clear that this was originally intended usage, but it can be
    convenient, e.g. when fitting a range of image rows that may be a single
row, and some existing scripts might rely on it working.
Currently this does not work with FittingWithOutlierRemoval.
"""
def setup_class(self):
self.x1 = np.arange(0, 10)
self.y1 = np.array([0.5 + 2.5*self.x1])
self.w1 = np.ones((10,))
self.y1[0, 8] = 100.
self.w1[8] = 0.
self.y2, self.x2 = np.mgrid[0:10, 0:10]
self.z2 = np.array([1 - 0.1*self.x2 + 0.2*self.y2])
self.w2 = np.ones((10, 10))
self.z2[0, 1, 2] = 100.
self.w2[1, 2] = 0.
def test_linear_1d_common_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1, weights=self.w1)
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1,
weights=self.w1[np.newaxis, ...])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights_axis_1(self):
model = Polynomial1D(1, model_set_axis=1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1.T,
weights=self.w1[..., np.newaxis])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_2d_common_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2, weights=self.w2)
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2,
weights=self.w2[np.newaxis, ...])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights_axis_2(self):
model = Polynomial2D(1, model_set_axis=2)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, np.rollaxis(self.z2, 0, 3),
weights=self.w2[..., np.newaxis])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
|
|
from __future__ import print_function
import os, sys, ctypes
import re
try:
import urllib2
import Tkinter
import tkFileDialog
except ImportError:
import urllib.request as urllib2
import tkinter as Tkinter
import tkinter.filedialog as tkFileDialog
default_folder = "~"
class init:
@staticmethod
def pkgmgr_installprefix():
if sys.version_info[0] == 2:
pkgmgr_installprefix = ['py', '-2']
elif sys.version_info[0] == 3:
pkgmgr_installprefix = ['py', '-3']
else:
            raise RuntimeError('This script only works with Python 2 or 3')
return pkgmgr_installprefix
@staticmethod
def is_admin():
if sys.platform == "win32":
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
elif sys.platform == "cygwin":
is_admin = None
else:
is_admin = os.getuid() == 0
if __name__ == '__main__':
if is_admin == True:
print("""You are running this script as root/administrator.
All installations shall be installed for all users.""")
elif is_admin == False:
print("""You are not running this script as root/administrator.
All installations shall be installed in your user path.""")
if is_admin == None:
cygwinchoice = input("You are using Cygwin. There is no way to determine if you are running as an administrator or not.\nAre you (Y/n)?\nWarning: If you accidentally answer yes, when not running as admin, all installations shall end in error.")
cygwinadminyes = ['yes', 'ye', 'y']
cygwinadminno = ['no', 'n']
cygwinadmin = cygwinadminyes + cygwinadminno
while cygwinchoice.lower() not in cygwinadmin:
cygwinchoice = input("This is a yes or no question.\nAnswer it as such, Y for yes or n for no.\n")
if cygwinchoice in cygwinadminyes:
is_admin = True
elif cygwinchoice in cygwinadminno:
is_admin = False
return is_admin
@staticmethod
def installer():
if sys.version_info < (2, 4):
easy_install = False
legacy_easy_install = False
if __name__ == '__main__':
print("Easy_Install is incompatable with this version.")
elif (sys.version_info == (2, 4) or sys.version_info == (2, 5)):
easy_install = True
legacy_easy_install = True
elif sys.version_info >= (2, 6)
easy_install = True
legacy_easy_install = False
if (sys.version_info < (2, 6) or sys.version_info == (3, 0) or sys.version_info == (3, 1)):
pip = False
if __name__ == '__main__':
print("PIP is incompatible with this version")
else:
pip = True
# if (pip == False and easy_install == False):
# installmethod = "1"
# installoptions = ["1"]
# elif (pip == True and easy_install == False):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"2\" without quotes if you would like to install via pip.\n")
# installoptions = ["1", "2"]
# elif (pip == False and easy_install == True):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"3\" without quotes if you would like to install via easy_install.\n")
# installoptions = ["1", "3"]
# elif (pip == True and easy_install == True):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"2\" without quotes if you would like to install via pip.\nInput \"3\" without quotes if you would like to install via easy_install.\n")
# installoptions = ["1", "2", "3"]
# while installmethod not in installoptions:
# print("You did not input correctly")
# if (pip == True and easy_install == False):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"2\" without quotes if you would like to install via pip.\n")
# elif (pip == False and easy_install == True):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"3\" without quotes if you would like to install via easy_install.\n")
# elif (pip == True and easy_install == True):
# installmethod = input("Input \"1\" without quotes if you would like to install via a local setup.py.\nInput \"2\" without quotes if you would like to install via pip.\nInput \"3\" without quotes if you would like to install via easy_install.\n")
# return installmethod, legacy_easy_install
return pip, easy_install, legacy_easy_install
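    # Small illustration (added for clarity; not called anywhere): installer()
    # above relies on tuple comparison, where sys.version_info's full 5-tuple
    # such as (3, 8, 0, 'final', 0) still orders correctly against short
    # tuples like (2, 6), and [:2] is used for exact major.minor checks.
    @staticmethod
    def _example_version_check(version_info=(3, 8, 0, 'final', 0)):
        return version_info >= (2, 6) and version_info[:2] not in ((3, 0), (3, 1))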
class install_package:
    @staticmethod
    def default(choosefile=None):
if choosefile == None:
window = Tkinter.Tk()
window.withdraw()
filetypelist = [('All Files', '.*'), ('All Python Scripts', '.py*'), ('Python Scripts (Terminal)', '.py'), ('Python Scripts (Compiled)', '.pyc'), ('Python Scripts (Optimized and Compiled)', '.pyo'), ('Python Scripts (No Terminal)', '.pyw'), ('Python DLLs', '.pyd'), ('Cython Scripts', '.pyx'), ('Cython Scripts (Header)', '.pxd')]
choosefile = '"' + tkFileDialog.askopenfilename(defaultextension='.py', filetypes=filetypelist, initialdir="/", initialfile="setup.py", multiple=False, title="Choose a Setup Script", parent=window) + '" install'
            while choosefile == '"" install':
choosefile = '"' + tkFileDialog.askopenfilename(defaultextension='.py', filetypes=filetypelist, initialdir="/", initialfile="setup.py", multiple=False, title="You must choose a Setup Script", parent=window) + '" install'
choosefile = [choosefile]
else:
choosefile = '"' + choosefile + '" install'
choosefile = [choosefile]
try:
fullcmd = pkgmgr_installprefix + choosefile + installsuffix
fullcmd = ' '.join(fullcmd)
except NameError:
fullcmd = ['python'] + choosefile + ['--user']
fullcmd = ' '.join(fullcmd)
successful = os.system(fullcmd)
if successful == 0:
print("According to the checks in place, the package has installed successfully.\nYou should still make sure of this however.")
if "--user" in fullcmd:
print("The script was installed in your user path, since you have either not specified admin priveleges, or this script was run not as an admin.")
else:
print("Installation unsuccessful.")
def main():
pkgmgr_installprefix = pkgmgr_installprefix()
installsuffix = installsuffix()
pkgmgr_installfileprefix = pkgmgr_installfileprefix()
installmethod, legacy_easy_install = installercheck()
CHOOSER = True
while CHOOSER == True:
if installmethod = "1":
install_package.default()
elif installmethod = "2":
install_package.pip()
elif installmethod = "3"
install_package.easy_install()
again = input("Would you like to install another package?(Y/n)\n")
yes = ['yes', 'ye', 'y']
no = ['no', 'n']
yesno = yes + no
while again not in yesno:
again = input("Invalid Answer.\nWould you like to install another package (Same installation method applies)?(Y/n)\n")
if again.lower() in yes:
CHOOSER = True
elif again.lower() in no:
CHOOSER = False
def install_packagemanager(pip_or_easy_install):
url = "https://bootstrap.pypa.io/" + pip_or_easy_install
file_name = url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
    # Py2 urllib2 responses expose getheaders(); Py3's response info() uses get()
    if hasattr(meta, 'getheaders'):
        file_size = int(meta.getheaders("Content-Length")[0])
    else:
        file_size = int(meta.get("Content-Length"))
    print("Downloading: %s Bytes: %s" % (file_name, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
        print(status, end='')
f.close()
install_name = "py -3 " + file_name
os.system(install_name)
print("Pip has been installed!")
def install_package_pip(packagename, version=None, minimum_version=None, maximum_version=None, update=None):
try:
import pip
except ImportError:
print("You don't have pip. Pip will be installed now.")
try:
            install_packagemanager('get-pip.py')  # assumed intent: install_pip() is not defined in this script
import pip
        except (IOError, OSError):  # PermissionError is an OSError subclass on Python 3
            print("You don't have the necessary permissions.\nPlease rerun this script as an administrator.")
    if (version != None and (minimum_version != None or maximum_version != None or update != None)):
print("You have chosen a specific version.\nThe minimum version, and/or maximum version, and/or update parameters you have chosen shall be stripped from the process.")
minimum_version = maximum_version = update = None
    elif (version == None and (minimum_version != None or maximum_version != None) and update != None):
        print("You have chosen to update to the latest version.\nThe minimum version, and/or maximum version parameters you have chosen shall be stripped from the process.")
minimum_version = maximum_version = None
if (version == None and minimum_version == None and maximum_version == None and update == None):
package = packagename
install_args = ['install', package]
install_info = "Pip shall now install the latest version of " + packagename
elif (version != None and minimum_version == None and maximum_version == None and update == None):
package = packagename + "==" + version
install_args = ['install', package]
install_info = "Pip shall now install " + packagename + " at version " + version
elif (version == None and minimum_version != None and maximum_version == None and update == None):
package = "'" + packagename + ">=" + minimum_version + "'"
install_args = ['install', package]
install_info = "Pip shall now install " + packagename + " at a minimum of version " + minimum_version
elif (version == None and minimum_version == None and maximum_version != None and update == None):
package = "'" + packagename + "<=" + maximum_version + "'"
install_args = ['install', package]
install_info = "Pip shall now install " + packagename + " at a maximum of version " + maximum_version
    elif (version == None and minimum_version != None and maximum_version != None and update == None):
        # Assumed intent: this branch originally duplicated the minimum-only
        # case above; it is rewritten here to cover a combined version range.
        package = "'" + packagename + ">=" + minimum_version + ",<=" + maximum_version + "'"
        install_args = ['install', package]
        install_info = "Pip shall now install " + packagename + " between versions " + minimum_version + " and " + maximum_version
    print(install_info)
    pip.main(install_args)
def install_package_easy_install(packagename, version=None, minimum_version=None, maximum_version=None, update=None):
try:
from setuptools.command import easy_install
except ImportError:
print("You don't have pip. Pip will be installed now.")
try:
|
|
import keras.backend as K
import logging
import numpy as np
from aes.layers import MeanOverTime
from keras.layers import Activation, Dense, Dropout, Input
from keras.layers.embeddings import Embedding
from keras.models import Model
logger = logging.getLogger(__name__)
class Models:
def __init__(self, prompt_id, initial_mean_value):
self.prompt_id = prompt_id
self.initial_mean_value = initial_mean_value
self.dropout = 0.5
self.recurrent_dropout = 0.1
if self.initial_mean_value.ndim == 0:
self.initial_mean_value = np.expand_dims(
self.initial_mean_value, axis=1)
self.num_outputs = len(self.initial_mean_value)
self.bias = (np.log(self.initial_mean_value) -
np.log(1 - self.initial_mean_value)).astype(K.floatx())
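    # Illustrative note (added; not used by the model builders below): the
    # bias computed above is the logit of initial_mean_value, so a sigmoid
    # output layer initialised with it starts out predicting that mean.
    @staticmethod
    def _example_logit_bias(initial_mean_value=0.75):
        bias = np.log(initial_mean_value) - np.log(1 - initial_mean_value)
        recovered = 1.0 / (1.0 + np.exp(-bias))  # sigmoid(bias) ~= 0.75
        return bias, recovered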
def create_gate_positional_model(self, char_cnn_kernel, cnn_kernel,
emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen,
vocab_char_size, char_maxlen):
from aes.layers import Conv1DMask, GatePositional, MaxPooling1DMask
logger.info('Building gate positional model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
char_cnn = Conv1DMask(
filters=emb_dim,
kernel_size=3,
padding='same')(char_emb)
char_input = MaxPooling1DMask(
pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_input = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
gate = GatePositional()([char_input, word_input])
final_input = Dense(50)(gate)
cnn = Conv1DMask(
filters=emb_dim,
kernel_size=3,
padding='same')(final_input)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=[input_char, input_word], outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_gate_matrix_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
emb_path, vocab_word, vocab_word_size,
word_maxlen, vocab_char_size, char_maxlen):
from aes.layers import Conv1DMask, GateMatrix, MaxPooling1DMask
logger.info('Building gate matrix model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
char_cnn = Conv1DMask(
filters=emb_dim,
kernel_size=char_cnn_kernel,
padding='same')(char_emb)
char_input = MaxPooling1DMask(
pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_input = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
gate = GateMatrix()([char_input, word_input])
final_input = Dense(50)(gate)
cnn = Conv1DMask(
filters=emb_dim,
kernel_size=cnn_kernel,
padding='same')(final_input)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=[input_char, input_word], outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_gate_vector_model(self, char_cnn_kernel, cnn_kernel, emb_dim,
emb_path, vocab_word, vocab_word_size,
word_maxlen, vocab_char_size, char_maxlen):
from aes.layers import Conv1DMask, GateVector, MaxPooling1DMask
logger.info('Building gate vector model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
char_cnn = Conv1DMask(
filters=emb_dim,
kernel_size=char_cnn_kernel,
padding='same')(char_emb)
char_input = MaxPooling1DMask(
pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_input = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
gate = GateVector()([char_input, word_input])
final_input = Dense(50)(gate)
cnn = Conv1DMask(
filters=emb_dim,
kernel_size=cnn_kernel,
padding='same')(final_input)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=[input_char, input_word], outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_concat_model(self, emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen, vocab_char_size,
char_maxlen):
from aes.layers import Conv1DMask, MaxPooling1DMask
from keras.layers import concatenate
logger.info('Building concatenation model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
char_cnn = Conv1DMask(
filters=emb_dim, kernel_size=3, padding='same')(char_emb)
char_input = MaxPooling1DMask(
pool_size=char_maxlen / word_maxlen, padding='same')(char_cnn)
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_input = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
merged = concatenate([char_input, word_input], axis=1)
merged_dropped = Dropout(0.5)(merged)
final_input = Dense(50)(merged_dropped)
cnn = Conv1DMask(
filters=emb_dim, kernel_size=3, padding='same')(final_input)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=[input_char, input_word], outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_char_cnn_model(self, emb_dim, word_maxlen, vocab_char_size,
char_maxlen):
from aes.layers import Conv1DMask
logger.info('Building character CNN model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
cnn = Conv1DMask(
filters=emb_dim, kernel_size=3, padding='same')(char_emb)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_char, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
logger.info(' Done')
return model
def create_char_lstm_model(self, emb_dim, word_maxlen, vocab_char_size,
char_maxlen):
from keras.layers import LSTM
logger.info('Building character LSTM model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
lstm = LSTM(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(char_emb)
dropped = Dropout(0.5)(lstm)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_char, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
logger.info(' Done')
return model
def create_char_gru_model(self, emb_dim, word_maxlen, vocab_char_size,
char_maxlen):
from keras.layers import GRU
logger.info('Building character GRU model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
gru = GRU(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(char_emb)
dropped = Dropout(0.5)(gru)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_char, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
logger.info(' Done')
return model
def create_char_rnn_model(self, emb_dim, word_maxlen, vocab_char_size,
char_maxlen):
from keras.layers import SimpleRNN
logger.info('Building character RNN model')
input_char = Input(shape=(char_maxlen, ), name='input_char')
char_emb = Embedding(
vocab_char_size, emb_dim, mask_zero=True)(input_char)
rnn = SimpleRNN(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(char_emb)
dropped = Dropout(0.5)(rnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_char, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
logger.info(' Done')
return model
def create_word_cnn_model(self, emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen):
from aes.layers import Conv1DMask
logger.info('Building word CNN model')
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_emb = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
cnn = Conv1DMask(
filters=emb_dim, kernel_size=3, padding='same')(word_emb)
dropped = Dropout(0.5)(cnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_word, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_word_lstm_model(self, emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen):
from keras.layers import LSTM
logger.info('Building word LSTM model')
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_emb = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
lstm = LSTM(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(word_emb)
dropped = Dropout(0.5)(lstm)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_word, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_word_gru_model(self, emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen):
from keras.layers import GRU
logger.info('Building word GRU model')
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_emb = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
gru = GRU(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(word_emb)
dropped = Dropout(0.5)(gru)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_word, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
def create_word_rnn_model(self, emb_dim, emb_path, vocab_word,
vocab_word_size, word_maxlen):
from keras.layers import SimpleRNN
logger.info('Building word SimpleRNN model')
input_word = Input(shape=(word_maxlen, ), name='input_word')
word_emb = Embedding(
vocab_word_size, emb_dim, mask_zero=True,
name='word_emb')(input_word)
rnn = SimpleRNN(
300,
return_sequences=True,
dropout=self.dropout,
recurrent_dropout=self.recurrent_dropout)(word_emb)
dropped = Dropout(0.5)(rnn)
mot = MeanOverTime(mask_zero=True)(dropped)
densed = Dense(self.num_outputs, name='dense')(mot)
output = Activation('sigmoid')(densed)
model = Model(inputs=input_word, outputs=output)
model.get_layer('dense').bias.set_value(self.bias)
if emb_path:
from emb_reader import EmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
model.get_layer('word_emb').embeddings.set_value(
emb_reader.get_emb_matrix_given_vocab(
vocab_word,
model.get_layer('word_emb').embeddings.get_value()))
logger.info(' Done')
return model
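# A minimal construction sketch (hypothetical values; the real prompt id,
# vocabulary sizes, sequence lengths and embedding path come from the
# surrounding training pipeline, which is not part of this module):
def _example_build_word_cnn():
    builder = Models(prompt_id=1, initial_mean_value=np.array([0.5]))
    return builder.create_word_cnn_model(
        emb_dim=50, emb_path=None, vocab_word=None,
        vocab_word_size=4000, word_maxlen=500)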
|
|
# This file is part of gorm, an object relational mapper for versioned graphs.
# Copyright (C) 2014 Zachary Spector.
try:
    from collections.abc import MutableMapping, MutableSequence
except ImportError:  # Python 2
    from collections import MutableMapping, MutableSequence
from json import dumps, loads
from copy import deepcopy
def enc_tuple(o):
"""Return the object, converted to a form that will preserve the
distinction between lists and tuples when written to JSON
"""
if isinstance(o, tuple):
return ['tuple'] + [enc_tuple(p) for p in o]
elif isinstance(o, list):
return ['list'] + [enc_tuple(v) for v in o]
elif isinstance(o, dict):
r = {}
for (k, v) in o.items():
r[enc_tuple(k)] = enc_tuple(v)
return r
else:
return o
def dec_tuple(o):
"""Take an object previously encoded with ``enc_tuple`` and return it
with the encoded tuples turned back into actual tuples
"""
if isinstance(o, dict):
r = {}
for (k, v) in o.items():
r[dec_tuple(k)] = dec_tuple(v)
return r
elif isinstance(o, list):
if o[0] == 'list':
return list(dec_tuple(p) for p in o[1:])
else:
assert(o[0] == 'tuple')
return tuple(dec_tuple(p) for p in o[1:])
else:
return o
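# A minimal round-trip sketch (illustrative only; the names below are not part
# of gorm's API): enc_tuple tags tuples and lists so that dec_tuple can
# rebuild them after a pass through JSON, which would otherwise turn both
# into plain lists.
def _example_tuple_roundtrip():
    original = {'point': (1, 2), 'path': [(3, 4), (5, 6)]}
    encoded = enc_tuple(original)              # (1, 2) becomes ['tuple', 1, 2]
    restored = dec_tuple(loads(dumps(encoded)))
    assert restored == original and isinstance(restored['point'], tuple)
    return restored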
json_dump_hints = {}
def json_dump(obj, hint=True):
"""JSON dumper that distinguishes lists from tuples"""
if not hint:
return dumps(enc_tuple(obj))
k = str(obj)
if k not in json_dump_hints:
json_dump_hints[k] = dumps(enc_tuple(obj))
return json_dump_hints[k]
json_load_hints = {}
def json_load(s, hint=True):
"""JSON loader that distinguishes lists from tuples"""
if s is None:
return None
if s == '["list"]':
return []
if s == '["tuple"]':
return tuple()
if not hint:
return dec_tuple(loads(s))
if s not in json_load_hints:
json_load_hints[s] = dec_tuple(loads(s))
return json_load_hints[s]
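# Usage sketch (illustrative only): json_dump and json_load memoise their
# results, so repeated dumps of the same object and repeated loads of the
# same string hit json_dump_hints / json_load_hints instead of re-encoding.
def _example_json_hint_cache():
    s = json_dump((1, 2, 3))           # cached under str((1, 2, 3))
    assert json_load(s) == (1, 2, 3)   # cached under the JSON string s
    return s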
class JSONWrapper(MutableMapping):
__slots__ = ['outer', 'outkey']
def __init__(self, outer, outkey):
self.outer = outer
self.outkey = outkey
def __contains__(self, wot):
return self._get().__contains__(wot)
def _get(self, k=None):
if k is None:
return self.outer._get(self.outkey)
return self._get()[k]
def _set(self, v):
self.outer[self.outkey] = v
def __iter__(self):
return iter(self._get())
def __len__(self):
return len(self._get())
def __getitem__(self, k):
r = self._get()[k]
if isinstance(r, list):
return JSONListWrapper(self, k)
elif isinstance(r, dict):
return JSONWrapper(self, k)
else:
return r
def __setitem__(self, k, v):
me = self._get()
me[k] = v
self._set(me)
def __delitem__(self, k):
me = self._get()
del me[k]
self._set(me)
def __str__(self):
return self._get().__str__()
def __repr__(self):
return self._get().__repr__()
def __eq__(self, other):
return self._get() == other
def __list__(self):
return list(self._get())
def __dict__(self):
return dict(self._get())
def copy(self):
return self._get().copy()
class JSONListWrapper(JSONWrapper):
def append(self, v):
me = self._get()
me.append(v)
self._set(me)
def insert(self, i, v):
me = self._get()
me.insert(i, v)
self._set(me)
class JSONReWrapper(MutableMapping):
"""Like JSONWrapper with a cache."""
__slots__ = ['_outer', '_key', '_inner', '_v']
def __init__(self, outer, key, initval):
self._outer = outer
self._key = key
self._inner = JSONWrapper(outer, key)
self._v = initval
if not isinstance(self._v, dict):
raise TypeError(
"JSONReWrapper only wraps dicts"
)
def _get(self, k=None):
if k is None:
return self._v
return self._v[k]
def __iter__(self):
return iter(self._v)
def __len__(self):
return len(self._v)
def __eq__(self, other):
return self._v == other
def __getitem__(self, k):
r = self._v[k]
if isinstance(r, dict):
return JSONReWrapper(self, k, r)
if isinstance(r, list):
return JSONListReWrapper(self, k, r)
return r
def __setitem__(self, k, v):
self._v[k] = v
self._outer[self._key] = self._v
def __delitem__(self, k):
del self._inner[k]
del self._v[k]
def __repr__(self):
return repr(self._v)
class JSONListReWrapper(MutableSequence):
"""Like JSONListWrapper with a cache."""
__slots__ = ['_inner', '_v']
def __init__(self, outer, key, initval=None):
self._inner = JSONListWrapper(outer, key)
self._v = initval
if not isinstance(self._v, list):
raise TypeError(
"JSONListReWrapper only wraps lists"
)
def __iter__(self):
return iter(self._v)
def __len__(self):
return len(self._v)
def __eq__(self, other):
return self._v == other
def __getitem__(self, i):
r = self._v[i]
if isinstance(r, dict):
return JSONReWrapper(self, i, r)
if isinstance(r, list):
return JSONListReWrapper(self, i, r)
return r
def __setitem__(self, i, v):
self._inner[i] = v
self._v[i] = v
    def __delitem__(self, i):
del self._inner[i]
del self._v[i]
def insert(self, i, v):
self._inner.insert(i, v)
self._v.insert(i, v)
def __repr__(self):
return repr(self._v)
def json_deepcopy(obj):
r = {}
for (k, v) in obj.items():
if (
isinstance(v, JSONReWrapper) or
isinstance(v, JSONListReWrapper)
):
r[k] = deepcopy(v._v)
else:
r[k] = deepcopy(v)
return r
|
|
from copy import deepcopy
import unittest
from tests import common
from romaine.parser.exceptions import UnclosedPythonishString
class TestMultilineParser(unittest.TestCase):
"""
Test multi-line gherkin parser functionality of romaine's core.
"""
def setUp(self):
"""
Prepare the environment for testing.
"""
# We're doing a lot of long tests- don't limit the diff output length
self.maxDiff = None
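    # Illustrative only (the real inputs live in the fixture files referenced
    # by each test, which are not reproduced here): a "pythonish string" is a
    # gherkin doc-string delimited by triple double-quotes, and a table is a
    # block of pipe-delimited rows.
    EXAMPLE_PYTHONISH_STRING = [
        '"""',
        'some multi-line text',
        '"""',
    ]
    EXAMPLE_TABLE = [
        '| heading 1 | heading 2 |',
        '| value 1   | value 2   |',
    ]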
def test_getting_pythonish_string_no_input_modification(self):
"""
Test that getting a pythonish string doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_input
input_data = common.get_parser_input(
'multiline/pythonish_string_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_pythonish_string(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_getting_pythonish_string(self):
"""
Check we can get a pythonish string.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_input
input_data = common.get_parser_input(
'multiline/pythonish_string_input',
)
result = parser.multiline.get_pythonish_string(input_data)
# Then I see the results from multiline/pythonish_string_expected
self.assertEqual(
result,
common.get_parser_output('multiline/pythonish_string_expected'),
)
def test_getting_pythonish_string_with_delimiter_noise(self):
"""
Check we can get a pythonish string with noise by the delimiters
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_noisy_delimiters_input
input_data = common.get_parser_input(
'multiline/pythonish_string_noisy_delimiters_input',
)
result = parser.multiline.get_pythonish_string(input_data)
# Then I see the results from
# multiline/pythonish_string_noisy_delimiters_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/pythonish_string_noisy_delimiters_expected',
),
)
def test_getting_pythonish_string_with_trailing_data(self):
"""
        Check we can get a pythonish string with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_trailing_data_input
input_data = common.get_parser_input(
'multiline/pythonish_string_trailing_data_input',
)
result = parser.multiline.get_pythonish_string(input_data)
# Then I see the results from
# multiline/pythonish_string_trailing_data_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/pythonish_string_trailing_data_expected',
),
)
def test_fail_getting_single_quoted_pythonish_string(self):
"""
Check we do not get a pythonish string with '''.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_wrong_delimiter_input
input_data = common.get_parser_input(
'multiline/pythonish_string_wrong_delimiter_input',
)
result = parser.multiline.get_pythonish_string(input_data)
# Then I see the results from
# multiline/pythonish_string_wrong_delimiter_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/pythonish_string_wrong_delimiter_expected',
),
)
def test_fail_getting_not_following_pythonish_string(self):
"""
Check we do not get a pythonish string with a leading line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_leading_noise_input
input_data = common.get_parser_input(
'multiline/pythonish_string_leading_noise_input',
)
result = parser.multiline.get_pythonish_string(input_data)
# Then I see the results from
# multiline/pythonish_string_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/pythonish_string_leading_noise_expected',
),
)
def test_fail_getting_unclosed_pythonish_string(self):
"""
Check we fail noisily on an unclosed pythonish string.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
# multiline/pythonish_string_unclosed_input
input_data = common.get_parser_input(
'multiline/pythonish_string_unclosed_input',
)
# Then an UnclosedPythonishString exception is raised
with self.assertRaises(UnclosedPythonishString):
parser.multiline.get_pythonish_string(input_data)
def test_fail_getting_last_line_opening_pythonish_string(self):
"""
Check we fail noisily when last line is opening python string.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_pythonish_string with input from
        # multiline/pythonish_string_just_opening_input
input_data = common.get_parser_input(
'multiline/pythonish_string_just_opening_input',
)
# Then an UnclosedPythonishString exception is raised
with self.assertRaises(UnclosedPythonishString):
parser.multiline.get_pythonish_string(input_data)
def test_getting_table_no_input_modification(self):
"""
Test that getting a table doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_input
input_data = common.get_parser_input(
'multiline/table_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_table(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_get_one_row_one_cell_table(self):
"""
Test that we can get a table consisting of one row with one cell.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_input
input_data = common.get_parser_input(
'multiline/table_input',
)
result = parser.multiline.get_table(input_data)
# Then I see the results from
# multiline/table_expected
self.assertEqual(
result,
common.get_parser_output('multiline/table_expected'),
)
def test_get_one_row_multi_cell_table(self):
"""
Test that we can get a table consisting of one row with two cells.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_multi_column_input
input_data = common.get_parser_input(
'multiline/table_multi_column_input',
)
result = parser.multiline.get_table(input_data)
# Then I see the results from
# multiline/table_multi_column_expected
self.assertEqual(
result,
common.get_parser_output('multiline/table_multi_column_expected'),
)
def test_get_multi_row_multi_cell_table(self):
"""
Test that we can get a table consisting of multiple rows.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_multi_column_multi_row_input
input_data = common.get_parser_input(
'multiline/table_multi_column_multi_row_input',
)
result = parser.multiline.get_table(input_data)
# Then I see the results from
# multiline/table_multi_column_multi_row_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/table_multi_column_multi_row_expected',
),
)
def test_get_table_with_trailing_data(self):
"""
        Test that we can get a table with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_trailing_data_input
input_data = common.get_parser_input(
'multiline/table_trailing_data_input',
)
result = parser.multiline.get_table(input_data)
# Then I see the results from
# multiline/table_trailing_data_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/table_trailing_data_expected',
),
)
def test_fail_to_get_table(self):
"""
Confirm we cannot get a table when we do not start with a row.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_table with input from
# multiline/table_leading_noise_input
input_data = common.get_parser_input(
'multiline/table_leading_noise_input',
)
result = parser.multiline.get_table(input_data)
# Then I see the results from
# multiline/table_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/table_leading_noise_expected',
),
)
def test_getting_multiline_arg_no_input_modification(self):
"""
Test that getting a multiline arg doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_multiline_arg with input from
# multiline/multiline_table_input
input_data = common.get_parser_input(
'multiline/multiline_table_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_multiline_arg(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_get_multiline_arg_table(self):
"""
Check we can get a table as a multiline arg.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_multiline_arg with input from
# multiline/multiline_table_input
input_data = common.get_parser_input(
'multiline/multiline_table_input',
)
result = parser.multiline.get_multiline_arg(input_data)
# Then I see the results from
# multiline/multiline_table_expected
self.assertEqual(
result,
common.get_parser_output('multiline/multiline_table_expected'),
)
def test_get_multiline_arg_pythonish_string(self):
"""
Check we can get a pythonish string as a multiline arg.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_multiline_arg with input from
# multiline/multiline_string_input
input_data = common.get_parser_input(
'multiline/multiline_string_input',
)
result = parser.multiline.get_multiline_arg(input_data)
# Then I see the results from
# multiline/multiline_string_expected
self.assertEqual(
result,
common.get_parser_output('multiline/multiline_string_expected'),
)
def test_get_no_multiline_arg(self):
"""
Confirm we get nothing when no multiline arg is following.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_multiline_arg with input from
# multiline/multiline_leading_noise_input
input_data = common.get_parser_input(
'multiline/multiline_leading_noise_input',
)
result = parser.multiline.get_multiline_arg(input_data)
# Then I see the results from
# multiline/multiline_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/multiline_leading_noise_expected',
),
)
def test_getting_leading_comment_no_input_modification(self):
"""
Test that getting leading comments doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
        # multiline/comment_space_basic_input
input_data = common.get_parser_input(
'multiline/comment_space_basic_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_comments_with_space(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_get_leading_comment_with_blank_line(self):
"""
Check we can get a leading comment with following blank line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_basic_input
input_data = common.get_parser_input(
'multiline/comment_space_basic_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_basic_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_basic_expected',
),
)
def test_get_leading_comment_without_space(self):
"""
Check we can get a leading comment with no following whitespace.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_just_comment_input
input_data = common.get_parser_input(
'multiline/comment_space_just_comment_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_just_comment_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_just_comment_expected',
),
)
def test_get_leading_comment_with_trailing_data(self):
"""
Check we can get a leading comment with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_trailing_data_input
input_data = common.get_parser_input(
'multiline/comment_space_trailing_data_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_trailing_data_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_trailing_data_expected',
),
)
def test_get_leading_comment_with_whitespace(self):
"""
Check we can get a leading comment with following whitespace.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_trailing_whitespace_input
input_data = common.get_parser_input(
'multiline/comment_space_trailing_whitespace_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_trailing_whitespace_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_trailing_whitespace_expected',
),
)
def test_get_leading_comment_with_whitespace_and_trailing_data(self):
"""
Check we can get a leading comment with following space and data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_whitespace_trailing_data_input
input_data = common.get_parser_input(
'multiline/comment_space_whitespace_trailing_data_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_whitespace_trailing_data_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_whitespace_trailing_data_expected',
),
)
def test_get_leading_comments(self):
"""
Check we can get multiple leading comments.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_multiple_comments_input
input_data = common.get_parser_input(
'multiline/comment_space_multiple_comments_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_multiple_comments_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_multiple_comments_expected',
),
)
def test_get_comments_with_space_and_trailing_data(self):
"""
Check we can get leading comments and space with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_comments_space_noise_input
input_data = common.get_parser_input(
'multiline/comment_space_comments_space_noise_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_comments_space_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_comments_space_noise_expected',
),
)
def test_get_leading_comments_with_multiple_trailing_lines(self):
"""
Check we can get leading comments with multiple trailing lines.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_comments_more_noise_input
input_data = common.get_parser_input(
'multiline/comment_space_comments_more_noise_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_comments_more_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_comments_more_noise_expected',
),
)
def test_get_leading_comments_space_separated_with_trailing_data(self):
"""
Check we can get leading comments and spaces, and trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_intervening_space_input
input_data = common.get_parser_input(
'multiline/comment_space_intervening_space_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_intervening_space_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_intervening_space_expected',
),
)
def test_get_no_leading_comments_with_leading_space(self):
"""
Confirm we cannot get a leading comment preceded by a blank line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_leading_space_input
input_data = common.get_parser_input(
'multiline/comment_space_leading_space_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_leading_space_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_leading_space_expected',
),
)
def test_get_no_leading_comments_with_leading_noise(self):
"""
        Confirm we cannot get a leading comment preceded by data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_leading_noise_input
input_data = common.get_parser_input(
'multiline/comment_space_leading_noise_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_leading_noise_expected',
),
)
def test_get_no_leading_comments_from_no_lines(self):
"""
Confirm we cannot get a leading comment from nothing.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
# When I call get_comments_with_space with input from
# multiline/comment_space_empty_input
input_data = common.get_parser_input(
'multiline/comment_space_empty_input',
)
result = parser.multiline.get_comments_with_space(input_data)
# Then I see the results from
# multiline/comment_space_empty_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/comment_space_empty_expected',
),
)
def test_getting_space_no_input_modification(self):
"""
Test that getting space doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
        # multiline/space_no_space_input
input_data = common.get_parser_input(
'multiline/space_no_space_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_space(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_get_space_non_whitespace(self):
"""
Confirm that we get no whitespace from a non space line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_no_space_input
input_data = common.get_parser_input(
'multiline/space_no_space_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_no_space_expected
self.assertEqual(
result,
common.get_parser_output('multiline/space_no_space_expected'),
)
def test_get_space_non_whitespace_then_space(self):
"""
Confirm that we get no whitespace from a starting non space line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_leading_noise_input
input_data = common.get_parser_input(
'multiline/space_leading_noise_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/space_leading_noise_expected',
),
)
def test_get_space_blank_line(self):
"""
Confirm that we get a blank line as space.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_basic_input
input_data = common.get_parser_input(
'multiline/space_basic_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_basic_expected
self.assertEqual(
result,
common.get_parser_output('multiline/space_basic_expected'),
)
def test_get_space_whitespace_line(self):
"""
Confirm that we get a whitespace line as space.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_whitespace_input
input_data = common.get_parser_input(
'multiline/space_whitespace_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_whitespace_expected
self.assertEqual(
result,
common.get_parser_output('multiline/space_whitespace_expected'),
)
def test_get_space_whitespace_and_blank_line(self):
"""
Confirm that we get a whitespace and blank line as space.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_whitespace_and_blank_input
input_data = common.get_parser_input(
'multiline/space_whitespace_and_blank_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_whitespace_and_blank_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/space_whitespace_and_blank_expected',
),
)
def test_get_space_followed_by_non_space(self):
"""
Confirm that we get space with remaining non space.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_whitespace_trailing_noise_input
input_data = common.get_parser_input(
'multiline/space_whitespace_trailing_noise_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_whitespace_trailing_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/space_whitespace_trailing_noise_expected',
),
)
def test_get_space_nothing(self):
"""
Confirm that we get no space from nothing.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_space with input from
# multiline/space_empty_input
input_data = common.get_parser_input(
'multiline/space_empty_input',
)
result = parser.multiline.get_space(input_data)
# Then I see the results from
# multiline/space_empty_expected
self.assertEqual(
result,
common.get_parser_output('multiline/space_empty_expected'),
)
def test_getting_leading_tag_no_input_modification(self):
"""
Test that getting leading tags doesn't change the input var.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_trailing_blank_line_input
input_data = common.get_parser_input(
'multiline/tag_trailing_blank_line_input',
)
expected_data = deepcopy(input_data)
parser.multiline.get_tags(input_data)
# Then my input variable is not modified
self.assertEqual(
input_data,
expected_data,
)
def test_get_leading_tag_with_blank_line(self):
"""
Check we can get a leading tag with following blank line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_trailing_blank_line_input
input_data = common.get_parser_input(
'multiline/tag_trailing_blank_line_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_trailing_blank_line_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/tag_trailing_blank_line_expected',
),
)
def test_get_leading_tag_without_space(self):
"""
Check we can get a leading tag with no following whitespace.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_basic_input
input_data = common.get_parser_input(
'multiline/tag_basic_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_basic_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_basic_expected'),
)
def test_get_leading_tag_with_trailing_data(self):
"""
Check we can get a leading tag with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_trailing_data_input
input_data = common.get_parser_input(
'multiline/tag_trailing_data_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_trailing_data_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_trailing_data_expected'),
)
def test_get_leading_tag_with_whitespace(self):
"""
Check we can get a leading tag with following whitespace.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_trailing_space_input
input_data = common.get_parser_input(
'multiline/tag_trailing_space_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_trailing_space_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_trailing_space_expected'),
)
def test_get_leading_tag_with_whitespace_and_trailing_data(self):
"""
Check we can get a leading tag with following space and data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_trailing_space_and_noise_input
input_data = common.get_parser_input(
'multiline/tag_trailing_space_and_noise_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_trailing_space_and_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/tag_trailing_space_and_noise_expected',
),
)
def test_get_leading_tags(self):
"""
Check we can get multiple leading tags.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_multiple_tags_input
input_data = common.get_parser_input(
'multiline/tag_multiple_tags_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_multiple_tags_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_multiple_tags_expected'),
)
def test_get_tags_and_trailing_data(self):
"""
Check we can get leading tags and space with trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_multiple_tags_trailing_noise_input
input_data = common.get_parser_input(
'multiline/tag_multiple_tags_trailing_noise_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_multiple_tags_trailing_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/tag_multiple_tags_trailing_noise_expected',
),
)
def test_get_leading_tags_with_multiple_trailing_lines(self):
"""
Check we can get leading tags with multiple trailing lines.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_longer_trailing_noise_input
input_data = common.get_parser_input(
'multiline/tag_longer_trailing_noise_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_longer_trailing_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/tag_longer_trailing_noise_expected',
),
)
def test_get_leading_tags_space_separated_with_trailing_data(self):
"""
Check we can get leading tags and spaces, and trailing data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_tags_with_space_trailing_noise_input
input_data = common.get_parser_input(
'multiline/tag_tags_with_space_trailing_noise_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_tags_with_space_trailing_noise_expected
self.assertEqual(
result,
common.get_parser_output(
'multiline/tag_tags_with_space_trailing_noise_expected',
),
)
def test_get_no_leading_tags_with_leading_space(self):
"""
Confirm we cannot get a leading tag preceded by a blank line.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_leading_space_input
input_data = common.get_parser_input(
'multiline/tag_leading_space_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_leading_space_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_leading_space_expected'),
)
def test_get_no_leading_tags_with_leading_noise(self):
"""
        Confirm we cannot get a leading tag preceded by data.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_leading_noise_input
input_data = common.get_parser_input(
'multiline/tag_leading_noise_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_leading_noise_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_leading_noise_expected'),
)
def test_get_no_leading_tags_from_no_lines(self):
"""
Confirm we cannot get a leading tag from nothing.
"""
# Given I have Romaine core's parser
parser = common.get_romaine_parser()
        # When I call get_tags with input from
# multiline/tag_empty_input
input_data = common.get_parser_input(
'multiline/tag_empty_input',
)
result = parser.multiline.get_tags(input_data)
# Then I see the results from
# multiline/tag_empty_expected
self.assertEqual(
result,
common.get_parser_output('multiline/tag_empty_expected'),
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from synapse.events import FrozenEvent
from synapse.api.auth import Auth
from synapse.api.constants import EventTypes, Membership
from synapse.state import StateHandler
from .utils import MockClock
from mock import Mock
_next_event_id = 1000
def create_event(name=None, type=None, state_key=None, depth=2, event_id=None,
prev_events=[], **kwargs):
global _next_event_id
if not event_id:
_next_event_id += 1
event_id = str(_next_event_id)
if not name:
if state_key is not None:
name = "<%s-%s, %s>" % (type, state_key, event_id,)
else:
name = "<%s, %s>" % (type, event_id,)
d = {
"event_id": event_id,
"type": type,
"sender": "@user_id:example.com",
"room_id": "!room_id:example.com",
"depth": depth,
"prev_events": prev_events,
}
if state_key is not None:
d["state_key"] = state_key
d.update(kwargs)
event = FrozenEvent(d)
return event
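# A minimal usage sketch (assumption: not part of the original test module,
# the helper name is purely illustrative): when no event_id is supplied,
# create_event hands out ids from the shared _next_event_id counter, so
# successive events never collide.
def _example_create_event():
    first = create_event(type="m.room.message", depth=2)
    second = create_event(type="m.room.message", depth=3)
    # Auto-generated ids are distinct and the fields end up on a FrozenEvent.
    assert first.event_id != second.event_id
    return first, second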
class StateGroupStore(object):
def __init__(self):
self._event_to_state_group = {}
self._group_to_state = {}
self._next_group = 1
def get_state_groups(self, event_ids):
groups = {}
for event_id in event_ids:
group = self._event_to_state_group.get(event_id)
if group:
groups[group] = self._group_to_state[group]
return defer.succeed(groups)
def store_state_groups(self, event, context):
if context.current_state is None:
return
state_events = context.current_state
if event.is_state():
state_events[(event.type, event.state_key)] = event
state_group = context.state_group
if not state_group:
state_group = self._next_group
self._next_group += 1
self._group_to_state[state_group] = state_events.values()
self._event_to_state_group[event.event_id] = state_group
class DictObj(dict):
def __init__(self, **kwargs):
super(DictObj, self).__init__(kwargs)
self.__dict__ = self
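# Illustrative sketch (assumption: not in the original tests): DictObj mirrors
# its keys as attributes, which is why the node definitions below can be
# expanded straight into create_event(**fields).
def _example_dictobj():
    fields = DictObj(type="m.room.message", depth=2)
    # Both access styles read the same underlying dict.
    assert fields["depth"] == fields.depth == 2
    return fields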
class Graph(object):
def __init__(self, nodes, edges):
events = {}
clobbered = set(events.keys())
for event_id, fields in nodes.items():
refs = edges.get(event_id)
if refs:
clobbered.difference_update(refs)
prev_events = [(r, {}) for r in refs]
else:
prev_events = []
events[event_id] = create_event(
event_id=event_id,
prev_events=prev_events,
**fields
)
self._leaves = clobbered
self._events = sorted(events.values(), key=lambda e: e.depth)
def walk(self):
return iter(self._events)
def get_leaves(self):
return (self._events[i] for i in self._leaves)
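# Illustrative sketch (assumption: not in the original tests) of how the Graph
# helper is driven: nodes map event ids to their fields, edges map each event
# id to its prev_events, and walk() yields the resulting FrozenEvents sorted
# by depth.
def _example_graph_walk():
    graph = Graph(
        nodes={
            "START": DictObj(type=EventTypes.Create, state_key="", depth=1),
            "A": DictObj(type=EventTypes.Message, depth=2),
        },
        edges={"A": ["START"]},
    )
    # Depth ordering guarantees "START" is visited before "A".
    return [event.event_id for event in graph.walk()]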
class StateTestCase(unittest.TestCase):
def setUp(self):
self.store = Mock(
spec_set=[
"get_state_groups",
"add_event_hashes",
]
)
hs = Mock(spec=[
"get_datastore", "get_auth", "get_state_handler", "get_clock",
])
hs.get_datastore.return_value = self.store
hs.get_state_handler.return_value = None
hs.get_auth.return_value = Auth(hs)
hs.get_clock.return_value = MockClock()
self.state = StateHandler(hs)
self.event_id = 0
@defer.inlineCallbacks
def test_branch_no_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create,
state_key="",
depth=1,
),
"A": DictObj(
type=EventTypes.Message,
depth=2,
),
"B": DictObj(
type=EventTypes.Message,
depth=3,
),
"C": DictObj(
type=EventTypes.Name,
state_key="",
depth=3,
),
"D": DictObj(
type=EventTypes.Message,
depth=4,
),
},
edges={
"A": ["START"],
"B": ["A"],
"C": ["A"],
"D": ["B", "C"]
}
)
store = StateGroupStore()
self.store.get_state_groups.side_effect = store.get_state_groups
context_store = {}
for event in graph.walk():
context = yield self.state.compute_event_context(event)
store.store_state_groups(event, context)
context_store[event.event_id] = context
self.assertEqual(2, len(context_store["D"].current_state))
@defer.inlineCallbacks
def test_branch_basic_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create,
state_key="creator",
content={"membership": "@user_id:example.com"},
depth=1,
),
"A": DictObj(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
depth=2,
),
"B": DictObj(
type=EventTypes.Name,
state_key="",
depth=3,
),
"C": DictObj(
type=EventTypes.Name,
state_key="",
depth=4,
),
"D": DictObj(
type=EventTypes.Message,
depth=5,
),
},
edges={
"A": ["START"],
"B": ["A"],
"C": ["A"],
"D": ["B", "C"]
}
)
store = StateGroupStore()
self.store.get_state_groups.side_effect = store.get_state_groups
context_store = {}
for event in graph.walk():
context = yield self.state.compute_event_context(event)
store.store_state_groups(event, context)
context_store[event.event_id] = context
self.assertSetEqual(
{"START", "A", "C"},
{e.event_id for e in context_store["D"].current_state.values()}
)
@defer.inlineCallbacks
def test_branch_have_banned_conflict(self):
graph = Graph(
nodes={
"START": DictObj(
type=EventTypes.Create,
state_key="creator",
content={"membership": "@user_id:example.com"},
depth=1,
),
"A": DictObj(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={"membership": Membership.JOIN},
membership=Membership.JOIN,
depth=2,
),
"B": DictObj(
type=EventTypes.Name,
state_key="",
depth=3,
),
"C": DictObj(
type=EventTypes.Member,
state_key="@user_id_2:example.com",
content={"membership": Membership.BAN},
membership=Membership.BAN,
depth=4,
),
"D": DictObj(
type=EventTypes.Name,
state_key="",
depth=4,
sender="@user_id_2:example.com",
),
"E": DictObj(
type=EventTypes.Message,
depth=5,
),
},
edges={
"A": ["START"],
"B": ["A"],
"C": ["B"],
"D": ["B"],
"E": ["C", "D"]
}
)
store = StateGroupStore()
self.store.get_state_groups.side_effect = store.get_state_groups
context_store = {}
for event in graph.walk():
context = yield self.state.compute_event_context(event)
store.store_state_groups(event, context)
context_store[event.event_id] = context
self.assertSetEqual(
{"START", "A", "B", "C"},
{e.event_id for e in context_store["E"].current_state.values()}
)
@defer.inlineCallbacks
def test_annotate_with_old_message(self):
event = create_event(type="test_message", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
context = yield self.state.compute_event_context(
event, old_state=old_state
)
for k, v in context.current_state.items():
type, state_key = k
self.assertEqual(type, v.type)
self.assertEqual(state_key, v.state_key)
self.assertEqual(
set(old_state), set(context.current_state.values())
)
self.assertIsNone(context.state_group)
@defer.inlineCallbacks
def test_annotate_with_old_state(self):
event = create_event(type="state", state_key="", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
context = yield self.state.compute_event_context(
event, old_state=old_state
)
for k, v in context.current_state.items():
type, state_key = k
self.assertEqual(type, v.type)
self.assertEqual(state_key, v.state_key)
self.assertEqual(
set(old_state),
set(context.current_state.values())
)
self.assertIsNone(context.state_group)
@defer.inlineCallbacks
def test_trivial_annotate_message(self):
event = create_event(type="test_message", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
group_name = "group_name_1"
self.store.get_state_groups.return_value = {
group_name: old_state,
}
context = yield self.state.compute_event_context(event)
for k, v in context.current_state.items():
type, state_key = k
self.assertEqual(type, v.type)
self.assertEqual(state_key, v.state_key)
self.assertEqual(
set([e.event_id for e in old_state]),
set([e.event_id for e in context.current_state.values()])
)
self.assertEqual(group_name, context.state_group)
@defer.inlineCallbacks
def test_trivial_annotate_state(self):
event = create_event(type="state", state_key="", name="event")
old_state = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
group_name = "group_name_1"
self.store.get_state_groups.return_value = {
group_name: old_state,
}
context = yield self.state.compute_event_context(event)
for k, v in context.current_state.items():
type, state_key = k
self.assertEqual(type, v.type)
self.assertEqual(state_key, v.state_key)
self.assertEqual(
set([e.event_id for e in old_state]),
set([e.event_id for e in context.current_state.values()])
)
self.assertIsNone(context.state_group)
@defer.inlineCallbacks
def test_resolve_message_conflict(self):
event = create_event(type="test_message", name="event")
old_state_1 = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
old_state_2 = [
create_event(type="test1", state_key="1"),
create_event(type="test3", state_key="2"),
create_event(type="test4", state_key=""),
]
context = yield self._get_context(event, old_state_1, old_state_2)
self.assertEqual(len(context.current_state), 5)
self.assertIsNone(context.state_group)
@defer.inlineCallbacks
def test_resolve_state_conflict(self):
event = create_event(type="test4", state_key="", name="event")
old_state_1 = [
create_event(type="test1", state_key="1"),
create_event(type="test1", state_key="2"),
create_event(type="test2", state_key=""),
]
old_state_2 = [
create_event(type="test1", state_key="1"),
create_event(type="test3", state_key="2"),
create_event(type="test4", state_key=""),
]
context = yield self._get_context(event, old_state_1, old_state_2)
self.assertEqual(len(context.current_state), 5)
self.assertIsNone(context.state_group)
@defer.inlineCallbacks
def test_standard_depth_conflict(self):
event = create_event(type="test4", name="event")
member_event = create_event(
type=EventTypes.Member,
state_key="@user_id:example.com",
content={
"membership": Membership.JOIN,
}
)
old_state_1 = [
member_event,
create_event(type="test1", state_key="1", depth=1),
]
old_state_2 = [
member_event,
create_event(type="test1", state_key="1", depth=2),
]
context = yield self._get_context(event, old_state_1, old_state_2)
self.assertEqual(old_state_2[1], context.current_state[("test1", "1")])
# Reverse the depth to make sure we are actually using the depths
# during state resolution.
old_state_1 = [
member_event,
create_event(type="test1", state_key="1", depth=2),
]
old_state_2 = [
member_event,
create_event(type="test1", state_key="1", depth=1),
]
context = yield self._get_context(event, old_state_1, old_state_2)
self.assertEqual(old_state_1[1], context.current_state[("test1", "1")])
def _get_context(self, event, old_state_1, old_state_2):
group_name_1 = "group_name_1"
group_name_2 = "group_name_2"
self.store.get_state_groups.return_value = {
group_name_1: old_state_1,
group_name_2: old_state_2,
}
return self.state.compute_event_context(event)
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
import re
import os
import sys
import pkgutil
from asyncio import coroutine
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
from werkzeug._compat import unichr, text_type, string_types, iteritems, \
reraise, PY2
from werkzeug._internal import _DictAccessorProperty, \
_parse_signature, _missing
_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
_entity_re = re.compile(r'&([^;]+);')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
def yields(value):
import asyncio
return isinstance(value, asyncio.futures.Future) or inspect.isgenerator(value)
@coroutine
def call_maybe_yield(func, *args, **kwargs):
rv = func(*args, **kwargs)
if yields(rv):
rv = yield from rv
return rv
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
    # overhead. If one chooses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class environ_property(_DictAccessorProperty):
"""Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
environ attribute:
>>> class Test(object):
... environ = {'key': 'value'}
... test = environ_property('key')
>>> var = Test()
>>> var.test
'value'
If you pass it a second value it's used as default if the key does not
exist, the third one can be a converter that takes a value and converts
it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
is used. If no default value is provided `None` is used.
    By default the property is read-only. You have to explicitly enable it
by passing ``read_only=False`` to the constructor.
"""
read_only = True
def lookup(self, obj):
return obj.environ
class header_property(_DictAccessorProperty):
"""Like `environ_property` but for headers."""
def lookup(self, obj):
return obj.headers
class HTMLBuilder(object):
"""Helper object for HTML generation.
    By default there are two instances of that class. The `html` one, and
the `xhtml` one for those two dialects. The class uses keyword parameters
and positional parameters to generate small snippets of HTML.
Keyword parameters are converted to XML/SGML attributes, positional
arguments are used as children. Because Python accepts positional
arguments before keyword arguments it's a good idea to use a list with the
star-syntax for some children:
>>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
... html.a('bar', href='bar.html')])
u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
This class works around some browser limitations and can not be used for
arbitrary SGML/XML generation. For that purpose lxml and similar
libraries exist.
Calling the builder escapes the string passed:
>>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
"""
_entity_re = re.compile(r'&([^;]+);')
_entities = name2codepoint.copy()
_entities['apos'] = 39
_empty_elements = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
'source', 'wbr'
])
_boolean_attributes = set([
'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
])
_plaintext_elements = set(['textarea'])
_c_like_cdata = set(['script', 'style'])
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if tag[:2] == '__':
raise AttributeError(tag)
def proxy(*children, **arguments):
buffer = '<' + tag
for key, value in iteritems(arguments):
if value is None:
continue
if key[-1] == '_':
key = key[:-1]
if key in self._boolean_attributes:
if not value:
continue
if self._dialect == 'xhtml':
value = '="' + key + '"'
else:
value = ''
else:
value = '="' + escape(value) + '"'
buffer += ' ' + key + value
if not children and tag in self._empty_elements:
if self._dialect == 'xhtml':
buffer += ' />'
else:
buffer += '>'
return buffer
buffer += '>'
children_as_string = ''.join([text_type(x) for x in children
if x is not None])
if children_as_string:
if tag in self._plaintext_elements:
children_as_string = escape(children_as_string)
elif tag in self._c_like_cdata and self._dialect == 'xhtml':
children_as_string = '/*<![CDATA[*/' + \
children_as_string + '/*]]>*/'
buffer += children_as_string + '</' + tag + '>'
return buffer
return proxy
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__,
self._dialect
)
html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')
def get_content_type(mimetype, charset):
"""Return the full content type string with charset for a mimetype.
If the mimetype represents text the charset will be appended as charset
parameter, otherwise the mimetype is returned unchanged.
:param mimetype: the mimetype to be used as content type.
:param charset: the charset to be appended in case it was a text mimetype.
:return: the content type.
"""
if mimetype.startswith('text/') or \
mimetype == 'application/xml' or \
(mimetype.startswith('application/') and
mimetype.endswith('+xml')):
mimetype += '; charset=' + charset
return mimetype
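# A small illustrative check (assumption: not part of werkzeug itself) showing
# which mimetypes pick up the charset parameter and which pass through
# unchanged.
def _example_get_content_type():
    assert get_content_type('text/html', 'utf-8') == 'text/html; charset=utf-8'
    assert get_content_type('application/xhtml+xml', 'utf-8') == \
        'application/xhtml+xml; charset=utf-8'
    # Binary types are returned as-is.
    assert get_content_type('image/png', 'utf-8') == 'image/png'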
def format_string(string, context):
"""String-template format a string:
>>> format_string('$foo and ${foo}s', dict(foo=42))
'42 and 42s'
This does not do any attribute lookup etc. For more advanced string
    formatting have a look at the `werkzeug.template` module.
:param string: the format string.
:param context: a dict with the variables to insert.
"""
def lookup_arg(match):
x = context[match.group(1) or match.group(2)]
if not isinstance(x, string_types):
x = type(string)(x)
return x
return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
    On Windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you generate a random
    filename if the function returned an empty one.
.. versionadded:: 0.5
:param filename: the filename to secure
"""
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def escape(s, quote=None):
"""Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
    There is special handling for `None`, which escapes to an empty string.
.. versionchanged:: 0.9
`quote` is now implicitly on.
:param s: the string to escape.
:param quote: ignored.
"""
if s is None:
return ''
elif hasattr(s, '__html__'):
return text_type(s.__html__())
elif not isinstance(s, string_types):
s = text_type(s)
if quote is not None:
from warnings import warn
warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2)
    s = s.replace('&', '&amp;').replace('<', '&lt;') \
        .replace('>', '&gt;').replace('"', '&quot;')
return s
def unescape(s):
"""The reverse function of `escape`. This unescapes all the HTML
entities, not only the XML entities inserted by `escape`.
:param s: the string to unescape.
"""
def handle_match(m):
name = m.group(1)
if name in HTMLBuilder._entities:
return unichr(HTMLBuilder._entities[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, s)
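# Illustrative round trip (assumption: not part of werkzeug itself): escape()
# turns markup-significant characters into entity references and unescape()
# restores the original text.
def _example_escape_roundtrip():
    original = u'<a href="x">Tom & Jerry</a>'
    escaped = escape(original)
    assert unescape(escaped) == original
    return escaped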
def redirect(location, code=302):
"""Return a response object (a WSGI application) that, if called,
redirects the client to the target location. Supported codes are 301,
302, 303, 305, and 307. 300 is not supported because it's not a real
    redirect and 304 because it's the answer for a request with defined
    If-Modified-Since headers.
.. versionadded:: 0.6
The location can now be a unicode string that is encoded using
the :func:`iri_to_uri` function.
:param location: the location the response should redirect to.
:param code: the redirect status code. defaults to 302.
"""
from werkzeug.wrappers import Response
display_location = escape(location)
if isinstance(location, text_type):
from werkzeug.urls import iri_to_uri
location = iri_to_uri(location)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(escape(location), display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
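# Minimal WSGI sketch (assumption: illustrative only): the Response object
# returned by redirect() is itself a WSGI application, so it can simply be
# invoked with the environ and start_response of the current request.
def _example_redirect_app(environ, start_response):
    response = redirect('http://example.com/login', code=302)
    return response(environ, start_response)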
def append_slash_redirect(environ, code=301):
"""Redirect to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect.
"""
new_path = environ['PATH_INFO'].strip('/') + '/'
query_string = environ.get('QUERY_STRING')
if query_string:
new_path += '?' + query_string
return redirect(new_path, code)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
#XXX: py3 review needed
assert isinstance(import_name, string_types)
# force the import name to automatically convert to strings
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if PY2 and isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError as e:
if not silent:
reraise(
ImportStringError,
ImportStringError(import_name, e),
sys.exc_info()[2])
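# Illustrative sketch (assumption: not part of werkzeug itself): the dotted
# and the colon notation resolve to the same object.
def _example_import_string():
    assert import_string('os.path.join') is import_string('os.path:join')
    # With silent=True a bogus path yields None instead of raising.
    assert import_string('no.such.module', silent=True) is None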
def find_modules(import_path, include_packages=False, recursive=False):
"""Find all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
    :param import_path: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
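# Illustrative sketch (assumption: not part of werkzeug itself): list the
# immediate child modules of a package, here the standard library's json
# package, without recursing into sub-packages.
def _example_find_modules():
    return sorted(find_modules('json'))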
def validate_arguments(func, args, kwargs, drop_extra=True):
"""Check if the function accepts the arguments and keyword arguments.
Returns a new ``(args, kwargs)`` tuple that can safely be passed to
the function without causing a `TypeError` because the function signature
is incompatible. If `drop_extra` is set to `True` (which is the default)
any extra positional or keyword arguments are dropped automatically.
The exception raised provides three attributes:
`missing`
        A set of argument names that the function expected but were
missing.
`extra`
        A dict of keyword arguments that the function cannot handle but
        were provided.
`extra_positional`
        A list of values that were given by positional argument but the
function cannot accept.
This can be useful for decorators that forward user submitted data to
a view function::
from werkzeug.utils import ArgumentValidationError, validate_arguments
def sanitize(f):
def proxy(request):
data = request.values.to_dict()
try:
args, kwargs = validate_arguments(f, (request,), data)
except ArgumentValidationError:
raise BadRequest('The browser failed to transmit all '
'the data expected.')
return f(*args, **kwargs)
return proxy
:param func: the function the validation is performed against.
:param args: a tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:param drop_extra: set to `False` if you don't want extra arguments
to be silently dropped.
:return: tuple in the form ``(args, kwargs)``.
"""
parser = _parse_signature(func)
args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
if missing:
raise ArgumentValidationError(tuple(missing))
elif (extra or extra_positional) and not drop_extra:
raise ArgumentValidationError(None, extra, extra_positional)
return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
args, kwargs, missing, extra, extra_positional, \
arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
values = {}
for (name, has_default, default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError('too many positional arguments')
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError('got multiple values for keyword argument ' +
repr(next(iter(multikw))))
values[kwarg_var] = extra
elif extra:
raise TypeError('got unexpected keyword argument ' +
repr(next(iter(extra))))
return values
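# Illustrative sketch (assumption: not part of werkzeug itself): positional
# and keyword arguments are folded into one mapping keyed by parameter name,
# as described in the docstring above.
def _example_bind_arguments():
    def target(a, b, c=0):
        return a + b + c
    bound = bind_arguments(target, (1, 2), {'c': 3})
    # Expected: {'a': 1, 'b': 2, 'c': 3}
    return bound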
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(self, 'function arguments invalid. ('
'%d missing, %d additional)' % (
len(self.missing),
len(self.extra) + len(self.extra_positional)
))
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
# circular dependencies
from werkzeug.http import quote_header_value, unquote_header_value, \
cookie_date
# DEPRECATED
# these objects were previously in this module as well. we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import parse_cookie, dump_cookie
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
main_window
The main window and system tray for the commotion_client pyqt GUI.
Key components handled within:
* exiting/hiding the application
* Creating the main window and systemTray icon.
"""
#Standard Library Imports
import logging
#PyQt imports
from PyQt4 import QtCore
from PyQt4 import QtGui
#Commotion Client Imports
import commotion_assets_rc
from commotion_client.GUI.menu_bar import MenuBar
from commotion_client.GUI.crash_report import CrashReport
from commotion_client.GUI import welcome_page
from commotion_client.GUI import toolbar_builder
from commotion_client.utils import extension_manager
class MainWindow(QtGui.QMainWindow):
"""
    The central widget for the commotion client. This widget initializes all other sub-widgets and extensions as well as defines the parameters of the main GUI container.
"""
    #Clean up signal watched by children to do any clean-up or saving needed
clean_up = QtCore.pyqtSignal()
app_message = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
super().__init__()
#Keep track of if the gui needs any clean up / saving.
self._dirty = False
self.log = logging.getLogger("commotion_client."+__name__)
self.translate = QtCore.QCoreApplication.translate
self.init_crash_reporter()
self.setup_menu_bar()
#Setup extension manager for viewports
self.ext_manager = extension_manager.ExtensionManager()
self.viewport = welcome_page.ViewPort
self.apply_viewport(self.viewport)
        #Default parameters #TODO to be replaced with parameters saved between instances later
try:
self.load_settings()
except Exception as _excp:
self.log.critical(self.translate("logs", "Failed to load window settings."))
self.log.exception(_excp)
raise
#set main menu to not close application on exit events
self.exitOnClose = False
self.remove_on_close = False
#==================================
def toggle_menu_bar(self):
#if menu shown... then
#DockToHide = self.findChild(name="MenuBarDock")
#QMainWindow.removeDockWidget (self, QDockWidget dockwidget)
#else
#bool QMainWindow.restoreDockWidget (self, QDockWidget dockwidget)
pass
def setup_menu_bar(self):
""" Set up menu bar. """
self.menu_bar = MenuBar(self)
#Create dock for menu-bar TEST
self.menu_dock = QtGui.QDockWidget(self)
#turn off title bar
#TODO create a vertical title bar that is the "dock handle"
self.menu_dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
#Set Name of dock so we can hide and show it.
self.menu_dock.setObjectName("MenuBarDock")
#force bar to the left side
self.menu_dock.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
#apply menu bar to dock and dock to the main window
self.menu_dock.setWidget(self.menu_bar)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.menu_dock)
#Create slot to monitor when menu-bar wants the main window to change the main-viewport
self.menu_bar.viewport_requested.connect(self.change_viewport)
def init_crash_reporter(self):
""" """
try:
self.crash_report = CrashReport()
except Exception as _excp:
self.log.critical(self.translate("logs", "Failed to load crash reporter. Ironically, this means that the application must be halted."))
self.log.exception(_excp)
raise
else:
self.crash_report.crash.connect(self.crash)
def set_viewport(self):
"""Load and set viewport to next viewport and load viewport """
self.log.info(self.next_extension)
next_view = self.next_extension
ext_viewport = self.ext_manager.load_user_interface(str(next_view), "main")
ext_toolbar = self.ext_manager.load_user_interface(str(next_view), "toolbar")
self.apply_viewport(ext_viewport, ext_toolbar)
def apply_viewport(self, viewport, toolbar=None):
"""Apply current viewport to the central widget and set up proper signal's for communication. """
#Create central widget (replaced due to splitter)
# self.central_widget = QtGui.QWidget(self)
self.central_widget = QtGui.QSplitter(QtCore.Qt.Vertical, self)
self.viewport = viewport(self.central_widget)
if not toolbar:
toolbar = False
self.toolbar = self.init_toolbar(toolbar)
#Set up central layout (Replaced due to splitter)
#self.central_layout = QtGui.QVBoxLayout(self.central_widget)
self.scroll_area = QtGui.QScrollArea(self.central_widget)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setWidget(self.viewport)
#add scroll area to central layout (replaced due to splitter)
#self.central_layout.addWidget(self.scroll_area)
self.central_widget.addWidget(self.scroll_area)
self.central_widget.addWidget(self.toolbar)
self.setCentralWidget(self.central_widget)
self.init_viewport_signals()
self.central_widget.show()
self.viewport.show()
def init_viewport_signals(self):
#connect viewport extension to crash reporter
self.viewport.data_report.connect(self.crash_report.crash_info)
self.crash_report.crash_override.connect(self.viewport.start_report_collection)
#connect error reporter to crash reporter
self.viewport.error_report.connect(self.crash_report.alert_user)
#Attach clean up signal
self.clean_up.connect(self.viewport.clean_up)
def change_viewport(self, viewport):
"""Prepare next viewport for loading and start loading process when ready."""
self.log.debug(self.translate("logs", "Request to change viewport received."))
self.next_extension = viewport
if self.viewport.is_dirty:
self.viewport.on_stop.connect(self.set_viewport)
self.clean_up.emit()
else:
self.set_viewport()
def init_toolbar(self, ext_toolbar):
""" """
toolbar = toolbar_builder.ToolBar(self.central_widget, self.viewport, ext_toolbar,)
return toolbar
def purge(self):
"""
Closes the menu and sets its data up for immediate removal.
"""
self.cleanup()
self.main.remove_on_close = True
self.close()
def closeEvent(self, event):
"""
        Captures the close event for the main window. When called from exitEvent, it removes the tray icon and accepts its demise. When called otherwise, it simply hides the main window and ignores the event.
"""
if self.exitOnClose:
self.log.debug(self.translate("logs", "Application has received a EXIT close event and will shutdown completely."))
event.accept()
elif self.remove_on_close:
self.log.debug(self.translate("logs", "Application has received a GUI closing close event and will close its main window."))
self.deleteLater()
event.accept()
else:
self.log.debug(self.translate("logs", "Application has received a non-exit close event and will hide its main window."))
self.hide()
event.setAccepted(True)
event.ignore()
def exitEvent(self):
"""
Closes and exits the entire commotion program.
"""
self.cleanup()
self.exitOnClose = True
self.close()
def cleanup(self):
self.clean_up.emit() #send signal for others to clean up if they need to
if self.is_dirty:
self.save_settings()
def bring_front(self):
"""
Brings the main window to the front of the screen.
"""
self.show()
self.raise_()
def load_settings(self):
"""
Loads window geometry from saved settings and sets window to those settings.
"""
defaults = {
#QRect(posX, posY, width, height)
"geometry":QtCore.QRect(300, 300, 640, 480), #TODO set sane defaults and catalogue in HIG
}
_settings = QtCore.QSettings()
_settings.beginGroup("MainWindow")
#Load settings from saved, or use defaults
geometry = _settings.value("geometry", defaults['geometry'])
        if geometry.isNull():
_error = self.translate("logs", "Could not load window geometry from settings file or defaults.")
self.log.critical(_error)
raise EnvironmentError(_error)
_settings.endGroup()
self.setGeometry(geometry)
def save_settings(self):
"""
Saves current window geometry
"""
_settings = QtCore.QSettings()
_settings.beginGroup("MainWindow")
#Save settings
try:
_settings.setValue("geometry", self.geometry())
except Exception as _excp:
self.log.warn(self.translate("logs", "Could not save window geometry. Will continue without saving window geometry."))
self.log.exception(_excp)
_settings.endGroup()
def crash(self, crash_type):
"""
        Emits a closing signal to allow other windows that need to clean up to do so, and then exits or restarts the application.
"""
self.clean_up.emit() #send signal for others to clean up if they need to
if crash_type == "restart":
self.app_message.emit("restart")
else:
self.exitOnClose = True
self.close()
@property
def is_dirty(self):
"""Get the current state of the main window"""
return self._dirty
|
|
# General purpose utilities for PyBERT.
#
# Original author: David Banas <capn.freako@gmail.com>
# Original date: September 27, 2014 (Copied from `pybert_cntrl.py'.)
#
# Copyright (c) 2014 David Banas; all rights reserved World wide.
from numpy import sign, sin, pi, array, linspace, float, zeros, ones, repeat, where, diff, log10, sqrt, power, exp, cumsum
from numpy.random import normal
from numpy.fft import fft
from scipy.signal import lfilter, iirfilter, invres, freqs, medfilt
from dfe import DFE
from cdr import CDR
import time
from pylab import *
import numpy as np
import scipy.stats as ss
debug = False
def moving_average(a, n=3):
"""Calculates a sliding average over the input vector."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return np.insert(ret[n - 1:], 0, ret[n - 1] * ones(n - 1)) / n
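# Illustrative sketch (not part of the original module): a 3-sample sliding
# average of a short ramp. The first n-1 outputs repeat the first full
# average, so the result keeps the length of the input.
def _example_moving_average():
    # Expected result: array([1., 1., 1., 2., 3.])
    return moving_average(array([0., 1., 2., 3., 4.]), n=3)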
def find_crossing_times(t, x, min_delay=0., rising_first=True, min_init_dev=0.1, thresh = 0.):
"""
Finds the threshold crossing times of the input signal.
Inputs:
- t Vector of sample times. Intervals do NOT need to be uniform.
- x Sampled input vector.
- min_delay Minimum delay required, before allowing crossings.
(Helps avoid false crossings at beginning of signal.)
Optional. Default = 0.
- rising_first When True, start with the first rising edge found.
Optional. Default = True.
When this option is True, the first rising edge crossing
is the first crossing returned. This is the desired
behavior for PyBERT, because we always initialize the
bit stream with [0, 1, 1], in order to provide a known
synchronization point for jitter analysis.
- min_init_dev The minimum initial deviation from zero, which must
be detected, before searching for crossings.
Normalized to maximum input signal magnitude.
Optional. Default = 0.1.
- thresh Vertical crossing threshold.
Outputs:
- xings The crossing times.
"""
assert len(t) == len(x), "len(t) (%d) and len(x) (%d) need to be the same." % (len(t), len(x))
t = array(t)
x = array(x)
max_mag_x = max(abs(x))
min_mag_x = min_init_dev * max_mag_x
i = 0
while(abs(x[i]) < min_mag_x):
i += 1
assert i < len(x), "ERROR: find_crossing_times(): Input signal minimum deviation not detected!"
x = x[i:] - thresh
t = t[i:]
sign_x = sign(x)
sign_x = where(sign_x, sign_x, ones(len(sign_x))) # "0"s can produce duplicate xings.
diff_sign_x = diff(sign_x)
xing_ix = where(diff_sign_x)[0]
xings = [t[i] + (t[i + 1] - t[i]) * x[i] / (x[i] - x[i + 1]) for i in xing_ix]
min_time = t[0]
if(min_delay):
assert min_delay < t[-1], "Error: min_delay must be less than final time value."
i = 0
while(i < len(t) and t[i] < min_delay):
i += 1
min_time = t[i]
i = 0
while(xings[i] < min_time):
i += 1
if(rising_first and diff_sign_x[xing_ix[i]] < 0.):
i += 1
if(debug):
print "find_crossing_times(): min_delay:", min_delay, "; first crossing returned:", xings[i], "rising_first:", rising_first
return array(xings[i:])
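# Illustrative sketch (not part of the original module): threshold crossings
# of one cycle of -cos(2*pi*t). The rising edge near t = 0.25 is returned
# first, followed by the falling edge near t = 0.75.
def _example_find_crossing_times():
    t = linspace(0., 1., 101)
    x = -np.cos(2. * pi * t)
    return find_crossing_times(t, x)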
def find_crossings(t, x, amplitude, min_delay = 0., rising_first = True, min_init_dev = 0.1, mod_type = 0):
"""
Finds the crossing times in a signal, according to the modulation type.
Inputs:
Required:
- t: The times associated with each signal sample.
- x: The signal samples.
- amplitude: The nominal signal amplitude.
(Used for determining thresholds, in the case of some modulation types.)
Optional:
- min_delay: The earliest possible sample time we want returned.
Default = 0.
- rising_first When True, start with the first rising edge found.
When this option is True, the first rising edge crossing
is the first crossing returned. This is the desired
behavior for PyBERT, because we always initialize the
bit stream with [0, 1, 1], in order to provide a known
synchronization point for jitter analysis.
Default = True.
- min_init_dev The minimum initial deviation from zero, which must
be detected, before searching for crossings.
Normalized to maximum input signal magnitude.
Default = 0.1.
- mod_type: The modulation type. Allowed values are: (Default = 0.)
- 0: NRZ
- 1: Duo-binary
- 2: PAM-4
Outputs:
- xings: The crossing times.
"""
if (mod_type == 0): # NRZ
xings = find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev)
elif(mod_type == 1): # Duo-binary
xings_low = list(find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev, thresh = -amplitude / 2.))
xings_high = list(find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev, thresh = amplitude / 2.))
xings = (xings_low + xings_high)
xings.sort()
elif(mod_type == 2): # PAM-4
xings = find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev)
# xings_low = list(find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev, thresh = -amplitude * 2. / 3.))
# xings_mid = list(find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev, thresh = 0.))
# xings_high = list(find_crossing_times(t, x, min_delay = min_delay, rising_first = rising_first, min_init_dev = min_init_dev, thresh = amplitude * 2. / 3.))
# xings = (xings_low + xings_mid + xings_high)
# xings.sort()
else: # Unknown
raise Exception("ERROR: my_run_simulation(): Unknown modulation type requested!")
return array(xings)
def calc_jitter(ui, nbits, pattern_len, ideal_xings, actual_xings, rel_thresh=6, num_bins=99, zero_mean=True):
"""
Calculate the jitter in a set of actual zero crossings, given the ideal crossings and unit interval.
Inputs:
- ui : The nominal unit interval.
- nbits : The number of unit intervals spanned by the input signal.
- pattern_len : The number of unit intervals, before input bit stream repeats.
- ideal_xings : The ideal zero crossing locations of the edges.
- actual_xings : The actual zero crossing locations of the edges.
- rel_thresh : (optional) The threshold for determining periodic jitter spectral components (sigma).
- num_bins : (optional) The number of bins to use, when forming histograms.
- zero_mean : (optional) Force the mean jitter to zero, when True.
Outputs:
- jitter : The total jitter.
- t_jitter : The times (taken from 'ideal_xings') corresponding to the returned jitter values.
- isi : The peak to peak jitter due to intersymbol interference.
- dcd : The peak to peak jitter due to duty cycle distortion.
- pj : The peak to peak jitter due to uncorrelated periodic sources.
- rj : The standard deviation of the jitter due to uncorrelated unbounded random sources.
- tie_ind : The data independent jitter.
- thresh : Threshold for determining periodic components.
- jitter_spectrum : The spectral magnitude of the total jitter.
- tie_ind_spectrum : The spectral magnitude of the data independent jitter.
- spectrum_freqs : The frequencies corresponding to the spectrum components.
- hist : The histogram of the actual jitter.
- hist_synth : The histogram of the extrapolated jitter.
- bin_centers : The bin center values for both histograms.
"""
def my_hist(x):
"""
Calculates the probability mass function (PMF) of the input vector,
enforcing an output range of [-UI/2, +UI/2], sweeping everything in [-UI, -UI/2] into the first bin,
and everything in [UI/2, UI] into the last bin.
"""
hist, bin_edges = histogram(x, [-ui] + [-ui / 2. + i * ui / (num_bins - 2) for i in range(num_bins - 1)] + [ui])
bin_centers = [-ui / 2.] + [mean([bin_edges[i + 1], bin_edges[i + 2]]) for i in range(len(bin_edges) - 3)] + [ui / 2.]
return (array(map(float, hist)) / sum(hist), bin_centers)
# Assemble the TIE track.
jitter = []
t_jitter = []
i = 0
ideal_xings = array(ideal_xings) - (ideal_xings[0] - ui / 2.)
actual_xings = array(actual_xings) - (actual_xings[0] - ideal_xings[0])
skip_next_ideal_xing = False
pad_ixs = []
for ideal_xing in ideal_xings:
if(skip_next_ideal_xing):
t_jitter.append(ideal_xing)
skip_next_ideal_xing = False
continue
        # Find the closest actual crossing, occurring within [-ui, ui],
# to the ideal crossing, checking for missing crossings.
min_t = ideal_xing - ui
max_t = ideal_xing + ui
while(i < len(actual_xings) and actual_xings[i] < min_t):
i += 1
if(i == len(actual_xings)): # We've exhausted the list of actual crossings; we're done.
break
if(actual_xings[i] > max_t): # Means the xing we're looking for didn't occur, in the actual signal.
pad_ixs.append(len(jitter) + 2 * len(pad_ixs))
skip_next_ideal_xing = True # If we missed one, we missed two.
else:
candidates = []
j = i
while(j < len(actual_xings) and actual_xings[j] <= max_t):
candidates.append(actual_xings[j])
j += 1
ties = array(candidates) - ideal_xing
tie_mags = abs(ties)
best_ix = where(tie_mags == min(tie_mags))[0][0]
tie = ties[best_ix]
jitter.append(tie)
i += best_ix + 1
t_jitter.append(ideal_xing)
jitter = array(jitter)
if(debug):
print "mean(jitter):", mean(jitter)
print "len(jitter):", len(jitter)
if(zero_mean):
jitter -= mean(jitter)
jitter = list(jitter)
for pad_ix in pad_ixs:
        jitter.insert(pad_ix, -3. * ui / 4.) # Pad the jitter w/ alternating +/- 3UI/4. (Will get pulled into [-UI/2, UI/2], later.)
jitter.insert(pad_ix, 3. * ui / 4.)
jitter = array(jitter)
# Do the jitter decomposition.
# - Separate the rising and falling edges, shaped appropriately for averaging over the pattern period.
xings_per_pattern = where(ideal_xings >= pattern_len * ui)[0][0]
fallings_per_pattern = xings_per_pattern // 2
risings_per_pattern = xings_per_pattern - fallings_per_pattern
num_patterns = nbits // pattern_len
# -- Check and adjust vector lengths, reporting out if any modifications were necessary.
if(False):
if(len(jitter) < xings_per_pattern * num_patterns):
print "Added %d zeros to 'jitter'." % (xings_per_pattern * num_patterns - len(jitter))
jitter = np.append(jitter, zeros(xings_per_pattern * num_patterns - len(jitter)))
try:
t_jitter = t_jitter[:len(jitter)]
if(len(jitter) > len(t_jitter)):
jitter = jitter[:len(t_jitter)]
print "Had to shorten 'jitter', due to 't_jitter'."
except:
print "jitter:", jitter
raise
# -- Do the reshaping and check results thoroughly.
try:
tie_risings = reshape(jitter.take(range(0, num_patterns * risings_per_pattern * 2, 2)), (num_patterns, risings_per_pattern))
tie_fallings = reshape(jitter.take(range(1, num_patterns * fallings_per_pattern * 2, 2)), (num_patterns, fallings_per_pattern))
except:
print "ideal_xings[xings_per_pattern - 1]:", ideal_xings[xings_per_pattern - 1], "ideal_xings[-1]:", ideal_xings[-1]
print "num_patterns:", num_patterns, "risings_per_pattern:", risings_per_pattern, "fallings_per_pattern:", fallings_per_pattern, "len(jitter):", len(jitter)
print "nbits:", nbits, "pattern_len:", pattern_len
raise
assert len(filter(lambda x: x == None, tie_risings)) == 0, "num_patterns: %d, risings_per_pattern: %d, len(jitter): %d" % \
(num_patterns, risings_per_pattern, len(jitter))
assert len(filter(lambda x: x == None, tie_fallings)) == 0, "num_patterns: %d, fallings_per_pattern: %d, len(jitter): %d" % \
(num_patterns, fallings_per_pattern, len(jitter))
# - Use averaging to remove the uncorrelated components, before calculating data dependent components.
tie_risings_ave = tie_risings.mean(axis=0)
tie_fallings_ave = tie_fallings.mean(axis=0)
isi = max(tie_risings_ave.ptp(), tie_fallings_ave.ptp())
isi = min(isi, ui) # Cap the ISI at the unit interval.
dcd = abs(mean(tie_risings_ave) - mean(tie_fallings_ave))
# - Subtract the data dependent jitter from the original TIE track, in order to yield the data independent jitter.
tie_ave = concatenate(zip(tie_risings_ave, tie_fallings_ave))
tie_ave = resize(tie_ave, len(jitter))
tie_ind = jitter - tie_ave
# - Use spectral analysis to help isolate the periodic components of the data independent jitter.
# -- Calculate the total jitter spectrum, for display purposes only.
# --- Make vector uniformly sampled in time, via zero padding where necessary.
# --- (It's necessary to keep track of those elements in the resultant vector, which aren't paddings; hence, 'valid_ix'.)
x, valid_ix = make_uniform(t_jitter, jitter, ui, nbits)
y = fft(x)
jitter_spectrum = abs(y[:len(y) / 2]) / sqrt(len(jitter)) # Normalized, in order to make power correct.
f0 = 1. / (ui * nbits)
spectrum_freqs = [i * f0 for i in range(len(y) / 2)]
# -- Use the data independent jitter spectrum for our calculations.
tie_ind_uniform, valid_ix = make_uniform(t_jitter, tie_ind, ui, nbits)
# --- Normalized, in order to make power correct, since we grab Rj from the freq. domain.
# --- (I'm using the length of the vector before zero padding, because zero padding doesn't add energy.)
# --- (This has the effect of making our final Rj estimate more conservative.)
y = fft(tie_ind_uniform) / sqrt(len(tie_ind))
y_mag = abs(y)
y_mean = moving_average(y_mag, n = len(y_mag) / 10)
y_var = moving_average((y_mag - y_mean) ** 2, n = len(y_mag) / 10)
y_sigma = sqrt(y_var)
thresh = y_mean + rel_thresh * y_sigma
y_per = where(y_mag > thresh, y, zeros(len(y))) # Periodic components are those lying above the threshold.
y_rnd = where(y_mag > thresh, zeros(len(y)), y) # Random components are those lying below.
y_rnd = abs(y_rnd)
rj = sqrt(mean((y_rnd - mean(y_rnd)) ** 2))
tie_per = real(ifft(y_per)).take(valid_ix) * sqrt(len(tie_ind)) # Restoring shape of vector to its original, non-uniformly sampled state.
pj = tie_per.ptp()
# --- Save the spectrum, for display purposes.
tie_ind_spectrum = y_mag[:len(y_mag) / 2]
# - Reassemble the jitter, excluding the Rj.
# -- Here, we see why it was necessary to keep track of the non-padded elements with 'valid_ix':
# -- It was so that we could add the average and periodic components back together, maintaining correct alignment between them.
jitter_synth = tie_ave + tie_per
# - Calculate the histogram of original, for comparison.
hist, bin_centers = my_hist(jitter)
# - Calculate the histogram of everything, except Rj.
hist_synth, bin_centers = my_hist(jitter_synth)
# - Extrapolate the tails by convolving w/ complete Gaussian.
rv = ss.norm(loc = 0., scale = rj)
rj_pdf = rv.pdf(bin_centers)
rj_pmf = (rj_pdf / sum(rj_pdf))
hist_synth = convolve(hist_synth, rj_pmf)
tail_len = (len(bin_centers) - 1) / 2
hist_synth = [sum(hist_synth[: tail_len + 1])] + list(hist_synth[tail_len + 1 : len(hist_synth) - tail_len - 1]) + [sum(hist_synth[len(hist_synth) - tail_len - 1 :])]
return (jitter, t_jitter, isi, dcd, pj, rj, tie_ind,
thresh[:len(thresh) / 2], jitter_spectrum, tie_ind_spectrum, spectrum_freqs,
hist, hist_synth, bin_centers)
def make_uniform(t, jitter, ui, nbits):
"""
Make the jitter vector uniformly sampled in time, by zero-filling where necessary.
The trick, here, is creating a uniformly sampled input vector for the FFT operation,
since the jitter samples are almost certainly not uniformly sampled.
We do this by simply zero padding the missing samples.
Inputs:
- t : The sample times for the 'jitter' vector.
- jitter : The input jitter samples.
- ui : The nominal unit interval.
- nbits : The desired number of unit intervals, in the time domain.
Output:
- y : The uniformly sampled, zero padded jitter vector.
- y_ix : The indices where y is valid (i.e. - not zero padded).
"""
assert len(t) == len(jitter), "Length of t (%d) and jitter (%d) must be equal!" % (len(t), len(jitter))
run_lengths = map(int, diff(t) / ui + 0.5)
valid_ix = [0] + list(cumsum(run_lengths))
valid_ix = filter(lambda x: x < nbits, valid_ix)
missing = where(array(run_lengths) > 1)[0]
num_insertions = 0
jitter = list(jitter) # Because we use 'insert'.
for i in missing:
for j in range(run_lengths[i] - 1):
jitter.insert(i + 1 + num_insertions, 0.)
num_insertions += 1
if(len(jitter) < nbits):
jitter.extend([0.] * (nbits - len(jitter)))
if(len(jitter) > nbits):
jitter = jitter[:nbits]
return jitter, valid_ix
def calc_gamma(R0, w0, Rdc, Z0, v0, Theta0, ws):
"""
Calculates propagation constant from cross-sectional parameters.
    The formulas applied are taken from Howard Johnson's "Metallic Transmission Model"
(See "High Speed Signal Propagation", Sec. 3.1.)
Inputs:
- R0 skin effect resistance (Ohms/m)
- w0 cross-over freq.
- Rdc d.c. resistance (Ohms/m)
- Z0 characteristic impedance in LC region (Ohms)
- v0 propagation velocity (m/s)
- Theta0 loss tangent
- ws frequency sample points vector
Outputs:
- gamma frequency dependent propagation constant
- Zc frequency dependent characteristic impedance
"""
w = array(ws).copy()
# Guard against /0.
if(w[0] == 0):
w[0] = 1.e-12
Rac = R0 * sqrt(2 * 1j * w / w0) # AC resistance vector
R = sqrt(np.power(Rdc, 2) + np.power(Rac, 2)) # total resistance vector
L0 = Z0 / v0 # "external" inductance per unit length (H/m)
C0 = 1. / (Z0 * v0) # nominal capacitance per unit length (F/m)
C = C0 * np.power((1j * w / w0), (-2. * Theta0 / pi)) # complex capacitance per unit length (F/m)
gamma = sqrt((1j * w * L0 + R) * (1j * w * C)) # propagation constant (nepers/m)
Zc = sqrt((1j * w * L0 + R) / (1j * w * C)) # characteristic impedance (Ohms)
return (gamma, Zc)
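# Illustrative sketch (hypothetical cross-sectional parameters, not taken from
# the original code): evaluate the propagation constant of a lossy line.
if debug:
    _ws_demo = 2. * pi * linspace(0., 10.e9, 100)       # 0 - 10 GHz, in rad/s
    _gamma_demo, _Zc_demo = calc_gamma(R0=1.5, w0=2. * pi * 10.e6, Rdc=0.2,
                                       Z0=100., v0=2.e8, Theta0=0.02, ws=_ws_demo)
    print "|Zc| at 10 GHz (Ohms):", abs(_Zc_demo[-1])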
def calc_G(H, Rs, Cs, Zc, RL, Cp, CL, ws):
"""
Calculates fully loaded transfer function of complete channel.
Inputs:
- H unloaded transfer function of interconnect
- Rs source series resistance
- Cs source parallel (parasitic) capacitance
- Zc frequency dependent characteristic impedance of the interconnect
- RL load resistance (differential)
- Cp load parallel (parasitic) capacitance (single ended)
- CL load series (d.c. blocking) capacitance (single ended)
- ws frequency sample points vector
Outputs:
- G frequency dependent transfer function of channel
"""
w = array(ws).copy()
# Guard against /0.
if(w[0] == 0):
w[0] = 1.e-12
# Impedance looking back into the Tx output is a simple parallel RC network.
Zs = Rs / (1. + 1j * w * Rs * Cs)
    # Rx load impedance is 2 series a.c.-coupling capacitors, in series w/ the parallel comb. of Rterm & parasitic cap.
# (The two parasitic capacitances are in series.)
ZL = 2. * 1. / (1j * w * CL) + RL / (1. + 1j * w * RL * Cp / 2)
    # Voltage divider from the Tx source to the line input: A = (Cs || Zc) / (Rs + (Cs || Zc)).
Cs_par_Zc = Zc / (1. + 1j * w * Zc * Cs)
A = Cs_par_Zc / (Rs + Cs_par_Zc)
# Reflection coefficient at Rx:
R1 = (ZL - Zc) / (ZL + Zc)
# Reflection coefficient at Tx:
R2 = (Zs - Zc) / (Zs + Zc)
# Fully loaded channel transfer function:
G = A * H * (1 + R1) / (1 - R1 * R2 * H**2)
G = G * (((RL/(1j*w*Cp/2))/(RL + 1/(1j*w*Cp/2))) / ZL) # Corrected for divider action.
# (i.e. - We're interested in what appears across RL.)
return G
def calc_eye(ui, samps_per_ui, height, ys, clock_times=None):
"""
Calculates the "eye" diagram of the input signal vector.
Inputs:
- ui unit interval (s)
- samps_per_ui # of samples per unit interval
- height height of output image data array
- ys signal vector of interest
- clock_times (optional)
vector of clock times to use for eye centers.
If not provided, just use mean zero-crossing and
assume constant UI and no phase jumps.
(This allows the same function to be used for
eye diagram creation,
for both pre and post-CDR signals.)
Outputs:
- img_array The "heat map" representing the eye diagram.
Each grid location contains a value indicating
the number of times the signal passed through
that location.
"""
# List/array necessities.
ys = array(ys)
# Intermediate variable calculation.
tsamp = ui / samps_per_ui
# Adjust the scaling.
width = 2 * samps_per_ui
y_max = 1.1 * max(abs(ys))
y_scale = height / (2 * y_max) # (pixels/V)
y_offset = height / 2 # (pixels)
# Generate the "heat" picture array.
img_array = zeros([height, width])
if(clock_times):
for clock_time in clock_times:
start_time = clock_time - ui
stop_time = clock_time + ui
start_ix = int(start_time / tsamp)
if(start_ix + 2 * samps_per_ui > len(ys)):
break
interp_fac = (start_time - start_ix * tsamp) / tsamp
last_y = ys[start_ix]
i = 0
for (samp1, samp2) in zip(ys[start_ix : start_ix + 2 * samps_per_ui],
ys[start_ix + 1 : start_ix + 1 + 2 * samps_per_ui]):
y = samp1 + (samp2 - samp1) * interp_fac
img_array[int(y * y_scale + 0.5) + y_offset, i] += 1
# if(sign(y) != sign(last_y)): # Trap zero crossings.
# img_array[y_offset, int(i - 1 + y / (y - last_y) + 0.5)] += 1
last_y = y
i += 1
else:
start_ix = (where(diff(sign(ys)))[0] % samps_per_ui).mean() + samps_per_ui // 2
last_start_ix = len(ys) - 2 * samps_per_ui
while(start_ix < last_start_ix):
last_y = ys[start_ix]
i = 0
for y in ys[start_ix : start_ix + 2 * samps_per_ui]:
img_array[int(y * y_scale + 0.5) + y_offset, i] += 1
# if(sign(y) != sign(last_y)): # Trap zero crossings.
# img_array[y_offset, int(i - 1 + y / (y - last_y) + 0.5)] += 1
last_y = y
i += 1
start_ix += samps_per_ui
return img_array
def make_ctle(rx_bw, peak_freq, peak_mag, w):
"""
Generate the frequency response of a continuous time linear
equalizer (CTLE), given the:
- signal path bandwidth,
- peaking specification, and
- list of frequencies of interest.
We use the 'invres()' function from scipy.signal, as it suggests
itself as a natural approach, given our chosen use model of having
the user provide the peaking frequency and degree of peaking.
That is, we define our desired frequency response using one zero
and two poles, where:
- The pole locations are equal to:
- the signal path natural bandwidth, and
- the user specified peaking frequency.
- The zero location is chosen, so as to provide the desired degree
of peaking.
Inputs:
- rx_bw The natural (or, unequalized) signal path bandwidth (Hz).
- peak_freq The location of the desired peak in the frequency
response (Hz).
- peak_mag The desired relative magnitude of the peak (dB). (mag(H(0)) = 1)
- w The list of frequencies of interest (rads./s).
Outputs:
- w, H The resultant complex frequency response, at the
given frequencies.
"""
p2 = -2. * pi * rx_bw
p1 = -2. * pi * peak_freq
z = p1 / pow(10., peak_mag / 20.)
if(p2 != p1):
r1 = (z - p1) / (p2 - p1)
r2 = 1 - r1
else:
r1 = -1.
r2 = z - p1
b, a = invres([r1, r2], [p1, p2], [])
return freqs(b, a, w)
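# Illustrative sketch (hypothetical peaking values, not from the original
# code): a CTLE with 6 dB of peaking at 5 GHz in a 12 GHz signal path.
if debug:
    _w_demo = 2. * pi * linspace(1.e6, 20.e9, 200)
    _w_out, _H_ctle = make_ctle(12.e9, 5.e9, 6., _w_demo)
    print "CTLE peak magnitude (dB):", 20. * log10(max(abs(_H_ctle)))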
def trim_impulse(g, Ts, chnl_dly=0.):
"""
Trim impulse response, for more useful display, by:
- eliminating 90% of the overall delay from the beginning, and
- clipping off the tail, after 99.9% of the total power has been captured.
Inputs:
- g impulse response
- Ts sample interval (same units as 'chnl_dly')
- chnl_dly (optional) channel delay
Outputs:
- g_trim trimmed impulse response
- start_ix index of first returned sample
"""
g = array(g)
start_ix = int(0.9 * chnl_dly / Ts)
Pt = 0.999 * sum(g ** 2)
i = 0
P = 0
while(P < Pt):
P += g[i] ** 2
i += 1
return (g[start_ix : i], start_ix)
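# Illustrative sketch (synthetic impulse, not from the original code): trim a
# simple decaying exponential once 99.9% of its energy has been captured.
if debug:
    _g_demo = exp(-linspace(0., 10., 1000))
    _g_trim, _ix0 = trim_impulse(_g_demo, Ts=1.e-12)
    print "trim_impulse() kept %d of %d samples." % (len(_g_trim), len(_g_demo))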
|
|
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class of 3PAR Client handling File Persona API."""
import mock
import pprint
from pytest_testconfig import config
from test import HPE3ParClient_base as hpe3parbase
from hpe3parclient import exceptions
from hpe3parclient import file_client
from hpe3parclient import ssh
class HPE3ParFilePersonaClientMockTestCase(hpe3parbase
.HPE3ParClientBaseTestCase):
interfaces = None
DEBUG = config['TEST']['debug'].lower() == 'true'
def debug_print(self, obj, **kwargs):
if self.DEBUG:
print(pprint.pformat(obj, **kwargs))
def setUp(self, **kwargs):
version = (file_client.HPE3ParFilePersonaClient
.HPE3PAR_WS_MIN_BUILD_VERSION)
mock_version = mock.Mock()
mock_version.return_value = {'build': version}
with mock.patch('hpe3parclient.client.HPE3ParClient.getWsApiVersion',
mock_version):
self.cl = file_client.HPE3ParFilePersonaClient('anyurl')
self.cl.ssh = mock.Mock()
self.cl.http = mock.Mock()
self.cl.ssh.run = mock.Mock()
self.cl.ssh.run.return_value = 'anystring'
def tearDown(self):
pass
class ArgMatcher(object):
"""Test args vs. expected. Options order can vary."""
def __init__(self, f, cmd, options, specifiers):
self.assertEqual = f
self.cmd = cmd
self.options = options
self.specifiers = specifiers
def __eq__(self, actual):
# Command has to be first. Allow string or list ['doit','nfs'].
if isinstance(self.cmd, str):
self.cmd = [self.cmd]
for c in self.cmd:
self.assertEqual(c, actual[0])
del actual[0]
# Specifiers have to be last.
if self.specifiers:
num_specs = len(self.specifiers)
self.assertEqual(self.specifiers, actual[-num_specs:])
actual = actual[0:-num_specs]
# Options can be in any order. Some are flags. Some are pairs.
if self.options:
for option in self.options:
if isinstance(option, str):
actual.remove(option)
else:
first = actual.index(option[0])
self.assertEqual(option[1], actual[first + 1])
del actual[first + 1]
del actual[first]
self.assertEqual(actual, [])
else:
# No options should match and empty actual.
self.assertEqual(self.options, actual)
return True
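        # Illustrative sketch (mirrors the tests below; not part of the
        # original source):
        #
        #     ArgMatcher(self.assertEqual, 'removefpg', ['-f'], ['foo', 'bar'])
        #
        # compares equal to ['removefpg', '-f', 'foo', 'bar']: the command must
        # come first and the specifiers last, but the options in between may
        # appear in any order.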
def test_cli_from_sig_varargs(self):
"""Use mock and removefpg to test cli_from sig with varargs and
kwargs."""
self.cl.removefpg()
self.cl.ssh.run.assert_called_with(['removefpg', '-f'],
multi_line_stripper=True)
self.cl.removefpg("foo")
self.cl.ssh.run.assert_called_with(['removefpg', '-f', 'foo'],
multi_line_stripper=True)
self.cl.removefpg("foo", "bar")
self.cl.ssh.run.assert_called_with(['removefpg', '-f',
'foo', 'bar'],
multi_line_stripper=True)
self.cl.removefpg("foo", "bar", f=False) # f=False needs to be ignored
self.cl.ssh.run.assert_called_with(
self.ArgMatcher(self.assertEqual,
'removefpg', ['-f'], ['foo', 'bar']),
multi_line_stripper=True)
self.cl.removefpg("foo", "bar", forget="4gotten", wait=True)
self.cl.ssh.run.assert_called_with(
self.ArgMatcher(self.assertEqual,
'removefpg',
['-f', '-wait', ('-forget', '4gotten')],
['foo', 'bar']),
multi_line_stripper=True)
        # What if the string 'True' is used? That is not a boolean!
self.cl.removefpg("foo", "bar", forget='True', wait=True)
self.cl.ssh.run.assert_called_with(
self.ArgMatcher(self.assertEqual,
'removefpg',
['-f', '-wait', ('-forget', 'True')],
['foo', 'bar']),
multi_line_stripper=True)
# keyword=None is skipped
# keyword=False (boolean) is skipped
self.cl.removefpg("foo", "bar", forget=None, wait=False)
self.cl.ssh.run.assert_called_with(['removefpg', '-f', 'foo', 'bar'],
multi_line_stripper=True)
def test_build_cmd_from_str_or_list(self):
"""Test that build_cmd works with list or string."""
result1 = self.cl._build_command('test -foo')
self.assertEqual(['test', '-foo'], result1)
result2 = self.cl._build_command(['test', '-foo'])
self.assertEqual(['test', '-foo'], result2)
def test_get_details(self):
"""Test that get_details cannot be overridden by an arg."""
test_function_name = 'testdetails'
file_client.GET_DETAILS[test_function_name] = True
result = self.cl._build_command(test_function_name, d=False)
self.assertEqual([test_function_name, '-d'], result)
def test_removefpg_mock(self):
"""Use mock to test removefpg -f."""
self.cl.removefpg()
self.cl.ssh.run.assert_called_with(
['removefpg', '-f'], multi_line_stripper=True)
self.cl.removefpg('testfpg')
self.cl.ssh.run.assert_called_with(
['removefpg', '-f', 'testfpg'], multi_line_stripper=True)
def test_createfstore_mock(self):
"""Use mock to test createfstore."""
self.assertRaises(TypeError, self.cl.createfstore)
self.cl.createfstore('testvfs', 'testfstore')
self.cl.ssh.run.assert_called_with(['createfstore',
'testvfs', 'testfstore'],
multi_line_stripper=True)
self.cl.createfstore('testvfs', 'testfstore', fpg='testfpg',
comment='test comment')
self.cl.ssh.run.assert_called_with(
self.ArgMatcher(self.assertEqual,
'createfstore',
[('-comment', '"test comment"'),
('-fpg', 'testfpg')],
['testvfs', 'testfstore']),
multi_line_stripper=True)
def test_createfshare_mock(self):
"""Use mock to test createfshare with protocol first."""
self.assertRaises(TypeError, self.cl.createfshare)
self.cl.createfshare('nfs', 'testvfs', 'testfshare')
self.cl.ssh.run.assert_called_with(['createfshare', 'nfs', '-f',
'testvfs', 'testfshare'],
multi_line_stripper=True)
self.cl.createfshare('smb', 'testvfs', 'testfshare')
self.cl.ssh.run.assert_called_with(['createfshare', 'smb', '-f',
'testvfs', 'testfshare'],
multi_line_stripper=True)
self.cl.createfshare('nfs', 'testvfs', 'testfstore', fpg='testfpg',
fstore='testfstore', sharedir='testsharedir',
comment='test comment')
self.cl.ssh.run.assert_called_with(self.ArgMatcher(
self.assertEqual,
['createfshare', 'nfs'],
['-f',
('-fpg', 'testfpg'),
('-fstore', 'testfstore'),
('-sharedir', 'testsharedir'),
('-comment', '"test comment"')], # Comments get quoted
['testvfs', 'testfstore']), multi_line_stripper=True)
def test_createfshare_mock_smb_ca(self):
"""Use mock to test createfshare smb -ca argument."""
self.cl.createfshare('smb', 'testvfs', 'testfshare', ca=None)
self.cl.ssh.run.assert_called_with(['createfshare', 'smb', '-f',
'testvfs', 'testfshare'],
multi_line_stripper=True)
self.cl.createfshare('smb', 'testvfs', 'testfshare', ca='true')
self.cl.ssh.run.assert_called_with(self.ArgMatcher(
self.assertEqual,
['createfshare', 'smb'],
['-f', ('-ca', 'true')],
['testvfs', 'testfshare']), multi_line_stripper=True)
self.cl.createfshare('smb', 'testvfs', 'testfshare', ca='false')
self.cl.ssh.run.assert_called_with(self.ArgMatcher(
self.assertEqual,
['createfshare', 'smb'],
['-f', ('-ca', 'false')],
['testvfs', 'testfshare']), multi_line_stripper=True)
def test_setfshare_mock_smb_ca(self):
"""Use mock to test setfshare smb -ca argument."""
self.cl.setfshare('smb', 'testvfs', 'testfshare', ca=None)
self.cl.ssh.run.assert_called_with(['setfshare', 'smb',
'testvfs', 'testfshare'],
multi_line_stripper=True)
self.cl.setfshare('smb', 'testvfs', 'testfshare', ca='true')
self.cl.ssh.run.assert_called_with(['setfshare', 'smb',
'-ca', 'true',
'testvfs', 'testfshare'],
multi_line_stripper=True)
self.cl.setfshare('smb', 'testvfs', 'testfshare', ca='false')
self.cl.ssh.run.assert_called_with(['setfshare', 'smb',
'-ca', 'false',
'testvfs', 'testfshare'],
multi_line_stripper=True)
def test_strip_input_from_output(self):
cmd = [
'createvfs',
'-fpg',
'marktestfpg',
'-wait',
'127.0.0.2',
'255.255.255.0',
'UT5_VFS_150651'
]
out = [
'setclienv csvtable 1',
'createvfs -fpg marktestfpg -wait 127.0.0.2 255.255.255.0 '
'UT5_VFS_150651',
'exit',
'CSIM-EOS08_1611165 cli% setclienv csvtable 1\r',
'CSIM-EOS08_1611165 cli% createvfs -fpg marktestfpg -wait '
'127.0.0.2 255.255.255.\r',
'0 UT5_VFS_150651\r',
'VFS UT5_VFS_150651 already exists within FPG marktestfpg\r',
'CSIM-EOS08_1611165 cli% exit\r',
''
]
expected = [
'VFS UT5_VFS_150651 already exists within FPG marktestfpg\r']
actual = ssh.HPE3PARSSHClient.strip_input_from_output(cmd, out)
self.assertEqual(expected, actual)
def test_strip_input_from_output_no_exit(self):
cmd = [
'createvfs',
'-fpg',
'marktestfpg',
'-wait',
'127.0.0.2',
'255.255.255.0',
'UT5_VFS_150651'
]
out = [
'setclienv csvtable 1',
'createvfs -fpg marktestfpg -wait 127.0.0.2 255.255.255.0 '
'UT5_VFS_150651',
'XXXt', # Don't match
'CSIM-EOS08_1611165 cli% setclienv csvtable 1\r',
'CSIM-EOS08_1611165 cli% createvfs -fpg marktestfpg -wait '
'127.0.0.2 255.255.255.\r',
'0 UT5_VFS_150651\r',
'VFS UT5_VFS_150651 already exists within FPG marktestfpg\r',
'CSIM-EOS08_1611165 cli% exit\r',
''
]
self.assertRaises(exceptions.SSHException,
ssh.HPE3PARSSHClient.strip_input_from_output,
cmd, out)
def test_strip_input_from_output_no_setclienv(self):
cmd = [
'createvfs',
'-fpg',
'marktestfpg',
'-wait',
'127.0.0.2',
'255.255.255.0',
'UT5_VFS_150651'
]
out = [
'setclienv csvtable 1',
'createvfs -fpg marktestfpg -wait 127.0.0.2 255.255.255.0 '
'UT5_VFS_150651',
'exit',
'CSIM-EOS08_1611165 cli% setcliXXX csvtable 1\r', # Don't match
'CSIM-EOS08_1611165 cli% createvfs -fpg marktestfpg -wait '
'127.0.0.2 255.255.255.\r',
'0 UT5_VFS_150651\r',
'VFS UT5_VFS_150651 already exists within FPG marktestfpg\r',
'CSIM-EOS08_1611165 cli% exit\r',
''
]
self.assertRaises(exceptions.SSHException,
ssh.HPE3PARSSHClient.strip_input_from_output,
cmd, out)
def test_strip_input_from_output_no_cmd_match(self):
cmd = [
'createvfs',
'-fpg',
'marktestfpg',
'-wait',
'127.0.0.2',
'255.255.255.0',
'UT5_VFS_150651'
]
out = [
'setclienv csvtable 1',
'createvfs -fpg marktestfpg -wait 127.0.0.2 255.255.255.0 '
'UT5_VFS_150651',
'exit',
'CSIM-EOS08_1611165 cli% setclienv csvtable 1\r',
'CSIM-EOS08_1611165 cli% createvfs -fpg marktestfpg -wait '
'127.0.0.2 255.255.255.\r',
'0 UT5_VFS_XXXXXX\r', # Don't match
'VFS UT5_VFS_150651 already exists within FPG marktestfpg\r',
'CSIM-EOS08_1611165 cli% exit\r',
''
]
self.assertRaises(exceptions.SSHException,
ssh.HPE3PARSSHClient.strip_input_from_output,
cmd, out)
# testing
# suite = unittest.TestLoader().
# loadTestsFromTestCase(HPE3ParFilePersonaClientTestCase)
# unittest.TextTestRunner(verbosity=2).run(suite)
|
|
from django.db import connections
from django.db.models.query import QuerySet, ValuesQuerySet, ValuesListQuerySet
from django.contrib.gis import memoryview
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import get_srid_info, PointField, LineStringField
from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.utils import six
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
### Methods overloaded from QuerySet ###
def __init__(self, model=None, query=None, using=None):
super(GeoQuerySet, self).__init__(model=model, query=query, using=using)
self.query = query or GeoQuery(self.model)
def values(self, *fields):
return self._clone(klass=GeoValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=GeoValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
        # Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None))
s = {'procedure_args' : procedure_args,
'geo_field' : geo_field,
'setup' : False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
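    # Illustrative sketch (hypothetical ``Zipcode`` model with a polygon
    # geometry field; not part of the original source):
    #
    #     >>> qs = Zipcode.objects.area()
    #     >>> qs[0].area      # an Area measure, e.g. Area(sq_m=...), per backend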
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
        This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
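    # Illustrative sketch (hypothetical ``City`` model with a PointField, and a
    # GEOS ``Point`` instance ``pnt``; not part of the original source):
    #
    #     >>> qs = City.objects.distance(pnt, spheroid=True)
    #     >>> qs[0].distance  # a Distance measure, e.g. Distance(m=...)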
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
        Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
        The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
# Setting the options flag -- which depends on which version of
# PostGIS we're using. SpatiaLite only uses the first group of options.
if backend.spatial_version >= (1, 4, 0):
options = 0
if crs and bbox: options = 3
elif bbox: options = 1
elif crs: options = 2
else:
options = 0
if crs and bbox: options = 3
elif crs: options = 1
elif bbox: options = 2
s = {'desc' : 'GeoJSON',
'procedure_args' : {'precision' : precision, 'options' : options},
'procedure_fmt' : '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
        _characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc' : 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc' : 'GML', 'procedure_args' : {'precision' : precision}}
if backend.postgis:
# PostGIS AsGML() aggregate function parameter order depends on the
# version -- uggh.
if backend.spatial_version > (1, 3, 1):
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
else:
s['procedure_fmt'] = '%(geo_col)s,%(precision)s,%(version)s'
s['procedure_args'] = {'precision' : precision, 'version' : version}
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc' : 'KML',
'procedure_fmt' : '%(geo_col)s,%(precision)s',
'procedure_args' : {'precision' : kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
        Creates a linestring from all of the PointField geometries in
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field' : GeomField(),}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size' : size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize,
'xorigin' : xorigin, 'yorigin' : yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt' : procedure_fmt,
'procedure_args' : procedure_args,
'select_field' : GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
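    # Illustrative sketch (hypothetical ``Parcel`` model; not part of the
    # original source) of the three accepted call signatures:
    #
    #     >>> Parcel.objects.snap_to_grid(0.1)               # one size for X and Y
    #     >>> Parcel.objects.snap_to_grid(0.1, 0.2)          # X size, Y size
    #     >>> Parcel.objects.snap_to_grid(0.1, 0.2, 0., 0.)  # sizes, then origins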
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {'desc' : 'SVG',
'procedure_fmt' : '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args' : {'rel' : relative,
'precision' : precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
tmp, geo_field = self._spatial_setup('transform', field_name=field_name)
# Getting the selection SQL for the given geographic field.
field_col = self._geocol_select(geo_field, field_name)
# Why cascading substitutions? Because spatial backends like
# Oracle and MySQL already require a function call to convert to text, thus
# when there's also a transformation we need to cascade the substitutions.
# For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )'
geo_col = self.query.custom_select.get(geo_field, field_col)
# Setting the key for the field's column with the custom SELECT SQL to
# override the geometry column returned from the database.
custom_sel = '%s(%s, %s)' % (connections[self.db].ops.transform, geo_col, srid)
# TODO: Should we have this as an alias?
# custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name))
self.query.transformed_srid = srid # So other GeoQuerySet methods
self.query.custom_select[geo_field] = custom_sel
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
return self._spatial_aggregate(aggregates.Union, **kwargs)
### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None: desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function' : func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle: agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
            Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
backend = connection.ops
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in six.iteritems(default_args): settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types): model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
sel_fld = settings['select_field']
if isinstance(sel_fld, GeomField) and backend.select:
self.query.custom_select[model_att] = backend.select
if connection.ops.oracle:
sel_fld.empty_strings_allowed = False
self.query.extra_select_fields[model_att] = sel_fld
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
return self.extra(select={model_att : fmt % settings['procedure_args']},
select_params=settings['select_params'])
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
        # If geodetic, default the distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
if self.query.transformed_srid:
u, unit_name, s = get_srid_info(self.query.transformed_srid, connection)
geodetic = unit_name in geo_field.geodetic_units
if backend.spatialite and geodetic:
raise ValueError('SQLite does not support linear distance calculations on geodetic coordinate systems.')
if distance:
if self.query.transformed_srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, self.query.transformed_srid)
if geom.srid is None or geom.srid == self.query.transformed_srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, self.query.transformed_srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += ', %s(%s(%%%%s, %s), %s)' % (backend.transform, backend.from_text,
geom.srid, self.query.transformed_srid)
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, self.query.transformed_srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                    # procedures may only do queries from point columns to point geometries,
                    # so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError('Spherical distance calculation only supported with Point Geometry parameters')
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.distance_spheroid, 'spheroid' : params[1]})
else:
procedure_args.update({'function' : backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.length_spheroid, 'spheroid' : params[1]})
elif geom_3d and backend.postgis:
# Use 3D variants of perimeter and length routines on PostGIS.
if perimeter:
procedure_args.update({'function' : backend.perimeter3d})
elif length:
procedure_args.update({'function' : backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field' : DistanceField(dist_att),
'setup' : False,
'geo_field' : geo_field,
'procedure_args' : procedure_args,
'procedure_fmt' : procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field' : GeomField(),}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance' : tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {'geom_args' : ('geom',),
'select_field' : GeomField(),
'procedure_fmt' : '%(geo_col)s,%(geom)s',
'procedure_args' : {'geom' : geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
opts = self.model._meta
if not geo_field in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
self.query.add_select_related([field_name])
compiler = self.query.get_compiler(self.db)
compiler.pre_sql_setup()
for (rel_table, rel_col), field in self.query.related_select_cols:
if field == geo_field:
return compiler._field_column(geo_field, rel_table)
raise ValueError("%r not in self.query.related_select_cols" % geo_field)
elif not geo_field in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
tmp_fld, parent_model, direct, m2m = opts.get_field_by_name(geo_field.name)
return self.query.get_compiler(self.db)._field_column(geo_field, parent_model._meta.db_table)
else:
return self.query.get_compiler(self.db)._field_column(geo_field)
class GeoValuesQuerySet(ValuesQuerySet):
def __init__(self, *args, **kwargs):
super(GeoValuesQuerySet, self).__init__(*args, **kwargs)
# This flag tells `resolve_columns` to run the values through
# `convert_values`. This ensures that Geometry objects instead
# of string values are returned with `values()` or `values_list()`.
self.query.geo_values = True
class GeoValuesListQuerySet(GeoValuesQuerySet, ValuesListQuerySet):
pass
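# A minimal usage sketch (illustrative only, not part of this module). It
# assumes a hypothetical model `City` with a PointField; the public
# `distance()` GeoQuerySet method is what eventually reaches the internal
# `_distance_attribute` machinery above.
#
# >>> from django.contrib.gis.geos import Point
# >>> pnt = Point(-87.65, 41.85, srid=4326)
# >>> qs = City.objects.distance(pnt)   # attaches a `distance` attribute
# >>> qs[0].distance.km                 # a Distance object per result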
|
|
# -*- coding: utf-8 -*-
"""
Class for reading/writing analog signals in a text file.
Each column represents an AnalogSignal. All AnalogSignals share the same sampling rate.
Covers many cases where part of a file can be viewed as a CSV format.
Supported : Read/Write
Author: sgarcia
"""
import csv
import os
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import AnalogSignal, Segment
class AsciiSignalIO(BaseIO):
"""
Class for reading signal in generic ascii format.
    Columns represent signals, and they all share the same sampling rate.
    The sampling rate is either known externally or the first column can hold
    the time vector.
Usage:
>>> from neo import io
>>> r = io.AsciiSignalIO(filename='File_asciisignal_2.txt')
>>> seg = r.read_segment(lazy=False, cascade=True)
>>> print seg.analogsignals
[<AnalogSignal(array([ 39.0625 , 0. , 0. , ..., -26.85546875 ...
"""
is_readable = True
is_writable = True
supported_objects = [ Segment , AnalogSignal]
readable_objects = [ Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
('usecols' , { 'value' : None , 'type' : int } ),
('skiprows' , { 'value' :0 } ),
('timecolumn' , { 'value' : None, 'type' : int } ) ,
('unit' , { 'value' : 'V', } ),
('sampling_rate' , { 'value' : 1000., } ),
('t_start' , { 'value' : 0., } ),
('method' , { 'value' : 'homemade', 'possible' : ['genfromtxt' , 'csv' , 'homemade' ] }) ,
]
}
write_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
('writetimecolumn' , { 'value' : True, } ) ,
]
}
name = None
extensions = [ 'txt' , 'asc', ]
mode = 'file'
def __init__(self , filename = None) :
"""
        This class reads/writes AnalogSignals in a text file.
        Each signal is a column.
        One of the columns can be the time vector.
Arguments:
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy = False,
cascade = True,
delimiter = '\t',
usecols = None,
skiprows =0,
timecolumn = None,
sampling_rate = 1.*pq.Hz,
t_start = 0.*pq.s,
unit = pq.V,
method = 'genfromtxt',
):
"""
Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            usecols : if None take all columns, otherwise a list of selected columns
            skiprows : skip the first n lines in case they contain header information
            timecolumn : None or a valid int pointing to the time vector column
            sampling_rate : the sampling rate of the signals; not taken into account if timecolumn is not None
            t_start : time of the first sample
            unit : unit of the AnalogSignal; can be a str or directly a Quantity
            method : 'genfromtxt' or 'csv' or 'homemade'
                    in case of bugs you can try one of these methods
                    'genfromtxt' uses numpy.genfromtxt
                    'csv' uses the csv module
                    'homemade' uses an intuitive, more robust but slower method
"""
seg = Segment(file_origin = os.path.basename(self.filename))
if not cascade:
return seg
if type(sampling_rate) == float or type(sampling_rate)==int:
            # if not a Quantity, Hz by default
sampling_rate = sampling_rate*pq.Hz
if type(t_start) == float or type(t_start)==int:
            # if not a Quantity, s by default
t_start = t_start*pq.s
unit = pq.Quantity(1, unit)
#loadtxt
if method == 'genfromtxt' :
sig = np.genfromtxt(self.filename,
delimiter = delimiter,
usecols = usecols ,
skiprows = skiprows,
dtype = 'f')
if len(sig.shape) ==1:
sig = sig[:, np.newaxis]
elif method == 'csv' :
tab = [l for l in csv.reader( file(self.filename,'rU') , delimiter = delimiter ) ]
tab = tab[skiprows:]
sig = np.array( tab , dtype = 'f')
elif method == 'homemade' :
fid = open(self.filename,'rU')
for l in range(skiprows):
fid.readline()
tab = [ ]
for line in fid.readlines():
line = line.replace('\r','')
line = line.replace('\n','')
l = line.split(delimiter)
while '' in l :
l.remove('')
tab.append(l)
sig = np.array( tab , dtype = 'f')
if timecolumn is not None:
sampling_rate = 1./np.mean(np.diff(sig[:,timecolumn])) * pq.Hz
t_start = sig[0,timecolumn] * pq.s
for i in range(sig.shape[1]) :
if timecolumn == i : continue
if usecols is not None and i not in usecols: continue
if lazy:
signal = [ ]*unit
else:
signal = sig[:,i]*unit
anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
t_start=t_start, channel_index=i,
name='Column %d'%i)
if lazy:
anaSig.lazy_shape = sig.shape
seg.analogsignals.append( anaSig )
seg.create_many_to_one_relationship()
return seg
def write_segment(self, segment,
delimiter = '\t',
skiprows =0,
writetimecolumn = True,
):
"""
        Write a Segment and its AnalogSignals to a text file.
        **Arguments**
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            writetimecolumn : True or False; write the time vector as the first column
"""
if skiprows:
raise NotImplementedError('skiprows values other than 0 are not ' +
'supported')
l = [ ]
if writetimecolumn is not None:
l.append(segment.analogsignals[0].times[:, np.newaxis])
for anaSig in segment.analogsignals:
l.append(anaSig.magnitude[:, np.newaxis])
sigs = np.concatenate(l, axis=1)
#print sigs.shape
np.savetxt(self.filename , sigs , delimiter = delimiter)
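# A minimal write-side sketch (illustrative only; it mirrors the read example
# in the class docstring). `seg` is assumed to be a Segment that already holds
# at least one AnalogSignal, e.g. one returned by read_segment() above.
#
# >>> w = AsciiSignalIO(filename='File_asciisignal_copy.txt')
# >>> w.write_segment(seg, delimiter='\t', writetimecolumn=True)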
|
|
# TODO: Move these rules to the model or be smarter
import sys
import inspect
from models import TopologyParameterCustomValue
from system.models import Configuration
def configuration_factory(databaseinfra, memory_size):
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and '__ENGINE__' in obj.__dict__:
if obj.__ENGINE__ == databaseinfra.engine.name:
return obj(databaseinfra, memory_size)
raise NotImplementedError
def configuration_exists(engine_name, parameter_name):
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and '__ENGINE__' in obj.__dict__:
if obj.__ENGINE__ == engine_name:
parameter_name = parameter_name.replace('-', '_')
if parameter_name in obj.__dict__:
return True
return False
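# A minimal usage sketch (illustrative only, not part of the original module).
# `databaseinfra` is assumed to be a DatabaseInfra-like object whose
# engine.name matches one of the __ENGINE__ values defined below; the factory
# simply dispatches on that name.
#
# >>> config = configuration_factory(databaseinfra, memory_size=4096)
# >>> config.__class__.__name__        # e.g. 'ConfigurationRedis'
# >>> configuration_exists('redis', 'maxmemory-policy')
# True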
class ParameterObject(object):
def __init__(self, value, default):
self.value = str(value)
self.default = str(default)
class ConfigurationBase(object):
__ENGINE__ = 'None'
MB_TO_GB_FACTOR = 1.0 / 1024
MB_FORMATTER = 'MB'
GB_FORMATTER = 'GB'
def __init__(self, databaseinfra, memory_size_mega):
self.databaseinfra = databaseinfra
self._memory_size = memory_size_mega
@property
def memory_size_in_mb(self):
return self._memory_size
@property
def memory_size_in_gb(self):
return round(self._memory_size * self.MB_TO_GB_FACTOR, 2)
@property
def memory_size_in_bytes(self):
return self._memory_size * 1024 * 1024
def value_in_mb(self, value):
return "{}{}".format(int(value), self.MB_FORMATTER)
def value_in_gb(self, value):
return "{}{}".format(int(value), self.GB_FORMATTER)
def value_format(self, value):
value_in_gb = value * self.MB_TO_GB_FACTOR
        # Use the GB form only when the value is a whole number of GB
        if value_in_gb >= 1 and float(value_in_gb).is_integer():
return self.value_in_gb(value_in_gb)
return self.value_in_mb(value)
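    # Worked examples of value_format() above (illustrative only):
    #   value_format(512)  -> '512MB'   (0.5 GB is not a whole GB)
    #   value_format(1536) -> '1536MB'  (1.5 GB is not a whole GB)
    #   value_format(2048) -> '2GB'     (exactly 2 GB)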
def get_parameter(self, parameter_name, default):
value = self.databaseinfra.get_parameter_value_by_parameter_name(
parameter_name=parameter_name
)
if not value:
value = default
return ParameterObject(value, default)
def __getattribute__(self, item):
if item == 'databaseinfra':
return object.__getattribute__(self, item)
topology = self.databaseinfra.plan.replication_topology
try:
attribute = TopologyParameterCustomValue.objects.get(
topology=topology, parameter__name=item.replace("_", "-")
)
return object.__getattribute__(self, attribute.attr_name)
except TopologyParameterCustomValue.DoesNotExist:
return object.__getattribute__(self, item)
class ConfigurationRedis(ConfigurationBase):
__ENGINE__ = 'redis'
@property
def maxmemory(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_gb <= 1:
value = self.memory_size_in_bytes / 2
else:
value = self.memory_size_in_bytes * 0.75
default = int(value)
return self.get_parameter(parameter_name, default)
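    # The sizing rule above, worked out (illustrative only):
    #   1024 MB instance -> 1 GB -> maxmemory = memory_size_in_bytes / 2    = 536870912
    #   4096 MB instance -> 4 GB -> maxmemory = memory_size_in_bytes * 0.75 = 3221225472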
@property
def appendonly(self):
parameter_name = inspect.stack()[0][3]
if self.databaseinfra.plan.has_persistence:
default = 'yes'
else:
default = 'no'
return self.get_parameter(parameter_name, default)
@property
def maxmemory_policy(self):
parameter_name = inspect.stack()[0][3]
if self.databaseinfra.plan.has_persistence:
default = 'volatile-lru'
else:
default = 'allkeys-lru'
return self.get_parameter(parameter_name, default)
@property
def loglevel(self):
parameter_name = inspect.stack()[0][3]
default = 'notice'
return self.get_parameter(parameter_name, default)
@property
def databases(self):
parameter_name = inspect.stack()[0][3]
default = '1'
return self.get_parameter(parameter_name, default)
@property
def timeout(self):
parameter_name = inspect.stack()[0][3]
default = 0
return self.get_parameter(parameter_name, default)
@property
def rdbcompression(self):
parameter_name = inspect.stack()[0][3]
default = 'yes'
return self.get_parameter(parameter_name, default)
@property
def rdbchecksum(self):
parameter_name = inspect.stack()[0][3]
default = 'yes'
return self.get_parameter(parameter_name, default)
@property
def slave_serve_stale_data(self):
parameter_name = inspect.stack()[0][3]
default = 'yes'
return self.get_parameter(parameter_name, default)
@property
def slave_read_only(self):
parameter_name = inspect.stack()[0][3]
default = 'yes'
return self.get_parameter(parameter_name, default)
@property
def maxclients(self):
parameter_name = inspect.stack()[0][3]
default = 10000
return self.get_parameter(parameter_name, default)
@property
def appendfsync(self):
parameter_name = inspect.stack()[0][3]
default = 'everysec'
return self.get_parameter(parameter_name, default)
@property
def no_appendfsync_on_rewrite(self):
parameter_name = inspect.stack()[0][3]
default = 'no'
return self.get_parameter(parameter_name, default)
@property
def auto_aof_rewrite_percentage(self):
parameter_name = inspect.stack()[0][3]
default = 100
return self.get_parameter(parameter_name, default)
@property
def auto_aof_rewrite_min_size(self):
parameter_name = inspect.stack()[0][3]
default = 1073741824
return self.get_parameter(parameter_name, default)
@property
def lua_time_limit(self):
parameter_name = inspect.stack()[0][3]
default = 5000
return self.get_parameter(parameter_name, default)
@property
def slowlog_log_slower_than(self):
parameter_name = inspect.stack()[0][3]
default = 10000
return self.get_parameter(parameter_name, default)
@property
def slowlog_max_len(self):
parameter_name = inspect.stack()[0][3]
default = 1024
return self.get_parameter(parameter_name, default)
@property
def hash_max_ziplist_entries(self):
parameter_name = inspect.stack()[0][3]
default = 512
return self.get_parameter(parameter_name, default)
@property
def hash_max_ziplist_value(self):
parameter_name = inspect.stack()[0][3]
default = 64
return self.get_parameter(parameter_name, default)
@property
def set_max_intset_entries(self):
parameter_name = inspect.stack()[0][3]
default = 512
return self.get_parameter(parameter_name, default)
@property
def zset_max_ziplist_entries(self):
parameter_name = inspect.stack()[0][3]
default = 128
return self.get_parameter(parameter_name, default)
@property
def zset_max_ziplist_value(self):
parameter_name = inspect.stack()[0][3]
default = 64
return self.get_parameter(parameter_name, default)
@property
def activerehashing(self):
parameter_name = inspect.stack()[0][3]
default = 'yes'
return self.get_parameter(parameter_name, default)
@property
def repl_ping_slave_period(self):
parameter_name = inspect.stack()[0][3]
default = 1
return self.get_parameter(parameter_name, default)
@property
def repl_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 3600
return self.get_parameter(parameter_name, default)
@property
def repl_disable_tcp_nodelay(self):
parameter_name = inspect.stack()[0][3]
default = 'no'
return self.get_parameter(parameter_name, default)
@property
def repl_backlog_size(self):
parameter_name = inspect.stack()[0][3]
default = 1048576
return self.get_parameter(parameter_name, default)
@property
def repl_backlog_ttl(self):
parameter_name = inspect.stack()[0][3]
default = 3600
return self.get_parameter(parameter_name, default)
@property
def client_output_buffer_limit_normal(self):
parameter_name = inspect.stack()[0][3]
default = "0 0 0"
return self.get_parameter(parameter_name, default)
@property
def client_output_buffer_limit_slave(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_gb <= 1:
default = "536870912 536870912 3600"
elif self.memory_size_in_gb <= 2:
default = "1073741824 1073741824 3600"
elif self.memory_size_in_gb <= 4:
default = "2147483648 2147483648 3600"
else:
default = "4294967296 4294967296 3600"
return self.get_parameter(parameter_name, default)
@property
def client_output_buffer_limit_pubsub(self):
parameter_name = inspect.stack()[0][3]
default = "33554432 8388608 60"
return self.get_parameter(parameter_name, default)
@property
def cluster_enabled(self):
return 'no'
@property
def cluster_enabled_true(self):
return 'yes'
@property
def save(self):
parameter_name = inspect.stack()[0][3]
default = '7200 1 3600 10 1800 10000'
return self.get_parameter(parameter_name, default)
@property
def save_list(self):
save_value = self.save.value
save_list = save_value.split()
if len(save_list) % 2 != 0:
raise Exception(
'Invalid argument {} for save parameter.'.format(save_value)
)
save_list2 = []
for i in range(0, len(save_list), 2):
item = "{} {}".format(save_list[i], save_list[i + 1])
save_list2.append(item)
return save_list2
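    # Example of the pair-splitting above (illustrative only): with the default
    # save value '7200 1 3600 10 1800 10000', save_list yields
    # ['7200 1', '3600 10', '1800 10000'], i.e. one "<seconds> <changes>"
    # RDB save point per pair.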
@property
def cluster_node_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 5000
return self.get_parameter(parameter_name, default)
class ConfigurationMySQL(ConfigurationBase):
__ENGINE__ = 'mysql'
@property
def query_cache_size(self):
parameter_name = inspect.stack()[0][3]
default = 0
return self.get_parameter(parameter_name, default)
@property
def max_allowed_packet(self):
parameter_name = inspect.stack()[0][3]
default = 4194304
return self.get_parameter(parameter_name, default)
@property
def sort_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = int(self.memory_size_in_bytes / 204.8)
return self.get_parameter(parameter_name, default)
@property
def tmp_table_size(self):
parameter_name = inspect.stack()[0][3]
default = int(self.memory_size_in_bytes / 64)
return self.get_parameter(parameter_name, default)
@property
def max_heap_table_size(self):
parameter_name = inspect.stack()[0][3]
default = 16777216
return self.get_parameter(parameter_name, default)
@property
def max_binlog_size(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_mb < 2048:
default = 52428800
elif self.memory_size_in_mb < 8192:
default = 104857600
elif self.memory_size_in_mb < 32768:
default = 262144000
else:
default = 524288000
return self.get_parameter(parameter_name, default)
@property
def key_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = 8388608
return self.get_parameter(parameter_name, default)
@property
def myisam_sort_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = 8388608
return self.get_parameter(parameter_name, default)
@property
def read_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = 131072
return self.get_parameter(parameter_name, default)
@property
def read_rnd_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = 262144
return self.get_parameter(parameter_name, default)
@property
def innodb_buffer_pool_size(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_mb < 1024:
default = self.memory_size_in_bytes / 4
elif self.memory_size_in_mb < 8192:
default = self.memory_size_in_bytes / 2
else:
default = (self.memory_size_in_bytes * 3) / 4
default = int(default)
return self.get_parameter(parameter_name, default)
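    # Worked examples of the buffer-pool tiers above (illustrative only):
    #   512 MB instance   -> 1/4 of RAM -> 134217728 bytes
    #   4096 MB instance  -> 1/2 of RAM -> 2147483648 bytes
    #   16384 MB instance -> 3/4 of RAM -> 12884901888 bytes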
@property
def innodb_log_file_size(self):
parameter_name = inspect.stack()[0][3]
default = 50331648
return self.get_parameter(parameter_name, default)
@property
def innodb_log_buffer_size(self):
parameter_name = inspect.stack()[0][3]
default = 8388608
return self.get_parameter(parameter_name, default)
@property
def binlog_format(self):
parameter_name = inspect.stack()[0][3]
default = 'ROW'
return self.get_parameter(parameter_name, default)
@property
def transaction_isolation(self):
parameter_name = inspect.stack()[0][3]
default = 'READ-COMMITTED'
return self.get_parameter(parameter_name, default)
@property
def default_storage_engine(self):
parameter_name = inspect.stack()[0][3]
default = 'InnoDB'
return self.get_parameter(parameter_name, default)
@property
def default_tmp_storage_engine(self):
parameter_name = inspect.stack()[0][3]
default = 'InnoDB'
return self.get_parameter(parameter_name, default)
@property
def character_set_server(self):
parameter_name = inspect.stack()[0][3]
default = 'utf8'
return self.get_parameter(parameter_name, default)
@property
def collation_server(self):
parameter_name = inspect.stack()[0][3]
default = 'utf8_general_ci'
return self.get_parameter(parameter_name, default)
@property
def max_connections(self):
parameter_name = inspect.stack()[0][3]
default = 1000
return self.get_parameter(parameter_name, default)
@property
def max_connect_errors(self):
parameter_name = inspect.stack()[0][3]
default = 999999
return self.get_parameter(parameter_name, default)
@property
def thread_cache_size(self):
parameter_name = inspect.stack()[0][3]
default = 32
return self.get_parameter(parameter_name, default)
@property
def table_open_cache(self):
parameter_name = inspect.stack()[0][3]
default = 4096
return self.get_parameter(parameter_name, default)
@property
def query_cache_type(self):
parameter_name = inspect.stack()[0][3]
default = 'OFF'
return self.get_parameter(parameter_name, default)
@property
def sync_binlog(self):
parameter_name = inspect.stack()[0][3]
default = 1
return self.get_parameter(parameter_name, default)
@property
def expire_logs_days(self):
parameter_name = inspect.stack()[0][3]
default = 3
return self.get_parameter(parameter_name, default)
@property
def long_query_time(self):
parameter_name = inspect.stack()[0][3]
default = '1.000000'
return self.get_parameter(parameter_name, default)
@property
def slow_query_log(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def innodb_autoextend_increment(self):
parameter_name = inspect.stack()[0][3]
default = 8
return self.get_parameter(parameter_name, default)
@property
def innodb_file_per_table(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def innodb_lock_wait_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 50
return self.get_parameter(parameter_name, default)
@property
def innodb_flush_log_at_trx_commit(self):
parameter_name = inspect.stack()[0][3]
default = 1
return self.get_parameter(parameter_name, default)
@property
def innodb_thread_concurrency(self):
parameter_name = inspect.stack()[0][3]
default = 16
return self.get_parameter(parameter_name, default)
@property
def innodb_max_dirty_pages_pct(self):
parameter_name = inspect.stack()[0][3]
default = 90
return self.get_parameter(parameter_name, default)
@property
def innodb_max_purge_lag(self):
parameter_name = inspect.stack()[0][3]
default = 0
return self.get_parameter(parameter_name, default)
@property
def explicit_defaults_for_timestamp(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def performance_schema(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_mb < 8192:
default = 'OFF'
else:
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def thread_stack(self):
parameter_name = inspect.stack()[0][3]
default = 196608
return self.get_parameter(parameter_name, default)
@property
def log_slave_updates(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def innodb_log_files_in_group(self):
parameter_name = inspect.stack()[0][3]
default = 3
return self.get_parameter(parameter_name, default)
@property
def innodb_flush_method(self):
parameter_name = inspect.stack()[0][3]
default = 'O_DIRECT'
return self.get_parameter(parameter_name, default)
@property
def skip_external_locking(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def skip_name_resolve(self):
parameter_name = inspect.stack()[0][3]
default = 'ON'
return self.get_parameter(parameter_name, default)
@property
def wait_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 28800
return self.get_parameter(parameter_name, default)
@property
def interactive_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 28800
return self.get_parameter(parameter_name, default)
@property
def log_bin_trust_function_creators(self):
parameter_name = inspect.stack()[0][3]
default = 'OFF'
return self.get_parameter(parameter_name, default)
@property
def sql_mode(self):
parameter_name = inspect.stack()[0][3]
default = 'default'
return self.get_parameter(parameter_name, default)
@property
def audit_log_format(self):
parameter_name = inspect.stack()[0][3]
default = 'NEW'
return self.get_parameter(parameter_name, default)
@property
def audit_log_rotate_on_size(self):
parameter_name = inspect.stack()[0][3]
default = 0
return self.get_parameter(parameter_name, default)
@property
def audit_log_exclude_accounts(self):
parameter_name = inspect.stack()[0][3]
default = ''
return self.get_parameter(parameter_name, default)
@property
def audit_log_policy(self):
parameter_name = inspect.stack()[0][3]
default = 'NONE'
return self.get_parameter(parameter_name, default)
@property
def init_connect(self):
parameter_name = inspect.stack()[0][3]
default = ''
return self.get_parameter(parameter_name, default)
@property
def slave_net_timeout(self):
parameter_name = inspect.stack()[0][3]
default = 60
return self.get_parameter(parameter_name, default)
class ConfigurationMongoDB(ConfigurationBase):
__ENGINE__ = 'mongodb'
@property
def systemLog_quiet(self):
parameter_name = inspect.stack()[0][3]
default = False
return self.get_parameter(parameter_name, default)
def __getattr__(self, item):
if '.' in item:
item = item.replace('.', '_')
return self.__getattribute__(item)
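    # The dotted-name fallback above lets callers use MongoDB's native
    # configuration paths (illustrative only):
    # >>> getattr(mongodb_config, 'systemLog.quiet')   # resolves systemLog_quiet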
@property
def oplogSize(self):
parameter_name = inspect.stack()[0][3]
default = Configuration.get_by_name_as_int(
'parameter_{}'.format(parameter_name), 512
)
return self.get_parameter(parameter_name, default)
@property
def quiet(self):
parameter_name = inspect.stack()[0][3]
default = 'false'
return self.get_parameter(parameter_name, default)
@property
def logLevel(self):
parameter_name = inspect.stack()[0][3]
default = 0
return self.get_parameter(parameter_name, default)
@property
def wiredTiger_engineConfig_cacheSizeGB(self):
parameter_name = inspect.stack()[0][3]
if self.memory_size_in_mb < 2564:
cache_mb = 256
else:
cache_mb = (self.memory_size_in_mb - 1024) / 2
default = round(cache_mb / 1024.0, 2)
return self.get_parameter(parameter_name, default)
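    # Cache sizing above, worked out (illustrative only):
    #   2048 MB instance -> below the 2564 MB threshold -> 256 MB -> 0.25 GB
    #   8192 MB instance -> (8192 - 1024) / 2 = 3584 MB -> 3.5 GB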
class ConfigurationMySQLPercona(ConfigurationMySQL):
__ENGINE__ = 'mysql_percona'
|
|
# HTTP response (JSON) parser
import json
from application import *
from http_req import *
from url_gen import *
from datetime import datetime
class AbstractParser(object):
def __init__(self, server):
self.server = server
self.url_gen = UrlGen()
self.requester = HttpRequester()
def parse_json(self, json_input, parse_type):
pass
def get_data(self):
pass
def get_sample_data(self):
pass
def _get_response(self, rest_api):
url = self.url_gen.get_url(self.server, rest_api)
json_string = self.requester.single_request(url)
return json_string
class AppParser(AbstractParser):
def __init__(self, server):
super(AppParser, self).__init__(server)
self.apps = []
def parse_json(self, app_json, parse_type):
if parse_type == 'appid':
return self.parse_app_id(app_json)
def parse_app_id(self, app_json):
jsd = json.loads(app_json)
apps = []
for app in jsd:
app_dict = {}
            # for every app id, collect the relevant fields
for key, value in app.iteritems():
if key == 'id':
app_dict['id'] = value
if key == 'name':
app_dict['name'] = value
if key == 'attempts':
app_dict['duration'] = str(value[0]['duration'])
app_dict['start_time'] = value[0]['startTime']
app_dict['end_time'] = value[0]['endTime']
apps.append(app_dict)
self.apps = apps
return apps
def parse_json_redis(self, json_input):
jsd = json.loads(json_input)
entries = {}
for app in jsd:
entry_key = app['id']
entry_value = {
'id': app['id'],
'name': app['name'],
'duration': str(app['attempts'][0]['duration']),
# 'start_time': app['attempts'][0]['startTime'],
# 'end_time': app['attempts'][0]['startTime'],
}
entries[entry_key] = entry_value
return entries
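    # Shape of the mapping returned above (illustrative only; the input is
    # assumed to be the application list served by the Spark history server
    # REST API):
    # {'app-20150101000000-0001': {'id': 'app-20150101000000-0001',
    #                              'name': 'my job',
    #                              'duration': '12345'}, ...}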
def get_sample_data(self):
rest_api = ''
json_string = self._get_response(rest_api)
response = dict(self.parse_json_redis(json_string).items()[:10])
return response
def get_data(self):
rest_api = ''
json_string = self._get_response(rest_api)
response = self.parse_json_redis(json_string)
return response
class JobParser(AbstractParser):
def __init__(self, server):
super(JobParser, self).__init__(server)
self.app_parser = AppParser(server)
def parse_json(self, job_json, parse_type):
if parse_type == 'jobid':
return self.parse_jobid(job_json)
return None
def parse_jobid(self, job_json):
jsd = json.loads(job_json)
jobs = []
for job in jsd:
new_job = Job()
new_job.id = job['jobId']
new_job.submission_time = job['submissionTime']
new_job.completion_time = job['completionTime']
new_job.stage_ids = job['stageIds']
new_job.status = job['status'] == 'SUCCEEDED'
new_job.num_tasks = job['numTasks']
new_job.num_active_tasks = job['numActiveTasks']
new_job.num_completed_tasks = job['numCompletedTasks']
new_job.num_skipped_tasks = job['numSkippedTasks']
new_job.num_failed_tasks = job['numFailedTasks']
new_job.num_active_stages = job['numActiveStages']
new_job.num_completed_stages = job['numCompletedStages']
new_job.num_skipped_stages = job['numSkippedStages']
new_job.num_failed_stages = job['numFailedStages']
jobs.append(new_job)
return jobs
def parse_json_redis(self, json_input, app_id):
entries = {}
jsd = json.loads(json_input)
for job in jsd:
entry_key = app_id + ':jobs:' + str(job['jobId'])
entry_value = {
'jobId': job['jobId'],
'submissionTime': job['submissionTime'],
'completionTime': job['completionTime'],
'stageIds': job['stageIds'],
'status': job['status'],
'numTasks': job['numTasks'],
'numActiveTasks': job['numActiveTasks'],
'numCompletedTasks': job['numCompletedTasks'],
'numSkippedTasks': job['numSkippedTasks'],
'numFailedTasks': job['numFailedTasks'],
'numActiveStages': job['numActiveStages'],
'numCompletedStages': job['numCompletedStages'],
'numSkippedStages': job['numSkippedStages'],
'numFailedStages': job['numFailedStages'],
}
entries[entry_key] = entry_value
return entries
def get_rest_api(self, app_id):
rest_api = app_id + '/' + 'jobs'
return rest_api
def get_data(self):
apps = self.app_parser.get_data()
response = {}
for app in apps.values():
app_id = app['id']
rest_api = self.get_rest_api(app_id)
json_string = self._get_response(rest_api)
response.update(self.parse_json_redis(json_string, app_id))
return response
class StageParser(AbstractParser):
def __init__(self, server):
super(StageParser, self).__init__(server)
self.app_parser = AppParser(server)
def parse_json(self, stage_json, parse_type):
if parse_type == 'stageid':
return self.parse_stage_id(stage_json)
elif parse_type == 'attemptid':
return self.parse_attempt_id(attempt_json=stage_json)
return None
def parse_stage_id(self, stage_json):
jsd = json.loads(stage_json)
stages = []
for stage in jsd:
try:
new_stage = Stage()
new_stage.id = int(stage['stageId'])
new_stage.num_active_tasks = int(stage['numActiveTasks'])
new_stage.num_completed_tasks = int(stage['numCompleteTasks'])
new_stage.num_failed_tasks = int(stage['numFailedTasks'])
new_stage.input_bytes = float(stage['inputBytes'])
sub_time = datetime.strptime(stage['submissionTime'],
"%Y-%m-%dT%H:%M:%S.%fGMT")
comp_time = datetime.strptime(stage['completionTime'],
"%Y-%m-%dT%H:%M:%S.%fGMT")
new_stage.completion_time = (comp_time - sub_time).total_seconds()
new_stage.shuffle_write_bytes = float(stage['shuffleWriteBytes'])
                new_stage.shuffle_read_bytes = float(stage['shuffleReadBytes'])
stages.append(new_stage)
except KeyError:
pass
return stages
def parse_attempt_id(self, attempt_json):
pass
def parse_json_redis(self, json_input, app_id):
entries = {}
jsd = json.loads(json_input)
for stage in jsd:
try:
sub_time = datetime.strptime(stage['submissionTime'],
"%Y-%m-%dT%H:%M:%S.%fGMT")
comp_time = datetime.strptime(stage['completionTime'],
"%Y-%m-%dT%H:%M:%S.%fGMT")
entry_key = app_id + ':stages:' + str(stage['stageId'])
entry_value = {
'app_id': app_id,
'stageId': stage['stageId'],
'status': stage['status'],
'attemptId': stage['attemptId'],
'numActiveTasks': int(stage['numActiveTasks']),
'numCompleteTasks': int(stage['numCompleteTasks']),
'numFailedTasks': int(stage['numFailedTasks']),
'inputBytes-%d'% stage['stageId']: float(stage['inputBytes']),
'completionTime-%d' % stage['stageId']: (comp_time - sub_time).total_seconds(),
'shuffleWriteBytes-%d' % stage['stageId']: float(stage['shuffleWriteBytes']),
'shuffleReadBytes-%d' % stage['stageId']: float(stage['shuffleReadBytes'])
}
entries[entry_key] = entry_value
except KeyError:
pass
return entries
def get_rest_api(self, app_id):
rest_api = app_id + '/' + 'stages'
return rest_api
def get_sample_data(self):
apps = dict(self.app_parser.get_data().items()[:10])
response = {}
for app in apps.values():
app_id = app['id']
rest_api = self.get_rest_api(app_id)
json_string = self._get_response(rest_api)
response.update(self.parse_json_redis(json_string, app_id))
return response
def get_data(self, app_ids=None):
apps = self.app_parser.get_data()
response = {}
if app_ids:
for app_id in app_ids:
rest_api = self.get_rest_api(app_id)
json_string = self._get_response(rest_api)
response.update(self.parse_json_redis(json_string, app_id))
else:
for app in apps.values():
app_id = app['id']
rest_api = self.get_rest_api(app_id)
json_string = self._get_response(rest_api)
response.update(self.parse_json_redis(json_string, app_id))
return response
class TaskParser(AbstractParser):
def __init__(self, server):
super(TaskParser, self).__init__(server)
self.stage_parser = StageParser(server)
def parse_json(self, json_input, parse_type):
pass
def parse_json_redis(self, json_input, app_id, stage_id):
entries = {}
jsd = json.loads(json_input)[0]['tasks']
for task in jsd.values():
entry_key = app_id + ':stages:' + stage_id + ':tasks:' + str(task['taskId'])
entry_value = {
'taskId': task['taskId'],
'attempt': task['attempt'],
'launchTime': task['launchTime'],
'executorId': task['executorId'],
'host': task['host'],
'taskLocality': task['taskLocality'],
'taskMetrics': task['taskMetrics'],
}
entries[entry_key] = entry_value
return entries
def get_data(self):
stages = self.stage_parser.get_data()
all_response = {}
for key, stage in stages.iteritems():
app_id = key.split(':')[0]
stage_id = str(stage['stageId'])
rest_api = self.get_rest_api(app_id, stage_id)
json_string = self._get_response(rest_api)
response = self.parse_json_redis(json_string, app_id, stage_id)
all_response.update(response)
return all_response
def get_rest_api(self, app_id, stage_id):
rest_api = app_id + '/' + 'stages' + '/' + stage_id
return rest_api
class ExecParser(AbstractParser):
def __init__(self, server):
super(ExecParser, self).__init__(server)
def parse_json(self, json_input, parse_type):
# parse the executor json
# not storing the info anymore
jsd = json.loads(json_input)
result = []
for executor in jsd:
if executor['id'] == 'driver':
# not dealing with drivers here
continue
# not driver, drop many keys
executor.pop('diskUsed', None)
executor.pop('executorLogs', None)
executor.pop('rddBlocks', None)
executor.pop('maxMemory', None)
executor.pop('totalTasks', None)
executor.pop('maxTasks', None)
executor.pop('memoryUsed', None)
executor.pop('isActive', None)
result.append(executor)
return result
class ParserFactory(object):
parser_types = {'app', 'job', 'stage', 'task', 'exec'}
@staticmethod
def get_parser(parser_type, server):
if parser_type not in ParserFactory.parser_types:
raise Exception("Unknown parser type!")
if parser_type == "app":
return AppParser(server)
elif parser_type == "job":
return JobParser(server)
elif parser_type == "stage":
return StageParser(server)
elif parser_type == "exec":
return ExecParser(server)
else:
return TaskParser(server)
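# A minimal usage sketch (illustrative only, not part of the original module).
# `server` is assumed to be whatever host string UrlGen expects.
#
# >>> parser = ParserFactory.get_parser('stage', server)
# >>> stage_entries = parser.get_data()   # {'<appId>:stages:<stageId>': {...}, ...}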
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-02 10:09:43 by Brian McFee <brian.mcfee@nyu.edu>
'''Time stretching deformations'''
import jams
import librosa
import pyrubberband as pyrb
import numpy as np
import pandas as pd
from ..base import BaseTransformer
__all__ = ['TimeStretch',
'RandomTimeStretch',
'LogspaceTimeStretch',
'AnnotationBlur',
'Splitter']
class AbstractTimeStretch(BaseTransformer):
'''Abstract base class for time stretching'''
def __init__(self):
'''Abstract base class for time stretching.
This contains the deformation functions and
annotation query mapping, but does not manage
state or parameters.
'''
BaseTransformer.__init__(self)
# Build the annotation mappers
self._register('.*', self.deform_times)
self._register('tempo', self.deform_tempo)
@staticmethod
def audio(mudabox, state):
'''Deform the audio and metadata'''
mudabox._audio['y'] = pyrb.time_stretch(mudabox._audio['y'],
mudabox._audio['sr'],
state['rate'])
@staticmethod
def metadata(metadata, state):
'''Deform the metadata'''
metadata.duration /= state['rate']
@staticmethod
def deform_tempo(annotation, state):
'''Deform a tempo annotation'''
annotation.data.value *= state['rate']
@staticmethod
def deform_times(ann, state):
'''Deform time values for all annotations.'''
ann.data.time = [pd.to_timedelta(x.total_seconds() / state['rate'],
unit='s')
for x in ann.data.time]
ann.data.duration = [pd.to_timedelta(x.total_seconds() / state['rate'],
unit='s')
for x in ann.data.duration]
class TimeStretch(AbstractTimeStretch):
'''Static time stretching by a fixed rate'''
def __init__(self, rate=1.2):
'''Time stretching
Parameters
----------
rate : float > 0
            The rate at which to speed up the audio.
rate > 1 speeds up,
rate < 1 slows down.
'''
AbstractTimeStretch.__init__(self)
self.rate = float(rate)
if rate <= 0:
raise ValueError('rate parameter must be strictly positive.')
def states(self, jam):
yield dict(rate=self.rate)
class LogspaceTimeStretch(AbstractTimeStretch):
'''Logarithmically spaced time stretching'''
def __init__(self, n_samples=3, lower=0.8, upper=1.2):
'''Generate stretched examples distributed uniformly
in log-time.
'''
AbstractTimeStretch.__init__(self)
if upper <= lower:
raise ValueError('upper must be strictly larger than lower')
if n_samples <= 0:
raise ValueError('n_samples must be strictly positive')
self.n_samples = n_samples
self.lower = float(lower)
self.upper = float(upper)
def states(self, jam):
'''Set the state for the transformation object.'''
rates = 2.0**np.linspace(self.lower,
self.upper,
num=self.n_samples,
endpoint=True)
for rate in rates:
yield dict(rate=rate)
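    # Example of the sampling above (illustrative only): with the defaults
    # n_samples=3, lower=0.8, upper=1.2, the generated rates are
    # 2**0.8, 2**1.0, 2**1.2 ~= 1.74, 2.00, 2.30.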
class RandomTimeStretch(AbstractTimeStretch):
'''Random time stretching'''
def __init__(self, n_samples=3, location=0.0, scale=1.0e-1):
'''Generate randomly stretched examples.
For each deformation, the rate parameter is drawn from a
log-normal distribution with parameters `(location, scale)`
'''
AbstractTimeStretch.__init__(self)
if scale <= 0:
raise ValueError('scale parameter must be strictly positive.')
if not (n_samples > 0 or n_samples is None):
raise ValueError('n_samples must be None or positive')
self.n_samples = n_samples
self.location = location
self.scale = scale
def states(self, jam):
'''Set the state for a transformation object.
For a random time stretch, this corresponds to sampling
from the stretch distribution.
'''
rates = np.random.lognormal(mean=self.location,
sigma=self.scale,
size=self.n_samples)
for rate in rates:
yield dict(rate=rate)
class AnnotationBlur(BaseTransformer):
'''Randomly perturb the timing of observations.'''
def __init__(self, n_samples=3, mean=0.0, sigma=1.0,
time=True, duration=False):
'''Randomly perturb the timing of observations in a JAMS annotation.
Parameters
----------
n_samples : int > 0 or None
The number of perturbations to generate
mean : float
sigma: float > 0
The mean and standard deviation of timing noise
time : bool
duration : bool
Whether to perturb `time` or `duration` fields.
Note that duration fields may change near the end of the track,
even when `duration` is false, in order to ensure that
`time + duration <= track_duration`.
'''
BaseTransformer.__init__(self)
if sigma <= 0:
raise ValueError('sigma must be strictly positive')
if not (n_samples > 0 or n_samples is None):
raise ValueError('n_samples must be None or positive')
self.n_samples = n_samples
self.mean = float(mean)
self.sigma = float(sigma)
self.time = time
self.duration = duration
self._register('.*', self.deform_annotation)
def states(self, jam):
'''Get the state information from the jam'''
state = dict()
mudabox = jam.sandbox.muda
state['duration'] = librosa.get_duration(y=mudabox._audio['y'],
sr=mudabox._audio['sr'])
yield state
def deform_annotation(self, annotation, state):
'''Deform the annotation'''
track_duration = state['duration']
# Get the time in seconds
t = np.asarray([x.total_seconds() for x in annotation.data.time])
if self.time:
# Deform
t += np.random.normal(loc=self.mean,
scale=self.sigma,
size=t.shape)
# Clip to the track duration
t = np.clip(t, 0, track_duration)
annotation.data.time = pd.to_timedelta(t, unit='s')
# Get the time in seconds
d = np.asarray([x.total_seconds() for x in annotation.data.duration])
if self.duration:
# Deform
d += np.random.normal(loc=self.mean,
scale=self.sigma,
size=d.shape)
# Clip to the track duration - interval start
d = [np.clip(d_i, 0, track_duration - t_i) for (d_i, t_i) in zip(d, t)]
annotation.data.duration = pd.to_timedelta(d, unit='s')
class Splitter(BaseTransformer):
'''Split a single jams object into multiple small tiles'''
def __init__(self, duration=10.0, stride=5.0, min_duration=0.5):
'''
Parameters
----------
duration : float > 0
The (maximum) length (in seconds) of the sampled objects
stride : float > 0
The amount (in seconds) to advance between each sample
min_duration : float >= 0
The minimum duration to allow. If the cropped example is too
small, it will not be generated.
'''
BaseTransformer.__init__(self)
if duration <= 0:
raise ValueError('duration must be strictly positive')
if stride <= 0:
raise ValueError('stride must be strictly positive')
if min_duration < 0:
raise ValueError('min_duration must be non-negative')
self.duration = duration
self.stride = stride
self.min_duration = min_duration
self._register('.*', self.crop_times)
def states(self, jam):
'''Set the state for the transformation object'''
state = dict()
mudabox = jam.sandbox.muda
state['track_duration'] = librosa.get_duration(y=mudabox._audio['y'],
sr=mudabox._audio['sr'])
offsets = np.arange(start=0,
stop=(state['track_duration'] - self.min_duration),
step=self.stride)
for t in offsets:
state['offset'] = t
yield state
def metadata(self, metadata, state):
'''Adjust the metadata'''
metadata.duration = np.minimum(self.duration,
(state['track_duration'] -
state['offset']))
def audio(self, mudabox, state):
'''Crop the audio'''
offset_idx = int(state['offset'] * mudabox._audio['sr'])
duration = int(self.duration * mudabox._audio['sr'])
mudabox._audio['y'] = mudabox._audio['y'][offset_idx:offset_idx +
duration]
def crop_times(self, annotation, state):
'''Crop the annotation object'''
# Convert timings to td64
min_time = pd.to_timedelta(state['offset'], unit='s')
duration = pd.to_timedelta(self.duration, unit='s')
# Get all the rows where
# min_time <= time + duration
# time <= state.offset[state.index] + state.duration
data = annotation.data
data = data[data['time'] + data['duration'] >= min_time]
data = data[data['time'] <= min_time + duration]
# Move any partially contained intervals up to the feasible range
# [ | )
# t s t+d1 = s + d2
# d2 = d1 + (t - s)
# s = max(t, min_time)
# d2 = d1 - (max(t, min_time) - t)
# = d1 - max(t - t, min_time - t)
# = d1 - max(0, min_time - t)
shift = np.maximum(0, min_time - data['time'])
data['duration'] -= shift
# And now reset everything to the new t=0
# time -= min_time
data['time'] -= min_time
data['time'] = data['time'].clip(lower=pd.to_timedelta(0, unit='s'))
# For any rows with time + duration > self.duration:
# [ | )
# t d2 d1
# t + d2 <= duration
# d2 <= duration - t
# d2 = min(d1, duration - t)
# duration = min(duration, self.duration - time)
data['duration'] = np.minimum(data['duration'],
duration - data['time'])
data = data.reset_index()
annotation.data = jams.JamsFrame.from_dataframe(data)
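    # Worked example of the cropping above (illustrative only): with
    # offset=5s and duration=10s, an observation at time=4s, duration=3s
    # survives the filter (4 + 3 >= 5), gets shift = max(0, 5 - 4) = 1s so its
    # duration becomes 2s, and its time is rebased to 4 - 5 = -1s and clipped
    # to 0s, i.e. it now starts at the new origin with 2s remaining.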
|
|
"""
OOBHandler - Out Of Band Handler
The OOBHandler.execute_cmd is called by the sessionhandler when it
detects an `oob` keyword in the outgoing data (usually called via
`msg(oob=...)`).
How this works is that the handler executes an oobfunction, which is
defined in a user-supplied module. This function can then make use of
the oobhandler's functionality to return data, register a monitor on
an object's properties or start a repeating action.
"""
from collections import defaultdict
from django.conf import settings
from evennia.server.models import ServerConfig
from evennia.server.sessionhandler import SESSIONS
from evennia.scripts.tickerhandler import TickerHandler
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from evennia.utils import logger
from evennia.utils.utils import make_iter, mod_import
_SA = object.__setattr__
_GA = object.__getattribute__
_DA = object.__delattr__
# set at the bottom of this module
_OOB_FUNCS = None
_OOB_ERROR = None
#
# TrackerHandler is assigned to objects that should notify themselves to
# the OOB system when some property changes. This is never assigned manually
# but automatically through the OOBHandler.
#
class OOBFieldMonitor(object):
"""
This object should be stored on the
tracked object as "_oob_at_<fieldname>_postsave".
    its __call__() method will be invoked by the
    save mechanism, which in turn will call the
    subscribed user-customizable oob functions.
"""
def __init__(self, obj):
"""
This initializes the monitor with the object it sits on.
Args:
obj (Object): object handler is defined on.
"""
self.obj = obj
self.subscribers = defaultdict(list)
def __call__(self, fieldname):
"""
Called by the save() mechanism when the given field has
updated.
Args:
fieldname (str): The field to monitor
"""
for sessid, oobtuples in self.subscribers.items():
# oobtuples is a list [(oobfuncname, args, kwargs), ...],
# a potential list of oob commands to call when this
# field changes.
for (oobfuncname, args, kwargs) in oobtuples:
OOB_HANDLER.execute_cmd(sessid, oobfuncname, fieldname, self.obj, *args, **kwargs)
def add(self, sessid, oobfuncname, *args, **kwargs):
"""
Add a specific tracking callback to monitor
Args:
sessid (int): Session id
oobfuncname (str): oob command to call when field updates
            args,kwargs (any): arguments to pass to the oob command
Notes:
Each sessid may have a list of (oobfuncname, args, kwargs)
tuples, all of which will be executed when the
field updates.
"""
self.subscribers[sessid].append((oobfuncname, args, kwargs))
def remove(self, sessid, oobfuncname=None):
"""
Remove a subscribing session from the monitor
Args:
sessid(int): Session id
oobfuncname (str, optional): Only delete this cmdname.
If not given, delete all.
"""
if oobfuncname:
self.subscribers[sessid] = [item for item in self.subscribers[sessid]
if item[0] != oobfuncname]
else:
self.subscribers.pop(sessid, None)
class OOBAtRepeater(object):
"""
This object is created and used by the `OOBHandler.repeat` method.
It will be assigned to a target object as a custom variable, e.g.:
    `obj._oob_ECHO_every_20s_for_sessid_1 = OOBAtRepeater()`
It will be called every interval seconds by the OOBHandler,
triggering whatever OOB function it is set to use.
"""
def __call__(self, *args, **kwargs):
"Called at regular intervals. Calls the oob function"
OOB_HANDLER.execute_cmd(kwargs["_sessid"], kwargs["_oobfuncname"], *args, **kwargs)
# Main OOB Handler
class OOBHandler(TickerHandler):
"""
The OOBHandler manages all server-side OOB functionality
"""
def __init__(self, *args, **kwargs):
"""
Setup the tickerhandler wrapper.
"""
super(OOBHandler, self).__init__(*args, **kwargs)
self.save_name = "oob_ticker_storage"
self.oob_save_name = "oob_monitor_storage"
self.oob_monitor_storage = {}
def _get_repeater_hook_name(self, oobfuncname, interval, sessid):
"""
Get the unique repeater call hook name for this object
Args:
oobfuncname (str): OOB function to retrieve
interval (int): Repeat interval
sessid (int): The Session id.
Returns:
hook_name (str): The repeater hook, when created, is a
dynamically assigned function that gets assigned to a
variable with a name created by combining the arguments.
"""
return "_oob_%s_every_%ss_for_sessid_%s" % (oobfuncname, interval, sessid)
def _get_fieldmonitor_name(self, fieldname):
"""
Get the fieldmonitor name.
Args:
fieldname (str): The field monitored.
Returns:
fieldmonitor_name (str): A dynamic function name
created from the argument.
"""
return "_oob_at_%s_postsave" % fieldname
def _add_monitor(self, obj, sessid, fieldname, oobfuncname, *args, **kwargs):
"""
Helper method. Creates a fieldmonitor and store it on the
object. This tracker will be updated whenever the given field
changes.
Args:
obj (Object): The object on which to store the monitor.
sessid (int): The Session id associated with the monitor.
fieldname (str): The field to monitor
oobfuncname (str): The OOB callback function to trigger when
field `fieldname` changes.
args, kwargs (any): Arguments to pass on to the callback.
"""
fieldmonitorname = self._get_fieldmonitor_name(fieldname)
if not hasattr(obj, fieldmonitorname):
# assign a new fieldmonitor to the object
_SA(obj, fieldmonitorname, OOBFieldMonitor(obj))
# register the session with the monitor
_GA(obj, fieldmonitorname).add(sessid, oobfuncname, *args, **kwargs)
# store calling arguments as a pickle for retrieval at reload
storekey = (pack_dbobj(obj), sessid, fieldname, oobfuncname)
stored = (args, kwargs)
self.oob_monitor_storage[storekey] = stored
def _remove_monitor(self, obj, sessid, fieldname, oobfuncname=None):
"""
Helper method. Removes the OOB from obj.
Args:
obj (Object): The object from which to remove the monitor.
sessid (int): The Session id associated with the monitor.
fieldname (str): The monitored field from which to remove the monitor.
oobfuncname (str): The oob callback function.
"""
fieldmonitorname = self._get_fieldmonitor_name(fieldname)
try:
_GA(obj, fieldmonitorname).remove(sessid, oobfuncname=oobfuncname)
if not _GA(obj, fieldmonitorname).subscribers:
_DA(obj, fieldmonitorname)
except AttributeError:
pass
# remove the pickle from storage
store_key = (pack_dbobj(obj), sessid, fieldname, oobfuncname)
self.oob_monitor_storage.pop(store_key, None)
def save(self):
"""
Handles saving of the OOBHandler data when the server reloads.
Called from the Server process.
"""
# save ourselves as a tickerhandler
super(OOBHandler, self).save()
# handle the extra oob monitor store
if self.ticker_storage:
ServerConfig.objects.conf(key=self.oob_save_name,
value=dbserialize(self.oob_monitor_storage))
else:
# make sure we have nothing lingering in the database
ServerConfig.objects.conf(key=self.oob_save_name, delete=True)
def restore(self):
"""
Called when the handler recovers after a Server reload. Called
by the Server process as part of the reload upstart. Here we
overload the tickerhandler's restore method completely to make
sure we correctly re-apply and re-initialize the correct
        monitor and repeater objects on all saved objects.
"""
# load the oob monitors and initialize them
oob_storage = ServerConfig.objects.conf(key=self.oob_save_name)
if oob_storage:
self.oob_storage = dbunserialize(oob_storage)
for store_key, (args, kwargs) in self.oob_storage.items():
# re-create the monitors
obj, sessid, fieldname, oobfuncname = store_key
obj = unpack_dbobj(obj)
self._add_monitor(obj, sessid, fieldname, oobfuncname, *args, **kwargs)
# handle the tickers (same as in TickerHandler except we call
# the add_repeater method which makes sure to add the hooks before
# starting the tickerpool)
ticker_storage = ServerConfig.objects.conf(key=self.save_name)
if ticker_storage:
self.ticker_storage = dbunserialize(ticker_storage)
for store_key, (args, kwargs) in self.ticker_storage.items():
obj, interval, idstring = store_key
obj = unpack_dbobj(obj)
# we saved these in add_repeater before, can now retrieve them
sessid = kwargs["_sessid"]
oobfuncname = kwargs["_oobfuncname"]
self.add_repeater(obj, sessid, oobfuncname, interval, *args, **kwargs)
def add_repeater(self, obj, sessid, oobfuncname, interval=20, *args, **kwargs):
"""
Set an oob function to be repeatedly called.
Args:
            obj (Object): the object on which to register the repeat
            sessid (int): session id of the session registering
            oobfuncname (str): oob function name to call every interval seconds
            interval (int, optional): interval to call oobfunc, in seconds
Notes:
*args, **kwargs are used as extra arguments to the oobfunc.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
hook = OOBAtRepeater()
hookname = self._get_repeater_hook_name(oobfuncname, interval, sessid)
_SA(obj, hookname, hook)
# we store these in kwargs so that tickerhandler saves them with the rest
kwargs.update({"_sessid":sessid, "_oobfuncname":oobfuncname})
super(OOBHandler, self).add(obj, int(interval), oobfuncname, hookname, *args, **kwargs)
def remove_repeater(self, obj, sessid, oobfuncname, interval=20):
"""
Remove the repeatedly calling oob function
Args:
obj (Object): The object on which the repeater sits
sessid (int): Session id of the Session that registered the repeater
oobfuncname (str): Name of oob function to call at repeat
interval (int, optional): Number of seconds between repeats
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
super(OOBHandler, self).remove(obj, interval, idstring=oobfuncname)
hookname = self._get_repeater_hook_name(oobfuncname, interval, sessid)
try:
_DA(obj, hookname)
except AttributeError:
pass
def add_field_monitor(self, obj, sessid, field_name, oobfuncname, *args, **kwargs):
"""
Add a monitor tracking a database field
Args:
            obj (Object): The object whose field is to be monitored
            sessid (int): Session id of the monitoring Session
field_name (str): Name of database field to monitor. The db_* can optionally
be skipped (it will be automatically appended if missing)
oobfuncname (str): OOB function to call when field changes
Notes:
When the field updates the given oobfunction will be called as
`oobfuncname(session, fieldname, obj, *args, **kwargs)`
where `fieldname` is the name of the monitored field and
`obj` is the object on which the field sits. From this you
can also easily get the new field value if you want.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
        # all database field names start with db_*
field_name = field_name if field_name.startswith("db_") else "db_%s" % field_name
self._add_monitor(obj, sessid, field_name, oobfuncname, *args, **kwargs)
def remove_field_monitor(self, obj, sessid, field_name, oobfuncname=None):
"""
Un-tracks a database field
Args:
obj (Object): Entity with the monitored field
sessid (int): Session id of session that monitors
            field_name (str): database field monitored (the db_* prefix can
                optionally be skipped; it will be auto-appended if missing)
oobfuncname (str, optional): OOB command to call on that field
Notes:
When the Attributes db_value updates the given oobfunction
will be called as
`oobfuncname(session, fieldname, obj, *args, **kwargs)`
where `fieldname` is the name of the monitored field and
`obj` is the object on which the field sits. From this you
can also easily get the new field value if you want.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
field_name = field_name if field_name.startswith("db_") else "db_%s" % field_name
self._remove_monitor(obj, sessid, field_name, oobfuncname=oobfuncname)
def add_attribute_monitor(self, obj, sessid, attr_name, oobfuncname, *args, **kwargs):
"""
Monitor the changes of an Attribute on an object. Will trigger when
the Attribute's `db_value` field updates.
Args:
obj (Object): Object with the Attribute to monitor.
sessid (int): Session id of monitoring Session.
attr_name (str): Name (key) of Attribute to monitor.
oobfuncname (str): OOB function to call when Attribute updates.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
# get the attribute object if we can
attrobj = obj.attributes.get(attr_name, return_obj=True)
if attrobj:
self._add_monitor(attrobj, sessid, "db_value", oobfuncname)
def remove_attribute_monitor(self, obj, sessid, attr_name, oobfuncname):
"""
Deactivate tracking for a given object's Attribute
Args:
obj (Object): Object monitored.
sessid (int): Session id of monitoring Session.
attr_name (str): Name of Attribute monitored.
oobfuncname (str): OOB function name called when Attribute updates.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
attrobj = obj.attributes.get(attr_name, return_obj=True)
if attrobj:
self._remove_monitor(attrobj, sessid, "db_value", oobfuncname)
def get_all_monitors(self, sessid):
"""
Get the names of all variables this session is tracking.
Args:
sessid (id): Session id of monitoring Session
Returns:
stored monitors (tuple): A list of tuples
`(obj, fieldname, args, kwargs)` representing all
the monitoring the Session with the given sessid is doing.
"""
# check so we didn't get a session instead of a sessid
if not isinstance(sessid, int):
sessid = sessid.sessid
# [(obj, fieldname, args, kwargs), ...]
return [(unpack_dbobj(key[0]), key[2], stored[0], stored[1])
for key, stored in self.oob_monitor_storage.items() if key[1] == sessid]
# access method - called from session.msg()
def execute_cmd(self, session, oobfuncname, *args, **kwargs):
"""
Execute an oob command
Args:
session (Session or int): Session or Session.sessid calling
the oob command
oobfuncname (str): The name of the oob command (case sensitive)
Notes:
If the oobfuncname is a valid oob function, `args` and
`kwargs` are passed into the oob command.
"""
if isinstance(session, int):
# a sessid. Convert to a session
session = SESSIONS.session_from_sessid(session)
if not session:
errmsg = "OOB Error: execute_cmd(%s,%s,%s,%s) - no valid session" % \
(session, oobfuncname, args, kwargs)
raise RuntimeError(errmsg)
#print "execute_oob:", session, oobfuncname, args, kwargs
try:
oobfunc = _OOB_FUNCS[oobfuncname]
except Exception:
errmsg = "'%s' is not a valid OOB command. Commands available:\n %s" % (oobfuncname, ", ".join(_OOB_FUNCS))
if _OOB_ERROR:
_OOB_ERROR(session, errmsg, *args, **kwargs)
errmsg = "OOB ERROR: %s" % errmsg
logger.log_trace(errmsg)
return
# we found an oob command. Execute it.
try:
oobfunc(session, *args, **kwargs)
except Exception, err:
errmsg = "Exception in %s(*%s, **%s):\n%s" % (oobfuncname, args, kwargs, err)
if _OOB_ERROR:
_OOB_ERROR(session, errmsg, *args, **kwargs)
errmsg = "OOB ERROR: %s" % errmsg
logger.log_trace(errmsg)
# access object
OOB_HANDLER = OOBHandler()
# load resources from plugin module. This must happen
# AFTER the OOB_HANDLER has been initialized since the
# commands will want to import it.
_OOB_FUNCS = {}
for modname in make_iter(settings.OOB_PLUGIN_MODULES):
_OOB_FUNCS.update(mod_import(modname).CMD_MAP)
# get the command to receive eventual error strings
_OOB_ERROR = _OOB_FUNCS.get("oob_error", None)
if not _OOB_ERROR:
# no custom error set; create default oob error message function
def oob_error(session, errmsg, *args, **kwargs):
"""
Fallback error handler. This will be used if no custom
oob_error is defined and just echoes the error back to the
session.
Args:
errmsg (str): Error message to echo.
args, kwargs (any): Not used.
"""
session.msg(oob=("err", ("ERROR ", errmsg)))
_OOB_ERROR = oob_error
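# A minimal sketch (not part of the handler module above) of what an OOB
# plugin module listed in settings.OOB_PLUGIN_MODULES might contain. The
# function names are illustrative assumptions; only the call signatures and
# the CMD_MAP lookup convention are taken from the loader and docstrings
# above. A real plugin would name its dict CMD_MAP, since the loader reads
# mod_import(modname).CMD_MAP.
def example_echo(session, *args, **kwargs):
    # Called by OOBHandler.execute_cmd as oobfunc(session, *args, **kwargs).
    session.msg(oob=("example_echo", args))
def example_report(session, fieldname, obj, *args, **kwargs):
    # Field/Attribute monitors call their oobfuncname as
    # oobfuncname(session, fieldname, obj, *args, **kwargs).
    session.msg(oob=("example_report", (fieldname, str(obj))))
EXAMPLE_CMD_MAP = {"example_echo": example_echo,
                   "example_report": example_report}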
|
|
#!/usr/bin/python
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Testing Library For Nacl
"""
import difflib
import re
import os
import signal
import subprocess
import sys
# Windows does not fully implement os.times functionality. If
# _GetTimePosix were used, the fields for CPU time used in user and
# in kernel mode by the process would both be zero. Instead, we must
# use the ctypes module and access windll's kernel32 interface to
# extract the CPU usage information.
if sys.platform[:3] == 'win':
import ctypes
class SubprocessCpuTimer:
"""Timer used to measure user and kernel CPU time expended by a subprocess.
A new object of this class should be instantiated just before the
subprocess is created, and after the subprocess is finished and
waited for (via the wait method of the Popen object), the elapsed
time can be obtained by invoking the ElapsedCpuTime of the
SubprocessCpuTimer instance.
"""
WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1.0e7
# use a class variable to avoid slicing at run-time
_use_proc_handle = sys.platform[:3] == 'win'
@staticmethod
def _GetTimePosix():
t = os.times()
return t[2] + t[3]
@staticmethod
def _GetTimeWindows(proc_handle):
if proc_handle is None:
return 0
creation_time = ctypes.c_ulonglong()
exit_time = ctypes.c_ulonglong()
kernel_time = ctypes.c_ulonglong()
user_time = ctypes.c_ulonglong()
rc = ctypes.windll.kernel32.GetProcessTimes(
int(proc_handle._handle),
ctypes.byref(creation_time),
ctypes.byref(exit_time),
ctypes.byref(kernel_time),
ctypes.byref(user_time))
if not rc:
print >>sys.stderr, 'Could not obtain process time'
return 0
return ((kernel_time.value + user_time.value)
/ SubprocessCpuTimer.WIN32_PROCESS_TIMES_TICKS_PER_SECOND)
@staticmethod
def _GetTime(proc_handle):
if SubprocessCpuTimer._use_proc_handle:
return SubprocessCpuTimer._GetTimeWindows(proc_handle)
return SubprocessCpuTimer._GetTimePosix()
def __init__(self):
self._start_time = self._GetTime(None)
def ElapsedCpuTime(self, proc_handle):
return self._GetTime(proc_handle) - self._start_time
def RunTest(cmd):
"""Run a test where we only care about the return code."""
assert type(cmd) == list
failed = 0
timer = SubprocessCpuTimer()
p = None
try:
p = subprocess.Popen(cmd)
retcode = p.wait()
except OSError:
print 'exception: ' + str(sys.exc_info()[1])
retcode = 0
failed = 1
if p is None:
return (0, 0, 1)
return (timer.ElapsedCpuTime(p), retcode, failed)
def RunTestWithInputOutput(cmd, input_data):
"""Run a test where we also care about stdin/stdout/stderr.
NOTE: this function may have problems with arbitrarily
large input or output, especially on Windows.
NOTE: input_data can be either a string or a file-like object;
file-like objects may be better for large input/output.
"""
assert type(cmd) == list
stdout = ''
stderr = ''
failed = 0
p = None
timer = SubprocessCpuTimer()
try:
# Python on Windows does not include any notion of SIGPIPE. On
# Linux and OSX, Python sets the SIGPIPE handler to SIG_IGN so that
# syscalls return -1 with errno equal to EPIPE, and translates those
# errors into exceptions; unfortunately, the subprocess module fails
# to reset the handler for SIGPIPE to SIG_DFL, so the SIG_IGN behavior
# is inherited by child processes. subprocess.Popen's preexec_fn is
# apparently okay to use on Windows, as long as its value is None.
if hasattr(signal, 'SIGPIPE'):
no_pipe = lambda : signal.signal(signal.SIGPIPE, signal.SIG_DFL)
else:
no_pipe = None
if type(input_data) == str:
p = subprocess.Popen(cmd,
bufsize=1000*1000,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn = no_pipe)
stdout, stderr = p.communicate(input_data)
else:
# input_data is a file like object
p = subprocess.Popen(cmd,
bufsize=1000*1000,
stdin=input_data,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn = no_pipe)
stdout, stderr = p.communicate()
retcode = p.wait()
except OSError, x:
if x.errno == 10:
print '@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
print 'ignoring exception', str(sys.exc_info()[1])
print 'return code NOT checked'
print 'this seems to be a windows issue'
print '@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
failed = 0
retcode = 0
else:
print 'exception: ' + str(sys.exc_info()[1])
retcode = 0
failed = 1
if p is None:
cpu_time_consumed = 0
else:
cpu_time_consumed = timer.ElapsedCpuTime(p)
return (cpu_time_consumed, retcode, failed, stdout, stderr)
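# A minimal usage sketch for RunTestWithInputOutput (the command below is an
# illustrative assumption, not part of any NaCl test suite): feed a string on
# stdin and capture the echoed stdout together with the CPU time, return code
# and failure flag returned above.
def _ExampleRunWithInputOutput():
  cmd = [sys.executable, '-c',
         'import sys; sys.stdout.write(sys.stdin.read())']
  cpu_time, retcode, failed, stdout, stderr = RunTestWithInputOutput(
      cmd, 'hello\n')
  # On success: failed == 0, retcode == 0 and stdout == 'hello\n'.
  return stdout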
def DiffStringsIgnoringWhiteSpace(a, b):
a = a.splitlines()
b = b.splitlines()
# NOTE: the whitespace handling here is effectively broken: the isjunk
# callable below is applied to whole lines (the sequence elements), so it
# only treats empty or single-character whitespace lines as junk instead of
# ignoring whitespace inside lines.
cruncher = difflib.SequenceMatcher(lambda x: x in ' \t\r', a, b)
for group in cruncher.get_grouped_opcodes():
eq = True
for tag, i1, i2, j1, j2 in group:
if tag != 'equal':
eq = False
break
if eq: continue
i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
yield '@@ -%d,%d +%d,%d @@\n' % (i1+1, i2-i1, j1+1, j2-j1)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' [' + line + ']'
continue
if tag == 'replace' or tag == 'delete':
for line in a[i1:i2]:
yield '-[' + repr(line) + ']'
if tag == 'replace' or tag == 'insert':
for line in b[j1:j2]:
yield '+[' + repr(line) + ']'
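# A small sketch (the inputs are made up) of the hunk format the generator
# above yields: a unified-diff style '@@' header, followed by ' [..]' context
# lines and '-[..]' / '+[..]' lines for removed and added text.
def _ExampleDiffUsage():
  hunk = list(DiffStringsIgnoringWhiteSpace('a\nb\n', 'a\nc\n'))
  # hunk == ['@@ -1,2 +1,2 @@\n', ' [a]', "-['b']", "+['c']"]
  return hunk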
def RegexpFilterLines(regexp, inverse, group_only, lines):
"""Apply regexp to filter lines of text, keeping only those lines
that match.
Any carriage return / newline sequence is turned into a newline.
Args:
regexp: A regular expression, only lines that match are kept
inverse: Only keep lines that do not match
group_only: Replace matching lines with just the regexp groups;
text outside the groups is omitted (useful for
eliminating file names that might change, etc.).
lines: A string containing newline-separated lines of text
Returns:
Filtered lines of text, newline separated.
"""
result = []
nfa = re.compile(regexp)
for line in lines.split('\n'):
if line.endswith('\r'):
line = line[:-1]
mobj = nfa.search(line)
if mobj and inverse:
continue
if not mobj and not inverse:
continue
if group_only:
matched_strings = []
for s in mobj.groups():
if s is not None:
matched_strings.append(s)
result.append(''.join(matched_strings))
else:
result.append(line)
return '\n'.join(result)
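# A short sketch (the sample log lines are made up) of RegexpFilterLines:
# keep only lines matching the regexp and, with group_only, keep just the
# captured groups so that volatile parts such as file names drop out.
def _ExampleRegexpFilter():
  lines = 'INFO: ok\r\nERROR: /tmp/scratch1234: missing\nINFO: done'
  errors = RegexpFilterLines(r'^ERROR: .*: (.*)$', False, True, lines)
  # errors == 'missing'
  return errors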
|
|
"""Tests for acme.challenges."""
import unittest
import mock
import OpenSSL
import requests
from six.moves.urllib import parse as urllib_parse # pylint: disable=import-error
from acme import errors
from acme import jose
from acme import other
from acme import test_util
CERT = test_util.load_comparable_cert('cert.pem')
KEY = jose.JWKRSA(key=test_util.load_rsa_private_key('rsa512_key.pem'))
class ChallengeTest(unittest.TestCase):
def test_from_json_unrecognized(self):
from acme.challenges import Challenge
from acme.challenges import UnrecognizedChallenge
chall = UnrecognizedChallenge({"type": "foo"})
# pylint: disable=no-member
self.assertEqual(chall, Challenge.from_json(chall.jobj))
class UnrecognizedChallengeTest(unittest.TestCase):
def setUp(self):
from acme.challenges import UnrecognizedChallenge
self.jobj = {"type": "foo"}
self.chall = UnrecognizedChallenge(self.jobj)
def test_to_partial_json(self):
self.assertEqual(self.jobj, self.chall.to_partial_json())
def test_from_json(self):
from acme.challenges import UnrecognizedChallenge
self.assertEqual(
self.chall, UnrecognizedChallenge.from_json(self.jobj))
class KeyAuthorizationChallengeResponseTest(unittest.TestCase):
def setUp(self):
def _encode(name):
assert name == "token"
return "foo"
self.chall = mock.Mock()
self.chall.encode.side_effect = _encode
def test_verify_ok(self):
from acme.challenges import KeyAuthorizationChallengeResponse
response = KeyAuthorizationChallengeResponse(
key_authorization='foo.oKGqedy-b-acd5eoybm2f-NVFxvyOoET5CNy3xnv8WY')
self.assertTrue(response.verify(self.chall, KEY.public_key()))
def test_verify_wrong_token(self):
from acme.challenges import KeyAuthorizationChallengeResponse
response = KeyAuthorizationChallengeResponse(
key_authorization='bar.oKGqedy-b-acd5eoybm2f-NVFxvyOoET5CNy3xnv8WY')
self.assertFalse(response.verify(self.chall, KEY.public_key()))
def test_verify_wrong_thumbprint(self):
from acme.challenges import KeyAuthorizationChallengeResponse
response = KeyAuthorizationChallengeResponse(
key_authorization='foo.oKGqedy-b-acd5eoybm2f-NVFxv')
self.assertFalse(response.verify(self.chall, KEY.public_key()))
def test_verify_wrong_form(self):
from acme.challenges import KeyAuthorizationChallengeResponse
response = KeyAuthorizationChallengeResponse(
key_authorization='.foo.oKGqedy-b-acd5eoybm2f-'
'NVFxvyOoET5CNy3xnv8WY')
self.assertFalse(response.verify(self.chall, KEY.public_key()))
class HTTP01ResponseTest(unittest.TestCase):
# pylint: disable=too-many-instance-attributes
def setUp(self):
from acme.challenges import HTTP01Response
self.msg = HTTP01Response(key_authorization=u'foo')
self.jmsg = {
'resource': 'challenge',
'type': 'http-01',
'keyAuthorization': u'foo',
}
from acme.challenges import HTTP01
self.chall = HTTP01(token=(b'x' * 16))
self.response = self.chall.response(KEY)
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import HTTP01Response
self.assertEqual(
self.msg, HTTP01Response.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import HTTP01Response
hash(HTTP01Response.from_json(self.jmsg))
def test_simple_verify_bad_key_authorization(self):
key2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem'))
self.response.simple_verify(self.chall, "local", key2.public_key())
@mock.patch("acme.challenges.requests.get")
def test_simple_verify_good_validation(self, mock_get):
validation = self.chall.validation(KEY)
mock_get.return_value = mock.MagicMock(text=validation)
self.assertTrue(self.response.simple_verify(
self.chall, "local", KEY.public_key()))
mock_get.assert_called_once_with(self.chall.uri("local"))
@mock.patch("acme.challenges.requests.get")
def test_simple_verify_bad_validation(self, mock_get):
mock_get.return_value = mock.MagicMock(text="!")
self.assertFalse(self.response.simple_verify(
self.chall, "local", KEY.public_key()))
@mock.patch("acme.challenges.requests.get")
def test_simple_verify_whitespace_validation(self, mock_get):
from acme.challenges import HTTP01Response
mock_get.return_value = mock.MagicMock(
text=(self.chall.validation(KEY) +
HTTP01Response.WHITESPACE_CUTSET))
self.assertTrue(self.response.simple_verify(
self.chall, "local", KEY.public_key()))
mock_get.assert_called_once_with(self.chall.uri("local"))
@mock.patch("acme.challenges.requests.get")
def test_simple_verify_connection_error(self, mock_get):
mock_get.side_effect = requests.exceptions.RequestException
self.assertFalse(self.response.simple_verify(
self.chall, "local", KEY.public_key()))
@mock.patch("acme.challenges.requests.get")
def test_simple_verify_port(self, mock_get):
self.response.simple_verify(
self.chall, domain="local",
account_public_key=KEY.public_key(), port=8080)
self.assertEqual("local:8080", urllib_parse.urlparse(
mock_get.mock_calls[0][1][0]).netloc)
class HTTP01Test(unittest.TestCase):
def setUp(self):
from acme.challenges import HTTP01
self.msg = HTTP01(
token=jose.decode_b64jose(
'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ+PCt92wr+oA'))
self.jmsg = {
'type': 'http-01',
'token': 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA',
}
def test_path(self):
self.assertEqual(self.msg.path, '/.well-known/acme-challenge/'
'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')
def test_uri(self):
self.assertEqual(
'http://example.com/.well-known/acme-challenge/'
'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA',
self.msg.uri('example.com'))
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import HTTP01
self.assertEqual(self.msg, HTTP01.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import HTTP01
hash(HTTP01.from_json(self.jmsg))
def test_good_token(self):
self.assertTrue(self.msg.good_token)
self.assertFalse(
self.msg.update(token=b'..').good_token)
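# A small standalone illustration restating what HTTP01Test asserts above:
# the challenge path is derived from the base64url-encoded token, and uri()
# simply prefixes the scheme and domain. The domain is the same example
# value the tests use.
def _example_http01_locations():
    from acme.challenges import HTTP01
    chall = HTTP01(token=jose.decode_b64jose(
        'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ+PCt92wr+oA'))
    assert chall.path == ('/.well-known/acme-challenge/'
                          'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')
    assert chall.uri('example.com') == 'http://example.com' + chall.path
    return chall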
class TLSSNI01ResponseTest(unittest.TestCase):
# pylint: disable=too-many-instance-attributes
def setUp(self):
from acme.challenges import TLSSNI01
self.chall = TLSSNI01(
token=jose.b64decode(b'a82d5ff8ef740d12881f6d3c2277ab2e'))
self.response = self.chall.response(KEY)
self.jmsg = {
'resource': 'challenge',
'type': 'tls-sni-01',
'keyAuthorization': self.response.key_authorization,
}
# pylint: disable=invalid-name
label1 = b'dc38d9c3fa1a4fdcc3a5501f2d38583f'
label2 = b'b7793728f084394f2a1afd459556bb5c'
self.z = label1 + label2
self.z_domain = label1 + b'.' + label2 + b'.acme.invalid'
self.domain = 'foo.com'
def test_z_and_domain(self):
self.assertEqual(self.z, self.response.z)
self.assertEqual(self.z_domain, self.response.z_domain)
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.response.to_partial_json())
def test_from_json(self):
from acme.challenges import TLSSNI01Response
self.assertEqual(self.response, TLSSNI01Response.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import TLSSNI01Response
hash(TLSSNI01Response.from_json(self.jmsg))
@mock.patch('acme.challenges.socket.gethostbyname')
@mock.patch('acme.challenges.crypto_util.probe_sni')
def test_probe_cert(self, mock_probe_sni, mock_gethostbyname):
mock_gethostbyname.return_value = '127.0.0.1'
self.response.probe_cert('foo.com')
mock_gethostbyname.assert_called_once_with('foo.com')
mock_probe_sni.assert_called_once_with(
host='127.0.0.1', port=self.response.PORT,
name=self.z_domain)
self.response.probe_cert('foo.com', host='8.8.8.8')
mock_probe_sni.assert_called_with(
host='8.8.8.8', port=mock.ANY, name=mock.ANY)
self.response.probe_cert('foo.com', port=1234)
mock_probe_sni.assert_called_with(
host=mock.ANY, port=1234, name=mock.ANY)
self.response.probe_cert('foo.com', bar='baz')
mock_probe_sni.assert_called_with(
host=mock.ANY, port=mock.ANY, name=mock.ANY, bar='baz')
self.response.probe_cert('foo.com', name=b'xxx')
mock_probe_sni.assert_called_with(
host=mock.ANY, port=mock.ANY,
name=self.z_domain)
def test_gen_verify_cert(self):
key1 = test_util.load_pyopenssl_private_key('rsa512_key.pem')
cert, key2 = self.response.gen_cert(key1)
self.assertEqual(key1, key2)
self.assertTrue(self.response.verify_cert(cert))
def test_gen_verify_cert_gen_key(self):
cert, key = self.response.gen_cert()
self.assertTrue(isinstance(key, OpenSSL.crypto.PKey))
self.assertTrue(self.response.verify_cert(cert))
def test_verify_bad_cert(self):
self.assertFalse(self.response.verify_cert(
test_util.load_cert('cert.pem')))
def test_simple_verify_bad_key_authorization(self):
key2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem'))
self.response.simple_verify(self.chall, "local", key2.public_key())
@mock.patch('acme.challenges.TLSSNI01Response.verify_cert', autospec=True)
def test_simple_verify(self, mock_verify_cert):
mock_verify_cert.return_value = mock.sentinel.verification
self.assertEqual(
mock.sentinel.verification, self.response.simple_verify(
self.chall, self.domain, KEY.public_key(),
cert=mock.sentinel.cert))
mock_verify_cert.assert_called_once_with(
self.response, mock.sentinel.cert)
@mock.patch('acme.challenges.TLSSNI01Response.probe_cert')
def test_simple_verify_false_on_probe_error(self, mock_probe_cert):
mock_probe_cert.side_effect = errors.Error
self.assertFalse(self.response.simple_verify(
self.chall, self.domain, KEY.public_key()))
class TLSSNI01Test(unittest.TestCase):
def setUp(self):
from acme.challenges import TLSSNI01
self.msg = TLSSNI01(
token=jose.b64decode('a82d5ff8ef740d12881f6d3c2277ab2e'))
self.jmsg = {
'type': 'tls-sni-01',
'token': 'a82d5ff8ef740d12881f6d3c2277ab2e',
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import TLSSNI01
self.assertEqual(self.msg, TLSSNI01.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import TLSSNI01
hash(TLSSNI01.from_json(self.jmsg))
def test_from_json_invalid_token_length(self):
from acme.challenges import TLSSNI01
self.jmsg['token'] = jose.encode_b64jose(b'abcd')
self.assertRaises(
jose.DeserializationError, TLSSNI01.from_json, self.jmsg)
@mock.patch('acme.challenges.TLSSNI01Response.gen_cert')
def test_validation(self, mock_gen_cert):
mock_gen_cert.return_value = ('cert', 'key')
self.assertEqual(('cert', 'key'), self.msg.validation(
KEY, cert_key=mock.sentinel.cert_key))
mock_gen_cert.assert_called_once_with(key=mock.sentinel.cert_key)
class RecoveryContactTest(unittest.TestCase):
def setUp(self):
from acme.challenges import RecoveryContact
self.msg = RecoveryContact(
activation_url='https://example.ca/sendrecovery/a5bd99383fb0',
success_url='https://example.ca/confirmrecovery/bb1b9928932',
contact='c********n@example.com')
self.jmsg = {
'type': 'recoveryContact',
'activationURL': 'https://example.ca/sendrecovery/a5bd99383fb0',
'successURL': 'https://example.ca/confirmrecovery/bb1b9928932',
'contact': 'c********n@example.com',
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import RecoveryContact
self.assertEqual(self.msg, RecoveryContact.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import RecoveryContact
hash(RecoveryContact.from_json(self.jmsg))
def test_json_without_optionals(self):
del self.jmsg['activationURL']
del self.jmsg['successURL']
del self.jmsg['contact']
from acme.challenges import RecoveryContact
msg = RecoveryContact.from_json(self.jmsg)
self.assertTrue(msg.activation_url is None)
self.assertTrue(msg.success_url is None)
self.assertTrue(msg.contact is None)
self.assertEqual(self.jmsg, msg.to_partial_json())
class RecoveryContactResponseTest(unittest.TestCase):
def setUp(self):
from acme.challenges import RecoveryContactResponse
self.msg = RecoveryContactResponse(token='23029d88d9e123e')
self.jmsg = {
'resource': 'challenge',
'type': 'recoveryContact',
'token': '23029d88d9e123e',
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import RecoveryContactResponse
self.assertEqual(
self.msg, RecoveryContactResponse.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import RecoveryContactResponse
hash(RecoveryContactResponse.from_json(self.jmsg))
def test_json_without_optionals(self):
del self.jmsg['token']
from acme.challenges import RecoveryContactResponse
msg = RecoveryContactResponse.from_json(self.jmsg)
self.assertTrue(msg.token is None)
self.assertEqual(self.jmsg, msg.to_partial_json())
class ProofOfPossessionHintsTest(unittest.TestCase):
def setUp(self):
jwk = KEY.public_key()
issuers = (
'C=US, O=SuperT LLC, CN=SuperTrustworthy Public CA',
'O=LessTrustworthy CA Inc, CN=LessTrustworthy But StillSecure',
)
cert_fingerprints = (
'93416768eb85e33adc4277f4c9acd63e7418fcfe',
'16d95b7b63f1972b980b14c20291f3c0d1855d95',
'48b46570d9fc6358108af43ad1649484def0debf',
)
subject_key_identifiers = ('d0083162dcc4c8a23ecb8aecbd86120e56fd24e5')
authorized_for = ('www.example.com', 'example.net')
serial_numbers = (34234239832, 23993939911, 17)
from acme.challenges import ProofOfPossession
self.msg = ProofOfPossession.Hints(
jwk=jwk, issuers=issuers, cert_fingerprints=cert_fingerprints,
certs=(CERT,), subject_key_identifiers=subject_key_identifiers,
authorized_for=authorized_for, serial_numbers=serial_numbers)
self.jmsg_to = {
'jwk': jwk,
'certFingerprints': cert_fingerprints,
'certs': (jose.encode_b64jose(OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1, CERT.wrapped)),),
'subjectKeyIdentifiers': subject_key_identifiers,
'serialNumbers': serial_numbers,
'issuers': issuers,
'authorizedFor': authorized_for,
}
self.jmsg_from = self.jmsg_to.copy()
self.jmsg_from.update({'jwk': jwk.to_json()})
def test_to_partial_json(self):
self.assertEqual(self.jmsg_to, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import ProofOfPossession
self.assertEqual(
self.msg, ProofOfPossession.Hints.from_json(self.jmsg_from))
def test_from_json_hashable(self):
from acme.challenges import ProofOfPossession
hash(ProofOfPossession.Hints.from_json(self.jmsg_from))
def test_json_without_optionals(self):
for optional in ['certFingerprints', 'certs', 'subjectKeyIdentifiers',
'serialNumbers', 'issuers', 'authorizedFor']:
del self.jmsg_from[optional]
del self.jmsg_to[optional]
from acme.challenges import ProofOfPossession
msg = ProofOfPossession.Hints.from_json(self.jmsg_from)
self.assertEqual(msg.cert_fingerprints, ())
self.assertEqual(msg.certs, ())
self.assertEqual(msg.subject_key_identifiers, ())
self.assertEqual(msg.serial_numbers, ())
self.assertEqual(msg.issuers, ())
self.assertEqual(msg.authorized_for, ())
self.assertEqual(self.jmsg_to, msg.to_partial_json())
class ProofOfPossessionTest(unittest.TestCase):
def setUp(self):
from acme.challenges import ProofOfPossession
hints = ProofOfPossession.Hints(
jwk=KEY.public_key(), cert_fingerprints=(),
certs=(), serial_numbers=(), subject_key_identifiers=(),
issuers=(), authorized_for=())
self.msg = ProofOfPossession(
alg=jose.RS256, hints=hints,
nonce=b'xD\xf9\xb9\xdbU\xed\xaa\x17\xf1y|\x81\x88\x99 ')
self.jmsg_to = {
'type': 'proofOfPossession',
'alg': jose.RS256,
'nonce': 'eET5udtV7aoX8Xl8gYiZIA',
'hints': hints,
}
self.jmsg_from = {
'type': 'proofOfPossession',
'alg': jose.RS256.to_json(),
'nonce': 'eET5udtV7aoX8Xl8gYiZIA',
'hints': hints.to_json(),
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg_to, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import ProofOfPossession
self.assertEqual(
self.msg, ProofOfPossession.from_json(self.jmsg_from))
def test_from_json_hashable(self):
from acme.challenges import ProofOfPossession
hash(ProofOfPossession.from_json(self.jmsg_from))
class ProofOfPossessionResponseTest(unittest.TestCase):
def setUp(self):
# acme-spec uses a confusing example in which both the signature
# nonce and the challenge nonce are the same; don't make the same
# mistake here...
signature = other.Signature(
alg=jose.RS256, jwk=KEY.public_key(),
sig=b'\xa7\xc1\xe7\xe82o\xbc\xcd\xd0\x1e\x010#Z|\xaf\x15\x83'
b'\x94\x8f#\x9b\nQo(\x80\x15,\x08\xfcz\x1d\xfd\xfd.\xaap'
b'\xfa\x06\xd1\xa2f\x8d8X2>%d\xbd%\xe1T\xdd\xaa0\x18\xde'
b'\x99\x08\xf0\x0e{',
nonce=b'\x99\xc7Q\xb3f2\xbc\xdci\xfe\xd6\x98k\xc67\xdf',
)
from acme.challenges import ProofOfPossessionResponse
self.msg = ProofOfPossessionResponse(
nonce=b'xD\xf9\xb9\xdbU\xed\xaa\x17\xf1y|\x81\x88\x99 ',
signature=signature)
self.jmsg_to = {
'resource': 'challenge',
'type': 'proofOfPossession',
'nonce': 'eET5udtV7aoX8Xl8gYiZIA',
'signature': signature,
}
self.jmsg_from = {
'resource': 'challenge',
'type': 'proofOfPossession',
'nonce': 'eET5udtV7aoX8Xl8gYiZIA',
'signature': signature.to_json(),
}
def test_verify(self):
self.assertTrue(self.msg.verify())
def test_to_partial_json(self):
self.assertEqual(self.jmsg_to, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import ProofOfPossessionResponse
self.assertEqual(
self.msg, ProofOfPossessionResponse.from_json(self.jmsg_from))
def test_from_json_hashable(self):
from acme.challenges import ProofOfPossessionResponse
hash(ProofOfPossessionResponse.from_json(self.jmsg_from))
class DNSTest(unittest.TestCase):
def setUp(self):
from acme.challenges import DNS
self.msg = DNS(token=jose.b64decode(
b'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA'))
self.jmsg = {
'type': 'dns',
'token': 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA',
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import DNS
self.assertEqual(self.msg, DNS.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import DNS
hash(DNS.from_json(self.jmsg))
def test_gen_check_validation(self):
self.assertTrue(self.msg.check_validation(
self.msg.gen_validation(KEY), KEY.public_key()))
def test_gen_check_validation_wrong_key(self):
key2 = jose.JWKRSA.load(test_util.load_vector('rsa1024_key.pem'))
self.assertFalse(self.msg.check_validation(
self.msg.gen_validation(KEY), key2.public_key()))
def test_check_validation_wrong_payload(self):
validations = tuple(
jose.JWS.sign(payload=payload, alg=jose.RS256, key=KEY)
for payload in (b'', b'{}')
)
for validation in validations:
self.assertFalse(self.msg.check_validation(
validation, KEY.public_key()))
def test_check_validation_wrong_fields(self):
bad_validation = jose.JWS.sign(
payload=self.msg.update(
token=b'x' * 20).json_dumps().encode('utf-8'),
alg=jose.RS256, key=KEY)
self.assertFalse(self.msg.check_validation(
bad_validation, KEY.public_key()))
def test_gen_response(self):
with mock.patch('acme.challenges.DNS.gen_validation') as mock_gen:
mock_gen.return_value = mock.sentinel.validation
response = self.msg.gen_response(KEY)
from acme.challenges import DNSResponse
self.assertTrue(isinstance(response, DNSResponse))
self.assertEqual(response.validation, mock.sentinel.validation)
def test_validation_domain_name(self):
self.assertEqual(
'_acme-challenge.le.wtf', self.msg.validation_domain_name('le.wtf'))
class DNSResponseTest(unittest.TestCase):
def setUp(self):
from acme.challenges import DNS
self.chall = DNS(token=jose.b64decode(
b"evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA"))
self.validation = jose.JWS.sign(
payload=self.chall.json_dumps(sort_keys=True).encode(),
key=KEY, alg=jose.RS256)
from acme.challenges import DNSResponse
self.msg = DNSResponse(validation=self.validation)
self.jmsg_to = {
'resource': 'challenge',
'type': 'dns',
'validation': self.validation,
}
self.jmsg_from = {
'resource': 'challenge',
'type': 'dns',
'validation': self.validation.to_json(),
}
def test_to_partial_json(self):
self.assertEqual(self.jmsg_to, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import DNSResponse
self.assertEqual(self.msg, DNSResponse.from_json(self.jmsg_from))
def test_from_json_hashable(self):
from acme.challenges import DNSResponse
hash(DNSResponse.from_json(self.jmsg_from))
def test_check_validation(self):
self.assertTrue(
self.msg.check_validation(self.chall, KEY.public_key()))
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import uuid
from six.moves.urllib import parse as urlparse # pylint: disable=import-error
from six.moves import http_client
from st2common import log as logging
from st2common.constants.auth import (
HEADER_API_KEY_ATTRIBUTE_NAME,
HEADER_ATTRIBUTE_NAME,
)
from st2common.constants.triggers import WEBHOOK_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.api.trigger import TriggerAPI
from st2common.models.db.webhook import WebhookDB
import st2common.services.triggers as trigger_service
from st2common.rbac.types import PermissionType
from st2common.rbac.backends import get_rbac_backend
from st2common.services.triggerwatcher import TriggerWatcher
from st2common.services.trigger_dispatcher import TriggerDispatcherService
from st2common.router import abort
from st2common.router import Response
from st2common.util.jsonify import get_json_type_for_python_value
LOG = logging.getLogger(__name__)
TRACE_TAG_HEADER = "St2-Trace-Tag"
class HooksHolder(object):
"""
Maintains a hook to TriggerDB mapping.
"""
def __init__(self):
self._triggers_by_hook = {}
def __contains__(self, key):
return key in self._triggers_by_hook
def add_hook(self, hook, trigger):
if hook not in self._triggers_by_hook:
self._triggers_by_hook[hook] = []
self._triggers_by_hook[hook].append(trigger)
def remove_hook(self, hook, trigger):
if hook not in self._triggers_by_hook:
return False
remove_index = -1
for idx, item in enumerate(self._triggers_by_hook[hook]):
if item["id"] == trigger["id"]:
remove_index = idx
break
if remove_index < 0:
return False
self._triggers_by_hook[hook].pop(remove_index)
if not self._triggers_by_hook[hook]:
del self._triggers_by_hook[hook]
return True
def get_triggers_for_hook(self, hook):
return self._triggers_by_hook.get(hook, [])
def get_all(self):
triggers = []
for values in six.itervalues(self._triggers_by_hook):
triggers.extend(values)
return triggers
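# A minimal sketch of how HooksHolder is used by the controller below. The
# hook name and trigger dict are illustrative; the only assumption taken from
# the code above is that triggers are dicts carrying an "id" key.
def _example_hooks_holder():
    holder = HooksHolder()
    trigger = {"id": "trigger-1", "parameters": {"url": "sample"}}
    holder.add_hook("sample", trigger)
    assert "sample" in holder
    assert holder.get_triggers_for_hook("sample") == [trigger]
    holder.remove_hook("sample", trigger)
    assert "sample" not in holder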
class WebhooksController(object):
def __init__(self, *args, **kwargs):
self._hooks = HooksHolder()
self._base_url = "/webhooks/"
self._trigger_types = list(WEBHOOK_TRIGGER_TYPES.keys())
self._trigger_dispatcher_service = TriggerDispatcherService(LOG)
queue_suffix = self.__class__.__name__
self._trigger_watcher = TriggerWatcher(
create_handler=self._handle_create_trigger,
update_handler=self._handle_update_trigger,
delete_handler=self._handle_delete_trigger,
trigger_types=self._trigger_types,
queue_suffix=queue_suffix,
exclusive=True,
)
self._trigger_watcher.start()
self._register_webhook_trigger_types()
def get_all(self):
# Return only the hooks known by this controller.
return self._hooks.get_all()
def get_one(self, url, requester_user):
triggers = self._hooks.get_triggers_for_hook(url)
if not triggers:
abort(http_client.NOT_FOUND)
return
permission_type = PermissionType.WEBHOOK_VIEW
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_db_permission(
user_db=requester_user,
resource_db=WebhookDB(name=url),
permission_type=permission_type,
)
# For demonstration purposes, return the first one.
return triggers[0]
def post(self, hook, webhook_body_api, headers, requester_user):
body = webhook_body_api.data
permission_type = PermissionType.WEBHOOK_SEND
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_db_permission(
user_db=requester_user,
resource_db=WebhookDB(name=hook),
permission_type=permission_type,
)
headers = self._get_headers_as_dict(headers)
headers = self._filter_authentication_headers(headers)
# If the webhook contains a trace-tag, use that; otherwise create a unique trace-tag.
trace_context = self._create_trace_context(
trace_tag=headers.pop(TRACE_TAG_HEADER, None), hook=hook
)
if hook == "st2" or hook == "st2/":
# When using the st2 (system) webhook, the body always needs to be a dict
if not isinstance(body, dict):
type_string = get_json_type_for_python_value(body)
msg = "Webhook body needs to be an object, got: %s" % (type_string)
raise ValueError(msg)
trigger = body.get("trigger", None)
payload = body.get("payload", None)
if not trigger:
msg = "Trigger not specified."
return abort(http_client.BAD_REQUEST, msg)
self._trigger_dispatcher_service.dispatch_with_context(
trigger=trigger,
payload=payload,
trace_context=trace_context,
throw_on_validation_error=True,
)
else:
if not self._is_valid_hook(hook):
self._log_request("Invalid hook.", headers, body)
msg = "Webhook %s not registered with st2" % hook
return abort(http_client.NOT_FOUND, msg)
triggers = self._hooks.get_triggers_for_hook(hook)
payload = {}
payload["headers"] = headers
payload["headers_lower"] = {k.lower(): v for k, v in headers.items()}
payload["body"] = body
# Dispatch a trigger instance for each of the triggers found
for trigger_dict in triggers:
# TODO: Instead of dispatching the whole dict we should just
# dispatch TriggerDB.ref or similar
self._trigger_dispatcher_service.dispatch_with_context(
trigger=trigger_dict,
payload=payload,
trace_context=trace_context,
throw_on_validation_error=True,
)
# NOTE: For URL-encoded request bodies, values will be bytes instead of unicode, and this
# doesn't work with orjson, so we first need to "cast" all the values from bytes to unicode
return Response(json=body, status=http_client.ACCEPTED)
def _is_valid_hook(self, hook):
# TODO: Validate hook payload with payload_schema.
return hook in self._hooks
def _register_webhook_trigger_types(self):
for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
trigger_service.create_trigger_type_db(trigger_type)
def _create_trace_context(self, trace_tag, hook):
# if no trace_tag then create a unique one
if not trace_tag:
trace_tag = "webhook-%s-%s" % (hook, uuid.uuid4().hex)
return TraceContext(trace_tag=trace_tag)
def add_trigger(self, trigger):
# NOTE: trigger is a dictionary
# Note: Permission checking for creating and deleting a webhook is done during rule
# creation
url = self._get_normalized_url(trigger)
LOG.info("Listening to endpoint: %s", urlparse.urljoin(self._base_url, url))
self._hooks.add_hook(url, trigger)
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
# Note: Permission checking for creating and deleting a webhook is done during rule
# creation
url = self._get_normalized_url(trigger)
removed = self._hooks.remove_hook(url, trigger)
if removed:
LOG.info(
"Stop listening to endpoint: %s", urlparse.urljoin(self._base_url, url)
)
def _get_normalized_url(self, trigger):
"""
Remove the trailing and leading '/' so that the hook URL and the URLs coming
from trigger parameters end up being the same.
"""
return trigger["parameters"]["url"].strip("/")
def _get_headers_as_dict(self, headers):
headers_dict = {}
for key, value in headers.items():
headers_dict[key] = value
return headers_dict
def _filter_authentication_headers(self, headers):
auth_headers = [HEADER_API_KEY_ATTRIBUTE_NAME, HEADER_ATTRIBUTE_NAME, "Cookie"]
return {key: value for key, value in headers.items() if key not in auth_headers}
def _log_request(self, msg, headers, body, log_method=LOG.debug):
headers = self._get_headers_as_dict(headers)
body = str(body)
log_method("%s\n\trequest.header: %s.\n\trequest.body: %s.", msg, headers, body)
##############################################
# Event handler methods for the trigger events
##############################################
def _handle_create_trigger(self, trigger):
LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
trigger = self._sanitize_trigger(trigger=trigger)
self.add_trigger(trigger=trigger)
def _handle_update_trigger(self, trigger):
LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
trigger = self._sanitize_trigger(trigger=trigger)
self.update_trigger(trigger=trigger)
def _handle_delete_trigger(self, trigger):
LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
trigger = self._sanitize_trigger(trigger=trigger)
self.remove_trigger(trigger=trigger)
def _sanitize_trigger(self, trigger):
sanitized = TriggerAPI.from_model(trigger).to_dict()
return sanitized
webhooks_controller = WebhooksController()
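# Illustrative payload shapes (all values are made up) for the post() handler
# above. For the system "st2" hook the body must be a dict carrying a trigger
# reference and its payload; for a registered custom hook, the payload
# dispatched to each matching trigger wraps the filtered headers (plus a
# lower-cased copy) and the raw body.
EXAMPLE_ST2_WEBHOOK_BODY = {
    "trigger": "some_pack.some_trigger",
    "payload": {"key": "value"},
}
EXAMPLE_CUSTOM_HOOK_DISPATCH_PAYLOAD = {
    "headers": {"Content-Type": "application/json"},
    "headers_lower": {"content-type": "application/json"},
    "body": {"key": "value"},
}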
|
|
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import Person, FunPerson, Book, Car, PersonManager, PublishedBookManager
class CustomManagerTests(TestCase):
def setUp(self):
self.b1 = Book.published_objects.create(
title="How to program", author="Rodney Dangerfield", is_published=True)
self.b2 = Book.published_objects.create(
title="How to be smart", author="Albert Einstein", is_published=False)
def test_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
# Test a custom `Manager` method.
self.assertQuerysetEqual(
Person.objects.get_fun_people(), [
"Bugs Bunny"
],
six.text_type
)
# Test that the methods of a custom `QuerySet` are properly
# copied onto the default `Manager` (see the standalone sketch of
# this pattern after this test class).
for manager in ['custom_queryset_default_manager',
'custom_queryset_custom_manager']:
manager = getattr(Person, manager)
# Copy public methods.
manager.public_method()
# Don't copy private methods.
with self.assertRaises(AttributeError):
manager._private_method()
# Copy methods with `manager=True` even if they are private.
manager._optin_private_method()
# Don't copy methods with `manager=False` even if they are public.
with self.assertRaises(AttributeError):
manager.optout_public_method()
# Test that the overridden method is called.
queryset = manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
self.assertEqual(queryset._filter_CustomQuerySet, True)
# Test that specialized querysets inherit from our custom queryset.
queryset = manager.values_list('first_name', flat=True).filter()
self.assertEqual(list(queryset), [six.text_type("Bugs")])
self.assertEqual(queryset._filter_CustomQuerySet, True)
# Test that the custom manager `__init__()` argument has been set.
self.assertEqual(Person.custom_queryset_custom_manager.init_arg, 'hello')
# Test that the custom manager method is only available on the manager.
Person.custom_queryset_custom_manager.manager_only()
with self.assertRaises(AttributeError):
Person.custom_queryset_custom_manager.all().manager_only()
# Test that the queryset method doesn't override the custom manager method.
queryset = Person.custom_queryset_custom_manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
self.assertEqual(queryset._filter_CustomManager, True)
# The RelatedManager used on the 'books' descriptor extends the default
# manager
self.assertIsInstance(droopy.books, PublishedBookManager)
# The default manager, "objects", doesn't exist, because a custom one
# was provided.
self.assertRaises(AttributeError, lambda: Book.objects)
# The RelatedManager used on the 'authors' descriptor extends the
# default manager
self.assertIsInstance(self.b2.authors, PersonManager)
self.assertQuerysetEqual(
Book.published_objects.all(), [
"How to program",
],
lambda b: b.title
)
Car.cars.create(name="Corvette", mileage=21, top_speed=180)
Car.cars.create(name="Neon", mileage=31, top_speed=100)
self.assertQuerysetEqual(
Car.cars.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
self.assertQuerysetEqual(
Car.fast_cars.all(), [
"Corvette",
],
lambda c: c.name
)
# Each model class gets a "_default_manager" attribute, which is a
# reference to the first manager defined in the class. In this case,
# it's "cars".
self.assertQuerysetEqual(
Car._default_manager.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
def test_fk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_books.order_by('first_name').all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_books.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_gfk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_things.all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_things.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.fun_authors.add(droopy)
self.assertQuerysetEqual(
self.b1.authors.order_by('first_name').all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_authors.order_by('first_name').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_removal_through_default_fk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
self.b1.fun_people_favorite_books.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_books.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
bugs.favorite_book = self.b1
bugs.save()
self.b1.fun_people_favorite_books.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_fk_related_manager(self):
self.test_removal_through_default_fk_related_manager(bulk=False)
def test_removal_through_specified_fk_related_manager(self, bulk=True):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
# Check that the fun manager DOESN'T remove boring people.
self.b1.favorite_books(manager='fun_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.favorite_books(manager='boring_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_book = self.b1
droopy.save()
# Check that the fun manager ONLY clears fun people.
self.b1.favorite_books(manager='fun_people').clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_fk_related_manager(self):
self.test_removal_through_specified_fk_related_manager(bulk=False)
def test_removal_through_default_gfk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
self.b1.fun_people_favorite_things.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_things.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
bugs.favorite_book = self.b1
bugs.save()
self.b1.fun_people_favorite_things.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_gfk_related_manager(self):
self.test_removal_through_default_gfk_related_manager(bulk=False)
def test_removal_through_specified_gfk_related_manager(self, bulk=True):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
# Check that the fun manager DOESN'T remove boring people.
self.b1.favorite_things(manager='fun_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.favorite_things(manager='boring_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_thing = self.b1
droopy.save()
# Check that the fun manager ONLY clears fun people.
self.b1.favorite_things(manager='fun_people').clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_gfk_related_manager(self):
self.test_removal_through_specified_gfk_related_manager(bulk=False)
def test_removal_through_default_m2m_related_manager(self):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.fun_authors.add(droopy)
self.b1.fun_authors.remove(droopy)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Bugs",
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.remove(bugs)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.add(bugs)
self.b1.fun_authors.clear()
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
def test_removal_through_specified_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
# Check that the fun manager DOESN'T remove boring people.
self.b1.authors(manager='fun_people').remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.authors(manager='boring_people').remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
self.b1.authors.add(droopy)
# Check that the fun manager ONLY clears fun people.
self.b1.authors(manager='fun_people').clear()
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
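# A standalone sketch of the custom-manager pattern exercised in
# CustomManagerTests.test_manager above. The queryset and method names here
# are illustrative assumptions; the real definitions live in this app's
# models.py, which is not shown. Manager.from_queryset() builds a manager
# class whose public queryset methods are copied onto the manager, while
# underscore-prefixed queryset methods are not copied by default.
from django.db import models
class FunQuerySet(models.QuerySet):
    def fun(self):
        # Public queryset methods like this one become manager methods too.
        return self.filter(fun=True)
# On a model this would be attached as: people = FunManager()
# after which both Model.people.fun() and Model.people.all().fun() work.
FunManager = models.Manager.from_queryset(FunQuerySet)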
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
import shutil
import tempfile
import unittest
from logging import DEBUG
from mock import patch, call, DEFAULT
from contextlib import nested
from swift.account import reaper
from swift.account.backend import DATADIR
from swift.common.exceptions import ClientException
from swift.common.utils import normalize_timestamp
from test import unit
from swift.common.storage_policy import StoragePolicy, POLICIES
class FakeLogger(object):
def __init__(self, *args, **kwargs):
self.inc = {'return_codes.4': 0,
'return_codes.2': 0,
'objects_failures': 0,
'objects_deleted': 0,
'objects_remaining': 0,
'objects_possibly_remaining': 0,
'containers_failures': 0,
'containers_deleted': 0,
'containers_remaining': 0,
'containers_possibly_remaining': 0}
self.exp = []
def info(self, msg, *args):
self.msg = msg
def error(self, msg, *args):
self.msg = msg
def timing_since(*args, **kwargs):
pass
def getEffectiveLevel(self):
return DEBUG
def exception(self, *args):
self.exp.append(args)
def increment(self, key):
self.inc[key] += 1
class FakeBroker(object):
def __init__(self):
self.info = {}
def get_info(self):
return self.info
class FakeAccountBroker(object):
def __init__(self, containers):
self.containers = containers
def get_info(self):
info = {'account': 'a',
'delete_timestamp': time.time() - 10}
return info
def list_containers_iter(self, *args):
for cont in self.containers:
yield cont, None, None, None
def is_status_deleted(self):
return True
def empty(self):
return False
class FakeRing(object):
def __init__(self):
self.nodes = [{'id': '1',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
{'id': '2',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
{'id': '3',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
]
def get_nodes(self, *args, **kwargs):
return ('partition', self.nodes)
def get_part_nodes(self, *args, **kwargs):
return self.nodes
acc_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
cont_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
@unit.patch_policies([StoragePolicy(0, 'zero', False,
object_ring=unit.FakeRing()),
StoragePolicy(1, 'one', True,
object_ring=unit.FakeRing())])
class TestReaper(unittest.TestCase):
def setUp(self):
self.to_delete = []
self.myexp = ClientException("", http_host=None,
http_port=None,
http_device=None,
http_status=404,
http_reason=None
)
def tearDown(self):
for todel in self.to_delete:
shutil.rmtree(todel)
def fake_direct_delete_object(self, *args, **kwargs):
if self.amount_fail < self.max_fail:
self.amount_fail += 1
raise self.myexp
def fake_direct_delete_container(self, *args, **kwargs):
if self.amount_delete_fail < self.max_delete_fail:
self.amount_delete_fail += 1
raise self.myexp
def fake_direct_get_container(self, *args, **kwargs):
if self.get_fail:
raise self.myexp
objects = [{'name': 'o1'},
{'name': 'o2'},
{'name': unicode('o3')},
{'name': ''}]
return None, objects
def fake_container_ring(self):
return FakeRing()
def fake_reap_object(self, *args, **kwargs):
if self.reap_obj_fail:
raise Exception
def prepare_data_dir(self, ts=False):
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
path = os.path.join(path, '100',
'a86', 'a8c682d2472e1720f2d81ff8993aba6')
os.makedirs(path)
suffix = 'db'
if ts:
suffix = 'ts'
with open(os.path.join(path, 'a8c682203aba6.%s' % suffix), 'w') as fd:
fd.write('')
return devices_path
def init_reaper(self, conf=None, myips=None, fakelogger=False):
if conf is None:
conf = {}
if myips is None:
myips = ['10.10.10.1']
r = reaper.AccountReaper(conf)
r.stats_return_codes = {}
r.stats_containers_deleted = 0
r.stats_containers_remaining = 0
r.stats_containers_possibly_remaining = 0
r.stats_objects_deleted = 0
r.stats_objects_remaining = 0
r.stats_objects_possibly_remaining = 0
r.myips = myips
if fakelogger:
r.logger = FakeLogger()
return r
def fake_reap_account(self, *args, **kwargs):
self.called_amount += 1
def fake_account_ring(self):
return FakeRing()
def test_delay_reaping_conf_default(self):
r = reaper.AccountReaper({})
self.assertEqual(r.delay_reaping, 0)
r = reaper.AccountReaper({'delay_reaping': ''})
self.assertEqual(r.delay_reaping, 0)
def test_delay_reaping_conf_set(self):
r = reaper.AccountReaper({'delay_reaping': '123'})
self.assertEqual(r.delay_reaping, 123)
def test_delay_reaping_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'delay_reaping': 'abc'})
def test_reap_warn_after_conf_set(self):
conf = {'delay_reaping': '2', 'reap_warn_after': '3'}
r = reaper.AccountReaper(conf)
self.assertEqual(r.reap_not_done_after, 5)
def test_reap_warn_after_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'reap_warn_after': 'abc'})
def test_reap_delay(self):
time_value = [100]
def _time():
return time_value[0]
time_orig = reaper.time
try:
reaper.time = _time
r = reaper.AccountReaper({'delay_reaping': '10'})
b = FakeBroker()
b.info['delete_timestamp'] = normalize_timestamp(110)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(100)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(90)
self.assertFalse(r.reap_account(b, 0, None))
# KeyError raised immediately as reap_account tries to get the
# account's name to do the reaping.
b.info['delete_timestamp'] = normalize_timestamp(89)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
b.info['delete_timestamp'] = normalize_timestamp(1)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
finally:
reaper.time = time_orig
def test_reap_object(self):
conf = {
'mount_check': 'false',
}
r = reaper.AccountReaper(conf, logger=unit.debug_logger())
ring = unit.FakeRing()
mock_path = 'swift.account.reaper.direct_delete_object'
for policy in POLICIES:
r.reset_stats()
with patch(mock_path) as fake_direct_delete:
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
for i, call_args in enumerate(
fake_direct_delete.call_args_list):
cnode = cont_nodes[i]
host = '%(ip)s:%(port)s' % cnode
device = cnode['device']
headers = {
'X-Container-Host': host,
'X-Container-Partition': 'partition',
'X-Container-Device': device,
'X-Backend-Storage-Policy-Index': policy.idx
}
ring = r.get_object_ring(policy.idx)
expected = call(ring.devs[i], 0, 'a', 'c', 'o',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(r.stats_objects_deleted, 3)
def test_reap_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.amount_fail = 0
self.max_fail = 1
policy = random.choice(list(POLICIES))
with patch('swift.account.reaper.direct_delete_object',
self.fake_direct_delete_object):
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
self.assertEqual(r.stats_objects_deleted, 1)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 1)
def test_reap_object_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2)
self.assertEqual(r.stats_objects_deleted, 0)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 0)
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container(self):
policy = random.choice(list(POLICIES))
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': policy.idx}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
mock_calls = mocks['direct_delete_object'].call_args_list
self.assertEqual(3, len(mock_calls))
for call_args in mock_calls:
_args, kwargs = call_args
self.assertEqual(kwargs['headers']
['X-Backend-Storage-Policy-Index'],
policy.idx)
            self.assertEqual(mocks['direct_delete_container'].call_count, 3)
self.assertEqual(r.stats_objects_deleted, 3)
def test_reap_container_get_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = True
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 0
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.inc['return_codes.4'], 1)
self.assertEqual(r.stats_containers_deleted, 1)
def test_reap_container_partial_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 2
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.inc['return_codes.4'], 2)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 3
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.inc['return_codes.4'], 3)
self.assertEqual(r.stats_containers_remaining, 1)
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': 2}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.msg,
'ERROR: invalid storage policy index: 2')
def fake_reap_container(self, *args, **kwargs):
self.called_amount += 1
self.r.stats_containers_deleted = 1
self.r.stats_objects_deleted = 1
self.r.stats_containers_remaining = 1
self.r.stats_objects_remaining = 1
self.r.stats_containers_possibly_remaining = 1
self.r.stats_objects_possibly_remaining = 1
def test_reap_account(self):
containers = ('c1', 'c2', 'c3', '')
broker = FakeAccountBroker(containers)
self.called_amount = 0
self.r = r = self.init_reaper({}, fakelogger=True)
r.start_time = time.time()
ctx = [patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring)]
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertEqual(self.called_amount, 4)
self.assertEqual(r.logger.msg.find('Completed pass'), 0)
        self.assertIn('1 containers deleted', r.logger.msg)
        self.assertIn('1 objects deleted', r.logger.msg)
        self.assertIn('1 containers remaining', r.logger.msg)
        self.assertIn('1 objects remaining', r.logger.msg)
        self.assertIn('1 containers possibly remaining', r.logger.msg)
        self.assertIn('1 objects possibly remaining', r.logger.msg)
def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple())
self.r = r = self.init_reaper({}, fakelogger=True)
self.called_amount = 0
r.start_time = time.time()
ctx = [patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring)]
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertEqual(r.logger.msg.find('Completed pass'), 0)
self.assertEqual(self.called_amount, 0)
def test_reap_device(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf)
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 1)
def test_reap_device_with_ts(self):
devices = self.prepare_data_dir(ts=True)
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf=conf)
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_reap_device_with_not_my_ip(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.1.2'])
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_run_once(self):
def prepare_data_dir():
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
return devices_path
def init_reaper(devices):
r = reaper.AccountReaper({'devices': devices})
return r
devices = prepare_data_dir()
r = init_reaper(devices)
with patch('swift.account.reaper.ismount', lambda x: True):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
                self.assertTrue(foo.called)
with patch('swift.account.reaper.ismount', lambda x: False):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertFalse(foo.called)
def test_run_forever(self):
def fake_sleep(val):
self.val = val
def fake_random():
return 1
def fake_run_once():
raise Exception('exit')
def init_reaper():
r = reaper.AccountReaper({'interval': 1})
r.run_once = fake_run_once
return r
r = init_reaper()
with patch('swift.account.reaper.sleep', fake_sleep):
with patch('swift.account.reaper.random.random', fake_random):
try:
r.run_forever()
except Exception as err:
pass
self.assertEqual(self.val, 1)
self.assertEqual(str(err), 'exit')
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from ast import literal_eval
from warnings import warn
from typing import (
Tuple,
List,
Set
)
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QKeyEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsLineItem,
QGraphicsRectItem,
QGraphicsSceneMouseEvent,
QGraphicsSceneHoverEvent
)
from cadnano.objectinstance import ObjectInstance
from cadnano.part.nucleicacidpart import NucleicAcidPart
from cadnano.proxies.cnenum import (
GridEnum,
HandleEnum,
EnumType
)
from cadnano.fileio.lattice import (
HoneycombDnaPart,
SquareDnaPart
)
from cadnano.controllers import NucleicAcidPartItemController
from cadnano.gui.palette import (
getBrushObj,
getNoBrush,
getNoPen,
getPenObj
)
from cadnano.views.abstractitems import QAbstractPartItem
from cadnano.part.nucleicacidpart import DEFAULT_RADIUS
from cadnano.views.resizehandles import ResizeHandleGroup
from cadnano.views.sliceview.sliceextras import ShortestPathHelper
from . import slicestyles as styles
from .griditem import (
GridItem,
GridEvent,
GridPoint
)
from .prexovermanager import PreXoverManager
from .virtualhelixitem import SliceVirtualHelixItem
from . import (
SliceRootItemT,
AbstractSliceToolT
)
from cadnano.views.sliceview.tools import (
CreateSliceToolT
)
from cadnano.cntypes import (
ABInfoT,
VirtualHelixT,
KeyT,
ValueT,
WindowT,
Vec2T,
RectT,
Vec3T
)
_DEFAULT_WIDTH = styles.DEFAULT_PEN_WIDTH
_DEFAULT_ALPHA = styles.DEFAULT_ALPHA
_SELECTED_COLOR = styles.SELECTED_COLOR
_SELECTED_WIDTH = styles.SELECTED_PEN_WIDTH
_SELECTED_ALPHA = styles.SELECTED_ALPHA
_HANDLE_SIZE = 8
class SliceNucleicAcidPartItem(QAbstractPartItem):
"""Parent should be either a SliceRootItem, or an AssemblyItem.
Invariant: keys in _empty_helix_hash = range(_nrows) x range(_ncols)
where x is the cartesian product.
Attributes:
active_virtual_helix_item: Description
resize_handle_group (ResizeHandleGroup): handles for dragging and resizing
griditem (GridItem): Description
outline (QGraphicsRectItem): Description
prexover_manager (PreXoverManager): Description
scale_factor (float): Description
"""
_RADIUS = styles.SLICE_HELIX_RADIUS
_RADIUS_TUPLE = (DEFAULT_RADIUS, _RADIUS)
_BOUNDING_RECT_PADDING = 80
def __init__(self, part_instance: ObjectInstance,
viewroot: SliceRootItemT):
"""
Args:
part_instance: ``ObjectInstance`` of the ``Part``
viewroot: ``SliceRootItem`` and parent object
"""
super(SliceNucleicAcidPartItem, self).__init__(part_instance, viewroot)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self.shortest_path_start = None
self.coordinates_to_vhid = dict()
self._last_hovered_coord = None
self._last_hovered_item = None
self._highlighted_path = []
self._highlighted_copypaste = []
self.lock_hints = False
self.copypaste_origin_offset = None
self.spa_start_vhi = None
self.last_mouse_position = None
self._highlighted_grid_point = None
self._getActiveTool = viewroot.manager.activeToolGetter
m_p = self._model_part
self._controller = NucleicAcidPartItemController(self, m_p)
self.scale_factor = self._RADIUS / m_p.radius()
self.inverse_scale_factor = m_p.radius() / self._RADIUS
self.active_virtual_helix_item: SliceVirtualHelixItem = None
self.prexover_manager = PreXoverManager(self)
        self.hide()  # hide until after attemptResize() to avoid flicker
self._rect: QRectF = QRectF(0., 0., 1000., 1000.) # set this to a token value
self.boundRectToModel()
self.setPen(getNoPen())
self.setRect(self._rect)
self.setAcceptHoverEvents(True)
self.shortest_path_add_mode = False
# Cache of VHs that were active as of last call to activeSliceChanged
# If None, all slices will be redrawn and the cache will be filled.
# Connect destructor. This is for removing a part from scenes.
# initialize the NucleicAcidPartItem with an empty set of old coords
self.setZValue(styles.ZPARTITEM)
self.outline = outline = QGraphicsRectItem(self)
o_rect = self._configureOutline(outline)
outline.setFlag(QGraphicsItem.ItemStacksBehindParent)
outline.setZValue(styles.ZDESELECTOR)
model_color = m_p.getColor()
self.outline.setPen(getPenObj(model_color, _DEFAULT_WIDTH))
self.model_bounds_hint = QGraphicsRectItem(self)
self.model_bounds_hint.setBrush(getBrushObj(model_color, alpha=12))
self.model_bounds_hint.setPen(getNoPen())
self.resize_handle_group = ResizeHandleGroup(o_rect, _HANDLE_SIZE, model_color, True,
HandleEnum.TOP |
HandleEnum.BOTTOM |
HandleEnum.LEFT |
HandleEnum.RIGHT |
HandleEnum.TOP_LEFT |
HandleEnum.TOP_RIGHT |
HandleEnum.BOTTOM_LEFT |
HandleEnum.BOTTOM_RIGHT,
self, show_coords=True)
self.griditem = GridItem(self, self._model_props['grid_type'])
self.griditem.setZValue(1)
self.resize_handle_group.setZValue(2)
self.x_axis_line = QGraphicsLineItem(0, 0, self._RADIUS, 0, self)
self.x_axis_line.setPen(getPenObj('#cc0000', _DEFAULT_WIDTH))
self.x_axis_line.setZValue(styles.ZAXIS)
self.y_axis_line = QGraphicsLineItem(0, 0, 0, -self._RADIUS, self)
self.y_axis_line.setPen(getPenObj('#007200', _DEFAULT_WIDTH))
self.y_axis_line.setZValue(styles.ZAXIS)
# select upon creation
for part in m_p.document().children():
if part is m_p:
part.setSelected(True)
else:
part.setSelected(False)
self.show()
# end def
### SIGNALS ###
### SLOTS ###
def partActiveVirtualHelixChangedSlot(self, part: NucleicAcidPart, id_num: int):
"""Summary
Args:
part: Description
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
"""
vhi = self._virtual_helix_item_hash.get(id_num)
self.setActiveVirtualHelixItem(vhi)
self.setPreXoverItemsVisible(vhi)
# end def
def partActiveBaseInfoSlot(self, part: NucleicAcidPart, info: ABInfoT):
"""Summary
Args:
part: Description
info: Description
"""
pxom = self.prexover_manager
pxom.deactivateNeighbors()
        if info:
id_num, is_fwd, idx, _ = info
pxom.activateNeighbors(id_num, is_fwd, idx)
# end def
def partPropertyChangedSlot(self, part: NucleicAcidPart, key: str, new_value):
"""Slot for property chaing
Args:
part: The model part
key: map key
new_value (Any): Description
"""
if self._model_part == part:
self._model_props[key] = new_value
if key == 'color':
self.outline.setPen(getPenObj(new_value, _DEFAULT_WIDTH))
for vhi in self._virtual_helix_item_hash.values():
vhi.updateAppearance()
self.resize_handle_group.setPens(getPenObj(new_value, 0))
elif key == 'is_visible':
if new_value:
self.show()
else:
self.hide()
elif key == 'grid_type':
self.griditem.setGridEnum(new_value)
# end def
def partRemovedSlot(self, sender: NucleicAcidPart):
"""Slot wrapper for ``destroyItem()``
Args:
sender: Model object that emitted the signal.
"""
return self.destroyItem()
# end def
def destroyItem(self):
'''Remove this object and references to it from the view
'''
print("destroying SliceNucleicAcidPartItem")
for id_num in list(self._virtual_helix_item_hash.keys()):
self.removeVirtualHelixItem(id_num)
self.prexover_manager.destroyItem()
self.prexover_manager = None
scene = self.scene()
scene.removeItem(self.x_axis_line)
self.x_axis_line = None
scene.removeItem(self.y_axis_line)
self.y_axis_line = None
scene.removeItem(self.outline)
self.outline = None
scene.removeItem(self.model_bounds_hint)
self.model_bounds_hint = None
self.resize_handle_group.destroyItem()
self.resize_handle_group = None
self.griditem.destroyItem()
self.griditem = None
super(SliceNucleicAcidPartItem, self).destroyItem()
# end def
def partVirtualHelicesTranslatedSlot(self, part: NucleicAcidPart,
vh_set: Set[int],
left_overs: Set[int],
do_deselect: bool):
"""left_overs are neighbors that need updating due to changes
Args:
part: Model object that emitted the signal.
vh_set: Description
left_overs: Description
do_deselect: Description
"""
if do_deselect:
tool = self._getActiveTool()
if tool.methodPrefix() == "selectTool":
if tool.isSelectionActive():
# tool.deselectItems()
tool.modelClear()
# 1. move everything that moved
for id_num in vh_set:
vhi = self._virtual_helix_item_hash[id_num]
vhi.updatePosition()
# 2. now redraw what makes sense to be redrawn
for id_num in vh_set:
vhi = self._virtual_helix_item_hash[id_num]
self._refreshVirtualHelixItemGizmos(id_num, vhi)
for id_num in left_overs:
vhi = self._virtual_helix_item_hash[id_num]
self._refreshVirtualHelixItemGizmos(id_num, vhi)
# 0. clear PreXovers:
# self.prexover_manager.hideGroups()
# if self.active_virtual_helix_item is not None:
# self.active_virtual_helix_item.deactivate()
# self.active_virtual_helix_item = None
avhi = self.active_virtual_helix_item
self.setPreXoverItemsVisible(avhi)
self.enlargeRectToFit()
# end def
def _refreshVirtualHelixItemGizmos(self, id_num: int,
vhi: SliceVirtualHelixItem):
"""Update props and appearance of self & recent neighbors. Ultimately
triggered by a partVirtualHelicesTranslatedSignal.
Args:
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
vhi: the item associated with id_num
"""
neighbors = vhi.getProperty('neighbors')
neighbors = literal_eval(neighbors)
vhi.beginAddWedgeGizmos()
for nvh in neighbors:
nvhi = self._virtual_helix_item_hash.get(nvh, False)
if nvhi:
vhi.setWedgeGizmo(nvh, nvhi)
# end for
vhi.endAddWedgeGizmos()
# end def
def partVirtualHelixPropertyChangedSlot(self, sender: NucleicAcidPart,
id_num: int,
virtual_helix: VirtualHelixT,
keys: KeyT,
values: ValueT):
"""
Args:
sender: Model object that emitted the signal.
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
keys: keys that changed
values: new values for each key that changed
"""
if self._model_part == sender:
vh_i = self._virtual_helix_item_hash[id_num]
vh_i.virtualHelixPropertyChangedSlot(keys, values)
# end def
def partVirtualHelixAddedSlot(self, sender: NucleicAcidPart,
id_num: int,
virtual_helix: VirtualHelixT,
neighbors: List[int]):
"""
Args:
sender: Model object that emitted the signal.
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
neighbors: Description
"""
if self._viewroot.are_signals_on:
vhi = SliceVirtualHelixItem(id_num, self)
self._virtual_helix_item_hash[id_num] = vhi
self._refreshVirtualHelixItemGizmos(id_num, vhi)
for neighbor_id in neighbors:
nvhi = self._virtual_helix_item_hash.get(neighbor_id, False)
if nvhi:
self._refreshVirtualHelixItemGizmos(neighbor_id, nvhi)
self.enlargeRectToFit()
position = sender.locationQt(id_num=id_num,
scale_factor=self.scale_factor)
if self.griditem.grid_type is GridEnum.HONEYCOMB:
coordinates = HoneycombDnaPart.positionModelToLatticeCoord(DEFAULT_RADIUS,
position[0],
position[1],
scale_factor=self.scale_factor)
else:
coordinates = SquareDnaPart.positionModelToLatticeCoord(DEFAULT_RADIUS,
position[0],
position[1],
scale_factor=self.scale_factor)
assert id_num not in self.coordinates_to_vhid.values()
            if coordinates in self.coordinates_to_vhid:
                print('COORDINATES DUPLICATE %s in %s' % (coordinates, self.coordinates_to_vhid.keys()))
self.coordinates_to_vhid[coordinates] = id_num
assert len(self.coordinates_to_vhid.keys()) == len(set(self.coordinates_to_vhid.keys()))
assert len(self.coordinates_to_vhid.values()) == len(set(self.coordinates_to_vhid.values()))
else:
self.coordinates_to_vhid[coordinates] = id_num
# end def
def partVirtualHelixRemovingSlot(self, sender: NucleicAcidPart,
id_num: int,
virtual_helix: VirtualHelixT,
neighbors: List[int]):
"""
Args:
sender: Model object that emitted the signal.
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
neighbors: Description
"""
tm = self._viewroot.manager
tm.resetTools()
self.removeVirtualHelixItem(id_num)
for neighbor_id in neighbors:
nvhi = self._virtual_helix_item_hash[neighbor_id]
self._refreshVirtualHelixItemGizmos(neighbor_id, nvhi)
for coordinates, current_id in self.coordinates_to_vhid.items():
if current_id == id_num:
del self.coordinates_to_vhid[coordinates]
break
assert id_num not in self.coordinates_to_vhid.values()
assert len(self.coordinates_to_vhid.keys()) == len(set(self.coordinates_to_vhid.keys()))
assert len(self.coordinates_to_vhid.values()) == len(set(self.coordinates_to_vhid.values()))
# end def
def partSelectedChangedSlot(self, model_part: NucleicAcidPart,
is_selected: bool):
"""Set this Z to front, and return other Zs to default.
Args:
model_part: The model part
is_selected: Description
"""
if is_selected:
# self._drag_handle.resetAppearance(_SELECTED_COLOR, _SELECTED_WIDTH, _SELECTED_ALPHA)
self.setZValue(styles.ZPARTITEM + 1)
else:
# self._drag_handle.resetAppearance(self.modelColor(), _DEFAULT_WIDTH, _DEFAULT_ALPHA)
self.setZValue(styles.ZPARTITEM)
# end def
def partVirtualHelicesSelectedSlot(self, sender: NucleicAcidPart,
vh_set: Set[int],
is_adding: bool):
"""
Args:
sender: Model object that emitted the signal.
vh_set: Description
is_adding: adding (``True``) virtual helices to a selection
or removing (``False``)
"""
select_tool = self._viewroot.select_tool
if is_adding:
select_tool.selection_set.update(vh_set)
select_tool.setPartItem(self)
select_tool.getSelectionBoundingRect()
else:
select_tool.deselectSet(vh_set)
# end def
def partDocumentSettingChangedSlot(self, part: NucleicAcidPart,
key: str,
value: str):
"""Slot for handling changes to Document settings
Args:
part: the model :class:`NucleicAcidPart`
key: key to the dictionary, must be `grid`
value: value
Raises:
NotImplementedError:
ValueError: Unknown grid styling
"""
warn( "partDocumentSettingChangedSlot is not implemented GridItem.setDrawlines needs to be implemented")
return
if key == 'grid':
if value == 'lines and points':
self.griditem.setDrawlines(True)
elif value == 'points':
self.griditem.setDrawlines(False)
elif value == 'circles':
NotImplementedError("not implented circles value")
else:
raise ValueError("unknown grid styling: {}".format(value))
else:
raise NotImplementedError("unknown key {}".format(key))
### ACCESSORS ###
def boundingRect(self) -> QRectF:
"""
"""
return self._rect
# end def
def modelColor(self) -> str:
"""
"""
return self._model_props['color']
# end def
def window(self) -> WindowT:
"""
"""
return self.parentItem().window()
# end def
def setActiveVirtualHelixItem(self, new_active_vhi: SliceVirtualHelixItem):
"""
Args:
new_active_vhi: Description
"""
current_vhi = self.active_virtual_helix_item
if new_active_vhi != current_vhi:
if current_vhi is not None:
current_vhi.deactivate()
if new_active_vhi is not None:
new_active_vhi.activate()
self.active_virtual_helix_item = new_active_vhi
# end def
def setPreXoverItemsVisible(self, virtual_helix_item: SliceVirtualHelixItem):
"""
self._pre_xover_items list references prexovers parented to other
PathHelices such that only the activeHelix maintains the list of
visible prexovers
Args:
virtual_helix_item: Description
"""
vhi = virtual_helix_item
pxom = self.prexover_manager
if vhi is None:
pxom.hideGroups()
return
part = self.part()
info = part.active_base_info
if info:
id_num, is_fwd, idx, to_vh_id_num = info
per_neighbor_hits, pairs = part.potentialCrossoverMap(id_num, idx)
pxom.activateVirtualHelix(virtual_helix_item, idx,
per_neighbor_hits, pairs)
# end def
def removeVirtualHelixItem(self, id_num: int):
"""
Args:
id_num: VirtualHelix ID number. See :class:`NucleicAcidPart` for
description and related methods.
"""
vhi = self._virtual_helix_item_hash[id_num]
if vhi == self.active_virtual_helix_item:
self.active_virtual_helix_item = None
vhi.virtualHelixRemovedSlot()
del self._virtual_helix_item_hash[id_num]
# When any VH is removed, turn SPA mode off
self.shortest_path_add_mode = False
self.shortest_path_start = None
# end def
def reconfigureRect(self, top_left: Vec2T,
bottom_right: Vec2T,
finish: bool = False,
padding: int = 80) -> QRectF:
"""Reconfigures the rectangle that is the document.
Args:
top_left: A tuple corresponding to the x-y coordinates of
top left corner of the document
            bottom_right: A tuple corresponding to the x-y coordinates
                of the bottom right corner of the document
        Returns:
            the adjusted outline rect (:class:`QRectF`), including padding
"""
rect = self._rect
ptTL = QPointF(*self._padTL(padding, *top_left)) if top_left else rect.topLeft()
ptBR = QPointF(*self._padBR(padding, *bottom_right)) if bottom_right else rect.bottomRight()
self._rect = QRectF(ptTL, ptBR)
self.setRect(self._rect)
self._configureOutline(self.outline)
self.griditem.updateGrid()
return self.outline.rect()
# end def
def _padTL(self, padding: int, xTL: float, yTL: float) -> Vec2T:
return xTL + padding, yTL + padding
# end def
def _padBR(self, padding: int, xBR: float, yBR: float) -> Vec2T:
return xBR - padding, yBR - padding
# end def
def enlargeRectToFit(self):
"""Enlarges Part Rectangle to fit the model bounds.
        This should be called when adding a SliceVirtualHelixItem. It enlarges
        the rectangle to ensure that it fits the design. It checks the model
        size to do this, but also takes into account any expansions the user
        has made to the rectangle, so as not to shrink the rectangle after the
        user has expanded it.
"""
padding = self._BOUNDING_RECT_PADDING
model_left, model_top, model_right, model_bottom = self.getModelMinBounds()
rect_left, rect_right, rect_bottom, rect_top = self.bounds()
xTL = min(rect_left, model_left) - padding
xBR = max(rect_right, model_right) + padding
yTL = min(rect_top, model_top) - padding
yBR = max(rect_bottom, model_bottom) + padding
new_outline_rect = self.reconfigureRect(top_left=(xTL, yTL), bottom_right=(xBR, yBR))
self.resize_handle_group.alignHandles(new_outline_rect)
### PRIVATE SUPPORT METHODS ###
def _configureOutline(self, outline: QGraphicsRectItem) -> QRectF:
"""Adjusts ``outline`` size with default padding.
Args:
outline: Description
Returns:
o_rect: ``outline`` rect adjusted by ``_BOUNDING_RECT_PADDING``
"""
_p = self._BOUNDING_RECT_PADDING
o_rect = self.rect().adjusted(-_p, -_p, _p, _p)
outline.setRect(o_rect)
return o_rect
# end def
def boundRectToModel(self):
"""Update the size of the rectangle corresponding to the grid to
the size of the model or a minimum size (whichever is greater).
"""
xTL, yTL, xBR, yBR = self.getModelMinBounds()
self._rect = QRectF(QPointF(xTL, yTL), QPointF(xBR, yBR))
# end def
def getModelMinBounds(self, handle_type=None) -> RectT:
"""Bounds in form of Qt scaled from model
Args:
handle_type: Default is ``None``
"""
xLL, yLL, xUR, yUR = self.part().boundDimensions(self.scale_factor)
# return xLL, -yUR, xUR, -yLL
r = self._RADIUS
return xLL - r, -yUR - r, xUR + r, -yLL + r
# end def
def bounds(self) -> RectT:
"""x_low, x_high, y_low, y_high
"""
rect = self._rect
return (rect.left(), rect.right(), rect.bottom(), rect.top())
### PUBLIC SUPPORT METHODS ###
def setLastHoveredItem(self, gridpoint: GridPoint):
"""Stores the last self-reported gridpoint to be hovered.
Args:
gridpoint: the hoveree
"""
self._last_hovered_item = gridpoint
def setModifyState(self, bool_val: bool):
"""Hides the mod_rect when modify state disabled.
Args:
bool_val: what the modifystate should be set to.
"""
pass
# end def
def showModelMinBoundsHint(self, handle_type: EnumType, show: bool = True):
"""Shows QGraphicsRectItem reflecting current model bounds.
ResizeHandleGroup should toggle this when resizing.
Args:
handle_type: :class:`HandleEnum`
show:
"""
m_b_h = self.model_bounds_hint
if show:
xTL, yTL, xBR, yBR = self.getModelMinBounds()
m_b_h.setRect(QRectF(QPointF(xTL, yTL), QPointF(xBR, yBR)))
m_b_h.show()
else:
m_b_h.hide()
def updateStatusBar(self, status_str: str, timeout: float = 0):
"""Shows status_str in the MainWindow's status bar.
Args:
            status_str: Description to display in status bar.
            timeout: How long to show the message, in milliseconds; 0 keeps it
                visible until the next message replaces it.
        """
self.window().statusBar().showMessage(status_str, timeout)
# end def
def zoomToFit(self):
"""Ask the view to zoom to fit.
"""
thescene = self.scene()
theview = thescene.views()[0]
theview.zoomToFit()
# end def
### EVENT HANDLERS ###
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
"""Handler for user mouse press.
Args:
event: Contains item, scene, and screen coordinates of the event,
and previous event.
"""
if event.button() == Qt.RightButton:
return
part = self._model_part
part.setSelected(True)
if self.isMovable():
return QGraphicsItem.mousePressEvent(self, event)
tool = self._getActiveTool()
if tool.FILTER_NAME not in part.document().filter_set:
return
tool_method_name = tool.methodPrefix() + "MousePress"
if tool_method_name == 'createToolMousePress':
return
elif hasattr(self, tool_method_name):
getattr(self, tool_method_name)(tool, event)
else:
            event.setAccepted(False)
QGraphicsItem.mousePressEvent(self, event)
# end def
def hoverMoveEvent(self, event: QGraphicsSceneHoverEvent):
mapped_position = self.griditem.mapFromScene(event.scenePos())
self.last_mouse_position = (mapped_position.x(), mapped_position.y())
tool = self._getActiveTool()
tool_method_name = tool.methodPrefix() + "HoverMove"
if hasattr(self, tool_method_name):
getattr(self, tool_method_name)(tool, event)
else:
event.setAccepted(False)
QGraphicsItem.hoverMoveEvent(self, event)
# end def
def getModelPos(self, pos: QPointF) -> Vec3T:
"""Y-axis is inverted in Qt +y === DOWN
Args:
pos: Description
"""
sf = self.scale_factor
x, y = pos.x()/sf, -1.0*pos.y()/sf
return x, y, 0.
# end def
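    # Example (illustrative): with scale_factor ``s``, a Qt point ``(s, -s)``
    # maps to the model position ``(1.0, 1.0, 0.0)``.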
def getVirtualHelixItem(self, id_num: int):
"""
Args:
id_num: VirtualHelix ID number. See :class:`NucleicAcidPart` for
description and related methods.
Returns:
:class:`SliceVirtualHelixItem` of the ``id_num``
"""
return self._virtual_helix_item_hash.get(id_num)
# end def
def keyPressEvent(self, event: QKeyEvent):
is_alt = bool(event.modifiers() & Qt.AltModifier)
isInLatticeCoord = HoneycombDnaPart.isInLatticeCoord if self.griditem.grid_type is GridEnum.HONEYCOMB \
else SquareDnaPart.isInLatticeCoord
if event.key() == Qt.Key_Escape:
# print("Esc here")
self._setShortestPathStart(None)
self.removeAllCreateHints()
if isInLatticeCoord(radius_tuple=self._RADIUS_TUPLE,
xy_tuple=self.last_mouse_position,
coordinate_tuple=self.getLastHoveredCoordinates(),
scale_factor=self.scale_factor):
self.highlightOneGridPoint(self.getLastHoveredCoordinates())
tool = self._getActiveTool()
if tool.methodPrefix() == 'selectTool':
self.removeAllCopyPasteHints()
tool.clipboard = None
elif is_alt and self.shortest_path_add_mode is True and isInLatticeCoord(radius_tuple=self._RADIUS_TUPLE,
xy_tuple=self.last_mouse_position,
coordinate_tuple=self.getLastHoveredCoordinates(),
scale_factor=self.scale_factor):
self._previewSpa(self.last_mouse_position)
elif is_alt and self.getLastHoveredCoordinates() and self.last_mouse_position:
if isInLatticeCoord(radius_tuple=self._RADIUS_TUPLE,
xy_tuple=self.last_mouse_position,
coordinate_tuple=self.getLastHoveredCoordinates(),
scale_factor=self.scale_factor):
coord = self.getLastHoveredCoordinates()
self.highlightOneGridPoint(coord, styles.SPA_START_HINT_COLOR)
self.griditem.highlightGridPoint(coord[0], coord[1], on=True)
# end def
def keyReleaseEvent(self, event: QKeyEvent):
is_alt = bool(event.modifiers() & Qt.AltModifier)
if not is_alt:
self.removeAllCreateHints()
isInLatticeCoord = HoneycombDnaPart.isInLatticeCoord if self.griditem.grid_type is GridEnum.HONEYCOMB \
else SquareDnaPart.isInLatticeCoord
if isInLatticeCoord(radius_tuple=self._RADIUS_TUPLE,
xy_tuple=self.last_mouse_position,
coordinate_tuple=self.getLastHoveredCoordinates(),
scale_factor=self.scale_factor):
coord = self.getLastHoveredCoordinates()
self.highlightOneGridPoint(coord)
self.griditem.highlightGridPoint(coord[0], coord[1], on=True)
# end def
def createToolMousePress(self, tool: CreateSliceToolT,
event: QGraphicsSceneMouseEvent,
alt_event: GridEvent = None):
"""Creates individual or groups of VHs in Part on user input.
Shift modifier enables multi-helix addition.
Args:
event (TYPE): Description
alt_event (None, optional): Description
"""
mapped_position = self.griditem.mapFromScene(event.scenePos())
position = (mapped_position.x(), mapped_position.y())
# 1. get point in model coordinates:
part = self._model_part
if alt_event is None:
pt = tool.eventToPosition(self, event)
else:
pt = alt_event.pos()
if pt is None:
tool.deactivate()
return QGraphicsItem.mousePressEvent(self, event)
part_pt_tuple: Vec3T = self.getModelPos(pt)
modifiers = event.modifiers()
is_spa_mode = modifiers == Qt.AltModifier
last_added_spa_vhi_id = self._handleShortestPathMousePress(tool=tool,
position=position,
is_spa_mode=is_spa_mode)
if last_added_spa_vhi_id is not None:
return
row, column = self.getLastHoveredCoordinates()
parity = self._getCoordinateParity(row, column)
part.createVirtualHelix(x=part_pt_tuple[0],
y=part_pt_tuple[1],
parity=parity)
id_num = part.getVirtualHelixAtPoint(part_pt_tuple)
vhi = self._virtual_helix_item_hash[id_num]
tool.setVirtualHelixItem(vhi)
tool.startCreation()
if is_spa_mode:
self._highlightSpaVH(id_num)
# end def
def _getModelXYforCoord(self, row: int, column: int) -> Vec2T:
radius = DEFAULT_RADIUS
if self.griditem.grid_type is GridEnum.HONEYCOMB:
return HoneycombDnaPart.latticeCoordToQtXY(radius, row, column)
elif self.griditem.grid_type is GridEnum.SQUARE:
return SquareDnaPart.latticeCoordToQtXY(radius, row, column)
else:
return None
# end def
def _getCoordinateParity(self, row, column):
if self.griditem.grid_type is GridEnum.HONEYCOMB:
return 0 if HoneycombDnaPart.isEvenParity(row=row, column=column) else 1
elif self.griditem.grid_type is GridEnum.SQUARE:
return 0 if SquareDnaPart.isEvenParity(row=row, column=column) else 1
else:
return None
# end def
def _handleShortestPathMousePress(self, tool, position, is_spa_mode):
"""
Handles logic for determining if SPA mode should be activated or
continued.
Args:
tool ():
position (tuple): the xy coordinates of the mouse press
is_spa_mode (bool): whether or not this event is a SPA event
Returns:
            True if nothing needs to be done by the caller (i.e. this method
            and its callees added VHs as necessary), False otherwise
"""
if is_spa_mode:
# Complete the path
if self.shortest_path_start is not None:
last_vhi_id = self.createToolShortestPath(tool=tool, start=self.shortest_path_start, end=position)
if last_vhi_id is not None:
self._setShortestPathStart(position)
self._highlightSpaVH(last_vhi_id)
return last_vhi_id
# Initialize SPA
else:
self._setShortestPathStart(position)
else:
self._setShortestPathStart(None)
def _setShortestPathStart(self, position):
# TODO[NF]: Docstring
if position is not None:
self.shortest_path_add_mode = True
self.shortest_path_start = position
else:
self.shortest_path_add_mode = False
self.shortest_path_start = None
self._highlightSpaVH(None)
def _highlightSpaVH(self, vh_id):
# TODO[NF]: Docstring
if self.spa_start_vhi:
self.spa_start_vhi.setBrush(getNoBrush())
if vh_id is None:
self.spa_start_vhi = None
else:
self.spa_start_vhi = self._virtual_helix_item_hash[vh_id]
self.spa_start_vhi.setBrush(getBrushObj(styles.SPA_START_HINT_COLOR, alpha=32))
# end def
def createToolShortestPath(self, tool, start, end):
"""
Handle the creation of VHIs for SPA mode.
Args:
tool ():
start (tuple): the x-y coordinates of the start point
end (tuple): the x-y coordinates of the end point
Returns:
The ID of the last VHI created
"""
path = ShortestPathHelper.shortestPathXY(start=start,
end=end,
vh_set=self.coordinates_to_vhid.keys(),
grid_type=self.griditem.grid_type,
scale_factor=self.scale_factor,
part_radius=DEFAULT_RADIUS)
# Abort and exit SPA if there is no path from start to end
if path == []:
self.shortest_path_start = None
self.shortest_path_add_mode = False
return None
else:
x_list, y_list, parity_list = zip(*path)
id_numbers = self._model_part.batchCreateVirtualHelices(x_list=x_list,
y_list=y_list,
parities=parity_list)
for id_number in id_numbers:
vhi = self._virtual_helix_item_hash[id_number]
tool.setVirtualHelixItem(vhi)
tool.startCreation()
return id_number
# end def
def createToolHoverMove(self, tool, event):
"""Summary
Args:
tool (TYPE): Description
event (TYPE): Description
Returns:
TYPE: Description
"""
        is_alt = bool(event.modifiers() & Qt.AltModifier)
mapped_position = self.griditem.mapFromScene(event.scenePos())
event_xy = (mapped_position.x(), mapped_position.y())
if self.griditem.grid_type is GridEnum.HONEYCOMB:
event_coord = HoneycombDnaPart.positionModelToLatticeCoord(DEFAULT_RADIUS,
event_xy[0],
event_xy[1],
scale_factor=self.scale_factor,
strict=True)
elif self.griditem.grid_type is GridEnum.SQUARE:
event_coord = SquareDnaPart.positionModelToLatticeCoord(DEFAULT_RADIUS,
event_xy[0],
event_xy[1],
scale_factor=self.scale_factor,
strict=True)
else:
event_coord = None
self.last_mouse_position = event_xy
if event_coord:
try:
grid_point = self.griditem.points_dict[(event_coord)]
self.setLastHoveredItem(grid_point)
except KeyError:
pass
        # Un-highlight previously hinted GridItems if necessary
if len(self._highlighted_path) > 1 or (self._highlighted_path and self._highlighted_path[0] != event_coord):
self.removeAllCreateHints()
self._highlighted_grid_point = event_coord
if event_coord:
self.griditem.highlightGridPoint(row=event_coord[0], column=event_coord[1], on=True)
# Highlight GridItems if alt is being held down
if is_alt and self.shortest_path_add_mode and event_coord is not None:
self._previewSpa(event_xy)
else:
if is_alt and event_coord is not None:
self.highlightOneGridPoint(self.getLastHoveredCoordinates(), styles.SPA_START_HINT_COLOR)
elif not is_alt and event_coord is not None:
part = self._model_part
next_idnums = (part._getNewIdNum(0), part._getNewIdNum(1))
self.griditem.showCreateHint(event_coord, next_idnums=next_idnums)
self._highlighted_path.append(event_coord)
tool.hoverMoveEvent(self, event)
return QGraphicsItem.hoverMoveEvent(self, event)
# end def
def _previewSpa(self, event_xy):
"""
Highlight and add VH ID numbers to the GridPoints that the SPA would
use.
Args:
event_xy (tuple): the x-y coordinates corresponding to the
position of the mouse
Returns:
None
"""
part = self._model_part
start_xy = self.shortest_path_start
end_xy = event_xy
        self._highlighted_path = ShortestPathHelper.shortestPathAStar(start=start_xy,
                                                                      end=end_xy,
                                                                      part_radius=DEFAULT_RADIUS,
                                                                      vh_set=self.coordinates_to_vhid.keys(),
                                                                      grid_type=self.griditem.grid_type,
                                                                      scale_factor=self.scale_factor)
even_id = part._getNewIdNum(0)
odd_id = part._getNewIdNum(1)
for coord in self._highlighted_path:
# This can return True, False or None
is_odd = self.griditem.showCreateHint(coord, next_idnums=(even_id, odd_id))
if is_odd is True:
odd_id += 2
elif is_odd is False:
even_id += 2
# end def
def createToolHoverLeave(self, tool, event):
self.removeAllCreateHints()
return QGraphicsItem.hoverLeaveEvent(self, event)
# end def
def selectToolHoverEnter(self, tool, event):
"""
Hint vh coords that will be created if clipboard is pasted at hoverEnter
position.
"""
if tool.clipboard is None: # is there anything on the clipboard?
return
self.removeAllCopyPasteHints()
event_pos = self.griditem.mapFromScene(event.scenePos())
positionToLatticeCoord = HoneycombDnaPart.positionModelToLatticeCoord\
if self.griditem.grid_type is GridEnum.HONEYCOMB else SquareDnaPart.positionModelToLatticeCoord
hov_row, hov_col = positionToLatticeCoord(DEFAULT_RADIUS,
event_pos.x(),
event_pos.y(),
self.scale_factor)
self._last_hovered_coord = (hov_row, hov_col)
parity = self._getCoordinateParity(hov_row, hov_col)
part = self._model_part
vh_id_list = tool.clipboard['vh_list']
try:
min_id_same_parity = int(min(filter(lambda x: x[0] % 2 == parity, vh_id_list))[0])
except ValueError: # no vhs match parity
return
min_pos = part.locationQt(min_id_same_parity, self.scaleFactor())
min_row, min_col = positionToLatticeCoord(DEFAULT_RADIUS,
min_pos[0],
min_pos[1],
self.scale_factor)
id_offset = part.getMaxIdNum() if part.getMaxIdNum() % 2 == 0 else part.getMaxIdNum() + 1
# placing clipboard's min_id_same_parity on the hovered_coord,
# hint neighboring coords with offsets corresponding to clipboard vhs
hinted_coordinates = []
copied_coordinates = []
for i in range(len(vh_id_list)):
vh_id, vh_len = vh_id_list[i]
position_xy = part.locationQt(vh_id, self.scaleFactor())
copied_row, copied_col = positionToLatticeCoord(DEFAULT_RADIUS,
position_xy[0],
position_xy[1],
self.scale_factor)
hint_coord = (hov_row+(copied_row-min_row), hov_col+(copied_col-min_col))
hinted_coordinates.append(hint_coord)
copied_coordinates.append((copied_row, copied_col))
# If any of the highlighted coordinates conflict with any existing VHs, abort
if any(coord in self.coordinates_to_vhid.keys() for coord in hinted_coordinates):
print('Conflict')
self.copypaste_origin_offset = None
return
print(self.coordinates_to_vhid)
for i, hint_coord in enumerate(hinted_coordinates):
self.griditem.showCreateHint(hint_coord, next_idnums=(i+id_offset, i+id_offset))
self._highlighted_copypaste.append(hint_coord)
# print("clipboard contents:", vh_id_list, min_idnum, idnum_offset)
hov_x, hov_y = self._getModelXYforCoord(hov_row, hov_col)
min_x, min_y, _ = part.getVirtualHelixOrigin(min_id_same_parity)
self.copypaste_origin_offset = (round(hov_x-min_x, 9), round(hov_y-min_y, 9))
# end def
def selectToolHoverMove(self, tool, event):
"""
Hint vh coords that will be created if clipboard is pasted at hoverMove
position.
"""
if tool.clipboard is None: # is there anything on the clipboard?
return
isInLatticeCoord = HoneycombDnaPart.isInLatticeCoord if self.griditem.grid_type is GridEnum.HONEYCOMB \
else SquareDnaPart.isInLatticeCoord
event_pos = self.griditem.mapFromScene(event.scenePos())
event_position_xy = (event_pos.x(), event_pos.y())
positionToLatticeCoord = HoneycombDnaPart.positionModelToLatticeCoord \
if self.griditem.grid_type is GridEnum.HONEYCOMB else SquareDnaPart.positionModelToLatticeCoord
hover_coordinates = positionToLatticeCoord(DEFAULT_RADIUS,
event_position_xy[0],
event_position_xy[1],
self.scale_factor)
if self._last_hovered_coord == hover_coordinates or not isInLatticeCoord(radius_tuple=self._RADIUS_TUPLE,
xy_tuple=self.last_mouse_position,
coordinate_tuple=self.getLastHoveredCoordinates(),
scale_factor=self.scale_factor):
return
else:
self._last_hovered_coord = hover_coordinates
self.removeAllCopyPasteHints()
parity = self._getCoordinateParity(hover_coordinates[0], hover_coordinates[1])
vh_id_list = tool.clipboard['vh_list']
try:
min_id_same_parity = int(min(filter(lambda x: x[0] % 2 == parity, vh_id_list))[0])
except ValueError:
return
part = self._model_part
min_pos = part.locationQt(min_id_same_parity, self.scaleFactor())
min_row, min_col = positionToLatticeCoord(DEFAULT_RADIUS,
min_pos[0],
min_pos[1],
self.scale_factor)
id_offset = part.getMaxIdNum() if part.getMaxIdNum() % 2 == 0 else part.getMaxIdNum() + 1
# placing clipboard's min_id_same_parity on the hovered_coord,
# hint neighboring coords with offsets corresponding to clipboard vhs
hinted_coordinates = []
for i in range(len(vh_id_list)):
vh_id, vh_len = vh_id_list[i]
position_xy = part.locationQt(vh_id, self.scaleFactor())
copied_row, copied_col = positionToLatticeCoord(DEFAULT_RADIUS,
position_xy[0],
position_xy[1],
self.scale_factor)
hint_coord = (hover_coordinates[0]+(copied_row-min_row), hover_coordinates[1]+(copied_col-min_col))
hinted_coordinates.append(hint_coord)
# If any of the highlighted coordinates conflict with any existing VHs, abort
if any(coord in self.coordinates_to_vhid.keys() for coord in hinted_coordinates):
self.copypaste_origin_offset = None
return
for i, hint_coord in enumerate(hinted_coordinates):
self.griditem.showCreateHint(hint_coord, next_idnums=(i+id_offset, i+id_offset))
self._highlighted_copypaste.append(hint_coord)
        # Note: this gives the offset between the hover location and the
        # min-parity VH's current location; what we actually want is the offset
        # between the min-parity VH's former and new locations.
hov_x, hov_y = self._getModelXYforCoord(hover_coordinates[0], hover_coordinates[1])
min_x, min_y, _ = part.getCoordinate(min_id_same_parity, 0)
self.copypaste_origin_offset = (round(hov_x-min_x, 9), round(hov_y-min_y, 9))
# end def
def selectToolHoverLeave(self, tool, event):
self.removeAllCopyPasteHints()
# end def
def selectToolMousePress(self, tool, event):
"""
Args:
tool (TYPE): Description
event (TYPE): Description
"""
if tool.clipboard is not None:
self.pasteClipboard(tool, event)
tool.setPartItem(self)
pt = tool.eventToPosition(self, event)
part_pt_tuple: Vec3T = self.getModelPos(pt)
part = self._model_part
if part.isVirtualHelixNearPoint(part_pt_tuple):
id_num = part.getVirtualHelixAtPoint(part_pt_tuple)
if id_num >= 0:
pass
# loc = part.getCoordinate(id_num, 0)
# print("VirtualHelix #{} at ({:.3f}, {:.3f})".format(id_num, loc[0], loc[1]))
else:
# tool.deselectItems()
tool.modelClear()
else:
# tool.deselectItems()
tool.modelClear()
return QGraphicsItem.mousePressEvent(self, event)
# end def
def pasteClipboard(self, tool, event):
assert tool.clipboard is not None
assert isinstance(event, QGraphicsSceneMouseEvent)
new_vhs = tool.pasteClipboard()
def removeAllCopyPasteHints(self):
if self.lock_hints:
return
for coord in self._highlighted_copypaste:
self.griditem.showCreateHint(coord, show_hint=False)
self._highlighted_copypaste = []
self.copypaste_origin_offset = None
# end def
def removeAllCreateHints(self):
"""
Remove the create hints from each currently hinted GridItem.
Iterates over all coordinates in self._highlighted_path.
Returns:
None
"""
if self.lock_hints:
return
for coord in self._highlighted_path:
self.griditem.showCreateHint(coord, show_hint=False)
self.griditem.highlightGridPoint(coord[0],
coord[1],
on=False)
self._highlighted_path = []
# end def
def highlightOneGridPoint(self, coordinates, color=None):
"""
Add a hint to one GridPoint.
Args:
coordinates (tuple): the row-column coordinates of the gridPoint to
be highlighted
color (): the color that the gridPoint should be changed to
Returns:
None
"""
if coordinates is None:
return
        assert isinstance(coordinates, tuple) and len(coordinates) == 2
assert isinstance(coordinates[0], int) and isinstance(coordinates[1], int)
next_idnums = (self._model_part._getNewIdNum(0), self._model_part._getNewIdNum(1))
self.griditem.showCreateHint(coordinates, next_idnums=next_idnums, color=color)
self._highlighted_path.append(coordinates)
# end def
def getLastHoveredCoordinates(self):
"""
Get the row and column corresponding to the GridPoint that was most
recently hovered over.
This accounts for the fact that the rows are inverted (i.e. the result
returned by this method will match the coordinate system stored in this
class' internal records of coordinates)
Returns:
A tuple corresponding to the row and column of the most recently
hovered GridPoint.
"""
if self._last_hovered_item:
row, column = self._last_hovered_item.coord()
return -row, column
# end def
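    # Example (illustrative): if the last hovered GridPoint reports
    # ``coord() == (-3, 5)``, this method returns ``(3, 5)``, matching the
    # row/column convention used in ``self.coordinates_to_vhid``.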
# end class
|
|
# needs access to libtcc and math.h
# TODO: *get tcc errors (currently something like 'Unknown error 3217941984',
# this makes debugging painful)
# *currently the compiled function accepts too many arguments silently
# *implement multi-dimensional functions for frange
# *list comprehension syntax for frange?
# *configuration of path to libtcc.so
# *add gcc support again (easier to set up than tcc)
# *fix compiler warnings
# heavily inspired by http://www.cs.tut.fi/~ask/cinpy/
"""
Experimental module for compiling functions to machine code.
Can also be used to generate C code from SymPy expressions.
Depends on libtcc.
This code is experimental. It may have severe bugs. Due to the use of C, it's
able to crash your Python interpreter/debugger with obscure error messages.
64 bit floats (double) are used.
Overview
========
clambdify: compile a function to machine code (only useful for big functions)
frange: evaluate a function on a range of numbers using machine code
cexpr: translate a Python expression to a C expression
genfcode: generate C code from a lambda string
evalonarray: evaluate a function on an array using machine code
Performance
===========
Python functions using the math module are *quite* fast. For simple functions
they are faster than functions compiled to machine code, so first check
whether lambdify is fast enough for you.
Iterating is slow in Python (it is probably the biggest bottleneck).
frange allows you to iterate using machine code. This can result in huge
speedups. You might also want to use NumPy: http://numpy.org/
For simple functions NumPy is faster, but for big ones frange can be several
times more efficient.
Test which solution is best for your application.
You can run the included benchmarks to see the real performance on your machine.
Configuration
=============
You will probably need to compile libtcc on your own. Get the sources of tcc:
http://bellard.org/tcc/
Currently it only works with a recent development version, so you might want
to run the following commands (use your own paths, of course):
$ cvs -z3 -d:pserver:anonymous@cvs.savannah.nongnu.org:/sources/tinycc co tinycc
$ cd tinycc
$ ./configure
$ make
$ gcc -shared -Wl,-soname,libtcc.so -o libtcc.so libtcc.o
$ cd sympy/utilities/
$ ln -s tinycc/libtcc.so # or change libtccpath in compilef.py
You might try to run libtcc_test. If something went wrong, there will be bad
low-level Python errors, probably crashing the interpreter. The error output
will be printed to stdout or stderr, which might be different from your Python
shell.
Make sure that this module knows the path to libtcc.
If everything went right, all the tests will pass. Run this file to do so and
to see the results of some benchmarks.
"""
import os
import ctypes
from sympy import Symbol, cse, sympify
from sympy.utilities.lambdify import lambdastr as getlambdastr
try:
import numpy
except ImportError:
numpy = None
libtccpath = './libtcc.so'
dps = 17 # decimal places of float precision
# load libtcc TODO: better Windows support
libtcc = ctypes.cdll.LoadLibrary(libtccpath)
if not libtcc:
raise ImportError('Could not load libtcc')
def __getLeftRight(expr, index, oplength=1, stopchar='+-'):
"""
Gets the expressions to the left and right of an operator.
>>> __getLeftRight('1/(g(x)*3.5)**(x - a**x)/(x**2 + a)', 12,
... oplength=2, stopchar='+-*/')
('(g(x)*3.5)', '(x - a**x)')
"""
# assumes correct syntax
# TODO: never repeat yourself
# get left expression
left = ''
openbraces = 0
for char in reversed(expr[:index]):
if char == ' ': # skip whitespaces but keep them
left = char + left
continue
elif char == ')':
openbraces += 1
left = char + left
elif char == '(':
if not openbraces: # happens when operator is in braces
break
openbraces -= 1
left = char + left
elif char in stopchar:
if openbraces:
left = char + left
continue
else:
break
else:
left = char + left
# get right expression
right = ''
openbraces = 0
for char in expr[index+oplength:]:
if char == ' ': # skip whitespaces but keep them
right += char
continue
elif char == '(':
openbraces += 1
right += char
elif char == ')':
if not openbraces: # happens when operator is in braces
break
openbraces -= 1
right += char
elif char in stopchar:
if openbraces:
right += char
continue
else:
break
else:
right += char
return (left, right)
def cexpr(pyexpr):
"""
Python math expression string -> C expression string
"""
# TODO: better spacing
# replace 'a**b' with 'pow(a, b)'
while True:
index = pyexpr.find('**')
if index != -1:
left, right = __getLeftRight(pyexpr, index, 2, '+-*/')
pyexpr = pyexpr.replace(left + '**' + right, ' pow(%s, %s) '
% (left.lstrip(), right.rstrip()))
else:
break
# TODO: convert 'x**n' to 'x*x*...*x'
# TODO: avoid integer division
return pyexpr
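# For example (whitespace stripped for readability):
#     cexpr('x**2 + y').replace(' ', '')  ->  'pow(x,2)+y'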
def _gentmpvars():
"""
Generate symbols tmp1, tmp2, ... infinitely.
"""
i = 0
while True:
i += 1
yield Symbol('tmp' + str(i))
def genfcode(lambdastr, use_cse=False):
"""
Python lambda string -> C function code
Optionally cse() is used to eliminate common subexpressions.
"""
# TODO: verify lambda string
# interpret lambda string
varstr, fstr = lambdastr.split(': ')
varstr = varstr.lstrip('lambda ')
# generate C variable string
cvars = varstr.split(',')
cvarstr = ''
for v in cvars:
cvarstr += 'double %s, ' % v
cvarstr = cvarstr.rstrip(', ')
# convert function string to C syntax
if not use_cse:
cfstr = ''
finalexpr = cexpr(fstr)
else:
# eliminate common subexpressions
subs, finalexpr = cse(sympify(fstr), _gentmpvars())
assert len(finalexpr) == 1
vardec = ''
cfstr = ''
for symbol, expr in subs:
vardec += ' double %s;\n' % symbol.name
cfstr += ' %s = %s;\n' % (symbol.name, cexpr(str(expr.evalf(dps))))
cfstr = vardec + cfstr
finalexpr = cexpr(str(finalexpr[0].evalf(dps)))
# generate C code
code = """
inline double f(%s)
{
%s
return %s;
}
""" % (cvarstr, cfstr, finalexpr)
return code
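# For example, genfcode('lambda x: x**2') returns C source roughly of the form
#     inline double f(double x) { return pow(x, 2); }
# (the exact whitespace differs).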
def __run(cmd):
"""
    Checks the return code of a libtcc call.
"""
    if cmd != 0:
        raise Exception('could not run libtcc command')
def _compile(code, argcount=None, fname='f', fprototype=None):
"""
C code with function -> compiled function
Supports all standard C math functions, pi and e.
Function is assumed to get and return 'double' only.
Uses libtcc.
"""
# returned type and all arguments are double
if fprototype:
fprototype = ctypes.CFUNCTYPE(*fprototype)
else:
assert argcount, 'need argcount if no prototype is specified'
fprototype = ctypes.CFUNCTYPE(*[ctypes.c_double]*(argcount+1))
# see libtcc.h for API documentation
tccstate = libtcc.tcc_new()
__run(libtcc.tcc_set_output_type(tccstate, 0)) # output to memory
##print libtcc.tcc_add_library_path(tccstate, mathh) # could be dropped
__run(libtcc.tcc_add_library(tccstate, 'm')) # use math.h FIXME: Windows
# compile string
__run(libtcc.tcc_compile_string(tccstate, code))
__run(libtcc.tcc_relocate(tccstate)) # fails if link error
# create C variable to get result
symbol = ctypes.c_long()
__run(libtcc.tcc_get_symbol(tccstate, ctypes.byref(symbol), fname))
# return reference to C function
return fprototype(symbol.value)
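# A minimal sketch of direct use (assumes libtcc compiles the snippet):
#     double_it = _compile("double f(double x) { return 2.0 * x; }", argcount=1)
#     double_it(21.0)    # -> 42.0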
# expr needs to work with lambdastr
def clambdify(args, expr, **kwargs):
"""
SymPy expression -> compiled function
Supports all standard C math functions, pi and e.
>>> from sympy import symbols, sqrt
>>> x, y = symbols('xy')
>>> cf = clambdify((x,y), sqrt(x*y))
>>> cf(0.5, 4)
1.4142135623730951
"""
# convert function to lambda string
s = getlambdastr(args, expr.evalf(21))
# generate code
code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
""" % genfcode(s, **kwargs)
# compile code
return _compile(code, len(args))
def frange(*args, **kwargs):
"""
frange(lambdastr, [start,] stop[, step]) -> ctypes double array
Evaluates function on range using machine code.
Currently only one-dimensional functions are supported.
For simple functions it's somewhat slower than NumPy.
For big functions it can be several times faster.
lambdastr has the same restrictions as in clambdify.
>>> frange('lambda x: sqrt(x)', 1, 4) # doctest: +ELLIPSIS
<__main__.c_double_Array_3 object at ...>
>>> for i in _:
... print i
...
1.0
1.41421356237
1.73205080757
"""
if len(args) > 4:
raise TypeError('expected at most 4 arguments, got %i' % len(args))
if len(args) < 2:
        raise TypeError('expected at least 2 arguments, got %i' % len(args))
# interpret arguments
lambdastr = args[0]
start = 0
step = 1
if len(args) == 2:
stop = args[1]
elif len(args) >= 3:
start = args[1]
stop = args[2]
if len(args) == 4:
step = args[3]
assert start + step != start, \
'step is too small and would cause an infinite loop'
# determine length of resulting array
# TODO: do this better
length = stop - start
if length % step == 0:
length = length/step - 1 # exclude last one
else:
length = length//step
if step > 0:
if start < stop:
length += 1 # include first one
else:
if start > stop:
length += 1 # include first one
if length < 0:
length = 0
assert length == int(length)
length = int(length)
# create array
a = (ctypes.c_double * length)()
# generate code
vardef = 'double* MAX; double x = %f;' % start
loopbody = '*result = f(x); x += %f;' % step
code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
void evalonrange(double *result, int n)
{
%s
for (MAX = result + n; result < MAX; result++)
{
%s
}
}
""" % (genfcode(lambdastr, **kwargs), vardef, loopbody)
# compile and run
evalonrange = _compile(code, fname='evalonrange',
fprototype=[None, ctypes.c_void_p, ctypes.c_int])
evalonrange(ctypes.byref(a), ctypes.c_int(length))
# return ctypes array with results
return a
def evalonarray(lambdastr, array, length=None, **kwargs):
"""
Evaluates a function on an array using machine code.
array can be a numpy array, a ctypes array or a pointer to an array.
In the latter case, the correct length must be specified.
    array will be overwritten! Make a copy beforehand to avoid this.
"""
# interpret arguments
if hasattr(array, 'ctypes'): # numpy array
pointer = array.ctypes.get_as_parameter()
length = len(array)
elif isinstance(array, ctypes.Array): # ctypes array
pointer = ctypes.byref(array)
length = len(array)
elif isinstance(array, ctypes.c_void_p): # ctypes pointer FIXME
pointer = array
assert isinstance(length, int) and not length < 0
else:
raise ValueError, 'array type not recognized'
# generate code
code = """
# include <math.h>
# define pi M_PI
# define e M_E
%s
void evalonarray(double *array, int length)
{
double* MAX;
for (MAX = array + length; array < MAX; array++)
{
*array = f(*array);
}
}
""" % genfcode(lambdastr, **kwargs)
    # compile and run on the array
run = _compile(code, fname='evalonarray',
fprototype=[None, ctypes.c_void_p, ctypes.c_int])
run(pointer, length)
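# Usage sketch, mirroring test_evalonarray_ctypes() below: the array is
# modified in place, so copy it first if the original values are still needed.
#
#     a = frange('lambda x: x', 10)       # ctypes doubles 0.0 .. 9.0
#     evalonarray('lambda x: sin(x)', a)  # now a[i] == sin(i)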
#########
# TESTS #
#########
from sympy import sqrt, pi, lambdify
from math import exp, cos, sin
def test_cexpr():
expr = '1/(g(x)*3.5)**(x - a**x)/(x**2 + a)'
assert cexpr(expr).replace(' ', '') == \
'1/pow((g(x)*3.5),(x-pow(a,x)))/(pow(x,2)+a)'
def test_clambdify():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f1 = sqrt(x*y)
pf1 = lambdify((x, y), f1, 'math')
cf1 = clambdify((x, y), f1)
for i in xrange(10):
assert cf1(i, 10 - i) == pf1(i, 10 - i)
f2 = (x - y) / z * pi
pf2 = lambdify((x, y, z), f2, 'math')
cf2 = clambdify((x, y, z), f2)
assert round(pf2(1, 2, 3), 14) == round(cf2(1, 2, 3), 14)
# FIXME: slight difference in precision
def test_frange():
fstr = 'lambda x: exp(x)*cos(x)**x'
f = eval(fstr)
a = frange(fstr, 30, 168, 3)
args = range(30, 168, 3)
assert len(a) == len(args)
for i in xrange(len(a)):
assert a[i] == f(args[i])
assert len(frange('lambda x: x', 0, -10000)) == 0
assert len(frange('lambda x: x', -1, -1, 0.0001)) == 0
a = frange('lambda x: x', -5, 5, 0.1)
b = range(-50, 50)
assert len(a) == len(b)
for i in xrange(len(a)):
assert int(round(a[i]*10)) == b[i]
a = frange('lambda x: x', 17, -9, -3)
b = range(17, -9, -3)
assert len(a) == len(b)
for i in xrange(len(a)):
assert a[i] == b[i]
a = frange('lambda x: x', 2.7, -3.1, -1.01)
b = range(270, -310, -101)
assert len(a) == len(b)
for i in xrange(len(a)):
assert int(round(a[i]*100)) == b[i]
assert frange('lambda x: x', 0.2, 0.1, -0.1)[0] == 0.2
assert len(frange('lambda x: x', 0)) == 0
assert len(frange('lambda x: x', 1000, -1)) == 0
assert len(frange('lambda x: x', -1.23, 3.21, -0.0000001)) == 0
try:
frange()
assert False
except TypeError:
pass
try:
frange(1, 2, 3, 4, 5)
assert False
except TypeError:
pass
def test_evalonarray_ctypes():
a = frange('lambda x: x', 10)
evalonarray('lambda x: sin(x)', a)
for i, j in enumerate(a):
assert sin(i) == j
# TODO: test for ctypes pointers
## evalonarray('lambda x: asin(x)', ctypes.byref(a), len(a))
## for i, j in enumerate(a):
## print j
def test_evalonarray_numpy():
a = numpy.arange(10, dtype=float)
evalonarray('lambda x: x + 1', a)
for i, j in enumerate(a):
assert float(i + 1) == j
def test_use_cse():
args = ('lambda x: sqrt(x + 1)**sqrt(x + 1)', 1, 10)
a = frange(*args)
kwargs = {}
kwargs['use_cse'] = True
b = frange(*args, **kwargs)
assert len(a) == len(b)
for i in xrange(len(a)):
assert a[i] == b[i]
def benchmark():
"""
Run some benchmarks for clambdify and frange.
NumPy and Psyco are used as reference if available.
"""
from time import time
from timeit import Timer
def fbenchmark(f, var=[Symbol('x')]):
"""
Do some benchmarks with f using clambdify, lambdify and psyco.
"""
global cf, pf, psyf
start = time()
cf = clambdify(var, f)
print 'compile time (including sympy overhead): %f s' % (time() - start)
pf = lambdify(var, f, 'math')
psyf = None
try:
import psyco
psyf = lambdify(var, f, 'math')
psyco.bind(psyf)
except ImportError:
pass
code = '''for x in (i/1000. for i in xrange(1000)):
f(%s)''' % ('x,'*len(var)).rstrip(',')
t1 = Timer(code, 'from __main__ import cf as f')
t2 = Timer(code, 'from __main__ import pf as f')
if psyf:
t3 = Timer(code, 'from __main__ import psyf as f')
else:
t3 = None
print 'for x = (0, 1, 2, ..., 999)/1000'
print '20 times in 3 runs'
print 'compiled: %.4f %.4f %.4f' % tuple(t1.repeat(3, 20))
print 'Python lambda: %.4f %.4f %.4f' % tuple(t2.repeat(3, 20))
if t3:
print 'Psyco lambda: %.4f %.4f %.4f' % tuple(t3.repeat(3, 20))
print 'big function:'
from sympy import diff, exp, sin, cos, pi, lambdify
x = Symbol('x')
## f1 = diff(exp(x)**2 - sin(x)**pi, x) \
## * x**12-2*x**3+2*exp(x**2)-3*x**7+4*exp(123+x-x**5+2*x**4) \
## * ((x + pi)**5).expand()
f1 = 2*exp(x**2) + x**12*(-pi*sin(x)**((-1) + pi)*cos(x) + 2*exp(2*x)) \
+ 4*(10*pi**3*x**2 + 10*pi**2*x**3 + 5*pi*x**4 + 5*x*pi**4 + pi**5 \
+ x**5)*exp(123 + x + 2*x**4 - x**5) - 2*x**3 - 3*x**7
fbenchmark(f1)
print
print 'simple function:'
y = Symbol('y')
f2 = sqrt(x*y)+x*5
fbenchmark(f2, [x,y])
times = 100000
fstr = 'exp(sin(exp(-x**2)) + sqrt(pi)*cos(x**5/(x**3-x**2+pi*x)))'
print
print 'frange with f(x) ='
print fstr
print 'for x=1, ..., %i' % times
print 'in 3 runs including full compile time'
t4 = Timer("frange('lambda x: %s', 0, %i)" % (fstr, times),
'from __main__ import frange')
try:
import numpy
except ImportError:
numpy = None
print 'frange: %.4f %.4f %.4f' % tuple(t4.repeat(3, 1))
if numpy:
t5 = Timer('x = arange(%i); result = %s' % (times, fstr),
               'from numpy import arange, sqrt, exp, sin, cos, pi')
print 'numpy: %.4f %.4f %.4f' % tuple(t5.repeat(3, 1))
# TODO: integration into fbenchmark
if __name__ == '__main__':
if __debug__:
print 'Running tests...',
test_cexpr()
test_clambdify()
test_frange()
test_evalonarray_ctypes()
if numpy:
test_evalonarray_numpy()
test_use_cse()
import doctest
doctest.testmod()
print 'OK'
print
print 'Running benchmark...'
benchmark()
|
|
"""
This module implements the base model class. All model objects inherit from this class.
"""
from . import H2OFrame
from . import H2OConnection
import h2o
import imp
class ModelBase(object):
def __init__(self):
self._id = None
self._model_json = None
self._metrics_class = None
self._is_xvalidated = False
self._xval_keys = None
self._parms = {} # internal, for object recycle
self.parms = {} # external
self._estimator_type = None
self._future = False # used by __repr__/show to query job state
self._job = None # used when _future is True
@property
def model_id(self):
"""
    :return: This model's identifier.
"""
return self._id
@model_id.setter
def model_id(self, value):
oldname = self.model_id
self._id = value
h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, value))
@property
def params(self):
"""
Get the parameters and the actual/default values only.
:return: A dictionary of parameters used to build this model.
"""
params = {}
for p in self.parms:
params[p] = {"default":self.parms[p]["default_value"], "actual":self.parms[p]["actual_value"]}
return params
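  # Illustrative shape of the dictionary returned by ``params`` (the keys and
  # values depend on the algorithm that built the model; 'ntrees' is a
  # hypothetical example):
  #
  #     {'ntrees': {'default': 50, 'actual': 100}, ...}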
@property
def full_parameters(self):
"""
Get the full specification of all parameters.
:return: a dictionary of parameters used to build this model.
"""
return self.parms
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param test_data: Data to be predicted on.
:return: A new H2OFrame filled with predictions.
"""
if not isinstance(test_data, H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self.model_id + "/frames/" + test_data.frame_id)
# prediction_frame_id = j["predictions_frame"] #j["model_metrics"][0]["predictions"]["frame_id"]["name"]
return h2o.get_frame(j["predictions_frame"]["name"])
def is_cross_validated(self):
"""
:return: True if the model was cross-validated.
"""
return self._is_xvalidated
def xval_keys(self):
"""
:return: The model keys for the cross-validated model.
"""
return self._xval_keys
def get_xval_models(self,key=None):
"""
Return a Model object.
:param key: If None, return all cross-validated models; otherwise return the model that key points to.
:return: A model or list of models.
"""
return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]
@property
def xvals(self):
"""
Return a list of the cross-validated models.
:return: A list of models
"""
return self.get_xval_models()
def deepfeatures(self, test_data, layer):
"""
    Return hidden layer details.
    :param test_data: Data to create a feature space on
    :param layer: 0-based index of the hidden layer to extract
"""
if test_data is None: raise ValueError("Must specify test data")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, deep_features_hidden_layer=layer)
return h2o.get_frame(j["predictions_frame"]["name"])
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix
    :param matrix_id: an integer, ranging from 0 to the number of layers, that specifies the weight matrix to return.
:return: an H2OFrame which represents the weight matrix identified by matrix_id
"""
num_weight_matrices = len(self._model_json['output']['weights'])
if matrix_id not in range(num_weight_matrices):
raise ValueError("Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
"was requested.".format(num_weight_matrices, matrix_id))
return h2o.get_frame(self._model_json['output']['weights'][matrix_id]['URL'].split('/')[3])
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector
    :param vector_id: an integer, ranging from 0 to the number of layers, that specifies the bias vector to return.
:return: an H2OFrame which represents the bias vector identified by vector_id
"""
num_bias_vectors = len(self._model_json['output']['biases'])
if vector_id not in range(num_bias_vectors):
raise ValueError("Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
"was requested.".format(num_bias_vectors, vector_id))
return h2o.get_frame(self._model_json['output']['biases'][vector_id]['URL'].split('/')[3])
def normmul(self):
"""
Normalization/Standardization multipliers for numeric predictors
"""
return self._model_json['output']['normmul']
def normsub(self):
"""
Normalization/Standardization offsets for numeric predictors
"""
return self._model_json['output']['normsub']
def respmul(self):
"""
Normalization/Standardization multipliers for numeric response
"""
return self._model_json['output']['normrespmul']
def respsub(self):
"""
Normalization/Standardization offsets for numeric response
"""
return self._model_json['output']['normrespsub']
def catoffsets(self):
"""
Categorical offsets for one-hot encoding
"""
return self._model_json['output']['catoffsets']
def model_performance(self, test_data=None, train=False, valid=False):
"""
Generate model metrics for this model on test_data.
    :param test_data: Data set against which model metrics shall be computed. Both train and valid arguments are ignored if test_data is not None.
    :param train: Report the training metrics for the model. If test_data is the training data, the training metrics are returned.
    :param valid: Report the validation metrics for the model. If both train and valid are True, the training metrics are returned.
:return: An object of class H2OModelMetrics.
"""
if test_data is None:
if not train and not valid: train = True # default to train
if train: return self._model_json["output"]["training_metrics"]
if valid: return self._model_json["output"]["validation_metrics"]
else: # cases dealing with test_data not None
if not isinstance(test_data, H2OFrame):
raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data))
test_data._eager()
res = H2OConnection.post_json("ModelMetrics/models/" + self._id + "/frames/" + test_data._id)
# FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874
raw_metrics = None
for mm in res["model_metrics"]:
if not mm["frame"] == None and mm["frame"]["name"] == test_data._id:
raw_metrics = mm
break
return self._metrics_class(raw_metrics,algo=self._model_json["algo"])
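  # Usage sketch (names are illustrative): for a trained model ``m`` and an
  # H2OFrame ``test``,
  #
  #     m.model_performance(test)        # metrics computed against ``test``
  #     m.model_performance(train=True)  # cached training metrics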
def score_history(self):
"""
Retrieve Model Score History
:return: the score history (H2OTwoDimTable)
"""
model = self._model_json["output"]
if 'scoring_history' in model.keys() and model["scoring_history"] != None:
s = model["scoring_history"]
if h2o.can_use_pandas():
import pandas
pandas.options.display.max_rows = 20
return pandas.DataFrame(s.cell_values,columns=s.col_header)
return s
else: print "No score history for this model"
def summary(self):
"""
Print a detailed summary of the model.
:return:
"""
model = self._model_json["output"]
if model["model_summary"]:
model["model_summary"].show() # H2OTwoDimTable object
def show(self):
"""
    Print the innards of the model, without regard to type.
:return: None
"""
if self._future:
self._job.poll_once()
return
if self._model_json is None:
print "No model trained yet"
return
model = self._model_json["output"]
print "Model Details"
print "============="
print self.__class__.__name__, ": ", self._model_json["algo_full_name"]
print "Model Key: ", self._id
self.summary()
print
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
if "scoring_history" in model.keys() and model["scoring_history"]: model["scoring_history"].show()
if "variable_importances" in model.keys() and model["variable_importances"]: model["variable_importances"].show()
def varimp(self, return_list=False):
"""
Pretty print the variable importances, or return them in a list
    :param return_list: if True, then return the variable importances in a list (ordered from most important to least
important). Each entry in the list is a 4-tuple of (variable, relative_importance, scaled_importance, percentage).
:return: None or ordered list
"""
model = self._model_json["output"]
if "variable_importances" in model.keys() and model["variable_importances"]:
if not return_list: return model["variable_importances"].show()
else: return model["variable_importances"].cell_values
else:
print "Warning: This model doesn't have variable importances"
def residual_deviance(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param train: Get the residual deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_deviance() if train else self._model_json["output"]["validation_metrics"].residual_deviance()
def residual_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the residual dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].residual_degrees_of_freedom()
def null_deviance(self,train=False,valid=False,xval=False):
"""
    Retrieve the null deviance if this model has the attribute, or None otherwise.
    :param train: Get the null deviance for the training set. If both train and valid are False, then train is selected by default.
    :param valid: Get the null deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_deviance() if train else self._model_json["output"]["validation_metrics"].null_deviance()
def null_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
    Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the null dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].null_degrees_of_freedom()
def pprint_coef(self):
"""
    Pretty print the coefficients table (includes normalized coefficients)
:return: None
"""
print self._model_json["output"]["coefficients_table"] # will return None if no coefs!
def coef(self):
"""
:return: Return the coefficients for this model.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[1] for a in tbl}
def coef_norm(self):
"""
:return: Return the normalized coefficients
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[2] for a in tbl}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be 1 - MSE/var,
where var is computed as sigma*sigma.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the R^2 value for the training data.
:param valid: If valid is True, then return the R^2 value for the validation data.
:param xval: If xval is True, then return the R^2 value for the cross validation data.
:return: The R^2 for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.r2()
return m.values()[0] if len(m) == 1 else m
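  # Return-shape note for r2() and the metric accessors that follow: with a
  # single flag set the bare value is returned; with several flags a dict
  # keyed by "train"/"valid"/"xval" is returned, e.g. (values illustrative)
  #
  #     m.r2()                        # -> 0.87
  #     m.r2(train=True, valid=True)  # -> {'train': 0.87, 'valid': 0.85}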
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the MSE value for the training data.
:param valid: If valid is True, then return the MSE value for the validation data.
:param xval: If xval is True, then return the MSE value for the cross validation data.
:return: The MSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mse()
return m.values()[0] if len(m) == 1 else m
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Log Loss value for the training data.
:param valid: If valid is True, then return the Log Loss value for the validation data.
:param xval: If xval is True, then return the Log Loss value for the cross validation data.
:return: The Log Loss for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.logloss()
return m.values()[0] if len(m) == 1 else m
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviances(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Mean Residual Deviance value for the training data.
:param valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:return: The Mean Residual Deviance for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mean_residual_deviance()
return m.values()[0] if len(m) == 1 else m
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param train: If train is True, then return the AUC value for the training data.
    :param valid: If valid is True, then return the AUC value for the validation data.
    :param xval: If xval is True, then return the AUC value for the cross validation data.
:return: The AUC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.auc()
return m.values()[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param train: If train is True, then return the AIC value for the training data.
    :param valid: If valid is True, then return the AIC value for the validation data.
    :param xval: If xval is True, then return the AIC value for the cross validation data.
:return: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.aic()
return m.values()[0] if len(m) == 1 else m
def giniCoef(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Gini Coefficient value for the training data.
:param valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:return: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.giniCoef()
return m.values()[0] if len(m) == 1 else m
def download_pojo(self,path=""):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
    :param path: An absolute path to the directory where the POJO should be saved.
:return: None
"""
h2o.download_pojo(self,path) # call the "package" function
@staticmethod
def _get_metrics(o, train, valid, xval):
metrics = {}
if train: metrics["train"] = o._model_json["output"]["training_metrics"]
if valid: metrics["valid"] = o._model_json["output"]["validation_metrics"]
if xval : metrics["xval"] = o._model_json["output"]["cross_validation_metrics"]
if len(metrics) == 0: metrics["train"] = o._model_json["output"]["training_metrics"]
return metrics
# Delete from cluster as model goes out of scope
# def __del__(self):
# h2o.remove(self._id)
def _plot(self, timestep, metric, **kwargs):
# check for matplotlib. exit if absent
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required for this function!"
return
scoring_history = self.score_history()
# Separate functionality for GLM since its output is different from other algos
if self._model_json["algo"] == "glm":
# GLM has only one timestep option, which is `iteration`
timestep = "iteration"
if metric == "AUTO": metric = "log_likelihood"
elif metric not in ("log_likelihood", "objective"):
raise ValueError("for GLM, metric must be one of: log_likelihood, objective")
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Validation Scoring History")
plt.plot(scoring_history[timestep], scoring_history[metric])
elif self._model_json["algo"] in ("deeplearning", "drf", "gbm"):
# Set timestep
if self._model_json["algo"] in ("gbm", "drf"):
if timestep == "AUTO": timestep = "number_of_trees"
elif timestep not in ("duration","number_of_trees"):
raise ValueError("timestep for gbm or drf must be one of: duration, number_of_trees")
else: #self._model_json["algo"] == "deeplearning":
# Delete first row of DL scoring history since it contains NAs & NaNs
if scoring_history["samples"][0] == 0:
scoring_history = scoring_history[1:]
if timestep == "AUTO": timestep = "epochs"
elif timestep not in ("epochs","samples","duration"):
raise ValueError("timestep for deeplearning must be one of: epochs, samples, duration")
training_metric = "training_{}".format(metric)
validation_metric = "validation_{}".format(metric)
if timestep == "duration":
dur_colname = "duration_{}".format(scoring_history["duration"][1].split()[1])
scoring_history[dur_colname] = map(lambda x: str(x).split()[0],scoring_history["duration"])
timestep = dur_colname
if h2o.can_use_pandas():
valid = validation_metric in list(scoring_history)
ylim = (scoring_history[[training_metric, validation_metric]].min().min(), scoring_history[[training_metric, validation_metric]].max().max()) if valid \
else (scoring_history[training_metric].min(), scoring_history[training_metric].max())
else:
valid = validation_metric in scoring_history.col_header
ylim = (min(min(scoring_history[[training_metric, validation_metric]])), max(max(scoring_history[[training_metric, validation_metric]]))) if valid \
else (min(scoring_history[training_metric]), max(scoring_history[training_metric]))
if valid: #Training and validation scoring history
plt.xlabel(timestep)
plt.ylabel(metric)
plt.title("Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric], label = "Training")
plt.plot(scoring_history[timestep], scoring_history[validation_metric], color = "orange", label = "Validation")
plt.legend()
else: #Training scoring history only
plt.xlabel(timestep)
plt.ylabel(training_metric)
plt.title("Training Scoring History")
plt.ylim(ylim)
plt.plot(scoring_history[timestep], scoring_history[training_metric])
else: # algo is not glm, deeplearning, drf, gbm
raise ValueError("Plotting not implemented for this type of model")
if "server" not in kwargs.keys() or not kwargs["server"]: plt.show()
@staticmethod
def _check_targets(y_actual, y_predicted):
"""
Check that y_actual and y_predicted have the same length.
:param y_actual: An H2OFrame
:param y_predicted: An H2OFrame
:return: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual),len(y_predicted)))
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import (
keyword_plan_campaign_service,
)
from .base import KeywordPlanCampaignServiceTransport, DEFAULT_CLIENT_INFO
class KeywordPlanCampaignServiceGrpcTransport(
KeywordPlanCampaignServiceTransport
):
"""gRPC backend transport for KeywordPlanCampaignService.
Service to manage Keyword Plan campaigns.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
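    # Usage sketch (illustrative): with application default credentials
    # available in the environment, a channel could be created directly via
    #
    #     channel = KeywordPlanCampaignServiceGrpcTransport.create_channel(
    #         host="googleads.googleapis.com",
    #     )
    #
    # although client code normally relies on __init__ to do this.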
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_keyword_plan_campaigns(
self,
) -> Callable[
[keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest],
keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse,
]:
r"""Return a callable for the mutate keyword plan campaigns method over gRPC.
Creates, updates, or removes Keyword Plan campaigns. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
`KeywordPlanCampaignError <>`__ `KeywordPlanError <>`__
`ListOperationError <>`__ `MutateError <>`__ `QuotaError <>`__
`RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__
Returns:
Callable[[~.MutateKeywordPlanCampaignsRequest],
~.MutateKeywordPlanCampaignsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_keyword_plan_campaigns" not in self._stubs:
self._stubs[
"mutate_keyword_plan_campaigns"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.KeywordPlanCampaignService/MutateKeywordPlanCampaigns",
request_serializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest.serialize,
response_deserializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse.deserialize,
)
return self._stubs["mutate_keyword_plan_campaigns"]
def close(self):
self.grpc_channel.close()
__all__ = ("KeywordPlanCampaignServiceGrpcTransport",)
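# Minimal construction sketch (illustrative; in practice the transport is
# usually built for you by the generated service client):
#
#     transport = KeywordPlanCampaignServiceGrpcTransport(
#         credentials=None,  # falls back to application default credentials
#     )
#     rpc = transport.mutate_keyword_plan_campaigns  # callable stub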
|
|
import pickle
import unittest
import numpy as np
import six
import chainer
from chainer import functions as F
from chainer import optimizers
from chainer import testing
_all_optimizers = [
'AdaDelta',
'AdaGrad',
'Adam',
'AdamW',
'AMSGrad',
'AdaBound',
'AMSBound',
'CorrectedMomentumSGD',
'MomentumSGD',
'MSVAG',
'NesterovAG',
'RMSprop',
'RMSpropGraves',
'SGD',
'SMORMS3',
]
_parameterize_optimizers = testing.parameterize(*testing.product({
'optimizer_impl': [getattr(chainer.optimizers, o) for o in _all_optimizers]
}))
class SimpleChain(chainer.Chain):
def __init__(self, shape=()):
super(SimpleChain, self).__init__()
w_np = np.asarray(np.random.randn(*shape)).astype(np.float32)
with self.init_scope():
self.w = chainer.Parameter(w_np, name='w')
def __call__(self, x):
return F.sum((x - self.w) ** 2)
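# SimpleChain's "loss" is the summed squared distance between the input and
# the parameter w, which gives every optimizer under test a well-defined
# gradient. A minimal, illustrative update step (not part of the test suite):
#
#     model = SimpleChain(shape=(2, 3))
#     opt = optimizers.SGD()
#     opt.setup(model)
#     x = chainer.Variable(np.zeros((2, 3), dtype=np.float32))
#     opt.update(model, x)  # one gradient step pulling w toward x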
class TestAllOptimizersCoverage(unittest.TestCase):
    # Checks that _all_optimizers covers all the built-in optimizers.
def test_all_optimizers_coverage(self):
module = chainer.optimizers
module_optimizers = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, chainer.Optimizer)):
module_optimizers.append(name)
assert sorted(_all_optimizers) == sorted(module_optimizers)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@testing.parameterize(*(
# Optimizers constructed with default arguments
[
{
'optimizer': o,
'kwargs': {}
}
for o in _all_optimizers]
    # https://github.com/chainer/chainer/issues/7424
+ [
{
'optimizer': 'Adam',
'kwargs': {'weight_decay_rate': 0.5},
}]
))
@testing.parameterize(*testing.product(
{'shape': [(2, 3), (), (1, 0, 2)]}
))
class TestOptimizer(unittest.TestCase):
def test_optimizer(self, backend_config):
device = backend_config.device
target = SimpleChain(self.shape)
target.to_device(device)
optimizer_cls = getattr(chainer.optimizers, self.optimizer)
optimizer = optimizer_cls(**self.kwargs)
optimizer.setup(target)
x_np = np.asarray(np.random.randn(*self.shape)).astype(np.float32)
x = chainer.Variable(device.send(x_np))
# Just ensures no error occurs. No numerical check is performed.
optimizer.update(target, x)
@_parameterize_optimizers
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
with self.target.init_scope():
self.target.w = chainer.Parameter()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
# TODO(niboshi): The following optimizers do not pass this test
# because their __init__ do not accept some hyperparameters.
# The test should be fixed.
if self.optimizer_impl in (
chainer.optimizers.AdamW,
chainer.optimizers.AMSGrad,
chainer.optimizers.AdaBound,
chainer.optimizers.AMSBound,
):
raise unittest.SkipTest(
'The optimizer is incompatible with this test')
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
class WeightSaveHook(object):
name = 'WeightSaveHook'
call_for_each_param = True
def __init__(self):
self.value = None
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
self.value = np.copy(p)
@_parameterize_optimizers
class TestOptimizerHooks(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hooks(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_post = WeightSaveHook()
self.create()
self.optimizer.add_hook(h_pre, timing='pre')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='post')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
def test_hooks_auto(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_pre.timing = 'pre'
h_post = WeightSaveHook()
h_post.timing = 'post'
self.create()
self.optimizer.add_hook(h_pre, timing='auto')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='auto')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
@_parameterize_optimizers
class TestOptimizerPickable(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_new_pickle(self):
self.create()
pickled_opt = pickle.dumps(self.optimizer)
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
def test_updated_pickle(self):
self.create()
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
pickled_opt = pickle.dumps(self.optimizer)
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
@_parameterize_optimizers
class TestOptimizerLossScaling(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def test_invalid_configs(self):
self.create()
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(interval=0)
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(scale=-1)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAdamW(unittest.TestCase):
def test_adam_w(self, backend_config):
xp = backend_config.xp
device = backend_config.device
link = chainer.Link(x=(1,))
link.to_device(device)
opt = optimizers.Adam(eta=0.5, weight_decay_rate=0.1)
opt.setup(link)
link.x.data.fill(1)
link.x.grad = device.send(xp.ones_like(link.x.data))
opt.update()
# compare against the value computed with v5 impl
testing.assert_allclose(link.x.data, np.array([0.9495]),
atol=1e-7, rtol=1e-7)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAMSGrad(unittest.TestCase):
def test_amsgrad(self, backend_config):
device = backend_config.device
link = chainer.Link(x=(4,))
x = link.x
x.data.fill(0)
link.to_device(device)
opt = optimizers.Adam(alpha=0.01, beta2=0.7, amsgrad=True)
opt.setup(link)
x.grad = device.send(np.array([1, -1, 10, -10], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[0.3, 0.3, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
[-0.01, 0.01, -0.01, 0.01],
atol=1e-7, rtol=1e-7)
x.grad = device.send(np.array([-10, -10, -1, -1], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[30.21, 30.21, 21.3, 21.3],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.update_rule.state['vhat'],
[30.21, 30.21, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
# result with NumPy
[-0.00377703, 0.01745388, -0.01548985, 0.01686232],
atol=1e-7, rtol=1e-7)
testing.run_module(__name__, __file__)
|
|
import os
import os.path
import tempfile
import shutil
import json
from nose.tools import eq_
from nose.tools import with_setup
from build_pack_utils import utils
from common.integration import ErrorHelper
from common.components import BuildPackAssertHelper
from common.components import HttpdAssertHelper
from common.components import PhpAssertHelper
from common.components import NoWebServerAssertHelper
from common.components import NewRelicAssertHelper
from common.components import DownloadAssertHelper
from common.base import BaseCompileApp
newrelic = utils.load_extension('extensions/newrelic')
def create_manifest_file(manifest_filename, contents):
    with open(manifest_filename, 'w+') as manifest_file:
        manifest_file.write(contents)
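# Usage sketch (mirrors the tests below; ``scratch_dir`` is any temporary
# directory, e.g. one created with tempfile.mkdtemp()):
#
#     create_manifest_file(os.path.join(scratch_dir, 'manifest.yml'), GOOD_MANIFEST)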
class TestNewRelic(object):
def setUp(self):
self.manifest_dir = tempfile.mkdtemp()
self.buildpack_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
self.build_dir = tempfile.mkdtemp('build-')
self.php_dir = os.path.join(self.build_dir, 'php', 'etc')
os.makedirs(self.php_dir)
shutil.copy('defaults/config/php/7.4.x/php.ini', self.php_dir)
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.manifest_dir):
shutil.rmtree(self.manifest_dir)
def test_set_default_version(self):
manifest_filename = os.path.join(self.manifest_dir, 'manifest.yml')
create_manifest_file(manifest_filename, GOOD_MANIFEST)
# create the object with the buildpack manifest
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
del nr._ctx['NEWRELIC_VERSION']
# and test it with our custom manifest
nr._set_default_version(manifest_filename)
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
eq_(nr._ctx['NEWRELIC_VERSION'], '6.4.0.99')
def test_set_default_version_bad_manifest(self):
manifest_filename = os.path.join(self.manifest_dir, 'manifest.yml')
create_manifest_file(manifest_filename, BAD_MANIFEST)
# create the object with the buildpack manifest
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
# and test it with our custom manifest
exception = None
try:
nr._set_default_version(manifest_filename)
except RuntimeError as e:
exception = e
eq_("Error detecting NewRelic default version", str(exception))
def testDefaults(self):
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
eq_(True, 'NEWRELIC_HOST' in nr._ctx.keys())
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
eq_(True, 'NEWRELIC_PACKAGE' in nr._ctx.keys())
eq_(True, 'NEWRELIC_DOWNLOAD_URL' in nr._ctx.keys())
eq_(True, 'NEWRELIC_STRIP' in nr._ctx.keys())
def testShouldNotInstall(self):
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'BP_DIR': self.buildpack_dir
}))
eq_(False, nr.should_install())
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstall(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'BP_DIR': self.buildpack_dir,
'NEWRELIC_LICENSE': 'JUNK_LICENSE',
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
#eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718', nr._php_extn_dir)
eq_(False, nr._php_zts)
#eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-1', nr.app_name)
eq_('JUNK_LICENSE', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstallService(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'VCAP_SERVICES': {
'newrelic': [{
'name': 'newrelic',
'label': 'newrelic',
'tags': ['Monitoring'],
'plan': 'standard',
'credentials': {'licenseKey': 'LICENSE'}}]
},
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
#eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718',
# nr._php_extn_dir)
eq_(False, nr._php_zts)
#eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-1', nr.app_name)
eq_('LICENSE', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstallServiceAndManual(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'VCAP_SERVICES': {
'newrelic': [{
'name': 'newrelic',
'label': 'newrelic',
'tags': ['Monitoring'],
'plan': 'standard',
'credentials': {'licenseKey': 'LICENSE'}}]
},
'NEWRELIC_LICENSE': 'LICENSE2',
'VCAP_APPLICATION': {
'name': 'app-name-2'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
# TODO eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718',
#nr._php_extn_dir)
eq_(False, nr._php_zts)
# TODO eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-2', nr.app_name)
eq_('LICENSE2', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testModifyPhpIni(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'NEWRELIC_LICENSE': 'JUNK_LICENSE',
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
nr.modify_php_ini()
with open(os.path.join(self.php_dir, 'php.ini'), 'rt') as php_ini:
lines = php_ini.readlines()
eq_(True, lines.index('extension=%s\n' % nr.newrelic_so) >= 0)
eq_(True, lines.index('[newrelic]\n') >= 0)
eq_(True, lines.index('newrelic.license=@{NEWRELIC_LICENSE}\n') >= 0)
eq_(True, lines.index('newrelic.appname=%s\n' % nr.app_name) >= 0)
class TestNewRelicCompiled(BaseCompileApp):
def __init__(self):
self.app_name = 'app-1'
def setUp(self):
BaseCompileApp.setUp(self)
os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
os.environ['VCAP_APPLICATION'] = json.dumps({
'name': 'app-name-1'
})
def test_with_httpd_and_newrelic(self):
# helpers to confirm the environment
bp = BuildPackAssertHelper()
nr = NewRelicAssertHelper()
httpd = HttpdAssertHelper()
php = PhpAssertHelper()
# set web server to httpd, since that's what we're expecting here
self.opts.set_web_server('httpd')
# run the compile step of the build pack
output = ErrorHelper().compile(self.bp)
# confirm downloads
DownloadAssertHelper(3, 2).assert_downloads_from_output(output)
# confirm start script
bp.assert_start_script_is_correct(self.build_dir)
httpd.assert_start_script_is_correct(self.build_dir)
php.assert_start_script_is_correct(self.build_dir)
# confirm bp utils installed
bp.assert_scripts_are_installed(self.build_dir)
bp.assert_config_options(self.build_dir)
# check env & proc files
httpd.assert_contents_of_procs_file(self.build_dir)
httpd.assert_contents_of_env_file(self.build_dir)
php.assert_contents_of_procs_file(self.build_dir)
php.assert_contents_of_env_file(self.build_dir)
# webdir exists
httpd.assert_web_dir_exists(self.build_dir, self.opts.get_webdir())
# check php & httpd installed
httpd.assert_files_installed(self.build_dir)
php.assert_files_installed(self.build_dir)
nr.assert_files_installed(self.build_dir)
class TestNewRelicWithApp5(BaseCompileApp):
def __init__(self):
self.app_name = 'app-5'
def setUp(self):
BaseCompileApp.setUp(self)
os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
os.environ['VCAP_APPLICATION'] = json.dumps({
'name': 'app-name-1'
})
def test_standalone(self):
# helpers to confirm the environment
bp = BuildPackAssertHelper()
php = PhpAssertHelper()
none = NoWebServerAssertHelper()
nr = NewRelicAssertHelper()
# no web server
self.opts.set_web_server('none')
# run the compile step of the build pack
output = ErrorHelper().compile(self.bp)
# confirm downloads
DownloadAssertHelper(2, 1).assert_downloads_from_output(output)
# confirm httpd and nginx are not installed
none.assert_no_web_server_is_installed(self.build_dir)
# confirm start script
bp.assert_start_script_is_correct(self.build_dir)
php.assert_start_script_is_correct(self.build_dir)
# confirm bp utils installed
bp.assert_scripts_are_installed(self.build_dir)
# check env & proc files
none.assert_contents_of_procs_file(self.build_dir)
php.assert_contents_of_env_file(self.build_dir)
# webdir exists
none.assert_no_web_dir(self.build_dir, self.opts.get_webdir())
# check php cli installed
none.assert_files_installed(self.build_dir)
nr.assert_files_installed(self.build_dir)
BAD_MANIFEST = '''\
---
language: php
default_versions:
- name: newrelic
version: 99.3.0.161
dependencies:
- name: newrelic
version: 7.4.0.198
uri: https://download.newrelic.com/php_agent/archive/7.4.0.198/newrelic-php5-7.4.0.198-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: 3640d3cad6b5199f54a6b54a627235d6
- name: newrelic
version: 6.4.0.99
uri: https://download.newrelic.com/php_agent/archive/6.4.0.99/newrelic-php5-6.4.0.99-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: a5d5178f0f8133a65baf942a07408ba6
'''
GOOD_MANIFEST = '''\
---
language: php
default_versions:
- name: newrelic
version: 6.4.0.99
dependencies:
- name: newrelic
version: 7.4.0.198
uri: https://download.newrelic.com/php_agent/archive/7.4.0.198/newrelic-php5-7.4.0.198-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: 3640d3cad6b5199f54a6b54a627235d6
- name: newrelic
version: 6.4.0.99
uri: https://download.newrelic.com/php_agent/archive/6.4.0.99/newrelic-php5-6.4.0.99-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: a5d5178f0f8133a65baf942a07408ba6
'''
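# A minimal sketch (not exercised by the tests above) of how fixtures like the
# two manifests could be checked: the "good" manifest pins a default version
# that actually appears under dependencies, while the "bad" one does not.
# Assumes PyYAML is available; the helper name is illustrative only.
def _default_version_is_listed(manifest_text, dependency_name='newrelic'):
    import yaml
    manifest = yaml.safe_load(manifest_text)
    defaults = [entry['version'] for entry in manifest['default_versions']
                if entry['name'] == dependency_name]
    available = [entry['version'] for entry in manifest['dependencies']
                 if entry['name'] == dependency_name]
    return all(version in available for version in defaults)
# _default_version_is_listed(GOOD_MANIFEST) -> True
# _default_version_is_listed(BAD_MANIFEST)  -> False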
|
|
from smimeX509validation import TrustStore, LoadDirChainOfTrust,smimeX509validation, smimeX509ValidationError
from hepixvmitrust.vmitrustlib import time_format_definition as time_format_definition
import logging
import vmcatcher.databaseDefinition as model
import json
trustAnchorMap = ['None','Browser','IGTF']
class output_driver_base(object):
def __init__(self):
self.log = logging.getLogger("output_driver_base")
self.fpOutput = None
self.saSession = None
self.x509anchor = None
def info(self, *args, **kwargs):
self.log.debug("info")
        output = {}
        argSubscription = kwargs.get('Subscription', None)
        if argSubscription != None:
            output["Subscription"] = self.info_Subscription(argSubscription)
        argImageListInstance = kwargs.get('ImageListInstance', None)
        if argImageListInstance != None:
            output["ImageListInstance"] = self.info_ImageListInstance(argImageListInstance)
        argImageInstance = kwargs.get('ImageInstance', None)
        if argImageInstance != None:
            output["ImageInstance"] = self.info_ImageInstance(argImageInstance)
        argImageDefinition = kwargs.get('ImageDefinition', None)
        if argImageDefinition != None:
            output["ImageDefinition"] = self.info_ImageDefinition(argImageDefinition)
        argSubscriptionAuth = kwargs.get('SubscriptionAuth', None)
        if argSubscriptionAuth != None:
            output["SubscriptionAuth"] = self.info_SubscriptionAuth(argSubscriptionAuth)
        argEndorser = kwargs.get('Endorser', None)
        if argEndorser != None:
            output["Endorser"] = self.info_Endorser(argEndorser)
        argEndorserPrincible = kwargs.get('EndorserPrincible', None)
        if argEndorserPrincible != None:
            output["EndorserPrincible"] = self.info_EndorserPrincible(argEndorserPrincible)
        self.fpOutput.write(json.dumps(output, sort_keys=True, indent=4))
#print dir(imagelistInstance)
#display_subscription
def query_imagedef_Identifier(self,imagelistIdentifier):
self.log.debug("query_imagedef_Identifier")
imageDefs = self.saSession.query(model.ImageDefinition,model.Subscription).\
filter(model.Subscription.id == model.ImageDefinition.subscription).\
filter(model.ImageDefinition.identifier==imagelistIdentifier)
return imageDefs
def bitmap_vmcatcher_image(self,imagedef,subscription):
self.log.debug("bitmap_vmcatcher_image")
subauthq = self.saSession.query(model.ImageInstance,model.ImageListInstance).\
filter(model.ImageDefinition.id == imagedef.id).\
filter(model.Subscription.id == subscription.id).\
filter(model.ImageDefinition.subscription == model.Subscription.id).\
filter(model.ImageInstance.fkIdentifier == model.ImageDefinition.id).\
filter(model.ImageInstance.fkimagelistinstance == model.ImageListInstance.id).\
filter(model.Subscription.imagelist_latest == model.ImageListInstance.id)
bimappedOutput = imagedef.cache
if subauthq.count() == 1:
q_result = subauthq.one()
imageInstance = q_result[0]
listInstance = q_result[1]
available = 0
if ((imagedef.latest == imageInstance.id) and (subscription.authorised == 1) and
(subscription.imagelist_latest == listInstance.id) and (listInstance.expired == None) and
(imageInstance.fkimagelistinstance != None) and (imagedef.latest !=None)):
available = 1
bimappedOutput = imagedef.cache + (available << 1)
return bimappedOutput
def display_imagedef(self,imagedef):
details = self.saSession.query(model.Subscription,
model.ImageListInstance,
model.ImageInstance,
model.SubscriptionAuth,
model.Endorser,
model.EndorserPrincible).\
filter(model.ImageListInstance.id==model.ImageInstance.fkimagelistinstance).\
filter(model.ImageDefinition.id == imagedef.id).\
filter(model.ImageInstance.fkIdentifier == model.ImageDefinition.id).\
filter(model.Subscription.id == imagedef.subscription).\
filter(model.Subscription.imagelist_latest == model.ImageListInstance.id).\
filter(model.SubscriptionAuth.id == model.ImageListInstance.sub_auth).\
filter(model.SubscriptionAuth.endorser == model.Endorser.id).\
filter(model.Endorser.id == model.EndorserPrincible.endorser)
if details.count() > 0:
for item in details:
subscription = item[0]
imagelistinstance = item[1]
imageinstance = item[2]
SubscriptionAuth = item[3]
Endorser = item[4]
EndorserPrincible = item[5]
self.info(Subscription = subscription,
ImageDefinition = imagedef,
ImageListInstance = imagelistinstance,
ImageInstance = imageinstance,
SubscriptionAuth = SubscriptionAuth,
Endorser = Endorser,
EndorserPrincible = EndorserPrincible)
return True
details = self.saSession.query(model.Subscription, model.ImageDefinition).\
filter(model.ImageDefinition.id == imagedef.id).\
filter(model.Subscription.id == imagedef.subscription)
if details.count() > 0:
for item in details:
subscription = item[0]
imagedef = item[1]
self.log.warning("Subscription '%s' needs an update." % (subscription.identifier))
self.info(Subscription = subscription,
ImageDefinition = imagedef)
return True
self.log.error("Image '%s' needs an update." % (imagedef.identifier))
return False
class output_driver_lister(output_driver_base):
def __init__(self):
output_driver_base.__init__(self)
self.log = logging.getLogger("output_driver_lister")
def list_vmcatcher_subscribe(self):
subauthq = self.saSession.query(model.Subscription).all()
for item in subauthq:
taout = trustAnchorMap[item.trustAnchor]
self.fpOutput.write ("%s\t%s\t%s\t%s\n" % (item.identifier,item.authorised,taout,item.uri))
def info(self, *args, **kwargs):
self.log.debug("info")
argSubscription = kwargs.get('Subscription', None)
if argSubscription != None:
self.info_Subscription(argSubscription)
argImageListInstance = kwargs.get('ImageListInstance', None)
if argImageListInstance != None:
self.info_ImageListInstance(argImageListInstance)
argImageInstance = kwargs.get('ImageInstance', None)
if argImageInstance != None:
self.info_ImageInstance(argImageInstance)
argImageDefinition = kwargs.get('ImageDefinition', None)
if argImageDefinition != None:
self.info_ImageDefinition(argImageDefinition)
argSubscriptionAuth = kwargs.get('SubscriptionAuth', None)
if argSubscriptionAuth != None:
self.info_SubscriptionAuth(argSubscriptionAuth)
argEndorser = kwargs.get('Endorser', None)
if argEndorser != None:
self.info_Endorser(argEndorser)
argEndorserPrincible = kwargs.get('EndorserPrincible', None)
if argEndorserPrincible != None:
self.info_EndorserPrincible(argEndorserPrincible)
def list_vmcatcher_image(self):
self.log.debug("list_vmcatcher_image")
imageDefs = self.saSession.query(model.ImageDefinition,model.Subscription).\
filter(model.Subscription.id == model.ImageDefinition.subscription)
for q_result in imageDefs:
imagedef = q_result[0]
subscription = q_result[1]
bimappedOutput = self.bitmap_vmcatcher_image(imagedef,subscription)
self.fpOutput.write("%s\t%s\t%s\n" % (imagedef.identifier,bimappedOutput,subscription.identifier))
def list_vmcatcher_endorser_cred(self):
self.log.debug("list_vmcatcher_endorser_cred")
allendorsers = self.saSession.query(model.Endorser).all()
for endorser in allendorsers:
EndId = str(endorser.identifier)
subauthq = self.saSession.query(model.Endorser,model.EndorserPrincible).\
filter(model.Endorser.id==model.EndorserPrincible.endorser).\
filter(model.Endorser.identifier==EndId)
for item in subauthq:
endorser = item[0]
princible = item[1]
self.fpOutput.write ("'%s'\t'%s'\t'%s'\n" % (endorser.identifier,princible.hv_dn,princible.hv_ca))
def list_vmcatcher_endorser_link(self):
self.log.debug("list_vmcatcher_endorser_link")
allLinks = self.saSession.query(model.Subscription,model.Endorser,model.SubscriptionAuth).\
filter(model.Endorser.id==model.SubscriptionAuth.endorser).\
filter(model.Subscription.id==model.SubscriptionAuth.subscription)
        for sub, endorser, subauth in allLinks:
            self.fpOutput.write ("'%s'\t'%s'\t'%s'\n" % (endorser.identifier, sub.identifier, subauth.authorised))
def display_subscription(self,subscription):
return self.info_Subscription(subscription)
def display_endorser(self,endorser):
self.log.debug("display_endorser")
self.fpOutput.write ("endorser.dc:identifier=%s\n" % (endorser.identifier))
if len(endorser.princibles) == 0:
self.log.warning("endorser '%s' has no princibles" % (endorser))
return False
for princible in endorser.princibles:
self.fpOutput.write("endorser.hv:dn=%s\n" % (princible.hv_dn))
self.fpOutput.write("endorser.hv:ca=%s\n" % (princible.hv_ca))
for subauth in endorser.subscriptionauth:
#self.fpOutput.write("subauth.authorised=%s\n" % (subauth.authorised))
subscription_query = self.saSession.query(model.Subscription).\
filter(model.Subscription.id == subauth.subscription)
for subscription in subscription_query:
self.display_subscription(subscription)
def info_ImageListInstance(self,argImageListInstance):
self.log.debug("info_ImageListInstance")
        if argImageListInstance.expired is None:
            self.fpOutput.write('imagelist.expired=False\n')
        else:
            self.fpOutput.write('imagelist.vmcatcher.dc:date:expired=%s\n' % (argImageListInstance.expired.strftime(time_format_definition)))
            self.fpOutput.write('imagelist.expired=True\n')
self.fpOutput.write ('imagelist.dc:date:imported=%s\n' % (argImageListInstance.imported.strftime(time_format_definition)))
self.fpOutput.write ('imagelist.dc:date:created=%s\n' % (argImageListInstance.created.strftime(time_format_definition)))
self.fpOutput.write ('imagelist.dc:date:expires=%s\n' % (argImageListInstance.expires.strftime(time_format_definition)))
return True
def info_Subscription(self,subscription):
self.log.debug("info_Subscription")
        # imagelist.dc:identifier kept for backwards compatibility with 0.2.X versions
self.fpOutput.write ('imagelist.dc:identifier=%s\n' % (subscription.identifier))
self.fpOutput.write ('subscription.dc:identifier=%s\n' % (subscription.identifier))
self.fpOutput.write ('subscription.dc:description=%s\n' % (subscription.description))
self.fpOutput.write ('subscription.sl:authorised=%s\n' % (subscription.authorised))
self.fpOutput.write ('subscription.hv:uri=%s\n' % (subscription.uri))
self.fpOutput.write ('subscription.hv:uri.trustAnchor=%s\n' % (trustAnchorMap[subscription.trustAnchor]))
if (subscription.userName != None):
if len(subscription.userName) > 0:
self.fpOutput.write ('subscription.hv:uri.username=%s\n' % (subscription.userName))
if (subscription.password != None):
if len(subscription.password) > 0:
self.fpOutput.write ('subscription.hv:uri.password=%s\n' % (subscription.password))
if subscription.updated:
self.fpOutput.write ('subscription.dc:date:updated=%s\n' % (subscription.updated.strftime(time_format_definition)))
else:
self.fpOutput.write ('subscription.dc:date:updated=%s\n'% (False))
self.fpOutput.write ('subscription.updateMode=%s\n' % (subscription.updateMode))
return True
def info_ImageInstance(self,imageInstance):
self.fpOutput.write ('image.dc:description=%s\n' % (imageInstance.description))
self.fpOutput.write ('image.dc:title=%s\n' % (imageInstance.title))
self.fpOutput.write ('image.hv:hypervisor=%s\n' % (imageInstance.hypervisor))
self.fpOutput.write ('image.hv:size=%s\n' % (imageInstance.size))
self.fpOutput.write ('image.hv:uri=%s\n' % (imageInstance.uri))
self.fpOutput.write ('image.hv:version=%s\n' % (imageInstance.version))
self.fpOutput.write ('image.sl:arch=%s\n' % (imageInstance.hypervisor))
self.fpOutput.write ('image.sl:checksum:sha512=%s\n' % (imageInstance.sha512))
self.fpOutput.write ('image.sl:comments=%s\n' % (imageInstance.comments))
self.fpOutput.write ('image.sl:os=%s\n' % (imageInstance.os))
self.fpOutput.write ('image.sl:osversion=%s\n' % (imageInstance.osversion))
def info_ImageDefinition(self,imageDef):
self.fpOutput.write ('imagedef.dc:identifier=%s\n' % (imageDef.identifier))
self.fpOutput.write ('imagedef.cache=%s\n' % (imageDef.cache))
def info_SubscriptionAuth(self,SubscriptionAuth):
return "link"
def info_Endorser(self,Endorser):
output = {"identifier" : Endorser.identifier
}
return output
def info_EndorserPrincible(self,EndorserPrincible):
self.fpOutput.write ('endorser:hv:subject=%s\n' % (EndorserPrincible.hv_dn))
self.fpOutput.write ('endorser:hv:issuer=%s\n' %(EndorserPrincible.hv_ca))
class output_driver_display_message(output_driver_base):
def __init__(self):
output_driver_base.__init__(self)
self.log = logging.getLogger("output_driver_display_message")
def display_ImageListInstance(self,imagelist):
smimeProcessor = smimeX509validation(self.x509anchor)
try:
smimeProcessor.Process(str(imagelist.data))
except smimeX509ValidationError as expt:
self.log.error("Failed to validate text for '%s' produced error '%s'" % (imagelist,expt))
return False
if not smimeProcessor.verified:
self.log.error("Failed to validate text for '%s'" % (imagelist))
return False
self.fpOutput.write (smimeProcessor.InputDaraStringIO.getvalue())
return True
def display_subscription(self,subscription):
subauthq = self.saSession.query(model.ImageListInstance).\
filter(model.Subscription.id == subscription.id).\
filter(model.ImageInstance.fkIdentifier == model.ImageDefinition.id).\
filter(model.ImageInstance.fkimagelistinstance == model.ImageListInstance.id).\
filter(model.Subscription.imagelist_latest == model.ImageListInstance.id)
sub = subauthq.first()
return self.display_ImageListInstance(sub)
class output_driver_display_metadata(output_driver_base):
def __init__(self):
output_driver_base.__init__(self)
self.log = logging.getLogger("output_driver_display_metadata")
def display_subscription(self,subscription):
query_imagelistInstance = self.saSession.query(model.ImageListInstance).\
filter(model.ImageListInstance.id==subscription.imagelist_latest)
if query_imagelistInstance.count() > 0:
for imagelistInstance in query_imagelistInstance:
self.info(Subscription=subscription,ImageListInstance=imagelistInstance)
return
self.info(Subscription=subscription)
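# Usage sketch (illustrative; the attributes below are set by the caller, not
# by this module): a driver is given an open file object as ``fpOutput``, an
# SQLAlchemy session as ``saSession`` and a trust anchor as ``x509anchor``,
# then one of its ``list_*`` / ``display_*`` methods is called, e.g.
#
#     driver = output_driver_lister()
#     driver.fpOutput = sys.stdout
#     driver.saSession = session
#     driver.x509anchor = anchor
#     driver.list_vmcatcher_image()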
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
from __future__ import absolute_import
from __future__ import print_function
import os
import platform
import sys
import warnings
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.test import test
def get_version():
global_names = {}
exec( # pylint: disable=exec-used
open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'apache_beam/version.py')
).read(),
global_names
)
return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = 'dev@beam.apache.org'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
warnings.warn(
"You are using version {0} of pip. " \
"However, version {1} is recommended.".format(
_PIP_VERSION, REQUIRED_PIP_VERSION
)
)
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
_CYTHON_VERSION = get_distribution('cython').version
if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
warnings.warn(
"You are using version {0} of cython. " \
"However, version {1} is recommended.".format(
_CYTHON_VERSION, REQUIRED_CYTHON_VERSION
)
)
except DistributionNotFound:
# do nothing if Cython is not installed
pass
# Currently all compiled modules are optional (for performance only).
if platform.system() == 'Windows':
# Windows doesn't always provide int64_t.
cythonize = lambda *args, **kwargs: []
else:
try:
# pylint: disable=wrong-import-position
from Cython.Build import cythonize
except ImportError:
cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
'avro>=1.8.1,<2.0.0; python_version < "3.0"',
'avro-python3>=1.8.1,<2.0.0; python_version >= "3.0"',
'crcmod>=1.7,<2.0',
'dill>=0.2.9,<0.4.0',
'fastavro>=0.21.4,<0.22',
'funcsigs>=1.0.2,<2; python_version < "3.0"',
'future>=0.16.0,<1.0.0',
'futures>=3.2.0,<4.0.0; python_version < "3.0"',
'grpcio>=1.12.1,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<=0.12.0',
'mock>=1.0.1,<3.0.0',
'pymongo>=3.8.0,<4.0.0',
'oauth2client>=2.0.1,<4',
'protobuf>=3.5.0.post1,<4',
# [BEAM-6287] pyarrow is not supported on Windows for Python 2
('pyarrow>=0.11.1,<0.15.0; python_version >= "3.0" or '
'platform_system != "Windows"'),
'pydot>=1.2.0,<2',
'python-dateutil>=2.8.0,<3',
'pytz>=2018.3',
# [BEAM-5628] Beam VCF IO is not supported in Python 3.
'pyvcf>=0.6.8,<0.7.0; python_version < "3.0"',
'pyyaml>=3.12,<4.0.0',
'typing>=3.6.0,<3.7.0; python_version < "3.5.0"',
]
REQUIRED_TEST_PACKAGES = [
'nose>=1.3.7',
'nose_xunitmp>=0.4.1',
'numpy>=1.14.3,<2',
'pandas>=0.23.4,<0.25',
'parameterized>=0.6.0,<0.7.0',
'pyhamcrest>=1.9,<2.0',
'tenacity>=5.0.2,<6.0',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<4',
'google-apitools>=0.5.28,<0.5.29',
# [BEAM-4543] googledatastore is not supported in Python 3.
'proto-google-cloud-datastore-v1>=0.90.0,<=0.90.4; python_version < "3.0"',
# [BEAM-4543] googledatastore is not supported in Python 3.
'googledatastore>=7.0.1,<7.1; python_version < "3.0"',
'google-cloud-datastore>=1.7.1,<1.8.0',
'google-cloud-pubsub>=0.39.0,<0.40.0',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<1.18.0',
'google-cloud-core>=0.28.1,<2',
'google-cloud-bigtable>=0.31.1,<0.33.0',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
try:
# See https://issues.apache.org/jira/browse/BEAM-2366
# pylint: disable=wrong-import-position
import gen_protos
class cmd(original_cmd, object):
def run(self):
gen_protos.generate_proto_files()
super(cmd, self).run()
return cmd
except ImportError:
warnings.warn("Could not import gen_protos, skipping proto generation.")
return original_cmd
python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
if sys.version_info[0] == 3:
warnings.warn(
'Some syntactic constructs of Python 3 are not yet fully supported by '
'Apache Beam.')
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={'apache_beam': [
'*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', 'testing/data/*.yaml',
'portability/api/*.yaml']},
ext_modules=cythonize([
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
test_suite='nose.collector',
tests_require=REQUIRED_TEST_PACKAGES,
extras_require={
'docs': ['Sphinx>=1.5.2,<2.0'],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
entry_points={
'nose.plugins.0.10': [
'beam_test_plugin = test_config:BeamTestPlugin',
]},
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'sdist': generate_protos_first(sdist),
'test': generate_protos_first(test),
},
)
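# The optional dependency groups declared in ``extras_require`` above are
# selected at install time with the standard setuptools extras syntax, for
# example ``pip install apache-beam[gcp,test]``.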
|
|
"""
Django settings for winthrop project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# default to False, override in local settings
DEBUG = False
ALLOWED_HOSTS = []
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# Additional locations of static files
STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'sitemedia'),
]
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Compressor
# https://django-compressor.readthedocs.io/en/latest/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'compressor_toolkit.precompilers.SCSSCompiler'),
('module', 'compressor_toolkit.precompilers.ES6Compiler')
)
# Compressor-toolkit
# https://github.com/kottenator/django-compressor-toolkit
COMPRESS_ES6_COMPILER_CMD = '''
export NODE_PATH="{paths}" && \
{browserify_bin} "{infile}" -o "{outfile}" \
-t [ "{node_modules}/babelify" --presets [ "{node_modules}/@babel/preset-env" ] ]
'''
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
# Application definition
INSTALLED_APPS = [
'dal', # django-autocomplete-light (must be before grappelli)
'dal_select2',
'grappelli',
'compressor',
'compressor_toolkit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sites',
'django_cas_ng',
'pucas',
'djiffy',
'annotator_store',
# local apps
'winthrop.common',
'winthrop.places',
'winthrop.people',
'winthrop.books',
'winthrop.footnotes',
'winthrop.annotation',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas_ng.backends.CASBackend',
)
ROOT_URLCONF = 'winthrop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [
os.path.join(BASE_DIR, "templates")
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'winthrop.context_extras',
'winthrop.context_processors.template_settings',
],
},
},
]
WSGI_APPLICATION = 'winthrop.wsgi.application'
GRAPPELLI_ADMIN_TITLE = 'Winthrop Admin'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'sitemedia'),
]
SITE_ID = 1
# pucas configuration that is not expected to change across deploys
# and does not reference local server configurations or fields
PUCAS_LDAP = {
# basic user profile attributes
'ATTRIBUTES': ['givenName', 'sn', 'mail'],
'ATTRIBUTE_MAP': {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail',
},
}
ANNOTATOR_ANNOTATION_MODEL = 'annotation.Annotation'
##################
# LOCAL SETTINGS #
##################
# (local settings import logic adapted from mezzanine)
# Allow any settings to be defined in local_settings.py, which should be
# ignored by your version control system, allowing settings to be defined
# per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
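# A minimal local_settings.py sketch (all values below are placeholders, not
# part of this project's configuration):
#
#     DEBUG = True
#     SECRET_KEY = 'replace-me'
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'winthrop',
#         }
#     }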
f = os.path.join(BASE_DIR, "winthrop", "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "winthrop.local_settings"
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
# if in debug mode and django-debug-toolbar is available, add to installed apps
if DEBUG:
try:
import debug_toolbar
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
except ImportError:
pass
|
|
import math
import numpy as np
from scipy import ndimage
from ..transform import resize
from ..util import img_as_float
def _smooth(image, sigma, mode, cval):
"""Return image with each channel smoothed by the Gaussian filter."""
smoothed = np.empty(image.shape, dtype=np.double)
# apply Gaussian filter to all dimensions independently
if image.ndim == 3:
for dim in range(image.shape[2]):
ndimage.gaussian_filter(image[..., dim], sigma,
output=smoothed[..., dim],
mode=mode, cval=cval)
else:
ndimage.gaussian_filter(image, sigma, output=smoothed,
mode=mode, cval=cval)
return smoothed
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Smooth and then downsample image.
Parameters
----------
image : array
Input image.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
out : array
Smoothed and downsampled float image.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
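    Examples
    --------
    A minimal sketch (assuming this function is exposed as
    ``skimage.transform.pyramid_reduce``)::

        import numpy as np
        from skimage.transform import pyramid_reduce
        small = pyramid_reduce(np.zeros((8, 8)), downscale=2)
        # small.shape == (4, 4)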
"""
_check_factor(downscale)
image = img_as_float(image)
rows = image.shape[0]
cols = image.shape[1]
out_rows = math.ceil(rows / float(downscale))
out_cols = math.ceil(cols / float(downscale))
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
smoothed = _smooth(image, sigma, mode, cval)
out = resize(smoothed, (out_rows, out_cols), order=order,
mode=mode, cval=cval)
return out
def pyramid_expand(image, upscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Upsample and then smooth image.
Parameters
----------
image : array
Input image.
upscale : float, optional
Upscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * upscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of upsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
out : array
Upsampled and smoothed float image.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
"""
_check_factor(upscale)
image = img_as_float(image)
rows = image.shape[0]
cols = image.shape[1]
out_rows = math.ceil(upscale * rows)
out_cols = math.ceil(upscale * cols)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * upscale / 6.0
resized = resize(image, (out_rows, out_cols), order=order,
mode=mode, cval=cval)
out = _smooth(resized, sigma, mode, cval)
return out
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Yield images of the Gaussian pyramid formed by the input image.
Recursively applies the `pyramid_reduce` function to the image, and yields
the downscaled images.
Note that the first image of the pyramid will be the original, unscaled
image. The total number of images is `max_layer + 1`. In case all layers
are computed, the last image is either a one-pixel image or the image where
the reduction does not change its shape.
Parameters
----------
image : array
Input image.
max_layer : int
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = img_as_float(image)
layer = 0
rows = image.shape[0]
cols = image.shape[1]
prev_layer_image = image
yield image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order,
mode, cval)
prev_rows = rows
prev_cols = cols
prev_layer_image = layer_image
rows = layer_image.shape[0]
cols = layer_image.shape[1]
# no change to previous pyramid layer
if prev_rows == rows and prev_cols == cols:
break
yield layer_image
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Yield images of the laplacian pyramid formed by the input image.
Each layer contains the difference between the downsampled and the
downsampled, smoothed image::
layer = resize(prev_layer) - smooth(resize(prev_layer))
Note that the first image of the pyramid will be the difference between the
original, unscaled image and its smoothed version. The total number of
images is `max_layer + 1`. In case all layers are computed, the last image
is either a one-pixel image or the image where the reduction does not
change its shape.
Parameters
----------
image : array
Input image.
max_layer : int
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
.. [2] http://sepwww.stanford.edu/~morgan/texturematch/paper_html/node3.html
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = img_as_float(image)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
layer = 0
rows = image.shape[0]
cols = image.shape[1]
smoothed_image = _smooth(image, sigma, mode, cval)
yield image - smoothed_image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
out_rows = math.ceil(rows / float(downscale))
out_cols = math.ceil(cols / float(downscale))
resized_image = resize(smoothed_image, (out_rows, out_cols),
order=order, mode=mode, cval=cval)
smoothed_image = _smooth(resized_image, sigma, mode, cval)
prev_rows = rows
prev_cols = cols
rows = resized_image.shape[0]
cols = resized_image.shape[1]
# no change to previous pyramid layer
if prev_rows == rows and prev_cols == cols:
break
yield resized_image - smoothed_image
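# A minimal usage sketch (assuming these generators are re-exported as
# ``skimage.transform.pyramid_gaussian`` / ``pyramid_laplacian``):
#
#     import numpy as np
#     from skimage.transform import pyramid_gaussian
#     image = np.random.rand(64, 64)
#     layers = list(pyramid_gaussian(image, downscale=2))
#     # layers[0] is the original image; each further layer halves both sides
#     # until the reduction no longer changes the shape.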
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: Abstract RPC implementation
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**TODO:**
* "system" methods (list, help, ...)
"""
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# iPOPO decorators
from pelix.ipopo.decorators import Validate, Invalidate, Property, Provides
# Pelix constants
import pelix.constants as constants
import pelix.remote.beans
from pelix.remote import RemoteServiceError
# Standard library
import logging
import threading
import uuid
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@Provides(pelix.remote.SERVICE_EXPORT_PROVIDER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED)
class AbstractRpcServiceExporter(object):
"""
Abstract Remote Services exporter
"""
def __init__(self):
"""
Sets up the exporter
"""
# Bundle context
self._context = None
# Framework UID
self._framework_uid = None
# Handled configurations
self._kinds = None
# Exported services: Name -> ExportEndpoint
self.__endpoints = {}
# Thread safety
self.__lock = threading.Lock()
def dispatch(self, method, params):
"""
Called by the servlet: calls the method of an exported service
"""
# Get the best matching name
matching = None
len_found = 0
for name in self.__endpoints:
if len(name) > len_found and method.startswith(name + "."):
                # Better matching endpoint name (longer than the previous one)
matching = name
len_found = len(matching)
if matching is None:
# No end point name match
raise RemoteServiceError("No end point found for: {0}"
.format(method))
# Extract the method name. (+1 for the trailing dot)
method_name = method[len_found + 1:]
# Get the service
try:
service = self.__endpoints[matching].instance
except KeyError:
raise RemoteServiceError("Unknown endpoint: {0}".format(matching))
# Get the method
method_ref = getattr(service, method_name, None)
if method_ref is None:
raise RemoteServiceError("Unknown method {0}".format(method))
# Call it (let the errors be propagated)
return method_ref(*params)
def handles(self, configurations):
"""
Checks if this provider handles the given configuration types
:param configurations: Configuration types
"""
if configurations is None or configurations == '*':
# 'Matches all'
return True
return bool(set(configurations).intersection(self._kinds))
def export_service(self, svc_ref, name, fw_uid):
"""
Prepares an export endpoint
:param svc_ref: Service reference
:param name: Endpoint name
:param fw_uid: Framework UID
:return: An ExportEndpoint bean
:raise NameError: Already known name
:raise BundleException: Error getting the service
"""
with self.__lock:
# Prepare extra properties
extra_props = self.make_endpoint_properties(svc_ref, name, fw_uid)
try:
# Check if the name has been changed by the exporter
name = extra_props[pelix.remote.PROP_ENDPOINT_NAME]
except KeyError:
# Name not updated
pass
if name in self.__endpoints:
# Already known end point
raise NameError("Already known end point {0} for kinds {1}"
.format(name, ','.join(self._kinds)))
            # Get the service (let it raise a BundleException if any)
service = self._context.get_service(svc_ref)
# Prepare the export endpoint
try:
endpoint = pelix.remote.beans.ExportEndpoint(str(uuid.uuid4()),
fw_uid,
self._kinds,
name,
svc_ref,
service,
extra_props)
except ValueError:
# No specification to export (specifications filtered, ...)
return None
# Store information
self.__endpoints[name] = endpoint
# Return the endpoint bean
return endpoint
def update_export(self, endpoint, new_name, old_properties):
"""
Updates an export endpoint
:param endpoint: An ExportEndpoint bean
:param new_name: Future endpoint name
:param old_properties: Previous properties
:raise NameError: Rename refused
"""
with self.__lock:
try:
if self.__endpoints[new_name] is not endpoint:
# Reject the new name, as an endpoint uses it
raise NameError("New name of {0} already used: {1}"
.format(endpoint.name, new_name))
else:
# Name hasn't changed
pass
except KeyError:
# Update the name of the endpoint
old_name = endpoint.name
endpoint.rename(new_name)
# No endpoint matches the new name: update the storage
self.__endpoints[new_name] = self.__endpoints.pop(old_name)
def unexport_service(self, endpoint):
"""
Deletes an export endpoint
:param endpoint: An ExportEndpoint bean
"""
with self.__lock:
# Clean up storage
del self.__endpoints[endpoint.name]
# Release the service
svc_ref = endpoint.reference
self._context.unget_service(svc_ref)
def make_endpoint_properties(self, svc_ref, name, fw_uid):
"""
Prepare properties for the ExportEndpoint to be created
:param svc_ref: Service reference
:param name: Endpoint name
:param fw_uid: Framework UID
:return: A dictionary of extra endpoint properties
"""
raise NotImplementedError("make_endpoint_properties() not "
"implemented by class {0}"
.format(type(self).__name__))
@Validate
def validate(self, context):
"""
Component validated
"""
# Store the context
self._context = context
# Store the framework UID
self._framework_uid = context.get_property(constants.FRAMEWORK_UID)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# Clean up the storage
self.__endpoints.clear()
# Clean up members
self._context = None
self._framework_uid = None
# ------------------------------------------------------------------------------
@Provides(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED)
class AbstractRpcServiceImporter(object):
"""
Abstract Remote Services importer
"""
def __init__(self):
"""
        Sets up the importer
"""
# Bundle context
self._context = None
# Framework UID
self._framework_uid = None
# Component properties
self._kinds = None
# Registered services (endpoint UID -> ServiceReference)
self.__registrations = {}
self.__lock = threading.Lock()
def endpoint_added(self, endpoint):
"""
An end point has been imported
"""
configs = set(endpoint.configurations)
if '*' not in configs and not configs.intersection(self._kinds):
# Not for us
return
with self.__lock:
if endpoint.uid in self.__registrations:
# Already known endpoint
return
# Prepare a proxy
svc = self.make_service_proxy(endpoint)
if svc is None:
return
# Register it as a service
svc_reg = self._context.register_service(endpoint.specifications,
svc, endpoint.properties)
# Store references
self.__registrations[endpoint.uid] = svc_reg
def endpoint_updated(self, endpoint, old_properties):
"""
An end point has been updated
"""
with self.__lock:
try:
# Update service registration properties
self.__registrations[endpoint.uid].set_properties(
endpoint.properties)
except KeyError:
# Unknown end point
return
def endpoint_removed(self, endpoint):
"""
An end point has been removed
"""
with self.__lock:
try:
# Pop reference and unregister the service
self.__registrations.pop(endpoint.uid).unregister()
except KeyError:
# Unknown end point
return
else:
# Clear the proxy
self.clear_service_proxy(endpoint)
def make_service_proxy(self, endpoint):
"""
Creates the proxy for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
:return: A service proxy
"""
raise NotImplementedError("make_service_proxy() not implemented by "
"class {0}".format(type(self).__name__))
def clear_service_proxy(self, endpoint):
"""
Destroys the proxy made for the given ImportEndpoint
:param endpoint: An ImportEndpoint bean
"""
raise NotImplementedError("clear_service_proxy() not implemented by "
"class {0}".format(type(self).__name__))
@Validate
def validate(self, context):
"""
Component validated
"""
# Store the bundle context and the framework UID
self._context = context
self._framework_uid = context.get_property(constants.FRAMEWORK_UID)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# Unregister all of our services
for svc_reg in self.__registrations.values():
svc_reg.unregister()
# Clean up members
self.__registrations.clear()
self._context = None
self._framework_uid = None
|
|
# Copyright (c) 2016 IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Common utilities for Castellan.
"""
from castellan.common import exception
from castellan.common import utils
from castellan.tests import base
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_context import context
CONF = cfg.CONF
class TestUtils(base.TestCase):
def setUp(self):
super(TestUtils, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
CONF.register_opts(utils.credential_opts, group=utils.OPT_GROUP)
def test_token_credential(self):
token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
self.config_fixture.config(
auth_type='token',
token=token_value,
group='key_manager'
)
token_context = utils.credential_factory(conf=CONF)
token_context_class = token_context.__class__.__name__
self.assertEqual('Token', token_context_class)
self.assertEqual(token_value, token_context.token)
def test_token_credential_with_context(self):
token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
ctxt = context.RequestContext(auth_token=token_value)
self.config_fixture.config(
auth_type='token',
group='key_manager'
)
token_context = utils.credential_factory(conf=CONF, context=ctxt)
token_context_class = token_context.__class__.__name__
self.assertEqual('Token', token_context_class)
self.assertEqual(token_value, token_context.token)
def test_token_credential_config_override_context(self):
ctxt_token_value = '00000000000000000000000000000000'
ctxt = context.RequestContext(auth_token=ctxt_token_value)
conf_token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
self.config_fixture.config(
auth_type='token',
token=conf_token_value,
group='key_manager'
)
token_context = utils.credential_factory(conf=CONF, context=ctxt)
token_context_class = token_context.__class__.__name__
self.assertEqual('Token', token_context_class)
self.assertEqual(conf_token_value, token_context.token)
def test_token_credential_exception(self):
self.config_fixture.config(
auth_type='token',
group='key_manager'
)
self.assertRaises(exception.InsufficientCredentialDataError,
utils.credential_factory,
CONF)
def test_password_credential(self):
password_value = 'p4ssw0rd'
self.config_fixture.config(
auth_type='password',
password=password_value,
group='key_manager'
)
password_context = utils.credential_factory(conf=CONF)
password_context_class = password_context.__class__.__name__
self.assertEqual('Password', password_context_class)
self.assertEqual(password_value, password_context.password)
def test_keystone_token_credential(self):
token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
self.config_fixture.config(
auth_type='keystone_token',
token=token_value,
group='key_manager'
)
ks_token_context = utils.credential_factory(conf=CONF)
ks_token_context_class = ks_token_context.__class__.__name__
self.assertEqual('KeystoneToken', ks_token_context_class)
self.assertEqual(token_value, ks_token_context.token)
def test_keystone_token_credential_with_context(self):
token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
ctxt = context.RequestContext(auth_token=token_value)
self.config_fixture.config(
auth_type='keystone_token',
group='key_manager'
)
ks_token_context = utils.credential_factory(conf=CONF, context=ctxt)
ks_token_context_class = ks_token_context.__class__.__name__
self.assertEqual('KeystoneToken', ks_token_context_class)
self.assertEqual(token_value, ks_token_context.token)
def test_keystone_token_credential_config_override_context(self):
        ctxt_token_value = '00000000000000000000000000000000'
ctxt = context.RequestContext(auth_token=ctxt_token_value)
conf_token_value = 'ec9799cd921e4e0a8ab6111c08ebf065'
self.config_fixture.config(
auth_type='keystone_token',
token=conf_token_value,
group='key_manager'
)
ks_token_context = utils.credential_factory(conf=CONF, context=ctxt)
ks_token_context_class = ks_token_context.__class__.__name__
self.assertEqual('KeystoneToken', ks_token_context_class)
self.assertEqual(conf_token_value, ks_token_context.token)
def test_keystone_token_credential_exception(self):
self.config_fixture.config(
auth_type='keystone_token',
group='key_manager'
)
self.assertRaises(exception.InsufficientCredentialDataError,
utils.credential_factory,
CONF)
def test_keystone_password_credential(self):
password_value = 'p4ssw0rd'
self.config_fixture.config(
auth_type='keystone_password',
password=password_value,
group='key_manager'
)
ks_password_context = utils.credential_factory(conf=CONF)
ks_password_context_class = ks_password_context.__class__.__name__
self.assertEqual('KeystonePassword', ks_password_context_class)
self.assertEqual(password_value, ks_password_context.password)
def test_oslo_context_to_keystone_token(self):
auth_token_value = '16bd612f28ec479b8ffe8e124fc37b43'
tenant_value = '00c6ef5ad2984af2acd7d42c299935c0'
ctxt = context.RequestContext(
auth_token=auth_token_value,
tenant=tenant_value)
ks_token_context = utils.credential_factory(context=ctxt)
ks_token_context_class = ks_token_context.__class__.__name__
self.assertEqual('KeystoneToken', ks_token_context_class)
self.assertEqual(auth_token_value, ks_token_context.token)
self.assertEqual(tenant_value, ks_token_context.project_id)
def test_invalid_auth_type(self):
self.config_fixture.config(
auth_type='hotdog',
group='key_manager'
)
self.assertRaises(exception.AuthTypeInvalidError,
utils.credential_factory,
conf=CONF)
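# Summary of the behaviour exercised above (illustrative, not a test case):
# ``utils.credential_factory(conf=CONF)`` builds the credential named by
# ``[key_manager] auth_type`` from the configured values; when a token is not
# configured, a passed ``RequestContext``'s ``auth_token`` is used instead,
# and configured values take precedence over the context; calling the factory
# with only a context yields a KeystoneToken built from that context.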
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools as it
import sys
import traceback
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DynamicLayer1(base_layer.Layer):
def __init__(self, dynamic=False, **kwargs):
super(DynamicLayer1, self).__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return math_ops.sqrt(inputs)
else:
return math_ops.square(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class DynamicLayer2(base_layer.Layer):
def __init__(self, dynamic=False, **kwargs):
super(DynamicLayer2, self).__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
samples = []
for sample in inputs:
samples.append(math_ops.square(sample))
return array_ops.stack(samples, axis=0)
def compute_output_shape(self, input_shape):
return input_shape
class InvalidLayer(base_layer.Layer):
def call(self, inputs):
raise ValueError('You did something wrong!')
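# DynamicLayer1/DynamicLayer2 above rely on `dynamic=True`: a dynamic layer's
# `call` is executed eagerly, so Python control flow (the `if`/`for` above) is
# allowed, at the cost of the layer not being usable inside a static graph.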
class BaseLayerTest(keras_parameterized.TestCase):
@parameterized.parameters(DynamicLayer1, DynamicLayer2)
def test_dynamic_layer_in_functional_model_in_graph_mode(self, layer_class):
with context.graph_mode():
inputs = keras.Input((3,))
# Works when `dynamic=True` is declared.
outputs = layer_class(dynamic=True)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
# But then you cannot run the model since you're in a graph scope.
with self.assertRaisesRegexp(
ValueError, 'You must enable eager execution'):
model.compile(rmsprop.RMSprop(0.001), loss='mse')
# Fails when `dynamic=True` not declared.
with self.assertRaisesRegexp(
TypeError, 'attempting to use Python control flow'):
_ = layer_class()(inputs)
@parameterized.parameters(DynamicLayer1, DynamicLayer2)
def test_dynamic_layer_in_functional_model_in_eager_mode(self, layer_class):
inputs = keras.Input((3,))
# Fails when `dynamic=True` not declared.
with self.assertRaisesRegexp(
TypeError, 'attempting to use Python control flow'):
_ = layer_class()(inputs)
# Works when `dynamic=True` is declared.
outputs = layer_class(dynamic=True)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_nested_dynamic_layers_in_eager_mode(self):
inputs = keras.Input((3,))
outputs = DynamicLayer1(dynamic=True)(inputs)
inner_model = keras.Model(inputs, outputs)
self.assertEqual(inner_model.dynamic, True)
inputs = keras.Input((3,))
x = DynamicLayer2(dynamic=True)(inputs)
outputs = inner_model(x)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_layers_in_sequential_model(self):
# Without input_shape argument
model = keras.Sequential([DynamicLayer1(dynamic=True),
keras.layers.Dense(3),
DynamicLayer2(dynamic=True)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
# With input_shape argument
model = keras.Sequential([DynamicLayer1(dynamic=True, input_shape=(3,)),
DynamicLayer2(dynamic=True)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_layers_in_subclassed_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.layer1 = DynamicLayer1(dynamic=True)
def call(self, inputs):
return self.layer1(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_subclassed_model_no_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs, [None])
def test_dynamic_subclassed_model_with_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape(
tuple(input_shape[:-1].as_list()) + (3,))
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs[0].shape.as_list(), [None, 3])
@test_util.run_in_graph_and_eager_modes
def test_invalid_forward_pass(self):
inputs = keras.Input((3,))
with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
_ = InvalidLayer()(inputs)
@keras_parameterized.run_with_all_model_types
@test_util.run_in_graph_and_eager_modes
def test_build_with_numpy_data(self):
model_layers = [
keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model(np.zeros((2, 4), dtype='float32'))
self.assertTrue(model.built)
@test_util.run_in_graph_and_eager_modes
def test_default_add_weight(self):
class TestLayer(keras.layers.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.default_weight = self.add_weight()
self.weight_without_name = self.add_weight(shape=(3, 4))
self.regularized_weight_without_name = self.add_weight(
shape=(3, 4), regularizer='l2')
layer = TestLayer()
self.assertEqual(layer.default_weight.shape.as_list(), [])
self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
self.assertEqual(layer.default_weight.dtype.name, 'float32')
self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
self.assertEqual(len(layer.losses), 1)
if not context.executing_eagerly():
# Cannot access tensor.name in eager execution.
self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)
def test_learning_phase_freezing_for_layers(self):
# This test is only meant to run in graph functions mode (ambient eager).
# In forced eager, `model.predict` ignores the global learning phase
# and just uses training=False. TODO(fchollet): consider unifying the
# behaviors.
class LearningPhaseLayer(keras.layers.Layer):
def call(self, inputs):
return keras.backend.in_train_phase(
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
return np.sum(model.predict(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
# Test scope.
with keras.backend.learning_phase_scope(1):
self.assertEqual(get_learning_phase_value(), 1)
# The effects of the scope end after exiting it.
self.assertEqual(get_learning_phase_value(), 0)
# Test setting.
keras.backend.set_learning_phase(1)
self.assertEqual(get_learning_phase_value(), 1)
keras.backend.set_learning_phase(0)
self.assertEqual(get_learning_phase_value(), 0)
# Cannot be enabled with `run_eagerly=True`, see b/123904578
@test_util.run_in_graph_and_eager_modes
def test_layer_can_return_variable(self):
class ComputeSum(keras.layers.Layer):
def __init__(self):
super(ComputeSum, self).__init__()
self.total = variables.Variable(
initial_value=array_ops.zeros((1, 1)), trainable=False)
if not context.executing_eagerly():
keras.backend.get_session().run(self.total.initializer)
def call(self, inputs):
self.total.assign_add(inputs)
return self.total
inputs = keras.Input(shape=(1,))
model = keras.Model(inputs, ComputeSum()(inputs))
model.predict(np.ones((1, 1)))
def _get_layer_with_training_arg(self):
class TrainingLayer(keras.layers.Layer):
"""A layer with a `training` argument in a defuned `call`."""
@def_function.function
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
return tf_utils.smart_cond(training,
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
return TrainingLayer()
@keras_parameterized.run_with_all_model_types
# b/124459427: can't test with `run_eagerly=True` for now.
@test_util.run_in_graph_and_eager_modes
def test_training_arg_in_defun(self):
layer = self._get_layer_with_training_arg()
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 1.)
loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(loss, 0.)
# Test that the argument injection performed in `call` is not active
# when the argument is passed explicitly.
layer = self._get_layer_with_training_arg()
inputs = keras.Input(shape=(1,))
# Pass `training` by name
outputs = layer(inputs, training=False)
model = keras.Model(inputs, outputs)
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 0.)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_raw_variable_assignment(self):
class RawVariableLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super(RawVariableLayer, self).__init__(**kwargs)
# Test variables in nested structure.
self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]
def call(self, inputs):
return inputs * self.var_list[0] * self.var_list[1]['a']
model = testing_utils.get_model_from_layers([RawVariableLayer()],
input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
class SymbolicSupportTest(test.TestCase):
def test_using_symbolic_tensors_with_tf_ops(self):
# Single-input.
x = keras.Input((3,))
y = math_ops.square(x)
self.assertEqual(y.graph, keras.backend.get_graph())
# Multi-inputs.
x1, x2 = keras.Input((3,)), keras.Input((3,))
y = array_ops.concat([x1, x2], axis=1)
self.assertEqual(y.graph, keras.backend.get_graph())
# Mixing Keras symbolic tensors and graph tensors from the same graph works.
with keras.backend.get_graph().as_default():
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
# Creating same op type (matmul) multiple times in the Keras graph works.
x1 = keras.Input((3,))
x2 = keras.Input((3,))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
def test_mixing_eager_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = array_ops.ones((3, 3))
self.assertIsInstance(x2, ops.EagerTensor)
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
def test_mixing_numpy_arrays_and_graph_tensors(self):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = np.ones((3, 3), dtype='float32')
with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
x1 = keras.Input((3,))
x2 = array_ops.ones((3, 3))
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
x1 = keras.Input((3,))
x2 = np.ones((3, 3), dtype='float32')
y = math_ops.matmul(x1, x2)
self.assertEqual(y.graph, keras.backend.get_graph())
fn = keras.backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0],
np.matmul(x_val, y_val),
atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_reraising_exception(self):
# When layer is not dynamic, we have some pattern matching during exception
# handling to detect when the user is trying to use python control flow.
# When an exception is thrown but the pattern doesn't match, we want to
# preserve the originating stack trace. An early implementation of this
# logic lost the stack trace. We test the correct behavior here.
class TypeErrorLayer(base_layer.Layer):
def call(self, inputs):
def easily_identifiable_name():
raise TypeError('Non-matching TypeError message.')
easily_identifiable_name()
inputs = keras.Input((3,))
try:
_ = TypeErrorLayer()(inputs)
except TypeError:
tb = traceback.extract_tb(sys.exc_info()[2])
last_entry = tb[-1]
function_name = last_entry[2]
self.assertEqual(function_name, 'easily_identifiable_name')
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
def test_nested_layer_variable_tracking(self):
# Test that variables from nested sublayers are
# being tracked by subclassed layers.
class MyLayer(keras.layers.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.dense1 = keras.layers.Dense(1)
self.dense2 = keras.layers.BatchNormalization()
def build(self, input_shape):
self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
self.v2 = variables.Variable(
name='v2',
initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
trainable=False)
def call(self, inputs):
x = self.dense1(inputs) + self.dense2(inputs)
return x + self.v1 + self.v2
layer = MyLayer()
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 5)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.dense1.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 5)
layer.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 8)
self.assertEqual(
set([layer.dense1, layer.dense2, layer.v1, layer.v2]),
set([obj for unused_name, obj in layer._checkpoint_dependencies]))
def test_nested_layer_updates_losses_tracking(self):
# Test that updates and losses from nested sublayers are
# being tracked by subclassed layers.
class UpdateAndLossLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
return inputs + 1
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight('v1', shape=())
def __init__(self):
super(MyLayer, self).__init__()
self.ul1 = UpdateAndLossLayer()
self.ul2 = UpdateAndLossLayer()
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
self.add_update(state_ops.assign_add(self.v1, 1))
x = self.ul1(inputs)
return self.ul2(x)
layer = MyLayer()
if context.executing_eagerly():
inputs = array_ops.ones((3, 1))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
else:
inputs = keras.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.updates), 3)
def test_attribute_reassignment(self):
l = keras.layers.Layer()
l.a = keras.layers.Layer()
l.a = []
l.a = variables.Variable(1.)
l.a = keras.layers.Layer()
last_assignment = keras.layers.Layer()
l.a = last_assignment
l.b = variables.Variable(1.)
del l.b
l.c = keras.layers.Layer()
del l.c
l.d = last_assignment
del l.d
self.assertEqual([last_assignment], l._layers)
self.assertEqual([], l.trainable_weights)
self.assertEqual([], l.non_trainable_weights)
self.assertEqual([], l.weights)
del l.a
self.assertEqual([], l._layers)
def test_assign_op_not_tracked_as_variable(self):
class LayerWithAssignAttr(keras.layers.Layer):
def build(self, input_shape):
self.v = variables.Variable(1.)
self.v_assign = self.v.assign_add(2.)
layer = LayerWithAssignAttr()
layer.build((10, 10))
self.assertEqual([layer.v], layer.variables)
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
def test_name_scope_layer(self):
x = keras.backend.placeholder(shape=(10, 10))
layer = keras.layers.Dense(10, name='MyName')
layer(x)
self.assertEqual(layer.bias.name, 'MyName/bias:0')
self.assertEqual(layer.kernel.name, 'MyName/kernel:0')
def test_name_scope_sublayer(self):
x = keras.backend.placeholder(shape=(10, 10))
layer = keras.layers.Dense(
10, activation=keras.layers.ReLU(name='MyAct'), name='MyName2')
y = layer(x)
self.assertEqual(layer.bias.name, 'MyName2/bias:0')
self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
self.assertEqual(y.name, 'MyName2/MyAct/Relu:0')
def test_name_scope_tf_tensor(self):
x = ops.convert_to_tensor(np.ones((10, 10)))
layer = keras.layers.Dense(
10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
layer(x)
self.assertEqual(layer.bias.name, 'MyName3/bias:0')
self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
_LAYERS_TO_TEST = [
(keras.layers.Dense, (1,), collections.OrderedDict(units=[1])),
(keras.layers.Activation, (2, 2),
collections.OrderedDict(activation=['relu'])),
(keras.layers.Dropout, (16,), collections.OrderedDict(rate=[0.25])),
(keras.layers.BatchNormalization, (8, 8, 3), collections.OrderedDict(
axis=[3], center=[True, False], scale=[True, False])),
(keras.layers.Conv1D, (8, 8), collections.OrderedDict(
filters=[1], kernel_size=[1, 3], strides=[1, 2],
padding=['valid', 'same'], use_bias=[True, False],
kernel_regularizer=[None, 'l2'])),
(keras.layers.Conv2D, (8, 8, 3), collections.OrderedDict(
filters=[1], kernel_size=[1, 3], strides=[1, 2],
padding=['valid', 'same'], use_bias=[True, False],
kernel_regularizer=[None, 'l2'])),
(keras.layers.LSTM, (8, 8), collections.OrderedDict(
units=[1],
activation=[None, 'relu'],
kernel_regularizer=[None, 'l2'],
dropout=[0, 0.5],
stateful=[True, False],
unroll=[True, False])),
]
OUTPUT_TEST_CASES = []
for layer_type, inp_shape, arg_dict in _LAYERS_TO_TEST:
arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()] # pylint: disable=g-complex-comprehension
for args in it.product(*arg_combinations):
name = '_{}_{}'.format(
layer_type.__name__, '_'.join('{}_{}'.format(k, v) for k, v in args))
OUTPUT_TEST_CASES.append(
(name, layer_type, inp_shape, {k: v for k, v in args}))
class OutputTypeTest(keras_parameterized.TestCase):
"""Test that layers and models produce the correct tensor types."""
# In v1 graph there are only symbolic tensors.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(*OUTPUT_TEST_CASES)
def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):
layer = layer_to_test(**layer_kwargs)
input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
layer_result = layer(input_data)
inp = keras.layers.Input(shape=input_shape, batch_size=2)
model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
model_result = model(input_data)
for x in [layer_result, model_result]:
if not isinstance(x, ops.Tensor):
raise ValueError('Tensor or EagerTensor expected, got type {}'
.format(type(x)))
if isinstance(x, ops.EagerTensor) != context.executing_eagerly():
expected_type = (ops.EagerTensor if context.executing_eagerly()
else ops.Tensor)
raise ValueError('Expected type {}, got type {}'
.format(expected_type, type(x)))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
from functools import wraps
from flask import Blueprint, current_app, render_template, redirect, request, url_for, flash, session, abort, jsonify
from flask_login import current_user, login_required
from sqlalchemy import or_, and_
from preston.xmlapi import Preston as XMLAPI
from auth.shared import db, eveapi
from auth.models import User
from .models import Member
from .reddit_oauth import RedditOAuth
# Create and configure app
app = Blueprint('hr', __name__, template_folder='templates/hr', static_folder='static')
# Reddit OAuth connection
reddit_oauth = None
# Storage for names of new applications, polled via the /api/apps endpoint
new_apps = []
@app.record_once
def _record(setup_state):
app.config = setup_state.app.config
global reddit_oauth
reddit_oauth = RedditOAuth(
app.config['REDDIT_OAUTH_CLIENT_ID'],
app.config['REDDIT_OAUTH_SECRET'],
app.config['REDDIT_OAUTH_CALLBACK']
)
@app.context_processor
def _prerender():
if current_user.is_authenticated:
return {
'member': Member.query.filter_by(character_name=current_user.name).first()
}
return {}
@app.before_request
def _preprocess():
member = get_member_for(current_user)
if not current_user.is_anonymous:
if not member:
id = get_id_for_name(current_user.name)
db.session.add(Member(current_user.name, id, get_corp_for_id(id)))
db.session.commit()
elif not member.character_id:
member.character_id = get_id_for_name(member.character_name)
db.session.commit()
@app.route('/', methods=['GET', 'POST'])
@login_required
def index():
"""
Member page.
This is where logged-in members can view their personal data and
edit their API keys.
Methods:
GET
POST
Args:
None
Returns:
rendered template 'personal.html'
"""
if get_member_for(current_user).status in ['Guest', 'New', 'Ready to be interviewed', 'Ready to be accepted']:
return redirect(url_for('.join'))
if request.method == 'POST':
current_app.logger.debug('POST on index by {}'.format(current_user.name))
key_id = request.form['key_id']
v_code = request.form['v_code']
validate_key(key_id, v_code, get_member_for(current_user))
return redirect(url_for('.index'))
reddit_link = reddit_oauth.get_authorize_url()
return render_template('hr/personal.html', reddit_link=reddit_link)
def validate_key(key_id, v_code, member):
"""
This method validates an EVE API keyID/vCode pair against the
EVE XML API to verify that the key has the correct access mask.
Args:
key_id (str) - EVE API key keyID
v_code (str) - EVE API key vCode
member (hr.models.Member) - Member to update if the key is valid
Returns:
value (bool) - True if the key was valid
"""
errors = []
try:
auth = XMLAPI(key=key_id, code=v_code, user_agent=eveapi['user_agent'])
result = auth.account.APIKeyInfo()
if not int(result['key']['@accessMask']) == app.config['API_KEY_MASK']:
errors.append('The key with ID "{}" has the wrong access mask. Has: {}, needs: {}'.format(
key_id, result['key']['@accessMask'], app.config['API_KEY_MASK']
))
except Exception as e:
errors.append('An error occurred with keyID "{}": {}'.format(key_id, str(e)))
if not errors and member:
member.key_id = key_id
member.v_code = v_code
db.session.commit()
flash('API key information saved', 'success')
else:
flash('; '.join(errors), 'error')
return not errors
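# Usage sketch (hypothetical key values): validate_key('1234567', 'abcdefVCode', member)
# returns True and stores the pair on `member` only when the key's accessMask equals
# app.config['API_KEY_MASK']; otherwise the collected errors are flashed and False is returned.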
@app.route('/members')
@login_required
def membership():
"""
Recruiter page.
This page shows recruiters the total list of members of the corporation
and the applications to join the corporation.
Args:
None
Returns:
rendered template 'membership.html'
"""
if not current_user.recruiter and not current_user.mentor and not current_user.admin:
return redirect(url_for('.index'))
show_hidden = request.args.get('show_hidden', 0, type=bool)
show_applications = request.args.get('show_applications', 0, type=bool)
members = Member.query.filter_by(hidden=show_hidden).all()
if show_applications:
members = [member for member in members if member.status in
['New', 'Ready to be interviewed', 'Ready to be accepted']]
members = sorted(members, key=lambda x: x.character_name.lower())
return render_template('hr/membership.html',
members=members, show_hidden=show_hidden, show_applications=show_applications)
@app.route('/members/add', methods=['GET', 'POST'])
@login_required
def add_member():
"""
This page allows recruiters to manually add an applicant.
Methods:
GET
POST
Args:
None
Returns:
rendered template 'add_applicant.html'
"""
if not current_user.recruiter and not current_user.admin:
return redirect(url_for('.index'))
if request.method == 'POST':
name = request.form.get('name')
reddit = request.form.get('reddit')
status = request.form.get('status')
apikey = request.form.get('apikey')
apicode = request.form.get('apicode')
main = request.form.get('main')
if main == '*':
main = name
notes = request.form.get('notes')
current_app.logger.debug('POST on add_member by {}: name = {}, reddit = {}, status = {}, main = {}'.format(
current_user.name, name, reddit, status, main
))
if not validate_key(apikey, apicode, None):
current_app.logger.info('POST on add_member didn\'t have a valid key')
flash('Invalid key for user', 'danger')
return redirect(url_for('.add_member'))
id = get_id_for_name(name)
member = Member(name, id, get_corp_for_id(id), status, reddit, main, notes, apikey, apicode)
current_app.logger.info('New member added through add_member: ' + str(name))
db.session.add(member)
db.session.commit()
flash('Character added', 'success')
return render_template('hr/add_member.html', all_members=get_all_member_names())
@app.route('/admin', methods=['GET', 'POST'])
@login_required
def admin():
"""
This is the admin control page, where admins can add and remove
recruiters and pull all corp members from the EVE API to update
the database.
Methods:
GET
POST
Args:
None
Returns:
rendered template 'admin.html'
"""
if not current_user.admin:
current_app.logger.debug('Admin access denied to {}'.format(current_user.name))
return redirect(url_for('.index'))
if request.method == 'POST':
current_app.logger.debug('POST on admin by {}'.format(current_user.name))
name = request.form['name']
member = Member.query.filter_by(character_name=name).first()
if not member:
flash('Unknown member', 'error')
return redirect(url_for('.admin'))
member.status = 'Accepted'
if request.form['role'] == 'Recruiter':
member.user.recruiter = True
if request.form['role'] == 'Mentor':
member.user.mentor = True
db.session.commit()
flash(member.character_name + ' promoted to ' + request.form['role'], 'success')
return redirect(url_for('.admin'))
admins = ', '.join([user.name for user in User.query.filter_by(admin=True).all()])
recruiters = [get_member_for(user) for user in User.query.filter(or_(User.recruiter, User.admin)).all()]
mentors = [get_member_for(user) for user in User.query.filter(or_(User.mentor, User.admin)).all()]
recruiters = sorted(set(recruiters), key=lambda x: x.character_name)
mentors = sorted(set(mentors), key=lambda x: x.character_name)
return render_template('hr/admin.html',
admins=admins, recruiters=recruiters, mentors=mentors, all_members=get_all_member_names())
@app.route('/admin/set_status', methods=['POST'])
@login_required
def admin_set_status():
"""
This transient endpoint allows an admin to set the
status of a member.
Methods:
POST
Args:
None
Returns:
redirect to the admin endpoint
"""
if not current_user.admin:
return redirect(url_for('.index'))
if not request.method == 'POST':
return redirect(url_for('.admin'))
name = request.form.get('name', None)
status = request.form.get('status', 'New')
if not name or not status:
flash('Missing name or status', 'error')
return redirect(url_for('.admin'))
member = Member.query.filter_by(character_name=name).first()
if not member:
flash('Unknown member name', 'error')
return redirect(url_for('.admin'))
member.status = status
db.session.commit()
flash('User status changed for ' + name + ' to ' + status, 'success')
return redirect(url_for('.admin'))
@app.route('/admin/revoke/<name>/<role>')
@login_required
def revoke_access(name, role):
"""
This transient endpoint allows an admin to revoke the recruiter
or mentor status of a member.
Args:
name (str) - name of the member whose access is being revoked
role (str) - role to revoke ('Recruiter' or 'Mentor')
Returns:
redirect to the admin endpoint
"""
if not current_user.admin:
return redirect(url_for('.index'))
member = Member.query.filter_by(character_name=name).first()
if not member:
flash('Unknown member name', 'error')
return redirect(url_for('.admin'))
member.status = 'Accepted'
if role == 'Recruiter':
member.user.recruiter = False
elif role == 'Mentor':
member.user.mentor = False
db.session.commit()
flash('User access revoked for ' + name, 'success')
return redirect(url_for('.admin'))
@app.route('/details/<int:id>', methods=['GET', 'POST'])
@login_required
def details(id):
"""
This page allows recruiters to view and edit a member's details.
Methods:
GET
POST
Args:
id (int) - id of the member to examine
Returns:
rendered template 'details.html'
"""
if not current_user.recruiter and not current_user.mentor and not current_user.admin:
current_app.logger.debug('Details access denied to {}'.format(current_user.name))
return redirect(url_for('.index'))
member = Member.query.get(id)
if not member:
flash('Unknown id', 'error')
current_app.logger.error('Unknown id on details for id {} by {}'.format(id, current_user.name))
return redirect(url_for('.membership'))
if request.method == 'POST':
if request.form['section'] == 'keys':
current_app.logger.info('POST on details - keys by {} for {}'.format(
current_user.name, member.character_name
))
validate_key(request.form['key_id'], request.form['v_code'], member)
elif request.form['section'] == 'status':
current_app.logger.info('POST on details - status by {} for {}: {}'.format(
current_user.name, member.character_name, request.form['status']
))
member.status = request.form['status']
if member.status == 'Denied':
member.hidden = True
db.session.commit()
flash('Status changed', 'success')
elif request.form['section'] == 'main':
current_app.logger.info('POST on details - main by {} for {}: {}'.format(
current_user.name, member.character_name, request.form['main']
))
main = request.form['main']
member.main = main if not main == '*' else member.character_name
db.session.commit()
flash('Main character changed', 'success')
elif request.form['section'] == 'notes':
current_app.logger.info('POST on details - notes by {} for {}: {}'.format(
current_user.name, member.character_name, request.form['notes']
))
member.notes = request.form['notes']
db.session.commit()
flash('Notes changed', 'success')
elif request.form['section'] == 'training':
current_app.logger.info('POST on details - training by {} for {}'.format(
current_user.name, member.character_name
))
member.know_good_fits = 'know_good_fits' in request.form
member.know_scan = 'know_scan' in request.form
member.know_mass_and_time = 'know_mass_and_time' in request.form
member.know_organize_gank = 'know_organize_gank' in request.form
member.know_when_to_pve = 'know_when_to_pve' in request.form
member.know_comms = 'know_comms' in request.form
member.know_appropriate_ships = 'know_appropriate_ships' in request.form
member.know_intel = 'know_intel' in request.form
member.know_pvp = 'know_pvp' in request.form
member.know_doctrine = 'know_doctrine' in request.form
for alt in member.get_alts():
alt.know_good_fits = member.know_good_fits
alt.know_scan = member.know_scan
alt.know_mass_and_time = member.know_mass_and_time
alt.know_organize_gank = member.know_organize_gank
alt.know_when_to_pve = member.know_when_to_pve
alt.know_comms = member.know_comms
alt.know_appropriate_ships = member.know_appropriate_ships
alt.know_intel = member.know_intel
alt.know_pvp = member.know_pvp
alt.know_doctrine = member.know_doctrine
db.session.commit()
else:
flash('Unknown form submission', 'error')
return redirect(url_for('.details', id=id))
return render_template('hr/details.html', member=member, all_members=get_all_member_names())
@app.route('/visibility/<int:id>/<action>')
@login_required
def visibility(id, action):
"""
This transient endpoint allows a recruiter to set the visibility
of a member on the membership page (to be used to hide people who
have left the corp).
Args:
id (int) - id of the member to modify
action (str) - whether to hide or show the member
Returns:
redirect to the member's details endpoint
"""
if not current_user.recruiter and not current_user.admin:
current_app.logger.debug('Visibility access denied to {}'.format(current_user.name))
return redirect(url_for('.index'))
member = Member.query.get(id)
if not member:
flash('Unknown id', 'error')
current_app.logger.error('Unknown id on details for id {} by {}'.format(id, current_user.name))
return redirect(url_for('.membership'))
member.hidden = action == 'hide'
db.session.commit()
flash('"{}" {}'.format(member.character_name, 'hidden' if member.hidden else 'made visible'), 'success')
return redirect(url_for('.details', id=id))
@app.route('/delete/<int:id>')
@login_required
def delete(id):
"""
This transient endpoint allows an admin to permanently delete a
member from the database.
Args:
id (int) - id of the member to delete
Returns:
redirect to the membership endpoint
"""
if not current_user.admin:
current_app.logger.debug('Delete access denied to {}'.format(current_user.name))
return redirect(url_for('.details', id=id))
Member.query.filter_by(id=id).delete()
db.session.commit()
flash('Member deleted', 'success')
return redirect(url_for('.membership'))
@app.route('/join', methods=['GET', 'POST'])
@login_required
def join():
"""
This page allows a user to submit an application to join the corporation
by supplying an API key and optional reddit account and main character.
Methods:
GET
POST
Args:
None
Returns:
rendered template 'join.html'
"""
member = get_member_for(current_user)
if member.status == 'Accepted':
return redirect(url_for('.index'))
character_name = session.get('character_name') or (current_user.name if not current_user.is_anonymous else None)
if not character_name:
flash('Well, something went wrong. Try again?', 'error')
return redirect(url_for('.login'))
if request.method == 'POST':
current_app.logger.debug('POST on join by {}'.format(current_user.name))
try:
key = request.form['key']
code = request.form['code']
auth = XMLAPI(key=key, code=code, user_agent=eveapi['user_agent'])
main = request.form.get('main')
reddit = None
if main == '*':
main = get_member_for(current_user).character_name
else:
try:
reddit = Member.query.filter_by(character_name=main).first().reddit
except Exception:
current_app.logger.warning('{} tried to set {} as their main, but that Member object wasn\'t found'.format(
current_user.name, main
))
result = auth.account.APIKeyInfo()
if not int(result['key']['@accessMask']) == app.config['API_KEY_MASK']:
flash('Wrong key mask - you need {}'.format(app.config['API_KEY_MASK']), 'error')
return redirect(url_for('.join'))
member = get_member_for(current_user)
member.status = 'New'
member.main = main
member.key_id = key
member.v_code = code
member.reddit = reddit
db.session.commit()
new_apps.append(current_user.name)
flash('Your application is in - someone will take a look soon', 'success')
except Exception:
flash('An error occurred when parsing your API key. Are you sure you entered it right?', 'error')
return redirect(url_for('.join'))
reddit_link = reddit_oauth.get_authorize_url()
return render_template('hr/join.html',
character_name=character_name, reddit_link=reddit_link,
all_members=get_all_member_names(), discord_link=app.config['DISCORD_LINK'])
@app.route('/sync')
@login_required
def sync():
"""
This transient endpoint calls the sync_members method.
Args:
None
Returns:
redirect to the admin endpoint
"""
if not current_user.admin:
current_app.logger.debug('Admin access denied to {}'.format(current_user.name))
return redirect(url_for('.index'))
sync_members()
return redirect(url_for('.admin'))
def sync_members():
"""
This method imports the corporation member list from the EVE API,
adding any members missing from the database and deleting members
(and their users) who have left or been kicked from the corporation.
Args:
None
Returns:
value (dict) of membership changes
"""
current_app.logger.info('-- Starting member sync')
auth = XMLAPI(
key=app.config['CORP_MEMBER_API_KEY'],
code=app.config['CORP_MEMBER_API_CODE'],
user_agent=eveapi['user_agent']
)
api_members = []
existing_members, new_members, left_members = [], [], []
for member in auth.corp.MemberTracking()['rowset']['row']:
name = member['@name']
db_model = Member.query.filter_by(character_name=name).first()
if not db_model:
current_app.logger.info('-- Added {} to the corporation'.format(name))
existing_members.append(name)
db_model = Member(name, member['@characterID'], app.config['CORPORATION'], 'Accepted')
db.session.add(db_model)
db_model.corporation = app.config['CORPORATION']
if db_model.status not in ['Accepted', 'Recruiter']:
db_model.status = 'Accepted'
new_members.append(name)
current_app.logger.info('-- {} has been accepted into the corporation'.format(name))
api_members.append(name)
current_app.logger.debug('Full corp roster: ' + ', '.join(api_members))
for member in Member.query.filter_by(status='Accepted').all():
if member.character_name not in api_members:
current_app.logger.warning('-- ' + member.character_name + ' is not in the corporation')
left_members.append(member.character_name)
if member.user:
db.session.delete(member.user)
db.session.delete(member)
try:
db.session.commit()
current_app.logger.info('-- Database saved after member sync')
except Exception as e:
current_app.logger.error('-- An error occurred when syncing members: ' + str(e))
flash('Members imported', 'success')
return {
'existing_members': existing_members,
'new_members': new_members,
'left_members': left_members
}
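# Illustrative return value of sync_members() (names are hypothetical):
#   {'existing_members': ['Pilot Alpha'], 'new_members': ['Pilot Beta'], 'left_members': []}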
@app.route('/reports')
@login_required
def reports():
"""
This page shows reports to the recruiters for the purpose of validation
and security.
Args:
None
Returns:
Rendered template 'reports.html'
"""
if not current_user.recruiter and not current_user.admin:
current_app.logger.debug('Visibility access denied to {}'.format(current_user.name))
return redirect(url_for('.index'))
members = Member.query.filter(and_(Member.status != 'Left', Member.status != 'Guest')).all()
member_names = get_all_member_names()
defunct_alts = []
invalid_mains = []
missing_api_keys = []
for member in members:
if member.character_name != member.main:
if member.main not in member_names:
invalid_mains.append(member)
else:
main = [m for m in members if m.character_name == member.main]
if not main or main[0].status == 'Left':
defunct_alts.append(member)
if not member.key_id or not member.v_code:
missing_api_keys.append(member)
return render_template('hr/reports.html',
defunct_alts=defunct_alts, invalid_mains=invalid_mains, missing_api_keys=missing_api_keys)
@app.route('/reddit/callback')
@login_required
def reddit_oauth_callback():
"""
This transient endpoint completes the reddit OAuth verification process
and sets the current user's reddit account in the database.
Args:
None
Returns:
redirect to the index endpoint
"""
if current_user.is_anonymous:
return redirect(url_for('.login'))
current_app.logger.debug('Reddit callback by {}'.format(current_user.name))
username = reddit_oauth.get_token(request.args['code'])
user_member = get_member_for(current_user)
user_member.reddit = username
current_app.logger.info('{} updated their reddit account to {}'.format(current_user.name, username))
for member in Member.query.filter_by(main=user_member.character_name).all():
member.reddit = username
current_app.logger.info('{} updated their alt {} reddit account to {}'.format(
current_user.name, member.character_name, username
))
db.session.commit()
return redirect(url_for('.index'))
def api_key_required(f):
@wraps(f)
def inner(*args, **kwargs):
"""
Endpoint decorator for REST interactions - the request
header must contain the secret from the config.
Args:
args (tuple) - args
kwargs (dict) - kwargs
Returns:
call of the wrapped method if the header was valid, an
HTTP 403 response otherwise
"""
token = request.headers.get('REST-SECRET')
if not token or not token == app.config['REST_SECRET']:
current_app.logger.warning('Access denied to API endpoint ' + str(request.endpoint) + ', token = ' + str(token))
abort(403)
return f(*args, **kwargs)
return inner
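# Usage sketch: the REST endpoints below expect the shared secret in the
# 'REST-SECRET' request header (host and secret value here are hypothetical):
#   curl -H "REST-SECRET: <REST_SECRET from the config>" https://auth.example.org/hr/api/sync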
@app.route('/api/sync')
@api_key_required
def api_sync():
"""
Syncs the corporation membership with the EVE XML API
and returns the result of doing so.
Args:
None
Returns:
response (JSON)
"""
current_app.logger.info('API endpoint sync accessed')
return jsonify(sync_members())
@app.route('/api/apps')
@api_key_required
def api_apps():
"""
Returns a list of all new apps since the last poll.
Args:
None
Returns:
response (JSON)
"""
current_app.logger.info('API endpoint apps accessed')
apps = list(new_apps)  # copy before clearing; otherwise the cleared alias would be returned
new_apps.clear()
return jsonify(apps)
@app.route('/api/keys')
@api_key_required
def api_keys():
"""
Iterates through all API keys in the database and checks that
they're still valid. Since API keys are validated when entered
and cannot be removed from the system without being switched
for another valid pair, the only way that a user can block
access to their data through the EVE XML API is deleting the
key from their account. There's no notification for this, so
keys have to be checked periodically.
To reduce the amount of API calls, members who've already
left the corporation are not checked.
Args:
None
Returns:
response (JSON)
"""
current_app.logger.info('API endpoint keys accessed')
invalid_keys = []
for member in Member.query.filter(Member.status != 'Left').all():
try:
auth = XMLAPI(key=member.key_id, code=member.v_code, user_agent=eveapi['user_agent'])
result = auth.account.APIKeyInfo()
if not int(result['key']['@accessMask']) == app.config['API_KEY_MASK']:
invalid_keys.append(member.character_name)
current_app.logger.warning('-- ' + member.character_name + ' has an invalid API key!')
else:
current_app.logger.debug('-- ' + member.character_name + ' has a valid API key')
except Exception:
invalid_keys.append(member.character_name)
current_app.logger.warning('-- ' + member.character_name + ' has an invalid API key!')
return jsonify(invalid_keys)
def get_all_member_names():
"""
Returns a list of all member names in the corporation.
Args:
None
Returns:
value (list) of string names
"""
return sorted([m.character_name for m in Member.query.all()], key=lambda x: x.lower())
def get_id_for_name(name):
"""
This helper method takes a character's name and returns their EVE character ID.
Args:
name (str): character name
Returns:
int: character id
"""
return eveapi['xml'].eve.CharacterId(names=name)['rowset']['row']['@characterID']
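# Usage sketch (hypothetical pilot): get_id_for_name('Some Pilot') returns the
# character ID parsed from the XML response, e.g. '93000001' (a string value).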
def get_corp_for_name(name):
"""
This helper method takes a character's name and returns their corporation.
Args:
name (str) - full character name
Returns:
value (str) of their corporation's name
"""
return get_corp_for_id(get_id_for_name(name))
def get_corp_for_id(id):
"""
This helper method takes a character's id and returns their corporation name.
Args:
id (int) - EVE character ID
Returns:
value (str) of their corporation's name
"""
return eveapi['xml'].eve.CharacterAffiliation(ids=id)['rowset']['row']['@corporationName']
def get_member_for(user):
"""
This helper method returns the corresponding Member object for the user.
Args:
user (User): the user
Returns:
Member: member object if there's a user logged in, otherwise None
"""
if current_user.is_anonymous:
return None
return Member.query.filter_by(character_name=user.name).first()
|
|
#!/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Usage: symbolstore.py <params> <dump_syms path> <symbol store path>
# <debug info files or dirs>
# Runs dump_syms on each debug info file specified on the command line,
# then places the resulting symbol file in the proper directory
# structure in the symbol store path. Accepts multiple files
# on the command line, so can be called as part of a pipe using
# find <dir> | xargs symbolstore.py <dump_syms> <storepath>
# But really, you might just want to pass it <dir>.
#
# Parameters accepted:
# -c : Copy debug info files to the same directory structure
# as sym files
# -a "<archs>" : Run dump_syms -a <arch> for each space separated
# cpu architecture in <archs> (only on OS X)
# -s <srcdir> : Use <srcdir> as the top source directory to
# generate relative filenames.
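#
# Example invocation (all paths are hypothetical):
#   python symbolstore.py -c -s /builds/mozilla-central \
#       /usr/local/bin/dump_syms /tmp/symbols dist/bin/libxul.so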
import sys
import platform
import os
import re
import shutil
import textwrap
import fnmatch
import subprocess
from optparse import OptionParser
# Utility classes
class VCSFileInfo:
""" A base class for version-controlled file information. Ensures that the
following attributes are generated only once (successfully):
self.root
self.clean_root
self.revision
self.filename
The attributes are generated by a single call to the GetRoot,
GetRevision, and GetFilename methods. Those methods are explicitly not
implemented here and must be implemented in derived classes. """
def __init__(self, file):
if not file:
raise ValueError
self.file = file
def __getattr__(self, name):
""" __getattr__ is only called for attributes that are not set on self,
so setting self.[attr] will prevent future calls to the GetRoot,
GetRevision, and GetFilename methods. We don't set the values on
failure on the off chance that a future call might succeed. """
if name == "root":
root = self.GetRoot()
if root:
self.root = root
return root
elif name == "clean_root":
clean_root = self.GetCleanRoot()
if clean_root:
self.clean_root = clean_root
return clean_root
elif name == "revision":
revision = self.GetRevision()
if revision:
self.revision = revision
return revision
elif name == "filename":
filename = self.GetFilename()
if filename:
self.filename = filename
return filename
raise AttributeError
def GetRoot(self):
""" This method should return the unmodified root for the file or 'None'
on failure. """
raise NotImplementedError
def GetCleanRoot(self):
""" This method should return the repository root for the file or 'None'
on failure. """
raise NotImplementedError
def GetRevision(self):
""" This method should return the revision number for the file or 'None'
on failure. """
raise NotImplementedError
def GetFilename(self):
""" This method should return the repository-specific filename for the
file or 'None' on failure. """
raise NotImplementedError
class CVSFileInfo(VCSFileInfo):
""" A class to maintiain version information for files in a CVS repository.
Derived from VCSFileInfo. """
def __init__(self, file, srcdir):
VCSFileInfo.__init__(self, file)
self.srcdir = srcdir
def GetRoot(self):
(path, filename) = os.path.split(self.file)
root = os.path.join(path, "CVS", "Root")
if not os.path.isfile(root):
return None
f = open(root, "r")
root_name = f.readline().strip()
f.close()
if root_name:
return root_name
print >> sys.stderr, "Failed to get CVS Root for %s" % filename
return None
def GetCleanRoot(self):
parts = self.root.split('@')
if len(parts) > 1:
# we don't want the extra colon
return parts[1].replace(":","")
return self.root.replace(":","")
def GetRevision(self):
(path, filename) = os.path.split(self.file)
entries = os.path.join(path, "CVS", "Entries")
if not os.path.isfile(entries):
return None
f = open(entries, "r")
for line in f:
parts = line.split("/")
if len(parts) > 1 and parts[1] == filename:
return parts[2]
print >> sys.stderr, "Failed to get CVS Revision for %s" % filename
return None
def GetFilename(self):
file = self.file
if self.revision and self.clean_root:
if self.srcdir:
# strip the base path off
# but we actually want the last dir in srcdir
file = os.path.normpath(file)
# the lower() is to handle win32+vc8, where
# the source filenames come out all lowercase,
# but the srcdir can be mixed case
if file.lower().startswith(self.srcdir.lower()):
file = file[len(self.srcdir):]
(head, tail) = os.path.split(self.srcdir)
if tail == "":
tail = os.path.basename(head)
file = tail + file
return "cvs:%s:%s:%s" % (self.clean_root, file, self.revision)
return file
# This regex separates protocol and optional username/password from a url.
# For instance, all the following urls will be transformed into
# 'foo.com/bar':
#
# http://foo.com/bar
# svn+ssh://user@foo.com/bar
# svn+ssh://user:pass@foo.com/bar
#
# This is used by both SVN and HG
rootRegex = re.compile(r'^\S+?:/+(?:[^\s/]*@)?(\S+)$')
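# Quick illustration of the regex above:
#   rootRegex.match('svn+ssh://user:pass@foo.com/bar').group(1)  # -> 'foo.com/bar'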
class SVNFileInfo(VCSFileInfo):
url = None
repo = None
svndata = {}
def __init__(self, file):
""" We only want to run subversion's info tool once so pull all the data
here. """
VCSFileInfo.__init__(self, file)
if os.path.isfile(file):
command = os.popen("svn info %s" % file, "r")
for line in command:
# The last line of the output is usually '\n'
if line.strip() == '':
continue
# Split into a key/value pair on the first colon
key, value = line.split(':', 1)
if key in ["Repository Root", "Revision", "URL"]:
self.svndata[key] = value.strip()
exitStatus = command.close()
if exitStatus:
print >> sys.stderr, "Failed to get SVN info for %s" % file
def GetRoot(self):
key = "Repository Root"
if key in self.svndata:
match = rootRegex.match(self.svndata[key])
if match:
return match.group(1)
print >> sys.stderr, "Failed to get SVN Root for %s" % self.file
return None
# File bug to get this teased out from the current GetRoot, this is temporary
def GetCleanRoot(self):
return self.root
def GetRevision(self):
key = "Revision"
if key in self.svndata:
return self.svndata[key]
print >> sys.stderr, "Failed to get SVN Revision for %s" % self.file
return None
def GetFilename(self):
if self.root and self.revision:
if "URL" in self.svndata and "Repository Root" in self.svndata:
url, repo = self.svndata["URL"], self.svndata["Repository Root"]
file = url[len(repo) + 1:]
return "svn:%s:%s:%s" % (self.root, file, self.revision)
print >> sys.stderr, "Failed to get SVN Filename for %s" % self.file
return self.file
def read_output(*args):
(stdout, _) = subprocess.Popen(args=args, stdout=subprocess.PIPE).communicate()
return stdout.rstrip()
class HGRepoInfo:
# HG info is per-repo, so cache it in a static
# member var
repos = {}
def __init__(self, path, rev, cleanroot):
self.path = path
self.rev = rev
self.cleanroot = cleanroot
class HGFileInfo(VCSFileInfo):
def __init__(self, file, srcdir):
VCSFileInfo.__init__(self, file)
# we should only have to collect this info once per-repo
if not srcdir in HGRepoInfo.repos:
rev = read_output('hg', '-R', srcdir,
'parent', '--template={node|short}')
# Look for the default hg path. If SRCSRV_ROOT is set, we
# don't bother asking hg.
hg_root = os.environ.get("SRCSRV_ROOT")
if hg_root:
path = hg_root
else:
path = read_output('hg', '-R', srcdir,
'showconfig', 'paths.default')
if not path:
print >> sys.stderr, "Failed to get HG Repo for %s" % srcdir
cleanroot = None
if path != '': # not there?
match = rootRegex.match(path)
if match:
cleanroot = match.group(1)
if cleanroot.endswith('/'):
cleanroot = cleanroot[:-1]
if cleanroot is None:
print >> sys.stderr, textwrap.dedent("""\
Could not determine repo info for %s. This is either not a clone of the web-based
repository, or you have not specified SRCSRV_ROOT, or the clone is corrupt.""") % srcdir
sys.exit(1)
HGRepoInfo.repos[srcdir] = HGRepoInfo(path, rev, cleanroot)
self.repo = HGRepoInfo.repos[srcdir]
self.file = file
self.srcdir = srcdir
def GetRoot(self):
return self.repo.path
def GetCleanRoot(self):
return self.repo.cleanroot
def GetRevision(self):
return self.repo.rev
def GetFilename(self):
file = self.file
if self.revision and self.clean_root:
if self.srcdir:
# strip the base path off
file = os.path.normpath(file)
if IsInDir(file, self.srcdir):
file = file[len(self.srcdir):]
if file.startswith('/') or file.startswith('\\'):
file = file[1:]
return "hg:%s:%s:%s" % (self.clean_root, file, self.revision)
return file
# Utility functions
# A cache of files for which VCS info has already been determined. Used to
# prevent extra filesystem activity or process launching.
vcsFileInfoCache = {}
def IsInDir(file, dir):
# the lower() is to handle win32+vc8, where
# the source filenames come out all lowercase,
# but the srcdir can be mixed case
return os.path.abspath(file).lower().startswith(os.path.abspath(dir).lower())
def GetVCSFilename(file, srcdirs):
"""Given a full path to a file, and the top source directory,
look for version control information about this file, and return
a tuple containing
1) a specially formatted filename that contains the VCS type,
VCS location, relative filename, and revision number, formatted like:
vcs:vcs location:filename:revision
For example:
cvs:cvs.mozilla.org/cvsroot:mozilla/browser/app/nsBrowserApp.cpp:1.36
2) the unmodified root information if it exists"""
(path, filename) = os.path.split(file)
if path == '' or filename == '':
return (file, None)
fileInfo = None
root = ''
if file in vcsFileInfoCache:
# Already cached this info, use it.
fileInfo = vcsFileInfoCache[file]
else:
for srcdir in srcdirs:
if os.path.isdir(os.path.join(path, "CVS")):
fileInfo = CVSFileInfo(file, srcdir)
if fileInfo:
root = fileInfo.root
elif os.path.isdir(os.path.join(path, ".svn")) or \
os.path.isdir(os.path.join(path, "_svn")):
fileInfo = SVNFileInfo(file)
elif os.path.isdir(os.path.join(srcdir, '.hg')) and \
IsInDir(file, srcdir):
fileInfo = HGFileInfo(file, srcdir)
if fileInfo:
vcsFileInfoCache[file] = fileInfo
break
if fileInfo:
file = fileInfo.filename
root = fileInfo.root
# we want forward slashes on win32 paths
return (file.replace("\\", "/"), root)
def GetPlatformSpecificDumper(**kwargs):
"""This function simply returns a instance of a subclass of Dumper
that is appropriate for the current platform."""
# Python 2.5 has a bug where platform.system() returns 'Microsoft'.
# Remove this when we no longer support Python 2.5.
return {'Windows': Dumper_Win32,
'Microsoft': Dumper_Win32,
'Linux': Dumper_Linux,
'Sunos5': Dumper_Solaris,
'Darwin': Dumper_Mac}[platform.system()](**kwargs)
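# Usage sketch (hypothetical paths): pick the dumper for the current platform
# and walk a directory of binaries:
#   dumper = GetPlatformSpecificDumper(dump_syms='/usr/local/bin/dump_syms',
#                                      symbol_path='/tmp/symbols',
#                                      vcsinfo=True)
#   dumper.Process('dist/bin')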
def SourceIndex(fileStream, outputPath, vcs_root):
"""Takes a list of files, writes info to a data block in a .stream file"""
# Creates a .pdb.stream file in the mozilla\objdir to be used for source indexing
# Create the srcsrv data block that indexes the pdb file
result = True
pdbStreamFile = open(outputPath, "w")
pdbStreamFile.write('''SRCSRV: ini ------------------------------------------------\r\nVERSION=2\r\nINDEXVERSION=2\r\nVERCTRL=http\r\nSRCSRV: variables ------------------------------------------\r\nHGSERVER=''')
pdbStreamFile.write(vcs_root)
pdbStreamFile.write('''\r\nSRCSRVVERCTRL=http\r\nHTTP_EXTRACT_TARGET=%hgserver%/raw-file/%var3%/%var2%\r\nSRCSRVTRG=%http_extract_target%\r\nSRCSRV: source files ---------------------------------------\r\n''')
pdbStreamFile.write(fileStream) # can't do string interpolation because the source server also uses this and so there are % in the above
pdbStreamFile.write("SRCSRV: end ------------------------------------------------\r\n\n")
pdbStreamFile.close()
return result
class Dumper:
"""This class can dump symbols from a file with debug info, and
store the output in a directory structure that is valid for use as
a Breakpad symbol server. Requires a path to a dump_syms binary--
|dump_syms| and a directory to store symbols in--|symbol_path|.
Optionally takes a list of processor architectures to process from
each debug file--|archs|, the full path to the top source
directory--|srcdir|, for generating relative source file names,
and an option to copy debug info files alongside the dumped
symbol files--|copy_debug|, mostly useful for creating a
Microsoft Symbol Server from the resulting output.
You don't want to use this directly if you intend to call
ProcessDir. Instead, call GetPlatformSpecificDumper to
get an instance of a subclass."""
def __init__(self, dump_syms, symbol_path,
archs=None,
srcdirs=None,
copy_debug=False,
vcsinfo=False,
srcsrv=False,
exclude=[]):
# popen likes absolute paths, at least on windows
self.dump_syms = os.path.abspath(dump_syms)
self.symbol_path = symbol_path
if archs is None:
# makes the loop logic simpler
self.archs = ['']
else:
self.archs = ['-a %s' % a for a in archs.split()]
if srcdirs is not None:
self.srcdirs = [os.path.normpath(a) for a in srcdirs]
else:
self.srcdirs = None
self.copy_debug = copy_debug
self.vcsinfo = vcsinfo
self.srcsrv = srcsrv
self.exclude = exclude[:]
# subclasses override this
def ShouldProcess(self, file):
return not any(fnmatch.fnmatch(os.path.basename(file), exclude) for exclude in self.exclude)
# and can override this
def ShouldSkipDir(self, dir):
return False
def RunFileCommand(self, file):
"""Utility function, returns the output of file(1)"""
try:
# we use -L to read the targets of symlinks,
# and -b to print just the content, not the filename
return os.popen("file -Lb " + file).read()
except:
return ""
# This is a no-op except on Win32
def FixFilenameCase(self, file):
return file
# This is a no-op except on Win32
def SourceServerIndexing(self, debug_file, guid, sourceFileStream, vcs_root):
return ""
# subclasses override this if they want to support this
def CopyDebug(self, file, debug_file, guid):
pass
def Process(self, file_or_dir):
"Process a file or all the (valid) files in a directory."
if os.path.isdir(file_or_dir) and not self.ShouldSkipDir(file_or_dir):
return self.ProcessDir(file_or_dir)
elif os.path.isfile(file_or_dir):
return self.ProcessFile(file_or_dir)
# maybe it doesn't exist?
return False
def ProcessDir(self, dir):
"""Process all the valid files in this directory. Valid files
are determined by calling ShouldProcess."""
result = True
for root, dirs, files in os.walk(dir):
for d in dirs[:]:
if self.ShouldSkipDir(d):
dirs.remove(d)
for f in files:
fullpath = os.path.join(root, f)
if self.ShouldProcess(fullpath):
if not self.ProcessFile(fullpath):
result = False
return result
def ProcessFile(self, file):
"""Dump symbols from this file into a symbol file, stored
in the proper directory structure in |symbol_path|."""
print >> sys.stderr, "Processing file: %s" % file
sys.stderr.flush()
result = False
sourceFileStream = ''
# tries to get the vcs root from the .mozconfig first - if it's not set
# the tinderbox vcs path will be assigned further down
vcs_root = os.environ.get("SRCSRV_ROOT")
for arch_num, arch in enumerate(self.archs):
try:
proc = subprocess.Popen([self.dump_syms] + arch.split() + [file],
stdout=subprocess.PIPE)
module_line = proc.stdout.next()
if module_line.startswith("MODULE"):
# MODULE os cpu guid debug_file
(guid, debug_file) = (module_line.split())[3:5]
# strip off .pdb extensions, and append .sym
sym_file = re.sub(r"\.pdb$", "", debug_file) + ".sym"
# we do want forward slashes here
rel_path = os.path.join(debug_file,
guid,
sym_file).replace("\\", "/")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
try:
os.makedirs(os.path.dirname(full_path))
except OSError: # already exists
pass
f = open(full_path, "w")
f.write(module_line)
# now process the rest of the output
for line in proc.stdout:
if line.startswith("FILE"):
# FILE index filename
(x, index, filename) = line.rstrip().split(None, 2)
if sys.platform == "sunos5":
for srcdir in self.srcdirs:
start = filename.find(srcdir)
if start != -1:
filename = filename[start:]
break
filename = self.FixFilenameCase(filename)
sourcepath = filename
if self.vcsinfo:
(filename, rootname) = GetVCSFilename(filename, self.srcdirs)
# set vcs_root the first time a non-empty rootname is seen, in case a later file reports an empty one
if vcs_root is None:
if rootname:
vcs_root = rootname
# gather up files with hg for indexing
if filename.startswith("hg"):
(ver, checkout, source_file, revision) = filename.split(":", 3)
sourceFileStream += sourcepath + "*" + source_file + '*' + revision + "\r\n"
f.write("FILE %s %s\n" % (index, filename))
else:
# pass through all other lines unchanged
f.write(line)
# we want to return true only if at least one line is not a MODULE or FILE line
result = True
f.close()
proc.wait()
# we output relative paths so callers can get a list of what
# was generated
print rel_path
if self.srcsrv and vcs_root:
# add source server indexing to the pdb file
self.SourceServerIndexing(file, guid, sourceFileStream, vcs_root)
# only copy debug the first time if we have multiple architectures
if self.copy_debug and arch_num == 0:
self.CopyDebug(file, debug_file, guid)
except StopIteration:
pass
except:
print >> sys.stderr, "Unexpected error: ", sys.exc_info()[0]
raise
return result
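# Illustrative sketch (values are made up, not taken from a real build): what
# ProcessFile consumes and produces, assuming symbol_path is "symbols/".
#
#   dump_syms output:        MODULE Linux x86_64 4C4C454400000000 libxul.so
#                            FILE 0 /build/src/widget/Widget.cpp
#                            ... further symbol records ...
#   written to:              symbols/libxul.so/4C4C454400000000/libxul.so.sym
#   printed for the caller:  libxul.so/4C4C454400000000/libxul.so.sym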
# Platform-specific subclasses. For the most part, these just have
# logic to determine what files to extract symbols from.
class Dumper_Win32(Dumper):
fixedFilenameCaseCache = {}
def ShouldProcess(self, file):
"""This function will allow processing of pdb files that have dll
or exe files with the same base name next to them."""
if not Dumper.ShouldProcess(self, file):
return False
if file.endswith(".pdb"):
(path,ext) = os.path.splitext(file)
if os.path.isfile(path + ".exe") or os.path.isfile(path + ".dll"):
return True
return False
def FixFilenameCase(self, file):
"""Recent versions of Visual C++ put filenames into
PDB files as all lowercase. If the file exists
on the local filesystem, fix it."""
# Use a cached version if we have one.
if file in self.fixedFilenameCaseCache:
return self.fixedFilenameCaseCache[file]
result = file
(path, filename) = os.path.split(file)
if os.path.isdir(path):
lc_filename = filename.lower()
for f in os.listdir(path):
if f.lower() == lc_filename:
result = os.path.join(path, f)
break
# Cache the corrected version to avoid future filesystem hits.
self.fixedFilenameCaseCache[file] = result
return result
def CopyDebug(self, file, debug_file, guid):
rel_path = os.path.join(debug_file,
guid,
debug_file).replace("\\", "/")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
shutil.copyfile(file, full_path)
# try compressing it
compressed_file = os.path.splitext(full_path)[0] + ".pd_"
# ignore makecab's output
success = subprocess.call(["makecab.exe", "/D", "CompressionType=LZX", "/D",
"CompressionMemory=21",
full_path, compressed_file],
stdout=open("NUL:","w"), stderr=subprocess.STDOUT)
if success == 0 and os.path.exists(compressed_file):
os.unlink(full_path)
print os.path.splitext(rel_path)[0] + ".pd_"
else:
print rel_path
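# The compression step above is roughly equivalent to the following command
# (paths are illustrative only):
#   makecab.exe /D CompressionType=LZX /D CompressionMemory=21 \
#       symbols\foo.pdb\<guid>\foo.pdb symbols\foo.pdb\<guid>\foo.pd_
# and the uncompressed copy is removed once the .pd_ file exists.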
def SourceServerIndexing(self, debug_file, guid, sourceFileStream, vcs_root):
# Creates a .pdb.stream file in the mozilla\objdir to be used for source indexing
debug_file = os.path.abspath(debug_file)
streamFilename = debug_file + ".stream"
stream_output_path = os.path.abspath(streamFilename)
# Call SourceIndex to create the .stream file
result = SourceIndex(sourceFileStream, stream_output_path, vcs_root)
if self.copy_debug:
pdbstr_path = os.environ.get("PDBSTR_PATH")
pdbstr = os.path.normpath(pdbstr_path)
subprocess.call([pdbstr, "-w", "-p:" + os.path.basename(debug_file),
"-i:" + os.path.basename(streamFilename), "-s:srcsrv"],
cwd=os.path.dirname(stream_output_path))
# clean up the .stream file when done
os.remove(stream_output_path)
return result
class Dumper_Linux(Dumper):
objcopy = os.environ.get('OBJCOPY', 'objcopy')
def ShouldProcess(self, file):
"""This function will allow processing of files that are
executable, or end with the .so extension, and additionally
file(1) reports as being ELF files. It expects to find the file
command in PATH."""
if not Dumper.ShouldProcess(self, file):
return False
if file.endswith(".so") or os.access(file, os.X_OK):
return self.RunFileCommand(file).startswith("ELF")
return False
def CopyDebug(self, file, debug_file, guid):
# We want to strip out the debug info, and add a
# .gnu_debuglink section to the object, so the debugger can
# actually load our debug info later.
file_dbg = file + ".dbg"
if subprocess.call([self.objcopy, '--only-keep-debug', file, file_dbg]) == 0 and \
subprocess.call([self.objcopy, '--add-gnu-debuglink=%s' % file_dbg, file]) == 0:
rel_path = os.path.join(debug_file,
guid,
debug_file + ".dbg")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
shutil.move(file_dbg, full_path)
# gzip the shipped debug files
os.system("gzip %s" % full_path)
print rel_path + ".gz"
else:
if os.path.isfile(file_dbg):
os.unlink(file_dbg)
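# For reference, CopyDebug above is roughly equivalent to these shell steps
# (file names are hypothetical):
#   objcopy --only-keep-debug libfoo.so libfoo.so.dbg
#   objcopy --add-gnu-debuglink=libfoo.so.dbg libfoo.so
#   mv libfoo.so.dbg symbols/libfoo.so/<guid>/libfoo.so.dbg
#   gzip symbols/libfoo.so/<guid>/libfoo.so.dbg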
class Dumper_Solaris(Dumper):
def RunFileCommand(self, file):
"""Utility function, returns the output of file(1)"""
try:
output = os.popen("file " + file).read()
return output.split('\t')[1]
except:
return ""
def ShouldProcess(self, file):
"""This function will allow processing of files that are
executable, or end with the .so extension, and additionally
file(1) reports as being ELF files. It expects to find the file
command in PATH."""
if not Dumper.ShouldProcess(self, file):
return False
if file.endswith(".so") or os.access(file, os.X_OK):
return self.RunFileCommand(file).startswith("ELF")
return False
class Dumper_Mac(Dumper):
def ShouldProcess(self, file):
"""This function will allow processing of files that are
executable, or end with the .dylib extension, and additionally
file(1) reports as being Mach-O files. It expects to find the file
command in PATH."""
if not Dumper.ShouldProcess(self, file):
return False
if file.endswith(".dylib") or os.access(file, os.X_OK):
return self.RunFileCommand(file).startswith("Mach-O")
return False
def ShouldSkipDir(self, dir):
"""We create .dSYM bundles on the fly, but if someone runs
buildsymbols twice, we should skip any bundles we created
previously, otherwise we'll recurse into them and try to
dump the inner bits again."""
if dir.endswith(".dSYM"):
return True
return False
def ProcessFile(self, file):
"""dump_syms on Mac needs to be run on a dSYM bundle produced
by dsymutil(1), so run dsymutil here and pass the bundle name
down to the superclass method instead."""
dsymbundle = file + ".dSYM"
if os.path.exists(dsymbundle):
shutil.rmtree(dsymbundle)
# dsymutil takes --arch=foo instead of -a foo like everything else
subprocess.call(["dsymutil"] + [a.replace('-a ', '--arch=') for a in self.archs if a]
+ [file],
stdout=open("/dev/null","w"))
if not os.path.exists(dsymbundle):
# dsymutil won't produce a .dSYM for files without symbols
return False
res = Dumper.ProcessFile(self, dsymbundle)
# CopyDebug will already have been run from Dumper.ProcessFile
shutil.rmtree(dsymbundle)
# fallback for DWARF-less binaries
if not res:
print >> sys.stderr, "Couldn't read DWARF symbols in: %s" % dsymbundle
res = Dumper.ProcessFile(self, file)
return res
def CopyDebug(self, file, debug_file, guid):
"""ProcessFile has already produced a dSYM bundle, so we should just
copy that to the destination directory. However, we'll package it
into a .tar.bz2 because the debug symbols are pretty huge, and
also because it's a bundle, so it's a directory. |file| here is the
dSYM bundle, and |debug_file| is the original filename."""
rel_path = os.path.join(debug_file,
guid,
os.path.basename(file) + ".tar.bz2")
full_path = os.path.abspath(os.path.join(self.symbol_path,
rel_path))
success = subprocess.call(["tar", "cjf", full_path, os.path.basename(file)],
cwd=os.path.dirname(file),
stdout=open("/dev/null","w"), stderr=subprocess.STDOUT)
if success == 0 and os.path.exists(full_path):
print rel_path
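# Rough outline of the Mac flow above (names in angle brackets are placeholders):
#   1. dsymutil --arch=<arch> <binary>    -> <binary>.dSYM bundle
#   2. Dumper.ProcessFile(<binary>.dSYM)  -> <symbol_path>/<debug_file>/<guid>/<debug_file>.sym
#   3. CopyDebug tars the bundle          -> <symbol_path>/<debug_file>/<guid>/<binary>.dSYM.tar.bz2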
# Entry point if called as a standalone program
def main():
parser = OptionParser(usage="usage: %prog [options] <dump_syms binary> <symbol store path> <debug info files>")
parser.add_option("-c", "--copy",
action="store_true", dest="copy_debug", default=False,
help="Copy debug info files into the same directory structure as symbol files")
parser.add_option("-a", "--archs",
action="store", dest="archs",
help="Run dump_syms -a <arch> for each space separated cpu architecture in ARCHS (only on OS X)")
parser.add_option("-s", "--srcdir",
action="append", dest="srcdir", default=[],
help="Use SRCDIR to determine relative paths to source files")
parser.add_option("-v", "--vcs-info",
action="store_true", dest="vcsinfo",
help="Try to retrieve VCS info for each FILE listed in the output")
parser.add_option("-i", "--source-index",
action="store_true", dest="srcsrv", default=False,
help="Add source index information to debug files, making them suitable for use in a source server.")
parser.add_option("-x", "--exclude",
action="append", dest="exclude", default=[], metavar="PATTERN",
help="Skip processing files matching PATTERN.")
(options, args) = parser.parse_args()
# check that pdbstr.exe exists before attempting source indexing
if options.srcsrv:
pdbstr = os.environ.get("PDBSTR_PATH")
if not pdbstr or not os.path.exists(pdbstr):
print >> sys.stderr, "Invalid path to pdbstr.exe - please set/check PDBSTR_PATH.\n"
sys.exit(1)
if len(args) < 3:
parser.error("not enough arguments")
exit(1)
dumper = GetPlatformSpecificDumper(dump_syms=args[0],
symbol_path=args[1],
copy_debug=options.copy_debug,
archs=options.archs,
srcdirs=options.srcdir,
vcsinfo=options.vcsinfo,
srcsrv=options.srcsrv,
exclude=options.exclude)
for arg in args[2:]:
dumper.Process(arg)
# run main if run directly
if __name__ == "__main__":
main()
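# Example invocation, matching the OptionParser usage string above; the script
# name and all paths are hypothetical:
#   python <this-script> -c -v -s /builds/src \
#       /builds/tools/dump_syms ./symbols dist/bin/libxul.so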
|
|
import os
import sys
import unittest
import warnings
from types import ModuleType
from django.conf import ENVIRONMENT_VARIABLE, LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, modify_settings,
override_settings, signals,
)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertListEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.TEST, 'override')
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
@override_settings(TEST='override2')
def test_method_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
@classmethod
def setUpClass(cls):
super(ClassDecoratedTestCase, cls).setUpClass()
cls.foo = getattr(settings, 'TEST', 'BUG')
def test_override(self):
self.assertEqual(settings.TEST, 'override')
def test_setupclass_override(self):
"""Settings are overridden within setUpClass (#21281)."""
self.assertEqual(self.foo, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
super(ClassDecoratedTestCase, self).test_max_recursion_error()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
def test_override_settings_inheritance(self):
self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(SimpleTestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
override = override_settings(TEST='override')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
def test_class_decorator(self):
# SimpleTestCase can be decorated by override_settings, but not ut.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with self.assertRaisesMessage(Exception, "Only subclasses of Django SimpleTestCase"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertIsNone(self.testvalue)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
def test_settings_delete_wrapped(self):
with self.assertRaises(TypeError):
delattr(settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
previous_l10n = settings.USE_L10N
with self.settings(USE_I18N=False):
del settings.USE_I18N
with self.assertRaises(AttributeError):
getattr(settings, 'USE_I18N')
# Should also work for a non-overridden setting
del settings.USE_L10N
with self.assertRaises(AttributeError):
getattr(settings, 'USE_L10N')
self.assertNotIn('USE_I18N', dir(settings))
self.assertNotIn('USE_L10N', dir(settings))
self.assertEqual(settings.USE_I18N, previous_i18n)
self.assertEqual(settings.USE_L10N, previous_l10n)
def test_override_settings_nested(self):
"""
override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not the settings in effect when inner was instantiated
self.assertEqual('override', settings.TEST)
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST')
with self.assertRaises(AttributeError):
getattr(settings, 'TEST2')
class TestComplexSettingOverride(SimpleTestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with override_settings(TEST_WARN='override'):
self.assertEqual(settings.TEST_WARN, 'override')
self.assertEqual(len(w), 1)
# File extension may be .py, .pyc, etc. Compare only the basename.
self.assertEqual(os.path.splitext(w[0].filename)[0], os.path.splitext(__file__)[0])
self.assertEqual(str(w[0].message), 'Overriding setting TEST_WARN can lead to unexpected behavior.')
class SecureProxySslHeaderTest(SimpleTestCase):
@override_settings(SECURE_PROXY_SSL_HEADER=None)
def test_none(self):
req = HttpRequest()
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_without_xheader(self):
req = HttpRequest()
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_with_xheader_wrong(self):
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
self.assertIs(req.is_secure(), False)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_set_with_xheader_right(self):
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
self.assertIs(req.is_secure(), True)
@override_settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTOCOL', 'https'))
def test_xheader_preferred_to_underlying_request(self):
class ProxyRequest(HttpRequest):
def _get_scheme(self):
"""Proxy always connecting via HTTPS"""
return 'https'
# Client connects via HTTP.
req = ProxyRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'http'
self.assertIs(req.is_secure(), False)
class IsOverriddenTest(SimpleTestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY='foo')
self.assertTrue(s.is_overridden('SECRET_KEY'))
def test_module(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
sys.modules['fake_settings_module'] = settings_module
try:
s = Settings('fake_settings_module')
self.assertTrue(s.is_overridden('SECRET_KEY'))
self.assertFalse(s.is_overridden('ALLOWED_HOSTS'))
finally:
del sys.modules['fake_settings_module']
def test_override(self):
self.assertFalse(settings.is_overridden('ALLOWED_HOSTS'))
with override_settings(ALLOWED_HOSTS=[]):
self.assertTrue(settings.is_overridden('ALLOWED_HOSTS'))
def test_unevaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
expected = '<LazySettings [Unevaluated]>'
self.assertEqual(repr(lazy_settings), expected)
def test_evaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
module = os.environ.get(ENVIRONMENT_VARIABLE)
expected = '<LazySettings "%s">' % module
# Force evaluation of the lazy object.
lazy_settings.APPEND_SLASH
self.assertEqual(repr(lazy_settings), expected)
def test_usersettingsholder_repr(self):
lazy_settings = LazySettings()
lazy_settings.configure(APPEND_SLASH=False)
expected = '<UserSettingsHolder>'
self.assertEqual(repr(lazy_settings._wrapped), expected)
def test_settings_repr(self):
module = os.environ.get(ENVIRONMENT_VARIABLE)
lazy_settings = Settings(module)
expected = '<Settings "%s">' % module
self.assertEqual(repr(lazy_settings), expected)
class TestListSettings(unittest.TestCase):
"""
Make sure settings that should be lists or tuples throw
ImproperlyConfigured if they are set to a string instead of a list or tuple.
"""
list_or_tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
def test_tuple_settings(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
for setting in self.list_or_tuple_settings:
setattr(settings_module, setting, ('non_list_or_tuple_value'))
sys.modules['fake_settings_module'] = settings_module
try:
with self.assertRaises(ImproperlyConfigured):
Settings('fake_settings_module')
finally:
del sys.modules['fake_settings_module']
delattr(settings_module, setting)
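# Compact recap of the decorator behaviour exercised by FullyDecoratedTestCase
# above (setting names and expected values mirror that test; this is a sketch,
# not part of the original suite):
#
#   @modify_settings(ITEMS={'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e']})
#   @override_settings(ITEMS=['a', 'c', 'e'])
#   class Example(TestCase):
#       def test_items(self):
#           self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])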
|
|
# Copyright 2017 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_config import cfg
from manila import exception
from manila import quota
from manila import test
CONF = cfg.CONF
@ddt.ddt
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.project_id = 'fake_project_id'
self.user_id = 'fake_user_id'
self.share_type_id = 'fake_share_type_id'
self.ctxt = type(
'FakeContext', (object, ),
{'project_id': self.project_id, 'user_id': self.user_id,
'quota_class': 'fake_quota_class', 'elevated': mock.Mock()})
self.driver = quota.DbQuotaDriver()
self.reservations = ['foo', 'bar']
self.resources = {k: quota.BaseResource(k) for k in ('foo', 'bar')}
def test_get_by_class(self):
self.mock_object(quota.db, 'quota_class_get')
result = self.driver.get_by_class(
self.ctxt, 'fake_quota_class', 'fake_res')
self.assertEqual(quota.db.quota_class_get.return_value, result)
quota.db.quota_class_get.assert_called_once_with(
self.ctxt, 'fake_quota_class', 'fake_res')
def test_get_defaults(self):
self.mock_object(
quota.db, 'quota_class_get_default',
mock.Mock(return_value={'foo': 13}))
result = self.driver.get_defaults(self.ctxt, self.resources)
self.assertEqual(
{'foo': 13, 'bar': self.resources['bar'].default}, result)
quota.db.quota_class_get_default.assert_called_once_with(self.ctxt)
@ddt.data(True, False)
def test_get_class_quotas(self, defaults):
self.mock_object(
quota.db, 'quota_class_get_all_by_name',
mock.Mock(return_value={'foo': 13}))
result = self.driver.get_class_quotas(
self.ctxt, self.resources, 'fake_quota_class', defaults)
expected = {'foo': 13, 'bar': -1} if defaults else {'foo': 13}
self.assertEqual(expected, result)
quota.db.quota_class_get_all_by_name.assert_called_once_with(
self.ctxt, 'fake_quota_class')
@ddt.data(
('fake_project_id', {'foo': 20}, None, True, None, True),
('fake_different_project_id', {'bar': 40}, 'fake_quota_class', True,
{'foo': {'in_use': 3, 'reserved': 2}}, False),
('fake_project_id', {'bar': 30}, 'fake_quota_class', True, None, False)
)
@ddt.unpack
def test__process_quotas(self, project_id, quotas, quota_class, defaults,
usages, remains):
self.mock_object(quota.db, 'quota_get_all', mock.Mock(return_value=[]))
self.mock_object(quota.db, 'quota_class_get_all_by_name')
self.mock_object(
self.driver, 'get_defaults',
mock.Mock(return_value={'foo': 11, 'bar': 12}))
self.mock_object(
quota.db, 'quota_get_all',
mock.Mock(return_value=[]))
result = self.driver._process_quotas(
self.ctxt, self.resources, project_id, quotas, quota_class,
defaults, usages, remains)
expected = {key: {'limit': mock.ANY} for key in ('foo', 'bar')}
if usages:
for res in self.resources.values():
usage = usages.get(res.name, {})
expected[res.name].update(
in_use=usage.get('in_use', 0),
reserved=usage.get('reserved', 0))
if remains:
quota.db.quota_get_all.assert_called_once_with(
self.ctxt, project_id)
for res in self.resources.values():
expected[res.name]['remains'] = mock.ANY
else:
self.assertEqual(0, quota.db.quota_get_all.call_count)
self.assertEqual(expected, result)
if quota_class or project_id == self.ctxt.project_id:
quota.db.quota_class_get_all_by_name.assert_called_once_with(
self.ctxt, quota_class or self.ctxt.quota_class)
else:
self.assertEqual(
0, quota.db.quota_class_get_all_by_name.call_count)
@ddt.data(
('fake_quota_class', True, None, 'fake_remains'),
(None, False, 'fake_usages', False),
)
@ddt.unpack
def test_get_project_quotas(self, quota_class, defaults, usages, remains):
self.mock_object(quota.db, 'quota_get_all_by_project')
self.mock_object(quota.db, 'quota_usage_get_all_by_project')
self.mock_object(self.driver, '_process_quotas')
result = self.driver.get_project_quotas(
self.ctxt, self.resources, self.project_id,
quota_class, defaults, usages, remains)
self.assertEqual(
result, self.driver._process_quotas.return_value)
project_usages = None
if usages:
project_usages = (
quota.db.quota_usage_get_all_by_project.return_value)
self.driver._process_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id,
quota.db.quota_get_all_by_project.return_value,
quota_class, defaults=defaults, usages=project_usages,
remains=remains)
quota.db.quota_get_all_by_project.assert_called_once_with(
self.ctxt, self.project_id)
if usages:
quota.db.quota_usage_get_all_by_project.assert_called_once_with(
self.ctxt, self.project_id)
else:
self.assertEqual(
0, quota.db.quota_usage_get_all_by_project.call_count)
@ddt.data(
(None, True, True),
('fake_quota_class', False, True),
('fake_quota_class', True, False),
)
@ddt.unpack
def test_get_user_quotas(self, quota_class, defaults, usages):
project_quotas = {'fake_resource': 5}
self.mock_object(
quota.db, 'quota_get_all_by_project',
mock.Mock(return_value=project_quotas))
self.mock_object(
quota.db, 'quota_get_all_by_project_and_user',
mock.Mock(return_value={'fake_user_defined_resource': 14}))
mock_user_usages = self.mock_object(
quota.db, 'quota_usage_get_all_by_project_and_user')
self.mock_object(self.driver, '_process_quotas')
result = self.driver.get_user_quotas(
self.ctxt, self.resources, self.project_id, self.user_id,
quota_class, defaults, usages)
self.assertEqual(
self.driver._process_quotas.return_value, result)
quota.db.quota_get_all_by_project.assert_called_once_with(
self.ctxt, self.project_id)
quota.db.quota_get_all_by_project_and_user.assert_called_once_with(
self.ctxt, self.project_id, self.user_id)
if usages:
user_usages = mock_user_usages.return_value
mock_user_usages.assert_called_once_with(
self.ctxt, self.project_id, self.user_id)
else:
user_usages = None
self.assertEqual(0, mock_user_usages.call_count)
expected_user_quotas = {'fake_user_defined_resource': 14}
expected_user_quotas.update(project_quotas)
self.driver._process_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id, expected_user_quotas,
quota_class, defaults=defaults, usages=user_usages)
@ddt.data(
(None, True, True),
('fake_quota_class', False, True),
('fake_quota_class', True, False),
)
@ddt.unpack
def test_get_share_type_quotas(self, quota_class, defaults, usages):
project_quotas = {'fake_resource': 5}
self.mock_object(
quota.db, 'quota_get_all_by_project',
mock.Mock(return_value=project_quotas))
mock_st_quotas = self.mock_object(
quota.db, 'quota_get_all_by_project_and_share_type',
mock.Mock(return_value={'fake_st_defined_resource': 14}))
mock_st_usages = self.mock_object(
quota.db, 'quota_usage_get_all_by_project_and_share_type')
self.mock_object(self.driver, '_process_quotas')
result = self.driver.get_share_type_quotas(
self.ctxt, self.resources, self.project_id, self.share_type_id,
quota_class, defaults, usages)
self.assertEqual(
self.driver._process_quotas.return_value, result)
quota.db.quota_get_all_by_project.assert_called_once_with(
self.ctxt, self.project_id)
mock_st_quotas.assert_called_once_with(
self.ctxt, self.project_id, self.share_type_id)
if usages:
st_usages = mock_st_usages.return_value
mock_st_usages.assert_called_once_with(
self.ctxt, self.project_id, self.share_type_id)
else:
st_usages = None
self.assertEqual(0, mock_st_usages.call_count)
expected_st_quotas = {'fake_st_defined_resource': 14}
expected_st_quotas.update(project_quotas)
self.driver._process_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id, expected_st_quotas,
quota_class, defaults=defaults, usages=st_usages)
@ddt.data((None, None), (None, 'foo_st_id'), ('foo_user_id', None))
@ddt.unpack
def test_get_settable_quotas(self, user_id, st_id):
project_quotas = {'fake': {
'limit': 13, 'in_use': 7, 'reserved': 5, 'remains': 1,
}}
user_or_st_quotas = {'fake': {
'limit': 11, 'in_use': 5, 'reserved': 2,
}}
self.mock_object(
self.driver, 'get_project_quotas',
mock.Mock(return_value=project_quotas))
self.mock_object(
self.driver, 'get_user_quotas',
mock.Mock(return_value=user_or_st_quotas))
self.mock_object(
self.driver, 'get_share_type_quotas',
mock.Mock(return_value=user_or_st_quotas))
result = self.driver.get_settable_quotas(
self.ctxt, self.resources, self.project_id, user_id, st_id)
if user_id:
self.driver.get_user_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id, user_id)
else:
self.assertEqual(0, self.driver.get_user_quotas.call_count)
if st_id:
self.driver.get_share_type_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id, st_id)
else:
self.assertEqual(0, self.driver.get_share_type_quotas.call_count)
if user_id or st_id:
expected_settable_quotas = {'fake': {'maximum': 13, 'minimum': 7}}
else:
expected_settable_quotas = {'fake': {'maximum': -1, 'minimum': 12}}
self.driver.get_project_quotas.assert_called_once_with(
self.ctxt, self.resources, self.project_id, remains=True)
self.assertEqual(expected_settable_quotas, result)
@ddt.data((None, None), (None, 'fake_st_id'), ('fake_user_id', None))
@ddt.unpack
def test__get_quotas(self, user_id, st_id):
quotas = {'foo': {'limit': 5}, 'bar': {'limit': 13}}
self.mock_object(
self.driver, 'get_project_quotas', mock.Mock(return_value=quotas))
self.mock_object(
self.driver, 'get_user_quotas', mock.Mock(return_value=quotas))
self.mock_object(
self.driver, 'get_share_type_quotas',
mock.Mock(return_value=quotas))
result = self.driver._get_quotas(
self.ctxt, self.resources, ('foo', 'bar'), False,
self.project_id, user_id, st_id)
expected = {k: v['limit'] for k, v in quotas.items()}
self.assertEqual(expected, result)
sub_resources = {k: v for k, v in self.resources.items()}
if user_id:
self.driver.get_user_quotas.assert_called_once_with(
self.ctxt, sub_resources, self.project_id, user_id,
self.ctxt.quota_class, usages=False)
self.assertEqual(0, self.driver.get_project_quotas.call_count)
self.assertEqual(0, self.driver.get_share_type_quotas.call_count)
elif st_id:
self.driver.get_share_type_quotas.assert_called_once_with(
self.ctxt, sub_resources, self.project_id, st_id,
self.ctxt.quota_class, usages=False)
self.assertEqual(0, self.driver.get_project_quotas.call_count)
self.assertEqual(0, self.driver.get_user_quotas.call_count)
else:
self.driver.get_project_quotas.assert_called_once_with(
self.ctxt, sub_resources, self.project_id,
self.ctxt.quota_class, usages=False)
self.assertEqual(0, self.driver.get_user_quotas.call_count)
self.assertEqual(0, self.driver.get_share_type_quotas.call_count)
def test__get_quotas_unknown(self):
quotas = {'foo': {'limit': 5}, 'bar': {'limit': 13}}
self.mock_object(
self.driver, 'get_project_quotas', mock.Mock(return_value=quotas))
self.mock_object(
self.driver, 'get_user_quotas', mock.Mock(return_value=quotas))
self.mock_object(
self.driver, 'get_share_type_quotas',
mock.Mock(return_value=quotas))
self.assertRaises(
exception.QuotaResourceUnknown,
self.driver._get_quotas,
self.ctxt, self.resources, ['foo', 'bar'], True,
self.project_id, self.user_id, self.share_type_id)
self.assertEqual(0, self.driver.get_project_quotas.call_count)
self.assertEqual(0, self.driver.get_user_quotas.call_count)
self.assertEqual(0, self.driver.get_share_type_quotas.call_count)
@ddt.data(
{}, {'project_id': 'fake_project'}, {'user_id': 'fake_user'},
{'share_type_id': 'fake_share_type_id'},
)
def test_reserve(self, kwargs):
self.mock_object(quota.db, 'quota_reserve')
deltas = {'delta1': 1, 'delta2': 2}
quotas, user_quotas, st_quotas = 'fake1', 'fake2', 'fake3'
self.mock_object(
self.driver, '_get_quotas', mock.Mock(
side_effect=[quotas, user_quotas, st_quotas]))
result = self.driver.reserve(
self.ctxt, self.resources, deltas, None, **kwargs)
expected_kwargs = {
'project_id': self.ctxt.project_id,
'user_id': self.ctxt.user_id,
'share_type_id': None,
}
expected_kwargs.update(kwargs)
st_quotas = st_quotas if kwargs.get('share_type_id') else {}
self.assertEqual(quota.db.quota_reserve.return_value, result)
quota.db.quota_reserve.assert_called_once_with(
self.ctxt, self.resources, quotas, user_quotas, st_quotas,
deltas, mock.ANY, CONF.until_refresh, CONF.max_age,
**expected_kwargs)
self.assertEqual(
3 if kwargs.get('share_type_id') else 2,
self.driver._get_quotas.call_count)
def test_reserve_wrong_expire(self):
self.assertRaises(
exception.InvalidReservationExpiration,
self.driver.reserve,
self.ctxt, self.resources, 'fake_deltas', 'fake_expire')
def test_commit(self):
self.mock_object(quota.db, 'reservation_commit')
result = self.driver.commit(
self.ctxt, self.reservations, self.project_id, self.user_id,
self.share_type_id)
self.assertIsNone(result)
quota.db.reservation_commit.assert_called_once_with(
self.ctxt, self.reservations, project_id=self.project_id,
user_id=self.user_id, share_type_id=self.share_type_id)
@ddt.data(
(None, None),
('fake_project_id', 'fake_user_id'),
)
@ddt.unpack
def test_rollback(self, project_id, user_id):
self.mock_object(quota.db, 'reservation_rollback')
result = self.driver.rollback(
self.ctxt, self.reservations, project_id, user_id,
self.share_type_id)
expected_project_id = project_id or self.ctxt.project_id
expected_user_id = user_id or self.ctxt.user_id
self.assertIsNone(result)
quota.db.reservation_rollback.assert_called_once_with(
self.ctxt, self.reservations, project_id=expected_project_id,
user_id=expected_user_id, share_type_id=self.share_type_id)
def test_usage_reset(self):
self.mock_object(
quota.db, 'quota_usage_update',
mock.Mock(side_effect=[
'foo',
exception.QuotaUsageNotFound(project_id=self.project_id)]))
result = self.driver.usage_reset(self.ctxt, ['foo', 'bar'])
self.assertIsNone(result)
quota.db.quota_usage_update.assert_has_calls([
mock.call(
self.ctxt.elevated.return_value, self.ctxt.project_id,
self.ctxt.user_id, res, in_use=-1)
for res in ('foo', 'bar')
])
def test_destroy_all_by_project(self):
self.mock_object(quota.db, 'quota_destroy_all_by_project')
result = self.driver.destroy_all_by_project(self.ctxt, self.project_id)
self.assertIsNone(result)
quota.db.quota_destroy_all_by_project.assert_called_once_with(
self.ctxt, self.project_id)
def test_destroy_all_by_project_and_user(self):
self.mock_object(quota.db, 'quota_destroy_all_by_project_and_user')
result = self.driver.destroy_all_by_project_and_user(
self.ctxt, self.project_id, self.user_id)
self.assertIsNone(result)
quota.db.quota_destroy_all_by_project_and_user.assert_called_once_with(
self.ctxt, self.project_id, self.user_id)
def test_destroy_all_by_project_and_share_type(self):
mock_destroy_all = self.mock_object(
quota.db, 'quota_destroy_all_by_project_and_share_type')
result = self.driver.destroy_all_by_project_and_share_type(
self.ctxt, self.project_id, self.share_type_id)
self.assertIsNone(result)
mock_destroy_all.assert_called_once_with(
self.ctxt, self.project_id, self.share_type_id)
def test_expire(self):
self.mock_object(quota.db, 'reservation_expire')
result = self.driver.expire(self.ctxt)
self.assertIsNone(result)
quota.db.reservation_expire.assert_called_once_with(self.ctxt)
@ddt.ddt
class QuotaEngineTestCase(test.TestCase):
def setUp(self):
super(QuotaEngineTestCase, self).setUp()
self.ctxt = 'fake_context'
self.mock_class('manila.quota.DbQuotaDriver')
self.engine = quota.QuotaEngine()
self.driver = self.engine._driver
self.resources = [quota.BaseResource('foo'), quota.BaseResource('bar')]
self.project_id = 'fake_project_id'
self.user_id = 'fake_user_id'
self.share_type_id = 'fake_share_type_id'
self.quota_class = 'fake_quota_class'
def test_register_resource(self):
self.assertNotIn(self.resources[0].name, self.engine)
self.engine.register_resource(self.resources[0])
self.assertIn(self.resources[0].name, self.engine)
def test_register_resources(self):
for res in self.resources:
self.assertNotIn(res.name, self.engine)
self.engine.register_resources(self.resources)
for res in self.resources:
self.assertIn(res.name, self.engine)
def test_get_by_class(self):
result = self.engine.get_by_class(
self.ctxt, self.quota_class, 'fake_res')
self.assertEqual(result, self.driver.get_by_class.return_value)
self.driver.get_by_class.assert_called_once_with(
self.ctxt, self.quota_class, 'fake_res')
def test_get_defaults(self):
result = self.engine.get_defaults(self.ctxt)
self.assertEqual(result, self.driver.get_defaults.return_value)
self.driver.get_defaults.assert_called_once_with(
self.ctxt, self.engine._resources)
@ddt.data(None, True, False)
def test_get_class_quotas(self, defaults):
kwargs = {}
if defaults is not None:
kwargs['defaults'] = defaults
result = self.engine.get_class_quotas(
self.ctxt, self.quota_class, **kwargs)
self.assertEqual(result, self.driver.get_class_quotas.return_value)
kwargs['defaults'] = defaults if defaults is not None else True
self.driver.get_class_quotas.assert_called_once_with(
self.ctxt, self.engine._resources, self.quota_class, **kwargs)
@ddt.data(
{},
{'quota_class': 'foo'},
{'defaults': False},
{'usages': False},
)
def test_get_user_quotas(self, kwargs):
expected_kwargs = {
'quota_class': None,
'defaults': True,
'usages': True,
}
expected_kwargs.update(kwargs)
result = self.engine.get_user_quotas(
self.ctxt, self.project_id, self.user_id, **kwargs)
self.assertEqual(result, self.driver.get_user_quotas.return_value)
self.driver.get_user_quotas.assert_called_once_with(
self.ctxt, self.engine._resources,
self.project_id, self.user_id, **expected_kwargs)
@ddt.data(
{},
{'quota_class': 'foo'},
{'defaults': False},
{'usages': False},
)
def test_get_share_type_quotas(self, kwargs):
expected_kwargs = {
'quota_class': None,
'defaults': True,
'usages': True,
}
expected_kwargs.update(kwargs)
result = self.engine.get_share_type_quotas(
self.ctxt, self.project_id, self.share_type_id, **kwargs)
self.assertEqual(
result, self.driver.get_share_type_quotas.return_value)
self.driver.get_share_type_quotas.assert_called_once_with(
self.ctxt, self.engine._resources,
self.project_id, self.share_type_id, **expected_kwargs)
@ddt.data(
{},
{'quota_class': 'foo'},
{'defaults': False},
{'usages': False},
{'remains': True},
)
def test_get_project_quotas(self, kwargs):
expected_kwargs = {
'quota_class': None,
'defaults': True,
'usages': True,
'remains': False,
}
expected_kwargs.update(kwargs)
result = self.engine.get_project_quotas(
self.ctxt, self.project_id, **kwargs)
self.assertEqual(result, self.driver.get_project_quotas.return_value)
self.driver.get_project_quotas.assert_called_once_with(
self.ctxt, self.engine._resources,
self.project_id, **expected_kwargs)
@ddt.data(
{},
{'user_id': 'fake_user_id'},
{'share_type_id': 'fake_share_type_id'},
)
def test_get_settable_quotas(self, kwargs):
expected_kwargs = {'user_id': None, 'share_type_id': None}
expected_kwargs.update(kwargs)
result = self.engine.get_settable_quotas(
self.ctxt, self.project_id, **kwargs)
self.assertEqual(result, self.driver.get_settable_quotas.return_value)
self.driver.get_settable_quotas.assert_called_once_with(
self.ctxt, self.engine._resources,
self.project_id, **expected_kwargs)
def test_count(self):
mock_count = mock.Mock()
resource = quota.CountableResource('FakeCountableResource', mock_count)
self.engine.register_resource(resource)
result = self.engine.count(self.ctxt, resource.name)
self.assertEqual(mock_count.return_value, result)
def test_count_unknown_resource(self):
self.assertRaises(
exception.QuotaResourceUnknown,
self.engine.count,
self.ctxt, 'nonexistent_resource', 'foo_arg', foo='kwarg')
def test_reserve(self):
result = self.engine.reserve(
self.ctxt, 'fake_expire', self.project_id, self.user_id,
self.share_type_id, delta1=1, delta2=2)
self.assertEqual(self.driver.reserve.return_value, result)
self.driver.reserve.assert_called_once_with(
self.ctxt, self.engine._resources, {'delta1': 1, 'delta2': 2},
expire='fake_expire', project_id=self.project_id,
user_id=self.user_id, share_type_id=self.share_type_id)
@ddt.data(Exception('FakeException'), [None])
def test_commit(self, side_effect):
fake_reservations = ['foo', 'bar']
self.driver.commit.side_effect = side_effect
self.mock_object(quota.LOG, 'exception')
result = self.engine.commit(
self.ctxt, fake_reservations, 'fake_project_id',
'fake_user_id', 'fake_share_type_id')
self.assertIsNone(result)
self.driver.commit.assert_called_once_with(
self.ctxt, fake_reservations, project_id='fake_project_id',
user_id='fake_user_id', share_type_id='fake_share_type_id')
if side_effect == [None]:
self.assertEqual(0, quota.LOG.exception.call_count)
else:
quota.LOG.exception.assert_called_once_with(
mock.ANY, fake_reservations)
@ddt.data(Exception('FakeException'), [None])
def test_rollback(self, side_effect):
fake_reservations = ['foo', 'bar']
self.driver.rollback.side_effect = side_effect
self.mock_object(quota.LOG, 'exception')
result = self.engine.rollback(
self.ctxt, fake_reservations, 'fake_project_id',
'fake_user_id', 'fake_share_type_id')
self.assertIsNone(result)
self.driver.rollback.assert_called_once_with(
self.ctxt, fake_reservations, project_id='fake_project_id',
user_id='fake_user_id', share_type_id='fake_share_type_id')
if side_effect == [None]:
self.assertEqual(0, quota.LOG.exception.call_count)
else:
quota.LOG.exception.assert_called_once_with(
mock.ANY, fake_reservations)
def test_usage_reset(self):
result = self.engine.usage_reset(self.ctxt, 'fake_resources')
self.assertIsNone(result)
self.driver.usage_reset.assert_called_once_with(
self.ctxt, 'fake_resources')
def test_destroy_all_by_project_and_user(self):
result = self.engine.destroy_all_by_project_and_user(
self.ctxt, 'fake_project_id', 'fake_user_id')
self.assertIsNone(result)
self.driver.destroy_all_by_project_and_user.assert_called_once_with(
self.ctxt, 'fake_project_id', 'fake_user_id')
def test_destroy_all_by_project_and_share_type(self):
result = self.engine.destroy_all_by_project_and_share_type(
self.ctxt, 'fake_project_id', 'fake_st_id')
self.assertIsNone(result)
mock_destroy_all_by_project_and_share_type = (
self.driver.destroy_all_by_project_and_share_type)
mock_destroy_all_by_project_and_share_type.assert_called_once_with(
self.ctxt, 'fake_project_id', 'fake_st_id')
def test_destroy_all_by_project(self):
result = self.engine.destroy_all_by_project(
self.ctxt, 'fake_project_id')
self.assertIsNone(result)
self.driver.destroy_all_by_project.assert_called_once_with(
self.ctxt, 'fake_project_id')
def test_expire(self):
result = self.engine.expire(self.ctxt)
self.assertIsNone(result)
self.driver.expire.assert_called_once_with(self.ctxt)
def test_resources(self):
self.engine.register_resources(self.resources)
self.assertEqual(['bar', 'foo'], self.engine.resources)
def test_current_common_resources(self):
self.assertEqual(
['gigabytes', 'share_group_snapshots', 'share_groups',
'share_networks', 'shares', 'snapshot_gigabytes', 'snapshots'],
quota.QUOTAS.resources)
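# Sketch of the reserve/commit/rollback flow the tests above exercise. Argument
# values are illustrative; 'shares' is one of the registered resource names
# listed in test_current_common_resources:
#
#   reservations = quota.QUOTAS.reserve(
#       ctxt, project_id='p1', user_id='u1', share_type_id='st1', shares=1)
#   try:
#       ...create the share...
#       quota.QUOTAS.commit(ctxt, reservations, project_id='p1',
#                           user_id='u1', share_type_id='st1')
#   except Exception:
#       quota.QUOTAS.rollback(ctxt, reservations, project_id='p1',
#                             user_id='u1', share_type_id='st1')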
|
|
"""
sentry.plugins.base.v2
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ('Plugin2',)
import logging
from django.http import HttpResponseRedirect
from threading import local
from sentry.plugins.base.response import Response
from sentry.plugins.base.configuration import default_plugin_config
class PluginMount(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if IPlugin2 in bases:
return new_cls
if new_cls.title is None:
new_cls.title = new_cls.__name__
if not new_cls.slug:
new_cls.slug = new_cls.title.replace(' ', '-').lower()
if not hasattr(new_cls, 'logger'):
new_cls.logger = logging.getLogger('sentry.plugins.%s' % (new_cls.slug,))
return new_cls
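# What the metaclass above derives for a concrete subclass, sketched with a
# made-up name: a plugin declaring only ``title = 'My Plugin'`` ends up with
# ``slug = 'my-plugin'`` and ``logger = logging.getLogger('sentry.plugins.my-plugin')``.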
class IPlugin2(local):
"""
Plugin interface. Should not be inherited from directly.
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
>>> from sentry.plugins import Plugin2
>>>
>>> class MyPlugin(Plugin2):
>>> def get_title(self):
>>> return 'My Plugin'
As a general rule all inherited methods should allow ``**kwargs`` to ensure
ease of future compatibility.
"""
# Generic plugin information
title = None
slug = None
description = None
version = None
author = None
author_url = None
resource_links = ()
# Configuration specifics
conf_key = None
conf_title = None
project_conf_form = None
project_conf_template = 'sentry/plugins/project_configuration.html'
# Global enabled state
enabled = True
can_disable = True
# Should this plugin be enabled by default for projects?
project_default_enabled = False
def _get_option_key(self, key):
return '%s:%s' % (self.get_conf_key(), key)
def is_enabled(self, project=None):
"""
Returns a boolean representing if this plugin is enabled.
If ``project`` is passed, it will limit the scope to that project.
>>> plugin.is_enabled()
"""
if not self.enabled:
return False
if not self.can_disable:
return True
if not self.can_enable_for_projects():
return True
if project:
project_enabled = self.get_option('enabled', project)
if project_enabled is not None:
return project_enabled
else:
return self.project_default_enabled
return True
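# Precedence sketch for is_enabled: a globally disabled plugin is always off; a
# plugin that cannot be disabled, or cannot be scoped per project, is always on;
# otherwise, when a project is given, the per-project 'enabled' option wins and
# falls back to project_default_enabled; without a project the plugin is on.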
def reset_options(self, project=None, user=None):
from .helpers import reset_options
return reset_options(self.get_conf_key(), project, user)
def get_option(self, key, project=None, user=None):
"""
Returns the value of an option in your plugin's keyspace, or ``None`` if
one is not present.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> value = plugin.get_option('my_option')
"""
from sentry.plugins.helpers import get_option
return get_option(self._get_option_key(key), project, user)
def set_option(self, key, value, project=None, user=None):
"""
Updates the value of an option in your plugin's keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.set_option('my_option', 'http://example.com')
"""
from sentry.plugins.helpers import set_option
return set_option(self._get_option_key(key), value, project, user)
def unset_option(self, key, project=None, user=None):
"""
Removes an option in your plugin's keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.unset_option('my_option')
"""
from sentry.plugins.helpers import unset_option
return unset_option(self._get_option_key(key), project, user)
def enable(self, project=None, user=None):
"""Enable the plugin."""
self.set_option('enabled', True, project, user)
def disable(self, project=None, user=None):
"""Disable the plugin."""
self.set_option('enabled', False, project, user)
def get_conf_key(self):
"""
Returns a string representing the configuration keyspace prefix for this plugin.
"""
if not self.conf_key:
self.conf_key = self.get_conf_title().lower().replace(' ', '_')
return self.conf_key
def get_conf_title(self):
"""
Returns a string representing the title to be shown on the configuration page.
"""
return self.conf_title or self.get_title()
def has_project_conf(self):
return self.project_conf_form is not None
def can_enable_for_projects(self):
"""
Returns a boolean describing whether this plugin can be enabled on a per project basis
"""
return True
# Response methods
def redirect(self, url):
"""
Returns a redirect response type.
"""
return HttpResponseRedirect(url)
def render(self, template, context=None):
"""
Given a template name, and an optional context (dictionary), returns a
ready-to-render response.
Default context includes the plugin instance.
>>> plugin.render('template.html', {'hello': 'world'})
"""
if context is None:
context = {}
context['plugin'] = self
return Response(template, context)
# The following methods are specific to web requests
def get_title(self):
"""
Returns the general title for this plugin.
>>> plugin.get_title()
"""
return self.title
def get_description(self):
"""
Returns the description for this plugin. This is shown on the plugin configuration
page.
>>> plugin.get_description()
"""
return self.description
def get_resource_links(self):
"""
Returns a list of tuples pointing to various resources for this plugin.
>>> def get_resource_links(self):
>>> return [
>>> ('Documentation', 'https://docs.getsentry.com'),
>>> ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
>>> ('Source', 'https://github.com/getsentry/sentry'),
>>> ]
"""
return self.resource_links
def get_rules(self, **kwargs):
"""
Return a list of Rule classes to add to the registry.
>>> def get_rules(self, **kwargs):
>>> return [MyCustomRule]
"""
return []
def get_actions(self, request, group, **kwargs):
"""
Return a list of available actions to append to this aggregate.
Examples of built-in actions are "Mute Event" and "Remove Data".
An action is a tuple containing two elements:
('Action Label', '/uri/to/action/')
>>> def get_actions(self, request, group, **kwargs):
>>> return [('Google', 'http://google.com')]
"""
return []
def get_annotations(self, group, **kwargs):
"""
Return a list of annotations to append to this aggregate.
An example of an annotation might be "Needs Fix" or "Task #123".
The properties of each annotation must match the constructor for
:class:`sentry.plugins.Annotation`
>>> def get_annotations(self, group, **kwargs):
>>> task_id = GroupMeta.objects.get_value(group, 'myplugin:tid')
>>> if not task_id:
>>> return []
>>> return [{'label': '#%s' % (task_id,)}]
"""
return []
def get_notifiers(self, **kwargs):
"""
Return a list of notifiers to append to the registry.
Notifiers must extend :class:`sentry.plugins.Notifier`.
>>> def get_notifiers(self, **kwargs):
>>> return [MyNotifier]
"""
return []
def get_tags(self, event, **kwargs):
"""
Return a list of additional tags to add to this instance.
A tag is a tuple containing two elements:
('tag-key', 'tag-value')
>>> def get_tags(self, event, **kwargs):
>>> return [('tag-key', 'tag-value')]
"""
return []
def get_event_preprocessors(self, **kwargs):
"""
Return a list of preprocessors to apply to the given event.
A preprocessor is a function that takes the normalized data blob as an
input and returns modified data as output. If no changes to the data are
made it is safe to return ``None``.
>>> def get_event_preprocessors(self, **kwargs):
>>> return [lambda x: x]
"""
return []
def get_feature_hooks(self, **kwargs):
"""
Return a list of callables to check for feature status.
>>> from sentry.features import FeatureHandler
>>>
>>> class NoRegistration(FeatureHandler):
>>> features = set(['auth:register'])
>>>
>>> def has(self, feature, actor):
>>> return False
>>> def get_feature_hooks(self, **kwargs):
>>> return [NoRegistration()]
"""
return []
def get_release_hook(self, **kwargs):
"""
Return an implementation of ``ReleaseHook``.
>>> from sentry.plugins import ReleaseHook
>>>
>>> class MyReleaseHook(ReleaseHook):
>>> def handle(self, request):
>>> self.finish_release(version=request.POST['version'])
>>> def get_release_hook(self, **kwargs):
>>> return MyReleaseHook
"""
return []
def configure(self, project, request):
"""Configures the plugin."""
return default_plugin_config(self, project, request)
class Plugin2(IPlugin2):
"""
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
"""
__version__ = 2
__metaclass__ = PluginMount
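# Minimal usage sketch, drawing on the docstring examples above; class, key and
# value names are illustrative only:
#
#   class MyPlugin(Plugin2):
#       title = 'My Plugin'
#
#       def get_tags(self, event, **kwargs):
#           return [('my-key', 'my-value')]
#
#   # per-project option handling goes through the plugin's keyspace:
#   #   plugin.set_option('enabled', True, project)
#   #   plugin.get_option('enabled', project)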
|
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.download import *
from webkitpy.tool.mocktool import MockCheckout, MockOptions, MockTool
class AbstractRolloutPrepCommandTest(unittest.TestCase):
def test_commit_info(self):
command = AbstractRolloutPrepCommand()
tool = MockTool()
command.bind_to_tool(tool)
output = OutputCapture()
expected_stderr = "Preparing rollout for bug 42.\n"
commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
self.assertTrue(commit_info)
mock_commit_info = Mock()
mock_commit_info.bug_id = lambda: None
tool._checkout.commit_info_for_revision = lambda revision: mock_commit_info
expected_stderr = "Unable to parse bug number from diff.\n"
commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
self.assertEqual(commit_info, mock_commit_info)
def test_prepare_state(self):
command = AbstractRolloutPrepCommand()
mock_commit_info = MockCheckout().commit_info_for_revision(123)
command._commit_info = lambda revision: mock_commit_info
state = command._prepare_state(None, ["124 123 125", "Reason"], None)
self.assertEqual(123, state["revision"])
self.assertEqual([123, 124, 125], state["revision_list"])
self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 r122 123", "Reason"], tool=None)
self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 foo 123", "Reason"], tool=None)
class DownloadCommandsTest(CommandsTest):
def _default_options(self):
options = MockOptions()
options.build = True
options.build_style = True
options.check_builders = True
options.check_style = True
options.clean = True
options.close_bug = True
options.force_clean = False
options.force_patch = True
options.non_interactive = False
options.parent_command = 'MOCK parent command'
options.quiet = False
options.test = True
options.update = True
return options
def test_build(self):
expected_stderr = "Updating working directory\nBuilding WebKit\n"
self.assert_execute_outputs(Build(), [], options=self._default_options(), expected_stderr=expected_stderr)
def test_build_and_test(self):
expected_stderr = "Updating working directory\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n"
self.assert_execute_outputs(BuildAndTest(), [], options=self._default_options(), expected_stderr=expected_stderr)
def test_apply_attachment(self):
options = self._default_options()
options.update = True
options.local_commit = True
expected_stderr = "Updating working directory\nProcessing 1 patch from 1 bug.\nProcessing patch 197 from bug 42.\n"
self.assert_execute_outputs(ApplyAttachment(), [197], options=options, expected_stderr=expected_stderr)
def test_apply_patches(self):
options = self._default_options()
options.update = True
options.local_commit = True
expected_stderr = "Updating working directory\n2 reviewed patches found on bug 42.\nProcessing 2 patches from 1 bug.\nProcessing patch 197 from bug 42.\nProcessing patch 128 from bug 42.\n"
self.assert_execute_outputs(ApplyFromBug(), [42], options=options, expected_stderr=expected_stderr)
def test_land_diff(self):
expected_stderr = "Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 42\n"
mock_tool = MockTool()
mock_tool.scm().create_patch = Mock(return_value="Patch1\nMockPatch\n")
mock_tool.checkout().modified_changelogs = Mock(return_value=[])
self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)
        # Make sure we're not making expensive calls too often.
self.assertEqual(mock_tool.scm().create_patch.call_count, 1)
self.assertEqual(mock_tool.checkout().modified_changelogs.call_count, 1)
def test_land_red_builders(self):
expected_stderr = '\nWARNING: Builders ["Builder2"] are red, please watch your commit carefully.\nSee http://dummy_buildbot_host/console?category=core\n\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 42\n'
mock_tool = MockTool()
mock_tool.buildbot.light_tree_on_fire()
self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)
def test_check_style(self):
expected_stderr = """Processing 1 patch from 1 bug.
Updating working directory
MOCK run_and_throw_if_fail: ['mock-update-webkit']
Processing patch 197 from bug 42.
MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--git-commit', 'MOCK git commit', '--diff-files', 'MockFile1']
"""
self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr, tool=MockTool(log_executive=True))
def test_build_attachment(self):
expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nBuilding WebKit\n"
self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr)
def test_land_attachment(self):
# FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags.
expected_stderr = """Processing 1 patch from 1 bug.
Updating working directory
Processing patch 197 from bug 42.
Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
"""
self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr)
def test_land_patches(self):
# FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags.
expected_stderr = """2 reviewed patches found on bug 42.
Processing 2 patches from 1 bug.
Updating working directory
Processing patch 197 from bug 42.
Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
Updating working directory
Processing patch 128 from bug 42.
Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
"""
self.assert_execute_outputs(LandFromBug(), [42], options=self._default_options(), expected_stderr=expected_stderr)
def test_prepare_rollout(self):
expected_stderr = "Preparing rollout for bug 42.\nUpdating working directory\n"
self.assert_execute_outputs(PrepareRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
def test_create_rollout(self):
expected_stderr = """Preparing rollout for bug 42.
Updating working directory
MOCK create_bug
bug_title: REGRESSION(r852): Reason
bug_description: http://trac.webkit.org/changeset/852 broke the build:
Reason
component: MOCK component
cc: MOCK cc
blocked: 42
MOCK add_patch_to_bug: bug_id=78, description=ROLLOUT of r852, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False
-- Begin comment --
Any committer can land this patch automatically by marking it commit-queue+. The commit-queue will build and test the patch before landing to ensure that the rollout will be successful. This process takes approximately 15 minutes.
If you would like to land the rollout faster, you can use the following command:
webkit-patch land-attachment ATTACHMENT_ID --ignore-builders
where ATTACHMENT_ID is the ID of this attachment.
-- End comment --
"""
self.assert_execute_outputs(CreateRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
self.assert_execute_outputs(CreateRollout(), ["855 852 854", "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
def test_rollout(self):
expected_stderr = """Preparing rollout for bug 42.
Updating working directory
MOCK: user.open_url: file://...
Was that diff correct?
Building WebKit
Committed r49824: <http://trac.webkit.org/changeset/49824>
"""
self.assert_execute_outputs(Rollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
|
|
# Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from infi.execute import execute
from six import StringIO, string_types
import re
import os
import subprocess
import sys
from . import branch
from . import tag
from . import commit
from . import config
from .files import ModifiedFile
from . import ref
from . import ref_container
from . import remotes
from .utils import quote_for_shell
from .utils import CommandString as CMD
#exceptions
from .exceptions import CannotFindRepository
from .exceptions import GitException
from .exceptions import GitCommandFailedException
from .exceptions import MergeConflict
from .exceptions import NonexistentRefException
BRANCH_ALIAS_MARKER = ' -> '
class Repository(ref_container.RefContainer):
############################# internal methods #############################
_loggingEnabled = False
def _getWorkingDirectory(self):
return '.'
def _logGitCommand(self, command, cwd):
if self._loggingEnabled:
sys.stderr.write(">>%s\n" % (command, ))
def enableLogging(self):
self._loggingEnabled = True
def disableLogging(self):
self._loggingEnabled = False
def _executeGitCommand(self, command, cwd=None):
if cwd is None:
cwd = self._getWorkingDirectory()
command = str(command)
self._logGitCommand(command, cwd)
returned = execute(command, shell=True, cwd=cwd)
returned.wait()
        # API compatibility
returned.returncode = returned.get_returncode()
returned.stdout = StringIO(returned.get_stdout().decode("UTF-8"))
returned.stderr = StringIO(returned.get_stderr().decode("UTF-8"))
return returned
def _executeGitCommandAssertSuccess(self, command, **kwargs):
returned = self._executeGitCommand(command, **kwargs)
assert returned.returncode is not None
if returned.returncode != 0:
raise GitCommandFailedException(kwargs.get('cwd', self._getWorkingDirectory()), command, returned)
return returned
def _getOutputAssertSuccess(self, command, **kwargs):
return self._executeGitCommandAssertSuccess(command, **kwargs).stdout.read()
def _getMergeBase(self, a, b):
raise NotImplementedError()
def getMergeBase(self, a, b):
repo = self
if isinstance(b, commit.Commit) and isinstance(b.repo, LocalRepository):
repo = b.repo
elif isinstance(a, commit.Commit) and isinstance(a.repo, LocalRepository):
repo = a.repo
return repo._getMergeBase(a, b)
############################## remote repositories #############################
class RemoteRepository(Repository):
def __init__(self, url):
super(RemoteRepository, self).__init__()
self.url = url
def _getRefs(self, prefix=''):
output = self._executeGitCommandAssertSuccess("git ls-remote %s" % (self.url,))
for output_line in output.stdout:
commit, refname = output_line.split()
if refname.startswith(prefix):
yield refname[len(prefix):], commit.strip()
def _getRefsAsClass(self, prefix, cls):
return [cls(self, ref) for ref, _ in self._getRefs(prefix)]
def _getCommitByRefName(self, refname):
sha_by_ref = dict(self._getRefs())
for prefix in 'refs/tags/', 'refs/heads/':
sha = sha_by_ref.get(prefix + refname, None)
if sha is not None:
return commit.Commit(self, sha)
raise NonexistentRefException("Cannot find ref name %r in %s" % (refname, self))
def getBranches(self):
return self._getRefsAsClass('refs/heads/', branch.RemoteBranch)
def getTags(self):
return self._getRefsAsClass('refs/tags/', tag.RemoteTag)
############################## local repositories ##############################
class LocalRepository(Repository):
def __init__(self, path):
super(LocalRepository, self).__init__()
self.path = path
self.config = config.GitConfiguration(self)
self._version = None
def __repr__(self):
return "<Git Repository at %s>" % (self.path,)
def _getWorkingDirectory(self):
return self.path
def _getCommitByHash(self, sha):
return commit.Commit(self, sha)
def _getCommitByRefName(self, name):
return commit.Commit(self, self._getOutputAssertSuccess("git rev-parse %s" % name).strip())
def _getCommitByPartialHash(self, sha):
return self._getCommitByRefName(sha)
def getGitVersion(self):
if self._version is None:
version_output = self._getOutputAssertSuccess("git version")
version_match = re.match(r"git\s+version\s+(\S+)(?:\s+\(.+\)\s*)?$", version_output, re.I)
if version_match is None:
raise GitException("Cannot extract git version (unfamiliar output format %r?)" % version_output)
self._version = version_match.group(1)
return self._version
########################### Initializing a repository ##########################
def init(self, bare=False):
if not os.path.exists(self.path):
os.mkdir(self.path)
if not os.path.isdir(self.path):
raise GitException("Cannot create repository in %s - "
"not a directory" % self.path)
self._executeGitCommandAssertSuccess("git init %s" % ("--bare" if bare else ""))
def _asURL(self, repo):
if isinstance(repo, LocalRepository):
repo = repo.path
elif isinstance(repo, RemoteRepository):
repo = repo.url
elif not isinstance(repo, string_types):
raise TypeError("Cannot clone from %r" % (repo,))
return repo
def clone(self, repo):
self._executeGitCommandAssertSuccess("git clone %s %s" % (self._asURL(repo), self.path), cwd=".")
########################### Querying repository refs ###########################
def getBranches(self):
returned = []
for git_branch_line in self._executeGitCommandAssertSuccess("git branch").stdout:
if git_branch_line.startswith("*"):
git_branch_line = git_branch_line[1:]
git_branch_line = git_branch_line.strip()
if BRANCH_ALIAS_MARKER in git_branch_line:
alias_name, aliased = git_branch_line.split(BRANCH_ALIAS_MARKER)
returned.append(branch.LocalBranchAlias(self, alias_name, aliased))
else:
returned.append(branch.LocalBranch(self, git_branch_line))
return returned
def getTags(self):
returned = []
for git_tag_line in self._executeGitCommandAssertSuccess("git tag").stdout:
returned.append(tag.LocalTag(self, git_tag_line.strip()))
return returned
def _getCommits(self, specs, includeMerges):
command = "git log --pretty=format:%%H %s" % specs
if not includeMerges:
command += " --no-merges"
for c in self._executeGitCommandAssertSuccess(command).stdout:
yield commit.Commit(self, c.strip())
def getCommits(self, start=None, end="HEAD", includeMerges=True):
spec = self._normalizeRefName(start or "")
spec += ".."
spec += self._normalizeRefName(end)
return list(self._getCommits(spec, includeMerges=includeMerges))
def getCurrentBranch(self):
#todo: improve this method of obtaining current branch
for branch_name in self._executeGitCommandAssertSuccess("git branch").stdout:
branch_name = branch_name.strip()
if not branch_name.startswith("*"):
continue
branch_name = branch_name[1:].strip()
if branch_name == '(no branch)':
return None
return self.getBranchByName(branch_name)
def getRemotes(self):
config_dict = self.config.getDict()
returned = []
for line in self._getOutputAssertSuccess("git remote show -n").splitlines():
line = line.strip()
returned.append(remotes.Remote(self, line, config_dict.get('remote.%s.url' % line.strip())))
return returned
def getRemoteByName(self, name):
return self._getByName(self.getRemotes, name)
def _getMergeBase(self, a, b):
if isinstance(a, ref.Ref):
a = a.getHead()
if isinstance(b, ref.Ref):
b = b.getHead()
returned = self._executeGitCommand("git merge-base %s %s" % (a, b))
if returned.returncode == 0:
return commit.Commit(self, returned.stdout.read().strip())
# make sure this is not a misc. error with git
unused = self.getHead()
return None
################################ Querying Status ###############################
def containsCommit(self, commit):
try:
self._executeGitCommandAssertSuccess("git log -1 %s" % (commit,))
except GitException:
return False
return True
def getHead(self):
return self._getCommitByRefName("HEAD")
def _getFiles(self, *flags):
flags = ["--exclude-standard"] + list(flags)
return [f.strip()
for f in self._getOutputAssertSuccess("git ls-files %s" % (" ".join(flags))).splitlines()]
def _getRawDiff(self, *flags, **options):
match_statuses = options.pop('fileStatuses', None)
if match_statuses is not None and not isinstance(match_statuses, Sequence):
raise ValueError("matchedStatuses must be a sequence")
if options:
raise TypeError("Unknown arguments specified: %s" % ", ".join(options))
flags = " ".join(str(f) for f in flags)
modified_files = []
for line in self._getOutputAssertSuccess("git diff --raw %s" % flags).splitlines():
file_status = line.split()[-2]
file_name = line.split()[-1]
if match_statuses is None or file_status in match_statuses:
modified_files.append(ModifiedFile(file_name))
return modified_files
def getStagedFiles(self):
if self.isInitialized():
return self._getRawDiff('--cached')
return self._getFiles()
def getUnchangedFiles(self):
return self._getFiles()
def getChangedFiles(self):
return self._getRawDiff()
def getDeletedFiles(self):
return self._getRawDiff(fileStatuses=['D'])
def getUntrackedFiles(self):
return self._getFiles("--others")
def isInitialized(self):
try:
self.getHead()
return True
except GitException:
return False
def isValid(self):
return os.path.isdir(os.path.join(self.path, ".git")) or \
(os.path.isfile(os.path.join(self.path, "HEAD")) and os.path.isdir(os.path.join(self.path, "objects")))
def isWorkingDirectoryClean(self):
return not (self.getUntrackedFiles() or self.getChangedFiles() or self.getStagedFiles())
def __contains__(self, thing):
if isinstance(thing, string_types) or isinstance(thing, commit.Commit):
return self.containsCommit(thing)
raise NotImplementedError()
################################ Staging content ###############################
def add(self, path):
self._executeGitCommandAssertSuccess("git add %s" % quote_for_shell(path))
def delete(self, path, recursive=False, force=False):
flags = ""
if recursive:
flags += "-r "
if force:
flags += "-f "
self._executeGitCommandAssertSuccess("git rm %s%s" % (flags, quote_for_shell(path)))
def addAll(self):
return self.add('.')
################################## Committing ##################################
def _normalizeRefName(self, thing):
if isinstance(thing, ref.Ref):
thing = thing.getNormalizedName()
return str(thing)
def _deduceNewCommitFromCommitOutput(self, output):
for pattern in [
# new-style commit pattern
r"^\[\S+\s+(?:\(root-commit\)\s+)?(\S+)\]",
]:
match = re.search(pattern, output)
if match:
return commit.Commit(self, match.group(1))
return None
def commit(self, message, allowEmpty=False, commitAll=False):
args = ''
if commitAll:
args = args + '--all'
command = "git commit %s -m %s" % ( args, quote_for_shell(message) )
if allowEmpty:
command += " --allow-empty"
output = self._getOutputAssertSuccess(command)
return self._deduceNewCommitFromCommitOutput(output)
################################ Changing state ################################
def _createBranchOrTag(self, objname, name, startingPoint, returned_class):
command = "git %s %s " % (objname, name)
if startingPoint is not None:
command += self._normalizeRefName(startingPoint)
self._executeGitCommandAssertSuccess(command)
return returned_class(self, name)
def createBranch(self, name, startingPoint=None):
return self._createBranchOrTag('branch', name, startingPoint, branch.LocalBranch)
def createTag(self, name, startingPoint=None):
return self._createBranchOrTag('tag', name, startingPoint, tag.LocalTag)
def checkout(self, thing=None, targetBranch=None, files=()):
if thing is None:
thing = ""
command = "git checkout %s" % (self._normalizeRefName(thing),)
if targetBranch is not None:
command += " -b %s" % (targetBranch,)
if files:
command += " -- %s" % " ".join(files)
self._executeGitCommandAssertSuccess(command)
def mergeMultiple(self, srcs, allowFastForward=True, log=False, message=None):
try:
self._executeGitCommandAssertSuccess(CMD("git merge",
" ".join(self._normalizeRefName(src) for src in srcs),
"--no-ff" if not allowFastForward else None,
"--log" if log else None,
("-m \"%s\"" % message) if message is not None else None))
except GitCommandFailedException as e:
# git-merge tends to ignore the stderr rule...
output = e.stdout + e.stderr
if 'conflict' in output.lower():
raise MergeConflict()
raise
def merge(self, src, *args, **kwargs):
return self.mergeMultiple([src], *args, **kwargs)
def _reset(self, flag, thing):
command = "git reset %s %s" % (
flag,
self._normalizeRefName(thing))
self._executeGitCommandAssertSuccess(command)
def resetSoft(self, thing="HEAD"):
return self._reset("--soft", thing)
def resetHard(self, thing="HEAD"):
return self._reset("--hard", thing)
def resetMixed(self, thing="HEAD"):
return self._reset("--mixed", thing)
def _clean(self, flags):
self._executeGitCommandAssertSuccess("git clean -q " + flags)
def cleanIgnoredFiles(self):
"""Cleans files that match the patterns in .gitignore"""
return self._clean("-f -X")
def cleanUntrackedFiles(self):
return self._clean("-f -d")
################################# collaboration ################################
def addRemote(self, name, url):
self._executeGitCommandAssertSuccess("git remote add %s %s" % (name, url))
return remotes.Remote(self, name, url)
def fetch(self, repo=None):
command = "git fetch"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def pull(self, repo=None):
command = "git pull"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def _getRefspec(self, fromBranch=None, toBranch=None, force=False):
returned = ""
if fromBranch is not None:
returned += self._normalizeRefName(fromBranch)
if returned or toBranch is not None:
returned += ":"
if toBranch is not None:
if isinstance(toBranch, branch.RegisteredRemoteBranch):
toBranch = toBranch.name
returned += self._normalizeRefName(toBranch)
if returned and force:
returned = "+%s" % returned
return returned
def push(self, remote=None, fromBranch=None, toBranch=None, force=False):
command = "git push"
#build push arguments
refspec = self._getRefspec(toBranch=toBranch, fromBranch=fromBranch, force=force)
if refspec and not remote:
remote = "origin"
if isinstance(remote, remotes.Remote):
remote = remote.name
elif isinstance(remote, RemoteRepository):
remote = remote.url
elif isinstance(remote, LocalRepository):
remote = remote.path
if remote is not None and not isinstance(remote, string_types):
raise TypeError("Invalid type for 'remote' parameter: %s" % (type(remote),))
command = "git push %s %s" % (remote if remote is not None else "", refspec)
self._executeGitCommandAssertSuccess(command)
def rebase(self, src):
self._executeGitCommandAssertSuccess("git rebase %s" % self._normalizeRefName(src))
#################################### Stashes ###################################
def saveStash(self, name=None):
command = "git stash save"
if name is not None:
command += " %s" % name
self._executeGitCommandAssertSuccess(command)
def popStash(self, arg=None):
command = "git stash pop"
if arg is not None:
command += " %s" % arg
self._executeGitCommandAssertSuccess(command)
################################# Configuration ################################
################################### Shortcuts ##################################
def clone(source, location):
returned = LocalRepository(location)
returned.clone(source)
return returned
def find_repository():
orig_path = path = os.path.realpath('.')
drive, path = os.path.splitdrive(path)
while path:
current_path = os.path.join(drive, path)
current_repo = LocalRepository(current_path)
if current_repo.isValid():
return current_repo
path, path_tail = os.path.split(current_path)
if not path_tail:
raise CannotFindRepository("Cannot find repository for %s" % (orig_path,))
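# Illustrative usage sketch (not part of the original module), assuming git is
# installed and a user identity is configured: initialise a repository at
# 'path', stage a file and record a commit through the wrappers defined above.
def _example_local_repository_flow(path):
    repo = LocalRepository(path)
    repo.init()
    with open(os.path.join(path, "readme.txt"), "w") as f:
        f.write("example\n")
    repo.add("readme.txt")
    return repo.commit("initial commit", allowEmpty=True)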
|
|
# encoding: utf-8
import logging
import os
import signal
from time import time
from functools import wraps
from inspect import isgeneratorfunction
from collections import defaultdict
from multiprocessing import RLock
from tornado.gen import Return, sleep
from tornado.ioloop import IOLoop
from tornado.locks import Lock
try:
import cPickle as pickle
except ImportError:
import pickle
FunctionType = type(lambda: None)
log = logging.getLogger("cache")
class Result(object):
def __init__(self, result):
self.__result = result
self.__ts = time()
@property
def result(self):
return self.__result
@property
def ts(self):
return self.__ts
class Cache(object):
__slots__ = ('timeout', 'ignore_self', 'oid', 'files_cache')
CACHE_DIR = None
CACHE = {}
FUTURE_LOCKS = defaultdict(Lock)
RLOCKS = defaultdict(RLock)
def __init__(self, timeout, ignore_self=False, oid=None, files_cache=False):
self.timeout = timeout
self.ignore_self = ignore_self
self.oid = oid
self.files_cache = files_cache
@staticmethod
def hash_func(key):
if isinstance(key, FunctionType):
return ".".join((key.__module__, key.__name__))
else:
return str(key)
@classmethod
def invalidate(cls, func):
fkey = cls.hash_func(func)
hash_fkey = hash(fkey)
        for key in list(filter(lambda x: hash(x[0]) == hash_fkey, cls.CACHE)):  # snapshot keys before mutating CACHE
log.debug('INVALIDATING Cache for %r', key)
cls.CACHE.pop(key, -1)
cls.FUTURE_LOCKS.pop(key, -1)
cls.RLOCKS.pop(key, -1)
def get_cache(self, key):
if self.files_cache:
fname = self.get_cache_file(key)
if not os.path.exists(fname):
return None
with open(fname, 'rb') as f:
result = pickle.load(f)
if result.ts < (time() - self.timeout):
IOLoop.current().add_callback(os.remove, fname)
return None
return result
return self.CACHE.get(key)
def set_cache(self, key, value):
if self.files_cache:
fname = self.get_cache_file(key)
with open(fname, 'wb+') as f:
pickle.dump(value, f)
else:
self.CACHE[key] = value
@classmethod
def invalidate_all(cls, *args, **kwargs):
log.warning("Invalidating all memory cache.")
cls.CACHE.clear()
cls.FUTURE_LOCKS.clear()
cls.RLOCKS.clear()
log.warning("Invalidating all disk cache.")
files = filter(
lambda x: os.path.isfile(x),
(os.path.join(cls.CACHE_DIR, f) for f in os.listdir(cls.CACHE_DIR))
)
for file in files:
try:
os.remove(file)
except Exception as e:
log.exception(e)
def __call__(self, func):
is_generator = isgeneratorfunction(func)
key = self.oid or self.hash_func(func)
def get_hash(func, args, kwargs):
return tuple(
map(
hash,
(
key,
tuple(map(hash, args[1:] if self.ignore_self else args)),
tuple(map(lambda x: tuple(map(hash, x)), kwargs.items())),
is_generator,
)
)
)
@wraps(func)
def wrap(*args, **kwargs):
io_loop = IOLoop.current()
args_key = get_hash(func, args, kwargs)
start_time = io_loop.time()
with self.RLOCKS[args_key]:
ret = self.get_cache(args_key)
if isinstance(ret, Result):
log.debug("HIT Cache [%s] %r", key, args_key)
return ret.result
ret = Result(func(*args, **kwargs))
self.set_cache(args_key, ret)
io_loop.add_callback(
io_loop.call_later,
self.timeout,
self._expire,
key,
args_key
)
log.debug(
"MISS Cache [%s] %r. Execution time %.6f sec.",
key,
args_key,
io_loop.time() - start_time
)
return ret.result
@wraps(func)
def wrap_gen(*args, **kwargs):
io_loop = IOLoop.current()
args_key = get_hash(func, args, kwargs)
start_time = io_loop.time()
with (yield self.FUTURE_LOCKS[args_key].acquire()):
ret = self.get_cache(args_key)
if isinstance(ret, Result):
yield sleep(0)
log.debug("HIT Cache [%s] %r", key, args_key)
raise Return(ret.result)
gen = func(*args, **kwargs)
try:
f = next(gen)
while True:
try:
res = yield f
f = gen.send(res)
except (Return, StopIteration):
raise
except Exception as e:
f = gen.throw(e)
except Return as e:
ret = Result(e.value)
except StopIteration as e:
ret = Result(getattr(e, 'value', None))
if ret.result:
self.set_cache(args_key, ret)
io_loop.add_callback(
io_loop.call_later,
self.timeout,
self._expire,
key,
args_key
)
log.debug(
"MISS Cache [%s] %r. Execution time %.6f sec.",
key,
args_key,
io_loop.time() - start_time
)
else:
log.warning(
"Generator '%s' no return any value. Cache ignoring.",
self.hash_func(func)
)
log.debug("INVALID Cache [%s] %r", key, args_key)
raise Return(ret.result)
return wrap_gen if is_generator else wrap
def get_cache_file(self, args_key):
return os.path.join(self.CACHE_DIR, str(hash(args_key)))
def _expire(self, key, args_key):
if self.files_cache:
fname = self.get_cache_file(args_key)
if os.path.exists(fname):
os.remove(fname)
self.CACHE.pop(args_key, -1)
log.debug("EXPIRED Cache [%s] %r", key, args_key)
signal.signal(signal.SIGUSR1, Cache.invalidate_all)
MINUTE = 60
HOUR = MINUTE * 60
DAY = 24 * HOUR
WEEK = 7 * DAY
MONTH = 4 * WEEK
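# Illustrative usage sketch (not part of the original module): results of the
# decorated callable are kept in the in-memory cache for one minute, so calls
# repeated within the timeout return the stored Result instead of re-running
# the body.  Cache.invalidate(_example_cached_timestamp) drops the entries
# explicitly; generator functions are wrapped the same way but must deliver a
# value (via tornado's Return / StopIteration value) for the result to be cached.
@Cache(MINUTE)
def _example_cached_timestamp():
    # stand-in for an expensive computation
    return time()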
|
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from core import debug
from core.modules.vistrails_module import Module, ModuleError, ModuleErrors, \
ModuleConnector, InvalidOutput
from core.modules.basic_modules import Boolean, String, Integer, Float, Tuple,\
File, NotCacheable, Constant, List
from core.modules.module_registry import get_module_registry
from core.vistrail.port_spec import PortSpec
from core.utils import VistrailsInternalError
import copy
from itertools import izip
#################################################################################
## Fold Operator
class Fold(Module, NotCacheable):
"""The Fold Module is a high-order operator to implement some other structures,
such as map, filter, sum, and so on.
To use it, the user must inherit this class.
Initially, the method setInitialValue() must be defined.
Later, the method operation() must be defined."""
def __init__(self):
Module.__init__(self)
self.is_fold_module = True
def updateUpstream(self):
"""A modified version of the updateUpstream method."""
# everything is the same except that we don't update anything
# upstream of FunctionPort
for port_name, connector_list in self.inputPorts.iteritems():
if port_name == 'FunctionPort':
for connector in connector_list:
connector.obj.updateUpstream()
else:
for connector in connector_list:
connector.obj.update()
for port_name, connectorList in copy.copy(self.inputPorts.items()):
if port_name != 'FunctionPort':
for connector in connectorList:
if connector.obj.get_output(connector.port) is \
InvalidOutput:
self.removeInputConnector(port_name, connector)
def updateFunctionPort(self):
"""
        Function to be used inside the updateUpstream method of the Fold module. It
updates the modules connected to the FunctionPort port.
"""
nameInput = self.getInputFromPort('InputPort')
nameOutput = self.getInputFromPort('OutputPort')
rawInputList = self.getInputFromPort('InputList')
# create inputList to always have iterable elements
# to simplify code
if len(nameInput) == 1:
element_is_iter = False
else:
element_is_iter = True
inputList = []
for element in rawInputList:
if not element_is_iter:
inputList.append([element])
else:
inputList.append(element)
## Update everything for each value inside the list
for i in xrange(len(inputList)):
element = inputList[i]
if element_is_iter:
self.element = element
else:
self.element = element[0]
for connector in self.inputPorts.get('FunctionPort'):
if not self.upToDate:
##Type checking
if i==0:
self.typeChecking(connector.obj, nameInput, inputList)
connector.obj.upToDate = False
connector.obj.already_computed = False
## Setting information for logging stuff
connector.obj.is_fold_operator = True
connector.obj.first_iteration = False
connector.obj.last_iteration = False
connector.obj.fold_iteration = i
if i==0:
connector.obj.first_iteration = True
if i==((len(inputList))-1):
connector.obj.last_iteration = True
self.setInputValues(connector.obj, nameInput, element)
connector.obj.update()
## Getting the result from the output port
if nameOutput not in connector.obj.outputPorts:
raise ModuleError(connector.obj,\
'Invalid output port: %s'%nameOutput)
self.elementResult = connector.obj.get_output(nameOutput)
self.operation()
def setInputValues(self, module, inputPorts, elementList):
"""
Function used to set a value inside 'module', given the input port(s).
"""
for element, inputPort in izip(elementList, inputPorts):
## Cleaning the previous connector...
if inputPort in module.inputPorts:
del module.inputPorts[inputPort]
new_connector = ModuleConnector(create_constant(element), 'value')
module.set_input_port(inputPort, new_connector)
def typeChecking(self, module, inputPorts, inputList):
"""
Function used to check if the types of the input list element and of the
inputPort of 'module' match.
"""
for elementList in inputList:
if len(elementList) != len(inputPorts):
raise ModuleError(self,
'The number of input values and input ports '
'are not the same.')
for element, inputPort in izip(elementList, inputPorts):
p_modules = module.moduleInfo['pipeline'].modules
p_module = p_modules[module.moduleInfo['moduleId']]
port_spec = p_module.get_port_spec(inputPort, 'input')
v_module = create_module(element, port_spec.signature)
if v_module is not None:
if not self.compare(port_spec, v_module, inputPort):
raise ModuleError(self,
'The type of a list element does '
'not match with the type of the '
'port %s.' % inputPort)
del v_module
else:
break
def createSignature(self, v_module):
"""
        Function used to create a signature, given v_module, for a port spec.
"""
if type(v_module)==tuple:
v_module_class = []
for module_ in v_module:
v_module_class.append(self.createSignature(module_))
return v_module_class
else:
return v_module.__class__
def compare(self, port_spec, v_module, port):
"""
Function used to compare two port specs.
"""
port_spec1 = port_spec
reg = get_module_registry()
v_module = self.createSignature(v_module)
port_spec2 = PortSpec(**{'signature': v_module})
matched = reg.are_specs_matched(port_spec1, port_spec2)
return matched
def compute(self):
"""The compute method for the Fold."""
self.setInitialValue()
self.partialResult = self.initialValue
self.elementResult = None
if self.hasInputFromPort('FunctionPort'):
self.updateFunctionPort()
else:
for element in self.getInputFromPort('InputList'):
self.element = element
self.operation()
self.setResult('Result', self.partialResult)
def setInitialValue(self):
"""This method defines the initial value of the Fold structure. It must
be defined before the operation() method."""
pass
def operation(self):
"""This method defines the interaction between the current element of
the list and the previous iterations' result."""
pass
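# Illustrative sketch (not part of the original package): a minimal Fold
# subclass that sums the values received on 'InputList' when no FunctionPort
# is connected.  setInitialValue() seeds partialResult and operation() folds
# the current self.element into it, as described in the Fold docstring above.
class _SumFoldExample(Fold):
    def setInitialValue(self):
        self.initialValue = 0
    def operation(self):
        self.partialResult = self.partialResult + self.element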
#################################################################################
class NewConstant(Constant):
"""
A new Constant module to be used inside the Fold module.
"""
def setValue(self, v):
self.setResult("value", v)
self.upToDate = True
def create_constant(value):
"""
Creates a NewConstant module, to be used for the ModuleConnector.
"""
constant = NewConstant()
constant.setValue(value)
return constant
def create_module(value, signature):
"""
Creates a module for value, in order to do the type checking.
"""
if type(value)==bool:
v_module = Boolean()
return v_module
elif type(value)==str:
v_module = String()
return v_module
elif type(value)==int:
if type(signature)==list:
signature = signature[0]
if signature[0]==Float().__class__:
v_module = Float()
else:
v_module = Integer()
return v_module
elif type(value)==float:
v_module = Float()
return v_module
elif type(value)==list:
v_module = List()
return v_module
elif type(value)==file:
v_module = File()
return v_module
elif type(value)==tuple:
v_modules = ()
for element in xrange(len(value)):
v_modules += (create_module(value[element], signature[element]),)
return v_modules
else:
debug.warning("Could not identify the type of the list element.")
debug.warning("Type checking is not going to be done inside Fold module.")
return None
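# Illustrative examples (not part of the original module): create_module(True, sig)
# returns a Boolean instance and create_module(2.5, sig) a Float; a tuple value
# yields a tuple of modules built element-wise against the matching entries of the
# given signature, and unrecognised types fall through to the warnings above and
# return None.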
|
|
# -*- coding: utf-8 -*-
"""
Organize picture files into a directory tree by year and month.
Uses the Perl exiftool to read the creation date and filename from the file metadata.
Strongly inspired by the project:
https://github.com/OneLogicalMyth/Random-Scripts.git
Created on 27/12/16 15:53
@author: vpistis
"""
import datetime
import filecmp
import os
import shutil
import subprocess
import sys
import timeit
from utils import Logger, get_setting, which
sys.stdout = Logger()
PROCESS_IMAGES = get_setting("PROCESS_IMAGES")
PROCESS_VIDEOS = get_setting("PROCESS_VIDEOS")
IMAGES_SOURCE_PATH = get_setting("IMAGES_SOURCE_PATH")
IMAGES_DESTINATION_PATH = get_setting("IMAGES_DESTINATION_PATH")
IMAGE_FILES_EXTENSIONS = tuple(get_setting("IMAGE_FILES_EXTENSIONS"))
IMAGE_FILENAME_SUFFIX = get_setting("IMAGE_FILENAME_SUFFIX")
VIDEOS_SOURCE_PATH = get_setting("VIDEOS_SOURCE_PATH")
VIDEOS_DESTINATION_PATH = get_setting("VIDEOS_DESTINATION_PATH")
VIDEO_FILES_EXTENSIONS = tuple(get_setting("VIDEO_FILES_EXTENSIONS"))
VIDEO_FILENAME_SUFFIX = get_setting("VIDEO_FILENAME_SUFFIX")
# If false copy file and don't remove old file
REMOVE_OLD_FILES = get_setting("REMOVE_OLD_FILES")
APPEND_ORIG_FILENAME = get_setting("APPEND_ORIG_FILENAME")
# if RENAME_SORTED_FILES=False, use this date format for naming files
DATE_FORMAT_OUTPUT = get_setting("DATE_FORMAT_OUTPUT")
# if false, sorted files keep their original name, else rename using CreateDate
RENAME_SORTED_FILES = get_setting("RENAME_SORTED_FILES")
# in case you use nextcloud or owncloud, set NEXTCLOUD=True to rescan all files
NEXTCLOUD = get_setting("NEXTCLOUD")
NEXTCLOUD_PATH = get_setting("NEXTCLOUD_PATH")
NEXTCLOUD_USER = get_setting("NEXTCLOUD_USER")
def get_create_date(filename):
"""
Get creation date from file metadata
:param filename:
:return:
"""
command = ["exiftool", "-CreateDate", "-s3", "-fast2", filename]
metadata = subprocess.check_output(command, universal_newlines=True)
try:
# Grab date taken
datetaken_object = datetime.datetime.strptime(metadata.rstrip(), "%Y:%m:%d %H:%M:%S")
# Date
day = str(datetaken_object.day).zfill(2)
month = str(datetaken_object.month).zfill(2)
year = str(datetaken_object.year)
# New Filename
output = [day, month, year, datetaken_object.strftime(DATE_FORMAT_OUTPUT)]
return output
except Exception as e:
print("{}".format(e))
print("exiftool is installed?")
return None
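# Illustrative example (assuming DATE_FORMAT_OUTPUT = "%Y%m%d_%H%M%S"): for a
# photo taken on 2016-12-27 at 15:53:07 the function returns
# ['27', '12', '2016', '20161227_155307'], i.e. [day, month, year, formatted date],
# which organize_files() uses to build the <year>/<month> destination directory.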
def get_sub_sec_time_original(filename):
"""
    Get SubSecTimeOriginal from the file metadata if it exists
:param filename:
:return:
"""
try:
command = ["exiftool", "-SubSecTimeOriginal", "-s3", "-fast2", filename]
metadata = subprocess.check_output(command, universal_newlines=True)
# print(str(metadata.rstrip()))
return metadata.rstrip()
except Exception as e:
print("{}".format(e))
print("exiftool is installed?")
return None
def get_file_name(filename):
"""
Get real filename from metadata
:param filename:
:return:
"""
try:
command = ["exiftool", "-filename", "-s3", "-fast2", filename]
metadata = subprocess.check_output(command, universal_newlines=True)
# print(str(metadata.rstrip()))
return metadata.rstrip()
except Exception as e:
print("{}".format(e))
print("exiftool is installed?")
return None
def get_file_ext(filename):
"""
    Return the file extension based on the file name from metadata, including the leading dot.
Example return: '.jpg'
:param filename:
:return:
"""
extension = ".{}".format(get_file_name(filename).split(".")[-1])
return extension
def organize_files(src_path, dest_path, files_extensions, filename_suffix=""):
"""
Get all files from directory and process
:return:
"""
_src_path = src_path
_dest_path = dest_path
_files_extensions = files_extensions
_filename_suffix = filename_suffix
# check if destination path is existing create if not
if not os.path.exists(_dest_path):
os.makedirs(_dest_path)
print("Destination path created: {}".format(_dest_path))
if len(os.listdir(_src_path)) <= 0:
print("No files in path: {}".format(_src_path))
        return 0, 0, 0, 0  # processed, removed, copied, skipped
else:
num_files_processed = 0
num_files_removed = 0
num_files_copied = 0
num_files_skipped = 0
for file in os.listdir(_src_path):
if file.lower().endswith(_files_extensions):
num_files_processed += 1
filename = _src_path + os.sep + file
file_ext = get_file_ext(filename)
date_info = get_create_date(filename)
try:
out_filepath = _dest_path + os.sep + date_info[2] + os.sep + date_info[1]
if RENAME_SORTED_FILES:
if APPEND_ORIG_FILENAME:
out_filename = out_filepath + os.sep + _filename_suffix + date_info[3] \
+ get_sub_sec_time_original(filename) + '_' + file
else:
out_filename = out_filepath + os.sep + _filename_suffix + date_info[3] \
+ get_sub_sec_time_original(filename) + file_ext
else:
if APPEND_ORIG_FILENAME:
out_filename = out_filepath + os.sep + get_file_name(filename) + '_' + file
else:
out_filename = out_filepath + os.sep + get_file_name(filename)
# check if destination path is existing create if not
if not os.path.exists(out_filepath):
os.makedirs(out_filepath)
# don't overwrite files if the name is the same
if os.path.exists(out_filename):
shutil.copy2(filename, out_filename + '_duplicate')
if filecmp.cmp(filename, out_filename + '_duplicate'):
# the old file name exists...skip file
os.remove(out_filename + '_duplicate')
num_files_skipped += 1
print("Skipped file: {}".format(filename))
continue
else:
# new dest path but old filename
out_filename = out_filepath + os.sep + file
if os.path.exists(out_filename):
shutil.copy2(filename, out_filename + '_duplicate')
if filecmp.cmp(filename, out_filename + '_duplicate'):
# the old file name exists...skip file
os.remove(out_filename + '_duplicate')
num_files_skipped += 1
print("Skipped file: {}".format(filename))
continue
# copy the file to the organised structure
shutil.copy2(filename, out_filename)
if filecmp.cmp(filename, out_filename):
num_files_copied += 1
                        print('File copied successfully to {}'.format(out_filename))
if REMOVE_OLD_FILES:
os.remove(filename)
num_files_removed += 1
print('Removed old file {}'.format(filename))
else:
print('File failed to copy :( {}'.format(filename))
                except TypeError:
                    # get_create_date() returned None: the file has no usable metadata
                    print('File has no metadata, skipped {}'.format(filename))
                    num_files_skipped += 1
                    continue
                except Exception as e:
                    print("{}".format(e))
                    print("Exception occurred")
                    return num_files_processed, num_files_removed, num_files_copied, num_files_skipped
        return num_files_processed, num_files_removed, num_files_copied, num_files_skipped
# Nextcloud initiate a scan
def nextcloud_files_scan():
if NEXTCLOUD:
try:
subprocess.Popen("sudo -u {} php {}/console.php files:scan --all".format(NEXTCLOUD_USER, NEXTCLOUD_PATH),
shell=True, stdout=subprocess.PIPE)
except Exception as e:
print("{}".format(e))
print("Exception occurred")
return
def main():
# check if exiftool is installed
if not which("exiftool"):
print("Please...install exiftool first")
return
print("======== {} =======".format(datetime.datetime.now()))
if PROCESS_IMAGES:
print("Start process images...")
start_time = timeit.default_timer()
processed, removed, copied, skipped = organize_files(IMAGES_SOURCE_PATH, IMAGES_DESTINATION_PATH,
IMAGE_FILES_EXTENSIONS, IMAGE_FILENAME_SUFFIX)
elapsed = timeit.default_timer() - start_time
print("End process images in: {} seconds.".format(elapsed))
print("Proccessed: {}. Removed: {}. Copied: {}. Skipped: {}".format(processed,
removed, copied, skipped))
if PROCESS_VIDEOS:
print("Start process videos...")
start_time = timeit.default_timer()
processed, removed, copied, skipped = organize_files(VIDEOS_SOURCE_PATH, VIDEOS_DESTINATION_PATH,
VIDEO_FILES_EXTENSIONS, VIDEO_FILENAME_SUFFIX)
elapsed = timeit.default_timer() - start_time
print("End process videos in: {} seconds.".format(elapsed))
print("Proccessed: {}. Removed: {}. Copied: {}. Skipped: {}".format(processed,
removed, copied, skipped))
return
# Execution
main()
nextcloud_files_scan()
|
|
# -*- coding: utf-8 -*-
#
# pydicom documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 20 23:28:19 2010.
#
# This file is execfile()d with the current
# directory set to its containing dir.
#
# Note that not all possible configuration
# values are present in this
# autogenerated file.
#
# All configuration values have a default;
# values that are commented out
# serve to show the default.
from datetime import datetime
import os
from pathlib import Path
import sys
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except ImportError:
pass
# -- General configuration ------------------------------------------------
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath("../build_tools/sphinx"))
BASE_DIR = Path(__file__).resolve().parent.parent
sys.path.append(os.fspath(BASE_DIR))
from github_link import make_linkcode_resolve
import pynetdicom
# Get the pynetdicom version
# BASE_DIR = Path(__file__).resolve().parent.parent
# VERSION_FILE = BASE_DIR / 'pynetdicom' / '_version.py'
# with open(VERSION_FILE) as fp:
# exec(fp.read())
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# -- General configuration ------------------------------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx
# (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.imgmath",
"sphinx.ext.ifconfig",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.linkcode",
"sphinx.ext.extlinks",
# Custom
"sphinx_copybutton",
]
autosummary_generate = True
autodoc_default_options = {
"members": None,
"no-inherited-members": None,
}
# copybutton conf
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True
# Shortcuts for sphinx.ext.extlinks
extlinks = {
# 'alias' : (url_prefix, caption)
# Usage :dcm:`link text <part05/sect_6.2.html>`
"dcm": ("http://dicom.nema.org/medical/dicom/current/output/chtml/%s", None),
"gh": ("https://github.com/pydicom/%s", None),
"issue": ("https://github.com/pydicom/pynetdicom/issues/%s", "#"),
"pr": ("https://github.com/pydicom/pynetdicom/pull/%s", "#"),
}
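# e.g. in the documentation source, :issue:`512` links to
# https://github.com/pydicom/pynetdicom/issues/512 and :gh:`pynetdicom` links to
# https://github.com/pydicom/pynetdicom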
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"pydicom": ("https://pydicom.github.io/pydicom/stable", None),
}
napoleon_google_docstring = False
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pynetdicom"
year = datetime.now().strftime("%Y")
copyright = f"2018-{year}, pynetdicom contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pynetdicom.__version__
# The full version, including alpha/beta/rc tags.
release = pynetdicom.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ["_build"]
# The reST default role (used for this markup: `text`)
# to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Custom style
html_style = "css/pynetdicom.css"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -----------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "assets/img/pydicom_flat_black.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "assets/img/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "pynetdicomdoc"
# -- Options for LaTeX output --------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
(
"index",
"pynetdicom.tex",
"pynetdicom Documentation",
"pynetdicom contributors",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "generated", f"{name}.examples")
if not os.path.exists(examples_path):
# touch file
open(examples_path, "w").close()
def setup(app):
app.add_css_file("css/pynetdicom.css")
# Example configuration for intersphinx: refer to
# the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"pynetdicom",
"https://github.com/pydicom/pynetdicom/blob/{revision}/{package}/{path}#L{lineno}",
)
|
|
"""
These system tests are testing the release of groovy+ catkin projects.
"""
from __future__ import print_function
import os
import sys
try:
from vcstools.vcs_abstraction import get_vcs_client
except ImportError:
print("vcstools was not detected, please install it.", file=sys.stderr)
sys.exit(1)
from .common import create_release_repo
from ..utils.common import bloom_answer
from ..utils.common import change_directory
from ..utils.common import in_temporary_directory
from ..utils.common import user
from ..utils.package_version import change_upstream_version
from bloom.git import branch_exists
from bloom.git import inbranch
from bloom.util import code
from bloom.commands.git.patch import export_cmd
from bloom.commands.git.patch import import_cmd
from bloom.commands.git.patch import remove_cmd
from bloom.generators.debian.generator import sanitize_package_name
def create_upstream_repository(packages, directory=None):
upstream_dir = 'upstream_repo_groovy'
user('mkdir ' + upstream_dir)
with change_directory(upstream_dir):
user('git init .')
user('echo "readme stuff" >> README.md')
user('git add README.md')
user('git commit -m "Initial commit" --allow-empty')
user('git checkout -b groovy_devel')
for package in packages:
user('mkdir ' + package)
with change_directory(package if len(packages) != 1 else '.'):
package_xml = """\
<?xml version="1.0"?>
<package>
<name>{0}</name>
<version>0.1.0</version>
<description>A catkin (groovy) ROS package called '{0}'</description>
<maintainer email="bar@baz.com">Bar</maintainer>
<license>BSD</license>
<url type="bugtracker">https://github.com/ros/this/issues</url>
<url type="repository">https://github.com/ros/this</url>
<build_depend>catkin</build_depend>
<run_depend>catkin</run_depend>
<!-- required for messages generated by gencpp -->
<run_depend>roscpp_core</run_depend>
</package>
""".format(package)
with open('package.xml', 'w+') as f:
f.write(package_xml)
user('touch .cproject')
user('touch .project')
user('mkdir -p include/sym')
user('touch include/{0}.h'.format(package))
os.symlink('../{0}.h'.format(package), 'include/sym/{0}.h'.format(package))
user('git add package.xml .cproject .project include')
user('git commit -m "Releasing version 0.1.0" --allow-empty')
user('git tag 0.1.0 -m "Releasing version 0.1.0"')
return os.getcwd()
def _test_unary_package_repository(release_dir, version, directory=None):
    print("Testing in {0} at version {1}".format(release_dir, version))
    with change_directory(release_dir):
        # First run everything
        with bloom_answer(bloom_answer.ASSERT_NO_QUESTION):
            cmd = 'git-bloom-release{0} groovy'
            if 'BLOOM_VERBOSE' not in os.environ:
                cmd = cmd.format(' --quiet')
            else:
                cmd = cmd.format('')
            user(cmd, silent=False)
        ###
        ### Import upstream
        ###
        # does the upstream branch exist?
        assert branch_exists('upstream', local_only=True), "no upstream branch"
        # does the upstream/<version> tag exist?
        ret, out, err = user('git tag', return_io=True)
        assert out.count('upstream/' + version) == 1, "no upstream tag created"
        # Is the package.xml from upstream in the upstream branch now?
        with inbranch('upstream'):
            assert os.path.exists('package.xml'), \
                "upstream did not import: '" + os.getcwd() + "': " + \
                str(os.listdir(os.getcwd()))
            with open('package.xml') as f:
                package_xml = f.read()
            assert package_xml.count(version), "not right file"
        ###
        ### Release generator
        ###
        # patch import should have reported OK
        assert ret == code.OK, "actually returned ({0})".format(ret)
        # do the proper branches exist?
        assert branch_exists('release/groovy/foo'), \
            "no release/groovy/foo branch"
        assert branch_exists('patches/release/groovy/foo'), \
            "no patches/release/groovy/foo branch"
        # was the release tag created?
        ret, out, err = user('git tag', return_io=True)
        expected = 'release/groovy/foo/' + version + '-0'
        assert out.count(expected) == 1, \
            "no release tag created, expected: '{0}'".format(expected)
        ###
        ### Make patch
        ###
        with inbranch('release/groovy/foo'):
            assert os.path.islink('include/sym/foo.h'), "Symbolic link lost during pipeline"
            if os.path.exists('include/foo.h'):
                user('git rm include/foo.h')
            else:
                if not os.path.exists('include'):
                    os.makedirs('include')
                user('touch include/foo.h')
                user('git add include/foo.h')
            user('git commit -m "A release patch" --allow-empty')
        ###
        ### Test import and export
        ###
        with inbranch('release/groovy/foo'):
            export_cmd.export_patches()
            remove_cmd.remove_patches()
            import_cmd.import_patches()
        ###
        ### Release generator, again
        ###
        # patch import should have reported OK
        assert ret == code.OK, "actually returned ({0})".format(ret)
        # do the proper branches exist?
        assert branch_exists('release/groovy/foo'), \
            "no release/groovy/foo branch"
        assert branch_exists('patches/release/groovy/foo'), \
            "no patches/release/groovy/foo branch"
        # was the release tag created?
        ret, out, err = user('git tag', return_io=True)
        assert out.count('release/groovy/foo/' + version) == 1, \
            "no release tag created"

@in_temporary_directory
def test_unary_package_repository(directory=None):
    """
    Release a single package catkin (groovy) repository.
    """
    directory = directory if directory is not None else os.getcwd()
    # Setup
    upstream_dir = create_upstream_repository(['foo'], directory)
    upstream_url = 'file://' + upstream_dir
    release_url = create_release_repo(
        upstream_url,
        'git',
        'groovy_devel',
        'groovy')
    release_dir = os.path.join(directory, 'foo_release_clone')
    release_client = get_vcs_client('git', release_dir)
    assert release_client.checkout(release_url)
    versions = ['0.1.0', '0.1.1', '0.2.0']
    import bloom.commands.git.release
    for index in range(len(versions)):
        _test_unary_package_repository(release_dir, versions[index], directory)
        bloom.commands.git.release.upstream_repos = {}
        if index != len(versions) - 1:
            change_upstream_version(upstream_dir, versions[index + 1])

@in_temporary_directory
def test_multi_package_repository(directory=None):
    """
    Release a multi package catkin (groovy) repository.
    """
    directory = directory if directory is not None else os.getcwd()
    # Setup
    pkgs = ['foo', 'bar_ros', 'baz']
    upstream_dir = create_upstream_repository(pkgs, directory)
    upstream_url = 'file://' + upstream_dir
    release_url = create_release_repo(
        upstream_url,
        'git',
        'groovy_devel',
        'groovy')
    release_dir = os.path.join(directory, 'foo_release_clone')
    release_client = get_vcs_client('git', release_dir)
    assert release_client.checkout(release_url)
    with change_directory(release_dir):
        # First run everything
        with bloom_answer(bloom_answer.ASSERT_NO_QUESTION):
            cmd = 'git-bloom-release{0} groovy'
            if 'BLOOM_VERBOSE' not in os.environ:
                cmd = cmd.format(' --quiet')
            else:
                cmd = cmd.format('')
            user(cmd, silent=False)
        ###
        ### Import upstream
        ###
        # does the upstream branch exist?
        assert branch_exists('upstream', local_only=True), "no upstream branch"
        # does the upstream/0.1.0 tag exist?
        ret, out, err = user('git tag', return_io=True)
        assert out.count('upstream/0.1.0') == 1, "no upstream tag created"
        # Is the package.xml from upstream in the upstream branch now?
        with inbranch('upstream'):
            for pkg in pkgs:
                with change_directory(pkg):
                    assert os.path.exists('package.xml'), \
                        "upstream did not import: " + str(os.listdir(os.getcwd()))
                    with open('package.xml') as f:
                        assert f.read().count('0.1.0'), "not right file"
        ###
        ### Release generator
        ###
        # Check the environment after the release generator
        ret, out, err = user('git tag', return_io=True)
        for pkg in pkgs:
            # Does the release/pkg branch exist?
            assert branch_exists('release/groovy/' + pkg), \
                "no release/groovy/" + pkg + " branch"
            # Does the patches/release/pkg branch exist?
            assert branch_exists('patches/release/groovy/' + pkg), \
                "no patches/release/groovy/" + pkg + " branch"
            # Did the release tag get created?
            assert out.count('release/groovy/' + pkg + '/0.1.0-0') == 1, \
                "no release tag created for " + pkg
            # Is there a package.xml in the top level?
            with inbranch('release/groovy/' + pkg):
                assert os.path.exists('package.xml'), "release branch invalid"
                # Is it the correct package.xml for this pkg?
                with open('package.xml', 'r') as f:
                    package_xml = f.read()
                assert package_xml.count('<name>' + pkg + '</name>'), \
                    "incorrect package.xml for " + str(pkg)
        # Make a patch
        with inbranch('release/groovy/' + pkgs[0]):
            user('echo "This is a change" >> README.md')
            user('git add README.md')
            user('git commit -m "added a readme" --allow-empty')
        ###
        ### Release generator, again
        ###
        with bloom_answer(bloom_answer.ASSERT_NO_QUESTION):
            ret = user('git-bloom-generate -y rosrelease groovy -s upstream')
        # patch import should have reported OK
        assert ret == code.OK, "actually returned ({0})".format(ret)
        # Check the environment after the release generator
        ret, out, err = user('git tag', return_io=True)
        for pkg in pkgs:
            # Does the release/pkg branch exist?
            assert branch_exists('release/groovy/' + pkg), \
                "no release/groovy/" + pkg + " branch"
            # Does the patches/release/pkg branch exist?
            assert branch_exists('patches/release/groovy/' + pkg), \
                "no patches/release/groovy/" + pkg + " branch"
            # Did the release tag get created?
            assert out.count('release/groovy/' + pkg + '/0.1.0-0') == 1, \
                "no release tag created for " + pkg
            # Is there a package.xml in the top level?
            with inbranch('release/groovy/' + pkg):
                assert os.path.exists('package.xml'), "release branch invalid"
                # Is it the correct package.xml for this pkg?
                with open('package.xml', 'r') as f:
                    assert f.read().count('<name>' + pkg + '</name>'), \
                        "incorrect package.xml for " + str(pkg)
        ###
        ### ROSDebian Generator
        ###
        # Check the environment after the release generator
        ret, out, err = user('git tag', return_io=True)
        for pkg in pkgs:
            for distro in ['oneiric', 'precise', 'quantal']:
                pkg_san = sanitize_package_name(pkg)
                # Does the debian/distro/pkg branch exist?
                assert branch_exists('debian/groovy/' + distro + '/' + pkg), \
                    "no debian/groovy/" + pkg + " branch"
                # Does the patches/debian/distro/pkg branch exist?
                patches_branch = 'patches/debian/groovy/' + distro + '/' + pkg
                assert branch_exists(patches_branch), \
                    "no " + patches_branch + " branch"
                # Did the debian tag get created?
                tag = 'debian/ros-groovy-' + pkg_san + '_0.1.0-0_' + distro
                assert out.count(tag) == 1, \
                    "no '" + tag + "' tag created for '" + pkg + "': `\n" + \
                    out + "\n`"
                # Is there a package.xml in the top level?
                with inbranch('debian/groovy/' + distro + '/' + pkg):
                    assert os.path.exists('package.xml'), "release branch invalid"
                    # Is it the correct package.xml for this pkg?
                    with open('package.xml', 'r') as f:
                        assert f.read().count('<name>' + pkg + '</name>'), \
                            "incorrect package.xml for " + str(pkg)
|