repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class
---|---|---|---|---|---|---|---|---|---|---|
looker/sentry | src/sentry/models/file.py | 1 | 14550 | """
sentry.models.file
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import six
import mmap
import tempfile
from hashlib import sha1
from uuid import uuid4
from concurrent.futures import ThreadPoolExecutor
from django.conf import settings
from django.core.files.base import File as FileObj
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.db import models, transaction
from django.utils import timezone
from jsonfield import JSONField
from sentry.app import locks
from sentry.db.models import (BoundedPositiveIntegerField, FlexibleForeignKey, Model)
from sentry.tasks.files import delete_file as delete_file_task
from sentry.utils import metrics
from sentry.utils.retries import TimedRetryPolicy
ONE_DAY = 60 * 60 * 24
DEFAULT_BLOB_SIZE = 1024 * 1024 # one mb
CHUNK_STATE_HEADER = '__state'
def enum(**named_values):
return type('Enum', (), named_values)
ChunkFileState = enum(
OK='ok', # File in database
NOT_FOUND='not_found', # File not found in database
    CREATED='created', # File was created in the request and sent to the worker for assembling
ASSEMBLING='assembling', # File still being processed by worker
ERROR='error' # Error happened during assembling
)
class AssembleChecksumMismatch(Exception):
pass
def get_storage():
from sentry import options
backend = options.get('filestore.backend')
options = options.get('filestore.options')
try:
backend = settings.SENTRY_FILESTORE_ALIASES[backend]
except KeyError:
pass
storage = get_storage_class(backend)
return storage(**options)
class FileBlob(Model):
__core__ = False
path = models.TextField(null=True)
size = BoundedPositiveIntegerField(null=True)
checksum = models.CharField(max_length=40, unique=True)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblob'
@classmethod
def from_file(cls, fileobj):
"""
Retrieve a list of FileBlobIndex instances for the given file.
If not already present, this will cause it to be stored.
>>> blobs = FileBlob.from_file(fileobj)
"""
size = 0
checksum = sha1(b'')
for chunk in fileobj:
size += len(chunk)
checksum.update(chunk)
checksum = checksum.hexdigest()
# TODO(dcramer): the database here is safe, but if this lock expires
# and duplicate files are uploaded then we need to prune one
lock = locks.get('fileblob:upload:{}'.format(checksum), duration=60 * 10)
with TimedRetryPolicy(60)(lock.acquire):
# test for presence
try:
existing = FileBlob.objects.get(checksum=checksum)
except FileBlob.DoesNotExist:
pass
else:
return existing
blob = cls(
size=size,
checksum=checksum,
)
blob.path = cls.generate_unique_path(blob.timestamp)
storage = get_storage()
storage.save(blob.path, fileobj)
blob.save()
metrics.timing('filestore.blob-size', size)
return blob
@classmethod
def generate_unique_path(cls, timestamp):
pieces = [six.text_type(x) for x in divmod(int(timestamp.strftime('%s')), ONE_DAY)]
pieces.append(uuid4().hex)
return u'/'.join(pieces)
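    # Illustrative output only (values are hypothetical, not from the original source):
    # a timestamp during 2015-06-01 produces a path such as u'16587/52200/<uuid4-hex>',
    # i.e. days-since-epoch / seconds-into-that-day / a random uuid4 hex string.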
def delete(self, *args, **kwargs):
lock = locks.get('fileblob:upload:{}'.format(self.checksum), duration=60 * 10)
with TimedRetryPolicy(60)(lock.acquire):
if self.path:
self.deletefile(commit=False)
super(FileBlob, self).delete(*args, **kwargs)
def deletefile(self, commit=False):
assert self.path
delete_file_task.delay(self.path, self.checksum)
self.path = None
if commit:
self.save()
def getfile(self):
"""
Return a file-like object for this File's content.
>>> with blob.getfile() as src, open('/tmp/localfile', 'wb') as dst:
>>> for chunk in src.chunks():
>>> dst.write(chunk)
"""
assert self.path
storage = get_storage()
return storage.open(self.path)
class File(Model):
__core__ = False
name = models.TextField()
type = models.CharField(max_length=64)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
headers = JSONField()
blobs = models.ManyToManyField('sentry.FileBlob', through='sentry.FileBlobIndex')
size = BoundedPositiveIntegerField(null=True)
checksum = models.CharField(max_length=40, null=True, db_index=True)
# <Legacy fields>
# Remove in 8.1
blob = FlexibleForeignKey('sentry.FileBlob', null=True, related_name='legacy_blob')
path = models.TextField(null=True)
# </Legacy fields>
class Meta:
app_label = 'sentry'
db_table = 'sentry_file'
def _get_chunked_blob(self, mode=None, prefetch=False,
prefetch_to=None, delete=True):
return ChunkedFileBlobIndexWrapper(
FileBlobIndex.objects.filter(
file=self,
).select_related('blob').order_by('offset'),
mode=mode,
prefetch=prefetch,
prefetch_to=prefetch_to,
delete=delete
)
def getfile(self, mode=None, prefetch=False, as_tempfile=False):
"""Returns a file object. By default the file is fetched on
demand but if prefetch is enabled the file is fully prefetched
into a tempfile before reading can happen.
Additionally if `as_tempfile` is passed a NamedTemporaryFile is
returned instead which can help in certain situations where a
tempfile is necessary.
"""
if as_tempfile:
prefetch = True
impl = self._get_chunked_blob(mode, prefetch)
if as_tempfile:
return impl.detach_tempfile()
return FileObj(impl, self.name)
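    # Usage sketch (hypothetical variable names, not part of the original module):
    #     with f.getfile(prefetch=True) as fp:
    #         data = fp.read()
    #     tmp = f.getfile(as_tempfile=True)  # NamedTemporaryFile; the caller must close it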
def save_to(self, path):
"""Fetches the file and emplaces it at a certain location. The
write is done atomically to a tempfile first and then moved over.
If the directory does not exist it is created.
"""
path = os.path.abspath(path)
base = os.path.dirname(path)
try:
os.makedirs(base)
except OSError:
pass
f = None
try:
f = self._get_chunked_blob(prefetch=True,
prefetch_to=base,
delete=False).detach_tempfile()
os.rename(f.name, path)
f.close()
f = None
finally:
if f is not None:
f.close()
try:
os.remove(f.name)
except Exception:
pass
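    # Usage sketch (hypothetical path, not from the original source):
    #     f.save_to('/var/cache/sentry/releases/bundle.zip')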
def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE, commit=True):
"""
Save a fileobj into a number of chunks.
Returns a list of `FileBlobIndex` items.
>>> indexes = file.putfile(fileobj)
"""
results = []
offset = 0
checksum = sha1(b'')
while True:
contents = fileobj.read(blob_size)
if not contents:
break
checksum.update(contents)
blob_fileobj = ContentFile(contents)
blob = FileBlob.from_file(blob_fileobj)
results.append(FileBlobIndex.objects.create(
file=self,
blob=blob,
offset=offset,
))
offset += blob.size
self.size = offset
self.checksum = checksum.hexdigest()
metrics.timing('filestore.file-size', offset)
if commit:
self.save()
return results
def assemble_from_file_blob_ids(self, file_blob_ids, checksum, commit=True):
"""
        This creates a file from the given file blobs and returns a temp file with the
contents.
"""
tf = tempfile.NamedTemporaryFile()
with transaction.atomic():
file_blobs = FileBlob.objects.filter(id__in=file_blob_ids).all()
# Make sure the blobs are sorted with the order provided
file_blobs = sorted(file_blobs, key=lambda blob: file_blob_ids.index(blob.id))
new_checksum = sha1(b'')
offset = 0
for blob in file_blobs:
FileBlobIndex.objects.create(
file=self,
blob=blob,
offset=offset,
)
for chunk in blob.getfile().chunks():
new_checksum.update(chunk)
tf.write(chunk)
offset += blob.size
self.size = offset
self.checksum = new_checksum.hexdigest()
if checksum != self.checksum:
raise AssembleChecksumMismatch('Checksum mismatch')
metrics.timing('filestore.file-size', offset)
if commit:
self.save()
tf.flush()
tf.seek(0)
return tf
class FileBlobIndex(Model):
__core__ = False
file = FlexibleForeignKey('sentry.File')
blob = FlexibleForeignKey('sentry.FileBlob')
offset = BoundedPositiveIntegerField()
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblobindex'
unique_together = (('file', 'blob', 'offset'), )
class ChunkedFileBlobIndexWrapper(object):
def __init__(self, indexes, mode=None, prefetch=False,
prefetch_to=None, delete=True):
        # eager load from database in case it's a queryset
self._indexes = list(indexes)
self._curfile = None
self._curidx = None
if prefetch:
self.prefetched = True
self._prefetch(prefetch_to, delete)
else:
self.prefetched = False
self.mode = mode
self.open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def detach_tempfile(self):
if not self.prefetched:
            raise TypeError('Can only detach tempfiles in prefetch mode')
rv = self._curfile
self._curfile = None
self.close()
rv.seek(0)
return rv
def _nextidx(self):
assert not self.prefetched, 'this makes no sense'
old_file = self._curfile
try:
try:
self._curidx = six.next(self._idxiter)
self._curfile = self._curidx.blob.getfile()
except StopIteration:
self._curidx = None
self._curfile = None
finally:
if old_file is not None:
old_file.close()
@property
def size(self):
return sum(i.blob.size for i in self._indexes)
def open(self):
self.closed = False
self.seek(0)
def _prefetch(self, prefetch_to=None, delete=True):
size = self.size
f = tempfile.NamedTemporaryFile(prefix='._prefetch-',
dir=prefetch_to,
delete=delete)
if size == 0:
self._curfile = f
return
# Zero out the file
f.seek(size - 1)
f.write('\x00')
f.flush()
mem = mmap.mmap(f.fileno(), size)
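        # Each blob is then fetched on a worker thread and copied into the mmap'd
        # tempfile at its recorded offset, so the downloads proceed in parallel.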
def fetch_file(offset, getfile):
with getfile() as sf:
while True:
chunk = sf.read(65535)
if not chunk:
break
mem[offset:offset + len(chunk)] = chunk
offset += len(chunk)
with ThreadPoolExecutor(max_workers=4) as exe:
for idx in self._indexes:
exe.submit(fetch_file, idx.offset, idx.blob.getfile)
mem.flush()
self._curfile = f
def close(self):
if self._curfile:
self._curfile.close()
self._curfile = None
self._curidx = None
self.closed = True
def seek(self, pos):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.seek(pos)
if pos < 0:
raise IOError('Invalid argument')
for n, idx in enumerate(self._indexes[::-1]):
if idx.offset <= pos:
if idx != self._curidx:
self._idxiter = iter(self._indexes[-(n + 1):])
self._nextidx()
break
else:
raise ValueError('Cannot seek to pos')
self._curfile.seek(pos - self._curidx.offset)
def tell(self):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.tell()
if self._curfile is None:
return self.size
return self._curidx.offset + self._curfile.tell()
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.read(n)
result = bytearray()
# Read to the end of the file
if n < 0:
while self._curfile is not None:
blob_result = self._curfile.read(32768)
if not blob_result:
self._nextidx()
else:
result.extend(blob_result)
# Read until a certain number of bytes are read
else:
while n > 0 and self._curfile is not None:
blob_result = self._curfile.read(min(n, 32768))
if not blob_result:
self._nextidx()
else:
n -= len(blob_result)
result.extend(blob_result)
return bytes(result)
class FileBlobOwner(Model):
__core__ = False
blob = FlexibleForeignKey('sentry.FileBlob')
organization = FlexibleForeignKey('sentry.Organization')
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblobowner'
unique_together = (('blob', 'organization'), )
| bsd-3-clause | -434,740,297,523,743,400 | 29 | 95 | 0.564948 | false |
google/makani | avionics/bootloader/program.py | 1 | 27768 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle details of most programming tasks."""
import argparse
import logging
import os
import subprocess
import sys
import tempfile
import makani
from makani.avionics.bootloader import jlink_client
from makani.avionics.bootloader import parallel_bootloader
from makani.avionics.bootloader import system_config
from makani.avionics.firmware.params import codec
from makani.avionics.network import aio_node
from makani.avionics.network import network_config
from makani.avionics.network import node_locations
from makani.lib.bazel import bazel_util
from makani.lib.python import c_helpers
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
_NETWORK_CONFIG = network_config.NetworkConfig()
SERIAL_PARAM_FILE_TEMPLATE = os.path.join(makani.HOME, 'avionics', 'firmware',
'serial', '{}_serial_params.yaml')
def _Find(directory, name):
matches = []
for dirpath, _, filenames in os.walk(directory, followlinks=True):
for f in filenames:
if f == name:
matches.append(os.path.join(dirpath, f))
return matches
def _FindUniqueFile(directory, variant, prefix, suffix, node_name):
"""Find a unique file matching constraints in the specified directory.
Files are expected to be of the form variant_suffix, or
variant_prefix_suffix, with provided prefix or a prefix generated
from node_name.
Returns:
A unique file if exactly one matching file exists.
Args:
directory: Search base directory.
variant: Required start of the file name.
prefix: Node prefix optionally used for convenient matching.
suffix: Required end of the file name.
node_name: Name of the node being serviced. Used in lieu of the prefix.
Raises:
RuntimeError: Unable to search or there was not one unique file.
"""
names = set(['_'.join([variant, suffix])])
if prefix:
names.add('_'.join([variant, prefix, suffix]))
elif node_name:
# If no explicit prefix, attempt to be clever and use the beginnings of
# node name as prefix.
tokens = node_name.split('_')
for i in range(len(tokens)):
names.add('_'.join([variant] + tokens[:i + 1] + [suffix]))
files = set()
for name in names:
files.update(_Find(directory, name))
if len(files) > 1:
sys.stderr.write('Pattern matches the following files:\n%s\n\n' %
'\n'.join([' * %s' % os.path.relpath(f, directory)
for f in files]))
raise RuntimeError('Found multiple bin files for patterns %s.'
% ', '.join(names))
if not files:
raise RuntimeError('No match for patterns %s.' % ', '.join(names))
return files.pop()
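# For illustration (hypothetical values, not from the original source): with variant='rev_ab',
# prefix=None, node_name='motor_sbo' and suffix='calib_params.bin', this searches for
# 'rev_ab_calib_params.bin', 'rev_ab_motor_calib_params.bin' and
# 'rev_ab_motor_sbo_calib_params.bin', and requires exactly one file to match.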
def _GetProgramElf(directory, node, segment):
"""Returns the elf file for this node for the specified memory segment."""
bin_file = None
if segment == 'bootloader':
bin_file = node.bootloader_path
elif segment == 'bootloader_application':
bin_file = node.bootloader_application_path
elif segment == 'application':
bin_file = node.application_path
else:
raise ValueError('Invalid segment %s.' % segment)
return os.path.join(directory, 'avionics', bin_file)
def _GetTargetSelection(args, explicit_targets=None):
"""Find all TMS570 nodes matching constraints."""
target_list = explicit_targets
if not target_list:
target_list = args.target
available_nodes = set(n for n in _NETWORK_CONFIG.aio_nodes
if n.tms570_node or n.snake_name == 'unknown')
selected_nodes = set()
if args.location:
selected_nodes = set()
nodes_by_location = node_locations.GetNodeLocations(_NETWORK_CONFIG)
for loc in args.location:
if loc in nodes_by_location.keys():
selected_nodes.update(nodes_by_location[loc])
else:
raise ValueError('Invalid node location %s.' % loc)
selected_nodes.intersection_update(available_nodes)
available_nodes = selected_nodes
if args.prefix:
selected_nodes = set()
for node in available_nodes:
if node.tms570_node and node.snake_name.startswith(args.prefix):
selected_nodes.add(node)
if not selected_nodes:
raise ValueError('Found no nodes for prefix %s.' % args.prefix)
available_nodes = selected_nodes
if target_list:
# Retain input ordering for target list.
selected_nodes = []
for target in target_list:
if args.prefix and not target.startswith(args.prefix):
target = '%s_%s' % (args.prefix, target)
node = _NETWORK_CONFIG.GetAioNode(target)
if not node:
raise ValueError('Unable to locate node with name %s.' % target)
elif node not in available_nodes:
raise ValueError('Node %s not in selected locations or prefix.'
% target)
else:
selected_nodes.append(node)
if not selected_nodes:
raise ValueError('No nodes selected. Must specify node targets or '
'--prefix or --location.')
return list(selected_nodes)
class JlinkTask(object):
"""Represents an operation with jlink_client.
Args:
binary: A .bin or .elf file to be flashed.
task: A string matching 'bootloader', 'application', 'config', 'param',
'calib'.
target: The target node type in snake format (e.g. motor_sbo).
hardware_type: The target hardware type (e.g. motor or aio).
"""
def __init__(self, binary, task, target=None, hardware_type=None):
self.binary = binary
self.target = target
self.hardware_type = hardware_type
self.task = task
self._jlink_method = None
self._jlink_args = []
self._jlink_kwargs = {}
def GetArgs(self):
self._PrepareForExecution()
return ['Python call to jlink_client.{} with arguments: {} and '
'keyword-arguments: {}'.format(self._jlink_method.__name__,
self._jlink_args,
self._jlink_kwargs)]
def ExecuteTask(self):
"""Performs the J-link programming task.
Returns:
A tuple of the return_code, stdout string, stderr string.
Raises:
CalledProcessError: Non-zero return code from jlink_client.
"""
self._PrepareForExecution()
return_code, stdout, stderr = self._jlink_method(*self._jlink_args,
**self._jlink_kwargs)
if return_code != 0:
error_logger = logging.getLogger('stdout')
for line in stdout.split('\n'):
error_logger.warn(line)
error_logger = logging.getLogger('stderr')
for line in stderr.split('\n'):
error_logger.warn(line)
else:
logger = logging.getLogger('stdout')
for line in stdout.split('\n'):
if line.startswith('J-Link: '):
logger.info(line)
if return_code != 0:
raise subprocess.CalledProcessError(return_code, stdout + stderr)
return return_code, stdout, stderr
def _PrepareForExecution(self):
"""Sets _jlink_method, self._jlink_args and self._jlink_kwargs.
Raises:
NameError: Invalid combinations of arguments.
ValueError: Invalid argument values.
"""
if self.binary.endswith('.elf'):
if not self.target or not self.hardware_type:
raise ValueError('target and hardware_type must be specified for '
'elf files.')
if self.task == 'bootloader':
self._jlink_method = jlink_client.JlinkProgramBootloaderElf
self._jlink_args = (self.target, self.hardware_type, self.binary)
elif self.task == 'application':
self._jlink_method = jlink_client.JlinkProgramApplicationElf
self._jlink_args = (self.target, self.hardware_type, self.binary)
else:
raise NameError('Invalid argument task: {}'.format(self.task))
elif self.binary.endswith('.bin'):
if self.task == 'bootloader':
self._jlink_method = jlink_client.JlinkProgramBootloaderBin
self._jlink_args = (self.binary,)
elif self.task == 'application':
self._jlink_method = jlink_client.JlinkProgramApplicationBin
self._jlink_args = (self.binary,)
elif self.task == 'config':
self._jlink_method = jlink_client.JlinkProgramConfigBin
self._jlink_args = (self.binary,)
elif self.task == 'param':
self._jlink_method = jlink_client.JlinkProgramSerialBin
self._jlink_args = (self.binary,)
elif self.task == 'calib':
self._jlink_method = jlink_client.JlinkProgramCalibBin
self._jlink_args = (self.binary,)
else:
raise NameError('Invalid argument task: {}'.format(self.task))
else:
raise ValueError('Invalid file: {}'.format(self.binary))
def GetDependentTask(self):
return None
class BinTask(object):
"""Represents an operation with bootloader_client.
Raises:
subprocess.CalledProcessError for non-zero return codes.
"""
def __init__(self, bootloader_client, target, binary, extra_args):
self.bootloader_client = bootloader_client
self.target = target
self.binary = binary
self.extra_args = extra_args
def GetArgs(self):
bootloader_client = self.bootloader_client
if not bootloader_client:
bootloader_client = 'bootloader_client'
return [bootloader_client, '--target', self.target,
self.binary] + list(self.extra_args)
def ExecuteTask(self):
subprocess.check_call(self.GetArgs())
def GetDependentTask(self):
return None
class ParamTask(object):
"""Represents an operation with param_util."""
def __init__(self, input_file, yaml_key, set_values, temp_file_suffix):
self.input_file = input_file
self.yaml_key = yaml_key
self.set_values = set_values
self.temp_file_suffix = temp_file_suffix
self.temp_file = 'TEMP' + self.temp_file_suffix
self.temp_handle = None
def GetArgs(self):
cmd_args = ['param_util', '--input', self.input_file, '--output',
self.temp_file, '--yaml_key', self.yaml_key]
for k, v in self.set_values.iteritems():
cmd_args += ['--set_value', '%s:%s' % (k, v)]
return cmd_args
def GetDependentTask(self):
return None
def ExecuteTask(self):
self._DeleteTempFile()
self.temp_handle, self.temp_file = tempfile.mkstemp(self.temp_file_suffix)
with open(self.input_file) as input_file:
with open(self.temp_file, 'w') as output_file:
param = codec.DecodeYaml(input_file.read(), self.yaml_key)
for k, v in self.set_values.iteritems():
param.SetField(k, v)
output_file.write(codec.EncodeBin(param))
def _DeleteTempFile(self):
if self.temp_handle is not None:
try:
os.unlink(self.temp_file)
except IOError:
        sys.stderr.write('Error removing temp file %s.\n' % self.temp_file)
def __del__(self):
self._DeleteTempFile()
class ParamTaskBootloaderClient(ParamTask):
"""Represents an operation with param_util and bootloader_client."""
def __init__(self, bootloader_client, target, input_file, yaml_key,
set_values, extra_args, temp_file_suffix):
self.bootloader_client = bootloader_client
self.target = target
self.extra_args = extra_args
super(ParamTaskBootloaderClient, self).__init__(input_file, yaml_key,
set_values,
temp_file_suffix)
def GetDependentTask(self):
return BinTask(self.bootloader_client, self.target, self.temp_file,
self.extra_args)
class ParamTaskJlink(ParamTask):
"""Represents an operation with param_util and jlink_client."""
def __init__(self, input_file, yaml_key, set_values,
temp_file_suffix):
super(ParamTaskJlink, self).__init__(input_file, yaml_key,
set_values,
temp_file_suffix)
def GetDependentTask(self):
return JlinkTask(self.temp_file, 'param')
def _CheckGlobalOptions(args):
"""Handles options that apply to all operations."""
if not args.bootloader_client and not args.print_args:
args.bootloader_client = os.path.join(
makani.HOME, 'avionics/bootloader/bootloader_client')
if not args.tms570_bin:
args.tms570_bin = bazel_util.GetTms570BinDirectory()
if args.batch and args.system_config:
raise ValueError('System config and batch mode are not allowed together.')
  # --batch does not allow for the following global arguments, however
# --force_hardware is allowed as a global only in combination with --jlink.
operation_args = (args.prefix or args.target or args.calib or args.config or
args.bootloader or args.bootloader_application or
args.application or args.upgrade_bootloader or
args.rename_to or args.serial or args.carrier_serial or
(not args.jlink and args.force_hardware))
if args.batch and operation_args:
raise ValueError('In batch mode, operations can only be completed as '
'batch arguments.')
if args.system_config and operation_args:
raise ValueError('In system config mode, operations cannot be specified.')
if not args.batch and not args.system_config:
return _CheckOperationOptions(args, args.jlink, False)
if args.jlink and args.parallel:
    raise ValueError('--jlink cannot be used in parallel mode.')
return args
def _CheckOperationOptions(args, global_jlink=False, reject_global_args=True):
"""Handles options that apply to a specific operation."""
if reject_global_args and (args.tms570_bin or args.bootloader_client
or args.parallel or args.batch
or args.system_config or args.print_args
or args.jlink):
raise ValueError('Got global arguments in batch mode operation.')
args.application |= not (args.calib or args.config or args.bootloader
or args.bootloader_application
or args.upgrade_bootloader or args.rename_to
or args.serial or args.carrier_serial)
if (sum([1 for x in [args.calib, args.config, args.bootloader,
args.bootloader_application, args.application,
args.upgrade_bootloader, args.rename_to,
args.serial, args.carrier_serial] if x]) != 1):
raise ValueError('Cannot specify more than one update type (calib, serial, '
'carrier_serial, config, bootloader, '
'or bootloader_application).')
if (args.force_hardware and not args.jlink and not (
args.bootloader or args.bootloader_application or args.upgrade_bootloader
or args.rename_to)):
raise ValueError('Cannot specify force_hardware unless writing the '
'bootloader or bootloader app (including renaming).')
# Jlink does not need a target for serial programming, override with unknown
# to satisfy target parsing checks.
if (args.jlink or global_jlink) and args.serial:
args.target = ['unknown']
args.target = _GetTargetSelection(args)
if len(args.target) != 1 and (args.rename_to or args.serial
or args.carrier_serial):
raise ValueError('Only one node can be renamed, or programmed with '
'serial params at a time.')
unknown_node = _NETWORK_CONFIG.GetAioNode('unknown')
if unknown_node not in args.target and (args.serial or args.carrier_serial):
raise ValueError('Can only program carrier or serial params on unknown '
'node.')
if unknown_node in args.target and not (args.rename_to or args.serial or
args.carrier_serial or args.jlink):
raise ValueError('Unknown node can only be renamed, or programmed with '
'serial params.')
return args
def _GetOperationCommands(global_args, operation_args):
"""Get the bootloader client arguments for this operation."""
cmd_list = []
def _AddCommand(target, binary, *extra_args):
cmd_list.append(BinTask(global_args.bootloader_client, target.snake_name,
binary, extra_args))
def _AddJlinkCommand(task, target, elf_file):
hardware_type = operation_args.force_hardware or global_args.force_hardware
cmd_list.append(
JlinkTask(elf_file, task, target.camel_name, hardware_type))
def _GetForceHardwareArgs():
if operation_args.force_hardware:
return ['--force_hardware', operation_args.force_hardware]
elif global_args.force_hardware:
return ['--force_hardware', global_args.force_hardware]
else:
return []
def _GetKnownProgramBinary(target, other, segment):
"""Returns the elf binary of target or other if target is unknown."""
if target.snake_name != 'unknown' or global_args.jlink:
return _GetProgramElf(global_args.tms570_bin, target, segment)
if other.snake_name != 'unknown':
# Programming application on unknown requires bootloader_application.
if segment == 'application':
segment = 'bootloader_application'
return _GetProgramElf(global_args.tms570_bin, other, segment)
raise ValueError('Cannot locate binary for unknown nodes.')
def _ProgramBootloader(source, dest, *extra_args):
if dest != source:
if global_args.parallel:
raise ValueError('Parallel mode not supported for rename operations.')
extra_args = ['--override_target', dest.snake_name] + list(extra_args)
if global_args.jlink:
_AddJlinkCommand(
'bootloader', source,
_GetKnownProgramBinary(dest, source, 'bootloader'))
else:
_AddCommand(source, _GetKnownProgramBinary(dest, source, 'bootloader'),
'--bootloader', *(_GetForceHardwareArgs() + list(extra_args)))
def _RenameNode(source, dest):
_AddCommand(source, _GetKnownProgramBinary(source, dest,
'bootloader_application'),
*_GetForceHardwareArgs())
_ProgramBootloader(source, dest)
_AddCommand(dest, _GetKnownProgramBinary(dest, source, 'application'))
def _SerialParam(target, template, revision, serial, carrier=False):
input_file = SERIAL_PARAM_FILE_TEMPLATE.format(template)
param_type_name = 'carrier_serial' if carrier else 'serial'
extra_args = ['--%s' % param_type_name]
temp_file_suffix = '_%s_params.bin' % param_type_name
if global_args.jlink:
cmd_list.append(ParamTaskJlink(
input_file=input_file, yaml_key=revision,
set_values={'serial_number': serial},
temp_file_suffix=temp_file_suffix,))
else:
cmd_list.append(ParamTaskBootloaderClient(
bootloader_client=global_args.bootloader_client,
target=target.snake_name, input_file=input_file, yaml_key=revision,
temp_file_suffix=temp_file_suffix,
set_values={'serial_number': serial}, extra_args=extra_args))
if global_args.jlink:
if not (operation_args.bootloader or operation_args.application or
operation_args.bootloader_application or operation_args.serial or
operation_args.config or operation_args.calib):
raise ValueError('Cannot specify --jlink unless writing the bootloader, '
'bootloader application, application, serial, config, '
'or calib.')
if ((operation_args.bootloader or operation_args.application or
operation_args.bootloader_application) and not
(global_args.force_hardware or operation_args.force_hardware)):
raise ValueError('You must specify --force_hardware with --jlink when '
'writing bootloader or bootloader_application or '
'application.')
if len(operation_args.target) != 1:
raise ValueError('Only one node can be specified when using --jlink.')
for target in operation_args.target:
if operation_args.calib:
calib_file = _FindUniqueFile(global_args.tms570_bin, operation_args.calib,
operation_args.prefix,
'calib_params.bin', target.snake_name)
if global_args.jlink:
_AddJlinkCommand('calib', target, calib_file)
else:
_AddCommand(target, calib_file, '--calib')
elif operation_args.config:
config_file = _FindUniqueFile(
global_args.tms570_bin, operation_args.config,
operation_args.prefix, 'config_params.bin', target.snake_name)
if global_args.jlink:
_AddJlinkCommand('config', target, config_file)
else:
_AddCommand(target, config_file, '--config')
elif operation_args.bootloader:
_ProgramBootloader(target, target)
elif operation_args.bootloader_application:
if global_args.jlink:
_AddJlinkCommand(
'application', target,
_GetProgramElf(global_args.tms570_bin, target,
'bootloader_application'))
else:
_AddCommand(target, _GetProgramElf(global_args.tms570_bin, target,
'bootloader_application'),
*(_GetForceHardwareArgs()))
elif operation_args.application:
if global_args.jlink:
_AddJlinkCommand(
'application', target,
_GetProgramElf(global_args.tms570_bin, target, 'application'))
else:
_AddCommand(target, _GetProgramElf(global_args.tms570_bin, target,
'application'))
elif operation_args.upgrade_bootloader:
_RenameNode(target, target)
elif operation_args.rename_to:
dest = _GetTargetSelection(operation_args, [operation_args.rename_to])[0]
if dest == target:
raise ValueError('Cannot rename node to itself!')
_RenameNode(target, dest)
elif operation_args.carrier_serial:
_SerialParam(target, *operation_args.carrier_serial, carrier=True)
elif operation_args.serial:
_SerialParam(target, *operation_args.serial, carrier=False)
else:
assert False, 'Invalid operation.'
return cmd_list
def Main(argv):
"""Main function for the application."""
parser = argparse.ArgumentParser(
description='Program multiple nodes in a sane manner. Guess what the '
'user wants to do. The application is programmed by default.')
operation = parser.add_argument_group('operation')
operation.add_argument('target', nargs='*', help='List of target nodes.')
operation.add_argument('--prefix',
help='Prefix used for matching nodes or files. Will '
'program all nodes with this prefix unless a subset '
'is specified.')
operation.add_argument('--location', nargs='+', help='Select nodes from a '
'specific location (wing, ground_station, '
'remote_command).')
operation.add_argument('--force_hardware', help='Force the hardware type for '
'a bootloader operation.')
segment = operation.add_mutually_exclusive_group()
segment.add_argument('--application', action='store_true',
help='Program the application segment (default).')
segment.add_argument('--calib', help='Program calib params.')
segment.add_argument('--config', help='Program config params.')
segment.add_argument('--bootloader', action='store_true',
help='Program the bootloader segment.')
segment.add_argument('--bootloader_application', action='store_true',
help='Program the bootloader application.')
segment.add_argument('--upgrade_bootloader', action='store_true',
help='Upgrade the bootloader for a node.')
segment.add_argument('--rename_to', help='Rename target node.')
segment.add_argument('--serial', nargs=3,
help='Program the serial information.',
metavar=('SERIAL_TYPE', 'REVISION', 'SERIAL_NUMBER'))
segment.add_argument('--carrier_serial', nargs=3,
help='Program the carrier serial information.',
metavar=('SERIAL_TYPE', 'REVISION', 'SERIAL_NUMBER'))
parser.add_argument('--batch', nargs='+', help='Perform a sequence of '
'operations.')
parser.add_argument('--system_config', nargs='+', help='Use a designated '
'system configuration (or multiple configurations). '
'Options are: %s.' % ', '.join(
system_config.SystemConfig.GetAllSystemNames()))
parser.add_argument('--print_args', action='store_true',
help='Print arguments rather than running commands.')
parser.add_argument('--bootloader_client', help='Override the default '
'bootloader client found by bazel.')
parser.add_argument('--tms570_bin', help='Override the TMS570 binary '
'directory found by bazel.')
parser.add_argument('--parallel', action='store_true',
help='Invoke the parallel bootloader.')
parser.add_argument('--jlink', action='store_true',
help='Uses J-Link JTAG device for operations.')
try:
args = _CheckGlobalOptions(parser.parse_args(argv[1:]))
cmd_list = []
def _CreateAndAddCommand(cmd_args):
op_args = _CheckOperationOptions(parser.parse_args(cmd_args),
args.jlink)
cmd_list.extend(_GetOperationCommands(args, op_args))
if args.batch:
for batch_step in args.batch:
_CreateAndAddCommand(batch_step.split())
elif args.system_config:
for config_name in args.system_config:
sys_config = system_config.SystemConfig(config_name,
net_config=_NETWORK_CONFIG)
for node in sys_config.nodes:
_CreateAndAddCommand([node.snake_name])
if node in sys_config.config:
_CreateAndAddCommand([node.snake_name, '--config',
sys_config.config[node]])
if node in sys_config.calib:
_CreateAndAddCommand([node.snake_name, '--calib',
sys_config.calib[node]])
else:
cmd_list += _GetOperationCommands(args, args)
except (ValueError, KeyError) as e:
sys.stderr.write('ERROR: %s\n\n' % e)
parser.print_usage()
sys.exit(-1)
parallel_args = ['--curses']
for cmd in cmd_list:
while cmd:
if args.print_args:
print ' '.join(cmd.GetArgs())
elif args.parallel and isinstance(cmd, BinTask):
parallel_args.append(' '.join(cmd.GetArgs()))
else:
try:
cmd.ExecuteTask()
except subprocess.CalledProcessError:
sys.exit(-1)
cmd = cmd.GetDependentTask()
if args.parallel and not args.print_args:
parallel_bootloader.Main(parallel_args)
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)s:%(name)s:%(message)s',
level=logging.INFO)
Main(sys.argv)
| apache-2.0 | -7,311,248,603,513,480,000 | 40.693694 | 80 | 0.639081 | false |
jopohl/urh | src/urh/plugins/ZeroHide/ZeroHideAction.py | 1 | 2253 | from PyQt5.QtWidgets import QUndoCommand
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
class ZeroHideAction(QUndoCommand):
def __init__(self, protocol: ProtocolAnalyzer, following_zeros: int, view: int, zero_hide_offsets: dict):
super().__init__()
self.protocol = protocol
self.following_zeros = following_zeros
self.viewtype = view
self.setText("Hide zero sequences >= " + str(self.following_zeros))
self.zero_hide_offsets = zero_hide_offsets
def redo(self):
factor = 1
if self.viewtype == 1:
factor = 4
elif self.viewtype == 2:
factor = 8
pa = self.protocol
self.zero_hide_offsets.clear()
for i in range(pa.num_messages):
message = pa.messages[i]
if self.viewtype == 0:
data = message.decoded_bits_str
elif self.viewtype == 1:
data = message.decoded_hex_str
else:
data = message.decoded_ascii_str
zero_sequences = self.__get_zero_seq_indexes(data, self.following_zeros)
self.zero_hide_offsets[i] = {start: end-start for start, end in zero_sequences}
for seq in reversed(zero_sequences):
full_bits = pa.messages[i].decoded_bits
start = seq[0] * factor
end = seq[1] * factor
pa.messages[i].decoded_bits = full_bits[:start] + full_bits[end:]
def undo(self):
self.zero_hide_offsets.clear()
self.protocol.clear_decoded_bits()
def __get_zero_seq_indexes(self, message: str, following_zeros: int):
"""
:rtype: list[tuple of int]
"""
result = []
if following_zeros > len(message):
return result
zero_counter = 0
for i in range(0, len(message)):
if message[i] == "0":
zero_counter += 1
else:
if zero_counter >= following_zeros:
result.append((i-zero_counter, i))
zero_counter = 0
if zero_counter >= following_zeros:
result.append((len(message) - zero_counter, len(message)))
return result | gpl-3.0 | 585,459,217,494,885,800 | 31.666667 | 109 | 0.556147 | false |
WilsonKiggundu/HospitalManagement_InformationSystem | hospitalmanager/app/migrations/0001_initial.py | 1 | 1041 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-16 16:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, max_length=500)),
('is_active', models.BooleanField(default=True)),
('avatar', models.ImageField(blank=True, null=True, upload_to='')),
('birth_date', models.DateField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| gpl-3.0 | 8,859,425,243,024,919,000 | 33.7 | 121 | 0.614793 | false |
aminotti/yameo | lib/httpmethod.py | 1 | 11707 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2015 Anthony Minotti <anthony@minotti.cool>.
#
#
# This file is part of Yameo framework.
#
# Yameo framework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Yameo framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yameo framework. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
from flask import Response
from flask import request
from .exceptions import *
from lib import contenttype
from lib.orm.binary import Binary
from lib.orm.fields import BinaryField
# from lib.orm import ORMFilter, BinaryField, SQLRelationship, one2many
class HTTPMethods(object):
@classmethod
def dispatchMethods(cls, domain=None, relationship=None):
if domain:
cls._checkDomains(domain)
if request.method == 'GET':
return cls._getHTTP(domain, relationship)
elif request.method == 'PUT':
return cls._putHTTP(domain)
elif request.method == 'PATCH':
return cls._patchHTTP(domain)
elif request.method == 'DELETE':
return cls._deleteHTTP(domain)
elif request.method == 'POST':
return cls._postHTTP()
@classmethod
def _getHTTP(cls, domain=None, relationship=None):
        # TODO add expend = True to fetch the related objects instead of just their ids
        # TODO add an attribute expend = request.args.get('expend', False) to control whether
        # relation URLs or the full related data are returned
"""
# Manage relashionship
relationshipcls = None
for relation in one2many[cls.__name__]:
if relation.name is relationship:
relationshipcls = relation.table
break
if relationshipcls is None and relationship is not None:
raise Core500Exception("Bad relationship '{}'.".format(relationship))
"""
# Manage request arguement "fields"
fields = request.args.get('fields', None)
if fields:
fields = fields.split(",")
if type(fields) is not list:
raise Core400Exception("'{}' : bad type for fields.".format(fields))
for name in fields:
# Fields can be col of cls or name of a relationcls or if a relationship define, col of a relationship
# if not (name in cls._columns or name in (d.name for d in one2many[cls.__name__]) or (relationshipcls and name in relationshipcls._columns)):
if not (name in cls._columns):
raise Core400Exception("Bad value '{}' in fields list.".format(name))
# Check request's arguement "count"
count = request.args.get('count', None)
if count:
try:
int(count)
except ValueError:
raise Core400Exception("'{}' : bad type for count.".format(count))
# Check request's arguement "offset"
offset = request.args.get('offset', None)
if offset:
try:
int(offset)
except ValueError:
raise Core400Exception("'{}' : bad type for offset.".format(offset))
# Check request's arguement "sort"
sort = request.args.get('sort', None)
if sort:
sort = sort.split(",")
for f in sort:
if f not in cls._columns:
raise Core400Exception("Can't sort on {}. Field doesn't exist.".format(f))
        # TODO Set resource language: read the request's 'Accept-Language' and set 'Content-Language' on the response
        # langs = cls._orderHeaderByq(request.headers['Accept-Language'])  # Returns languages ordered by preference; the first item is the preferred one.
ressources = cls.search(domain, fields, count, offset, sort)
if len(ressources) == 0:
raise Core404Exception("Empty set")
data = list()
if fields:
fields += cls._identifiers
for r in ressources:
# create dict from all ressource's fields or selected field in request arg if provided
data.append(dict([(f, r._fields[f]) for f in (fields or cls._columns)]))
ctype, Converter = cls._getAcceptedContentType()
r = Response(Converter.fromDict(data), headers={"Content-type": "{};charset=UTF-8".format(ctype)})
r.status_code = 200
return r
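    # Illustrative request this handler accepts (hypothetical route/resource, not from the original source):
    #     GET /book/?fields=title,author&sort=title&count=20&offset=40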
    # Resource creation with auto id
@classmethod
def _postHTTP(cls):
dico = cls.getDataFromContentType()
ressource = cls(dico)
rid = ressource.create()
if rid and len(ressource._identifiers) < 2:
url = request.base_url + str(rid) + '/'
data = {"Location": url}
ctype, Converter = cls._getAcceptedContentType()
r = Response(Converter.fromDict(data), headers={"Content-type": "{};charset=UTF-8".format(ctype)})
else:
r = Response(None)
del r.headers['content-type']
r.status_code = 201
return r
    # Resource creation with ids provided
@classmethod
def _putHTTP(cls, domain):
dico = cls.getDataFromContentType()
if type(dico) is not dict:
raise Core400Exception("Bad content.")
ressource = cls(dico)
ressource.setIdFromDomain(domain)
ressource.create()
r = Response(None)
del r.headers['content-type']
r.status_code = 201
return r
    # Update one or several resources
@classmethod
def _patchHTTP(cls, domain):
dico = cls.getDataFromContentType()
ressource = cls(dico)
ressource.update(dico.keys(), domain)
r = Response(None)
del r.headers['content-type']
r.status_code = 204
return r
    # Delete one or several resources
@classmethod
def _deleteHTTP(cls, domain):
cls.delete(domain)
r = Response(None)
del r.headers['content-type']
r.status_code = 204
return r
@classmethod
def _getAcceptedContentType(cls):
if 'Accept' in request.headers:
accepts = cls._orderHeaderByq(request.headers['Accept'])
for accept in accepts:
if accept in contenttype.Converter.keys():
return accept, contenttype.Converter[accept]
break
# Default content type is JSON
        # TODO RFC2616 sect 14.1: on a wrong 'Accept' header return 406 (Not Acceptable); on '*' or no 'Accept' header, default to json
return "application/json", contenttype.Converter["application/json"]
@classmethod
def _orderHeaderByq(cls, header):
""" Order HTTP header by preference set with q=number
        :return list: ordered list with the most preferred items first
"""
ordered = dict()
for part in header.split(","):
subpart = part.split(";")
if len(subpart) > 1 and "q=" in subpart[1]:
try:
ordered[subpart[0].strip()] = float(subpart[1].strip()[2:])
except ValueError:
raise Core400Exception("'{}' : q must be a number.".format(subpart[1].strip()))
else:
ordered[subpart[0].strip()] = 1.0
return sorted(ordered, key=ordered.__getitem__, reverse=True)
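        # Example (illustrative): "text/html;q=0.8, application/json"
        # -> ['application/json', 'text/html']  (a missing q defaults to 1.0)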
@classmethod
def getDataFromContentType(cls):
# http://www.w3.org/Protocols/rfc1341/4_Content-Type.html
""" Devrais accepter que multipart/form-data, multipart/mixed,type defini dans lib.contenttype ou binaire
multipart/form-data et multipart/mixed devrais accepter que type defini dans lib.contenttype et binaire
Type binaire :
* GET|PUT /binary/<ressource>/<id1>[/<id2>]/attribute.ext
* del quand lattribut de la ressource est set a NULL (au lieux de contenir URL)
"""
        # TODO document the different possible content-types along with their body contents
for ctype, conv in contenttype.Converter.items():
if ctype in request.headers['Content-Type']:
return conv.toDict(request.data)
break
if 'application/x-www-form-urlencoded' in request.headers['Content-Type']:
            # TODO handle a normal POST (x-www-form-urlencoded) form (see tests/form.html)
print request.data
return dict()
elif 'multipart/form-data' in request.headers['Content-Type']:
for val in request.form.values():
                # TODO Currently a single form attribute is sent that contains a json with all the fields:
                # - Replace with: one form attribute per field (no content-type notion anymore) => see tests/form_file.html
                # - Handle the content types of the sub-parts (json, xml, file, ...) with 'multipart/mixed'
dico = contenttype.Converter['application/json'].toDict(val)
for key, val in request.files.items():
# Check field name and type
col = getattr(cls, "_{}_field".format(key), None)
if col is None or not isinstance(col, BinaryField):
raise Core400Exception("Bad binary attribute : '{}'".format(key))
binary = Binary(cls.__name__.lower(), key, val.mimetype, os.path.splitext(val.filename)[1][1:], val.stream)
dico[key] = binary
return dico
elif 'multipart/mixed' in request.headers['Content-Type']:
            # TODO Handle multipart/mixed; write a lib to handle http/mail message bodies:
# Extract boundary from content-type headers
# parse request.data with boundary to get dict() : {'subcontenttype1': 'data1', 'subcontenttype2':'data2', ...}
print request.data
return dict()
else:
raise Core404Exception("Forbidden Content-Type '{}'".format(request.headers['Content-Type']))
@classmethod
def _checkDomains(cls, domain):
if type(domain) is tuple:
cls._checkDomainTuple(domain)
elif type(domain) is list:
cls._checkDomainList(domain)
else:
raise Core400Exception("Invalid domain : {}, Must be list or tuple".format(str(domain)))
@classmethod
def _checkDomainList(cls, domain):
for dom in domain:
if type(dom) is str and dom in ['|', '&']:
pass
elif type(dom) is tuple:
cls._checkDomainTuple(dom)
elif type(dom) is list:
cls._checkDomainList(dom)
else:
raise Core500Exception("Invalid domain part : {}".format(str(dom)))
@classmethod
def _checkDomainTuple(cls, domain):
if len(domain) != 3:
raise Core500Exception("Invalid tuple for domain {}".format(domain))
# Check field name
if domain[0] not in cls._columns:
raise Core400Exception("Bad attribute : '{}'".format(domain[0]))
else:
# Check value type & value syntax
getattr(cls, "_{}_field".format(domain[0])).check(domain[2])
| agpl-3.0 | 7,189,659,849,950,701,000 | 39.50519 | 158 | 0.594396 | false |
eayunstack/oslo.messaging | tests/test_opts.py | 1 | 1740 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import stevedore
import testtools
try:
from oslo.messaging import opts
except ImportError:
opts = None
from tests import utils as test_utils
class OptsTestCase(test_utils.BaseTestCase):
@testtools.skipIf(opts is None, "Options not importable")
def setUp(self):
super(OptsTestCase, self).setUp()
def _test_list_opts(self, result):
self.assertEqual(3, len(result))
groups = [g for (g, l) in result]
self.assertIn(None, groups)
self.assertIn('matchmaker_ring', groups)
self.assertIn('matchmaker_redis', groups)
opt_names = [o.name for (g, l) in result for o in l]
self.assertIn('rpc_backend', opt_names)
def test_list_opts(self):
self._test_list_opts(opts.list_opts())
def test_entry_point(self):
result = None
for ext in stevedore.ExtensionManager('oslo.config.opts',
invoke_on_load=True):
if ext.name == "oslo.messaging":
result = ext.obj
break
self.assertIsNotNone(result)
self._test_list_opts(result)
| apache-2.0 | -172,664,514,099,144,160 | 31.222222 | 78 | 0.644828 | false |
alexei-matveev/ccp1gui | interfaces/cmlatominfo.py | 1 | 4268 | #
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2007 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import sys
from xml.dom.minidom import parse
from xml.xpath import Evaluate
def CmlAtomInfo(xmlFile):
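    # Parses atom data from a CML (Chemical Markup Language) document and returns a dict
    # keyed by atom index; each value is a dict of that atom's attributes (element type,
    # coordinates, ...), handling both CML-1 builtin child elements and array-style atomArray markup.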
#get DOM object
xmldoc=open(xmlFile)
doc=parse(xmldoc)
#get the namespaces used in the xml document
nsMap = {}
ns_nodes = Evaluate('namespace::node()',doc.documentElement)
for ns in ns_nodes:
nsMap[ns.value]=ns.localName
#initialise objects
idlist=[]
attribDict={}
atomDict={}
#get atomArray nodes
#remember to check through all the namespaces
atomArrayNodes=doc.getElementsByTagName("atomArray")
for ns in nsMap.keys():
atomArrayNodes+=doc.getElementsByTagNameNS(ns,"atomArray")
#get the atom nodes for each atomArray node
#remember to check through all the namespaces
for atomArrayNode in atomArrayNodes:
atomNodes=atomArrayNode.getElementsByTagName("atom")
for ns in nsMap.keys():
atomNodes+=atomArrayNode.getElementsByTagNameNS(ns,"atom")
#check for the use of arrays (no 'atom' nodes)
atomArrayInfo={}
if atomNodes==[]:
atomArrayChildNodes=atomArrayNode.childNodes
for atomArrayChildNode in atomArrayChildNodes:
if atomArrayChildNode.nodeType==atomArrayChildNode.ELEMENT_NODE:
dataName=atomArrayChildNode.getAttribute('builtin')
subChildNodes=atomArrayChildNode.childNodes
for subChildNode in subChildNodes:
data=subChildNode.data.encode("ascii").split()
atomArrayInfo.update({dataName:data})
for i in range(0,len(atomArrayInfo['atomId'])):
for key in atomArrayInfo.keys():
#if key!='atomId':
attribDict.update({key:atomArrayInfo[key][i]})
#atomDict.update({atomArrayInfo['atomId'][i]:attribDict})
atomDict.update({i:attribDict})
attribDict={}
#get the attribute nodes for each atom node
i=0
for atomNode in atomNodes:
attrib=atomNode.attributes
for attribNode in attrib.values():
#if attribNode.name=="id":
# id=attribNode.value
# idlist.append(id)
#else:
attribDict.update({attribNode.name:attribNode.value.encode("ascii")})
#The following obtains data from CML-1 markup
#get the child nodes of each atom node
atomChildNodes=atomNode.childNodes
#get the data name of each child node
for atomChildNode in atomChildNodes:
if atomChildNode.nodeType==atomChildNode.ELEMENT_NODE:
dataName=atomChildNode.getAttribute("builtin")
#get the data value from the text node of each child element node
subAtomChildNodes=atomChildNode.childNodes
for subAtomChildNode in subAtomChildNodes:
if subAtomChildNode.nodeType==subAtomChildNode.TEXT_NODE:
dataValue=subAtomChildNode.data.encode("ascii")
attribDict.update({dataName:dataValue})
#atomDict.update({id:attribDict})
atomDict.update({i:attribDict})
attribDict={}
i=i+1
return atomDict
| gpl-2.0 | 4,901,287,225,231,356,000 | 38.88785 | 85 | 0.6209 | false |
dyf102/Gomoku-online | server/model/game.py | 1 | 4356 | import uuid
EMPTY = 0
BLACK = 1
WHITE = -1
STATUS_EMPTY = 11
STATUS_WAITING = 12
STATUS_FIGHTING = 13
STATUS_SET = (STATUS_EMPTY, STATUS_WAITING, STATUS_FIGHTING)
class Game(object):
def __init__(self, host_id, host_name, guest_id=None, guest_name=None):
self.host_id = host_id
self.host_name = host_name
self.guest_id = guest_id
self.guest_name = guest_name
self.host_color = self.guest_color = 0 # undefined
self.board = [[EMPTY for _ in xrange(15)] for _ in xrange(15)]
self.status = STATUS_EMPTY
self.id = str(uuid.uuid1())
def __eq__(self, other):
return self.id == other.id
def get_id(self):
return self.id
def get_status(self):
return self.status
def is_in_game(self, uid):
return uid in (self.host_id, self.guest_id)
def set_status(self, new_status):
assert new_status in STATUS_SET
self.status = new_status
def __str__(self):
return '{} vs {}'.format(self.host_name, self.guest_name)
def set_host_black(self):
self.host_color = BLACK
self.guest_color = WHITE
def set_host_white(self):
self.host_color = WHITE
self.guest_color = BLACK
def is_win(self, x, y, color):
if x < 0 or x > 15 or y < 0 or y > 15 or color == EMPTY:
return False
return self.check_x(x, y, color) or \
self.check_y(x, y, color) or \
self.check_right_diagonal(x, y, color) or \
self.check_left_diagonal(x, y, color)
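    # Each check_* helper below walks outward from (x, y) along one direction pair;
    # finding four more stones of the same colour (counter == 4) means five in a row.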
def check_x(self, x, y, color):
board = self.board
counter = 0
i = x
j = x
left_move = False
right_move = False
# x axis
        while board[i][y] == color and board[j][y] == color:
            left_move = False
            right_move = False
if i - 1 >= 0 and board[i - 1][y] == color:
i -= 1
counter += 1
left_move = True
if j + 1 < 15 and board[j + 1][y] == color:
j += 1
counter += 1
right_move = True
if counter == 4:
return True
if left_move is False and right_move is False:
break
return False
def check_y(self, x, y, color):
board = self.board
# y axis
counter = 0
i = j = y
up_move = down_move = False
        while board[x][i] == color and board[x][j] == color:
            up_move = False
            down_move = False
if i - 1 >= 0 and board[x][i - 1] == color:
i -= 1
counter += 1
up_move = True
if j + 1 < 15 and board[x][j + 1] == color:
j += 1
counter += 1
down_move = True
if counter == 4:
return True
if down_move is False and up_move is False:
break
return False
def check_right_diagonal(self, x, y, color):
board = self.board
# check diagonal
counter = 0
i = j = 0
up_move = down_move = False
        while board[x + i][y - i] == color and board[x - j][y + j] == color:
            up_move = False
            down_move = False
            if y - i - 1 >= 0 and x + i + 1 < 15 and board[x + i + 1][y - i - 1] == color:
                i += 1
                counter += 1
                up_move = True
            if y + j + 1 < 15 and x - j - 1 >= 0 and board[x - j - 1][y + j + 1] == color:
                j += 1
                counter += 1
                down_move = True
if counter == 4:
return True
if down_move is False and up_move is False:
break
return False
def check_left_diagonal(self, x, y, color):
board = self.board
# check diagonal
counter = 0
i = j = 0
up_move = down_move = False
        while board[x + i][y + i] == color and board[x - j][y - j] == color:
            up_move = False
            down_move = False
            if x + i + 1 < 15 and y + i + 1 < 15 and board[x + i + 1][y + i + 1] == color:
                i += 1
                counter += 1
                up_move = True
            if x - j - 1 >= 0 and y - j - 1 >= 0 and board[x - j - 1][y - j - 1] == color:
                j += 1
                counter += 1
                down_move = True
if counter == 4:
return True
if down_move is False and up_move is False:
break
return False
| apache-2.0 | -2,934,371,076,088,916,500 | 29.25 | 76 | 0.463728 | false |
FluentTradeTechnologies/netconfigit | modules/devices/fortinet.py | 1 | 4074 | # -*- coding: utf-8 -*-
"""
Netconfigit Fortinet device class
"""
__license__ = "MIT License"
__author__ = "Eric Griffin"
__copyright__ = "Copyright (C) 2014, Fluent Trade Technologies"
__version__ = "1.1"
import logging
import os
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Fortinet(object):
"""Fortinet device class
Defines and runs device-specific actions on a device
:param _device: the device
:param _netconfigit: the netconfigit object containing the configuration
"""
def __init__(self, _device, _netconfigit):
"""Defines action commands for the device associated with the class
:param _device: the device on which actions are being run
:param _netconfigit: the netconfigit object containing the configuration
"""
self.device = _device
self.netconfigit = _netconfigit
self.command_copy_current = "exec backup config tftp " + self.device.name + "/current-config " \
+ self.netconfigit.transfer_ip + "\n"
self.command_clear_dhcp_leases = "execute dhcp lease-clear all" + "\n"
def run_action(self, action):
"""Defines and runs actions for the device associated with the class
Checks for valid action names and runs the actions
Returns 0/1 for fail/success of the action
:param action: the action to run
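        Example (a hedged sketch -- the device and netconfigit objects are
        assumed to come from netconfigit's normal configuration loading):
            Fortinet(device, netconfigit).run_action("current-config")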
"""
status = 0
connected = 0
if self.device.access_type == "ssh":
try:
self.client, self.channel = self.netconfigit.get_ssh_client_channel(self.device)
connected = 1
except:
logger.error("Error connecting to " + self.device.name)
if connected == 1:
if action == "current-config":
status = self.get_config()
elif action == "clear-dhcp-leases":
status = self.clear_dhcp_leases()
else:
logger.error("Action " + action + " not implemented for " +
self.device.manufacturer.title() + " devices.")
self.client.close()
else:
logger.error("Access method " + self.device.access_type + " not implemented for " +
self.device.manufacturer.title() + " devices.")
if status == 1:
self.netconfigit.success_list.append({self.device.name: action})
if status == 0:
self.netconfigit.failure_list.append({self.device.name: action})
def get_config(self):
"""Transfers configurations from device via ssh and tftp
Issues commands to device via ssh to transfer configs to local tftp server
:return: boolean, 0 means transfer failed, 1 means transfer was successful
"""
output = ""
success = 0
time.sleep(5)
self.channel.send(self.command_copy_current)
while not self.channel.recv_ready():
time.sleep(7)
output += self.channel.recv(1024)
self.channel.send("\n")
while not self.channel.recv_ready():
time.sleep(12)
output += self.channel.recv(1024)
if self.netconfigit.verbose == 1:
print output
if "Send config file to tftp server OK" in output:
success = 1
if "Error" in output:
success = 0
return success
def clear_dhcp_leases(self):
"""Clears all DHCP leases
:return: boolean, 0 means failure, 1 means success
"""
output = ""
success = 0
time.sleep(5)
self.channel.send(self.command_clear_dhcp_leases)
while not self.channel.recv_ready():
time.sleep(5)
output += self.channel.recv(1024)
self.channel.send("\n")
# there is no output for this command so success is always true
success = 1
return success | mit | -1,249,435,471,950,509,800 | 33.243697 | 104 | 0.583702 | false |
ajmarks/gymnast | gymnast/pdf_elements/pdf_page.py | 1 | 6340 | """
PDF Document Page and Page Node elements
"""
import six
from .pdf_element import PdfElement
from ..exc import PdfParseError, PdfError
from ..pdf_types import PdfType, PdfRaw, PdfArray
from ..pdf_parser import PdfParser
from ..pdf_operation import PdfOperation
def parse_page(obj):
"""Parse a page like object into the appropraite subclass"""
obj = obj.value
if obj['Type'] == 'Pages':
return PdfPageNode(obj)
elif obj['Type'] == 'Page':
return PdfPage(obj)
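# A hedged usage sketch (the raw page object is assumed to come from
# gymnast's PdfParser machinery):
#
#   page_or_node = parse_page(raw_page_object)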
class PdfPageResources(PdfElement):
"""Resources dict on page objects. Technically, it's not a PDF
object type, but this lets us re-use a lot of code."""
pass
class PdfAbstractPage(PdfElement):
"""Base class for PDF Pages and Page Nodes."""
def __init__(self, page, obj_key=None, document=None):
"""Create a new or node with properly inherited properties
where applicable"""
#Common and inheritable properties
super(PdfAbstractPage, self).__init__(page, obj_key, document)
self._resources = page.get('Resources')
self._mediabox = page.get('MediaBox')
self._cropbox = page.get('CropBox')
self._rotate = page.get('Rotate')
self._fonts = None
@property
def Resources(self):
"""Page resources, most notably fonts"""
if self._resources: return PdfPageResources.from_object(self._resources)
elif self._parent: return self.Parent.Resources
else: raise PdfError('Resource dictionary not found')
@property
def MediaBox(self):
"""Size of the media"""
if self._mediabox: return self._mediabox.value
elif self._parent: return self.Parent.MediaBox
else: raise PdfError('MediaBox not found')
@property
def CropBox(self):
"""Visible area of the media"""
box = self.raw_cropbox
return box if box else self.MediaBox
@property
def raw_cropbox(self):
"""Inherited CropBox with no default value"""
if self._cropbox: return self._cropbox.value
elif self._parent: return self.Parent.raw_cropbox
else: return None
@property
def Rotate(self):
"""Rotation angle. Should be an integer multiple of 90."""
if self._rotate: return self._rotate
elif self._parent: return self.Parent.Rotate
else: return 0
@property
def Fonts(self):
"""Neatly processed dict of the page's fonts. Serves as a shortcut
to .Resources.Font"""
if self._fonts is None:
self._fonts = {k: v.parsed_object
for k,v in six.iteritems(self.Resources.Font)}
return self._fonts
@property
def unique_id(self):
"""Unique key to lookup page numbers and such in the document"""
return self.get('ID', self._obj_key)
@property
def pages_index(self):
"""Zero-indexed page number in the document"""
if self.document:
return self.document.get_page_index(self)
class PdfPageNode(PdfAbstractPage):
"""Page node object"""
def __init__(self, node, obj_key=None, document=None):
"""Create a new PdfPageNode from an object dict"""
node = node.value
if node['Type'] != 'Pages':
raise ValueError('Type "Pages" expected, got "{}"'.format(node['Type']))
super(PdfPageNode, self).__init__(node, obj_key, document)
@property
def Kids(self):
"""Child pages and nodes"""
return [p.parsed_object for p in self._object['Kids'].value]
#def __getitem__(self, key):
# return self._kids[key]
#def __contains__(self, item):
# return self._kids.__contains__(item)
#def __setitem__(self, key, value):
# return self._kids.__setitem__(key, value)
#def __delitem__(self, key, value):
# return self._kids.__delitem__(key)
#def __iter__(self):
# return self._kids.__iter__()
#def __reversed__(self):
# return self._kids.__reversed__()
#@property
#def Count(self):
# return len(self._kids)
#def __str__(self):
# return 'PdfPageNode - %d children'%self.Count
class PdfPage(PdfAbstractPage):
"""Abstract class for pages and page nodes"""
def __init__(self, page, obj_key=None, document=None):
"""Create a new PdfPage from an object dict"""
page = page.value
if page['Type'] != 'Page':
raise PdfParseError('Page dicts must have Type = "Page"')
super(PdfPage, self).__init__(page, obj_key, document)
self._contents = ContentStream(page.get('Contents', []))
self._fonts = None # Load these when they're called
@property
def Contents(self):
return self._contents
def __getattr__(self, name):
#Default values
defaults = {'BleedBox': 'MediaBox',
'TrimBox' : 'CropBox',
'ArtBox' : 'CropBox'}
try:
val = super(PdfPage, self).__getattr__(name)
except KeyError:
try:
val = self.__dict__[defaults[name]]
except KeyError:
raise AttributeError('Object has no attribute "{}"'.format(name))
if isinstance(val, PdfType):
return val.parsed_object
else:
return val
class ContentStream(object):
"""A page's content stream"""
def __init__(self, contents):
if not isinstance(contents, PdfArray):
contents = [contents]
self._contents = contents
@property
def operations(self):
"""Iterator over the various PDF operations in the content stream.
Each element is an instance of a subclass of PdfOperation, which can
then be rendered by the page by calling e.g. next(operations)(renderer)
where renderer is a PdfRenderer object."""
for stream in self._contents:
for oper in self._extract_stream_ops(stream):
yield oper
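    # A hedged usage sketch (the renderer is assumed to be a PdfRenderer
    # instance, as described in the docstring above):
    #
    #   for oper in page.Contents.operations:
    #       oper(renderer)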
@staticmethod
def _extract_stream_ops(stream):
operands = []
for op in PdfParser().iterparse(stream.value.data):
if isinstance(op, PdfRaw):
yield PdfOperation[op](*operands)
operands = []
else:
operands.append(op)
| mit | -6,712,480,742,190,932,000 | 35.647399 | 84 | 0.594322 | false |
gutow/sagenb_slickgrid | sagenb/notebook/cell.py | 1 | 82283 | # -*- coding: utf-8 -*-
"""
A Cell
A cell is a single input/output block. Worksheets are built out of
a list of cells.
"""
###########################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
import os
import re
import shutil
import textwrap
import time
from cgi import escape
from sagenb.misc.misc import (word_wrap, strip_string_literals,
set_restrictive_permissions, unicode_str,
encoded_str)
from interact import (INTERACT_RESTART, INTERACT_UPDATE_PREFIX,
INTERACT_TEXT, INTERACT_HTML)
from flask import g #we need the site name to build paths
# Maximum number of characters allowed in output. This is needed to
# avoid overloading the web browser. For example, it should be possible
# to gracefully survive:
# while True:
# print "hello world"
# On the other hand, we don't want to lose the output of big matrices
# and numbers, so don't make this too small.
MAX_OUTPUT = 32000
MAX_OUTPUT_LINES = 120
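# When output exceeds these limits, Cell.set_output_text() below keeps
# roughly the first and last MAX_OUTPUT/2 characters (MAX_OUTPUT_LINES/2
# lines) and links to the untruncated text, which is written to a
# full_output.txt file in the cell's directory.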
# Used to detect and format tracebacks. See :func:`format_exception`.
TRACEBACK = 'Traceback (most recent call last):'
# This regexp matches "cell://blah..." in a non-greedy way (the ?), so
# we don't get several of these combined in one.
re_cell = re.compile('"cell://.*?"')
re_cell_2 = re.compile("'cell://.*?'") # same, but with single quotes
# Matches script blocks.
re_script = re.compile(r'<script[^>]*?>.*?</script>', re.DOTALL | re.I)
# Whether to enable editing of :class:`TextCell`s with TinyMCE.
JEDITABLE_TINYMCE = True
###########################
# Generic (abstract) cell #
###########################
class Cell_generic(object):
def __init__(self, id, worksheet):
"""
Creates a new generic cell.
INPUT:
- ``id`` - an integer or string; this cell's ID
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's parent worksheet
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: isinstance(C, sagenb.notebook.cell.Cell_generic)
True
sage: isinstance(C, sagenb.notebook.cell.TextCell)
False
sage: isinstance(C, sagenb.notebook.cell.Cell)
False
"""
try:
self._id = int(id)
except ValueError:
self._id = id
self._worksheet = worksheet
def __repr__(self):
"""
Returns a string representation of this generic cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.__repr__()
'Cell_generic 0'
"""
return "Cell_generic %s" % self._id
def __cmp__(self, right):
"""
Compares generic cells by ID.
INPUT:
- ``right`` - a :class:`Cell_generic` instance; the cell to
compare to this cell
OUTPUT:
- a boolean
EXAMPLES::
sage: C1 = sagenb.notebook.cell.Cell_generic(0, None)
sage: C2 = sagenb.notebook.cell.Cell_generic(1, None)
sage: C3 = sagenb.notebook.cell.Cell_generic(0, None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[False, True, False]
sage: C1 = sagenb.notebook.cell.TextCell('bagel', 'abc', None)
sage: C2 = sagenb.notebook.cell.TextCell('lox', 'abc', None)
sage: C3 = sagenb.notebook.cell.TextCell('lox', 'xyz', None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[False, False, True]
sage: C1 = sagenb.notebook.cell.Cell(7, '3+2', '5', None)
sage: C2 = sagenb.notebook.cell.Cell(7, '3+2', 'five', None)
sage: C3 = sagenb.notebook.cell.Cell('7', '2+3', '5', None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[True, True, True]
"""
return cmp(self.id(), right.id())
def id(self):
"""
Returns this generic cell's ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.id()
0
sage: C = sagenb.notebook.cell.Cell('blue', '2+3', '5', None)
sage: C.id()
'blue'
sage: C = sagenb.notebook.cell.TextCell('yellow', '2+3', None)
sage: C.id()
'yellow'
"""
return self._id
def set_id(self, id):
"""
Sets this generic cell's ID.
INPUT:
- ``id`` - an integer or string; the new ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.id()
0
sage: C.set_id('phone')
sage: C.id()
'phone'
"""
try:
self._id = int(id)
except ValueError:
self._id = id
def proxied_id(self):
"""
Returns the ID of the cell for which this generic cell is a
proxy. If this cell does not have such an ID, it returns the
cell's own ID.
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic('self_stand_in', None)
sage: [C.id(), C.proxied_id()]
['self_stand_in', 'self_stand_in']
"""
try:
return self._proxied_id
except AttributeError:
return self._id
def set_proxied_id(self, proxied_id):
"""
Sets, for this generic cell, the ID of the cell that it
proxies.
INPUT:
- ``proxied_id`` - an integer or string; the proxied cell's ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic('understudy', None)
sage: [C.id(), C.proxied_id()]
['understudy', 'understudy']
sage: C.set_proxied_id('principal')
sage: [C.id(), C.proxied_id()]
['understudy', 'principal']
"""
self._proxied_id = proxied_id
def worksheet(self):
"""
Returns this generic cell's worksheet object.
OUTPUT:
- a :class:`sagenb.notebook.worksheet.Worksheet` instance
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, 'worksheet object')
sage: C.worksheet()
'worksheet object'
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.worksheet() is W
True
sage: nb.delete()
"""
return self._worksheet
def set_worksheet(self, worksheet, id=None):
"""
Sets this generic cell's worksheet object and, optionally, its
ID.
INPUT:
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; the
cell's new worksheet object
- ``id`` - an integer or string (default: None); the cell's
new ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: W = "worksheet object"
sage: C.set_worksheet(W)
sage: C.worksheet()
'worksheet object'
"""
self._worksheet = worksheet
if id is not None:
self.set_id(id)
def worksheet_filename(self):
"""
Returns the filename of this generic cell's worksheet object.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.worksheet_filename()
'sage/0'
sage: nb.delete()
"""
return self._worksheet.filename()
def notebook(self):
"""
Returns this generic cell's associated notebook object.
OUTPUT:
- a :class:`sagenb.notebook.notebook.Notebook` instance
EXAMPLES::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.notebook() is nb
True
sage: nb.delete()
"""
return self._worksheet.notebook()
def is_last(self):
"""
Returns whether this generic cell is the last cell in its
worksheet object.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2"); C
Cell 2: in=2^2, out=
sage: C.is_last()
True
sage: C = W.get_cell_with_id(0)
sage: C.is_last()
False
sage: nb.delete()
"""
return self._worksheet.cell_list()[-1] == self
def next_id(self):
"""
Returns the ID of the next cell in this generic cell's
worksheet object. If this cell is *not* in the worksheet, it
returns the ID of the worksheet's *first* cell. If this *is*
the last cell, it returns its *own* ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(1, "2^2")
sage: C = W.get_cell_with_id(1)
sage: C.next_id()
2
sage: C = W.get_cell_with_id(2)
sage: C.next_id()
2
sage: nb.delete()
"""
L = self._worksheet.cell_list()
try:
k = L.index(self)
except ValueError:
print "Warning -- cell %s no longer exists" % self.id()
return L[0].id()
try:
return L[k + 1].id()
except IndexError:
return self.id()
def is_text_cell(self):
"""
Returns whether this generic cell is a text cell, i.e., an
instance of :class:`TextCell`.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: C = sagenb.notebook.cell.Cell(0, '2+4', '6', None)
sage: [X.is_text_cell() for X in (G, T, C)]
[False, True, False]
"""
return isinstance(self, TextCell)
def is_compute_cell(self):
"""
Returns whether this generic cell is a compute cell, i.e., an
instance of :class:`Cell`.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: C = sagenb.notebook.cell.Cell(0, '2+4', '6', None)
sage: [X.is_compute_cell() for X in (G, T, C)]
[False, False, True]
"""
return isinstance(self, Cell)
def is_auto_cell(self):
"""
Returns whether this is an automatically evaluated generic
cell. This is always false for :class:`Cell_generic`\ s and
:class:`TextCell`\ s.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: [X.is_auto_cell() for X in (G, T)]
[False, False]
"""
return False
def is_interactive_cell(self):
"""
Returns whether this generic cell uses
:func:`sagenb.notebook.interact.interact` as a function call
or decorator.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
            sage: [X.is_interactive_cell() for X in (G, T)]
[False, False]
"""
return False
#############
# Text cell #
#############
class TextCell(Cell_generic):
def __init__(self, id, text, worksheet):
"""
Creates a new text cell.
INPUT:
- ``id`` - an integer or string; this cell's ID
- ``text`` - a string; this cell's contents
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's parent worksheet
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C == loads(dumps(C))
True
"""
text = unicode_str(text)
self._text = text
super(TextCell, self).__init__(id, worksheet)
def __repr__(self):
"""
Returns a string representation of this text cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.__repr__()
'TextCell 0: 2+3'
"""
return "TextCell %s: %s" % (self._id, encoded_str(self._text))
def delete_output(self):
"""
Delete all output in this text cell. This does nothing since
text cells have no output.
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C
TextCell 0: 2+3
sage: C.delete_output()
sage: C
TextCell 0: 2+3
"""
pass # nothing to do -- text cells have no output
def set_input_text(self, input_text):
"""
Sets the input text of this text cell.
INPUT:
- ``input_text`` - a string; the new input text for this cell
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C
TextCell 0: 2+3
sage: C.set_input_text("3+2")
sage: C
TextCell 0: 3+2
"""
input_text = unicode_str(input_text)
self._text = input_text
def html(self, wrap=None, div_wrap=True, do_print=False,
editing=False, publish=False):
"""
Returns HTML code for this text cell, including its contents
and associated script elements.
INPUT:
- ``wrap`` -- an integer (default: None); number of columns to
wrap at (not used)
- ``div_wrap`` -- a boolean (default: True); whether to wrap
in a div (not used)
- ``do_print`` - a boolean (default: False); whether to render the
cell for printing
- ``editing`` - a boolean (default: False); whether to open an
editor for this cell
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', W)
sage: C.html()
u'...text_cell...2+3...'
sage: C.set_input_text("$2+3$")
"""
from template import template
return template(os.path.join('html', 'notebook', 'text_cell.html'),
cell = self, wrap = wrap, div_wrap = div_wrap,
do_print = do_print,
editing = editing, publish = publish)
def plain_text(self, prompts=False):
ur"""
Returns a plain text version of this text cell.
INPUT:
- ``prompts`` - a boolean (default: False); whether to strip
interpreter prompts from the beginning of each line
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.plain_text()
u'2+3'
sage: C = sagenb.notebook.cell.TextCell(0, 'ěščřžýáíéďĎ', None)
sage: C.plain_text()
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
"""
return self._text
def edit_text(self):
"""
Returns the text to be displayed for this text cell in the
Edit window.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.edit_text()
u'2+3'
"""
return self._text
def set_cell_output_type(self, typ='wrap'):
"""
Sets this text cell's output type. This does nothing for
:class:`TextCell`\ s.
INPUT:
- ``typ`` - a string (default: 'wrap'); the target output type
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.set_cell_output_type("wrap")
"""
pass # ignored
################
# Compute cell #
################
class Cell(Cell_generic):
def __init__(self, id, input, out, worksheet):
"""
Creates a new compute cell.
INPUT:
- ``id`` - an integer or string; the new cell's ID
- ``input`` - a string; this cell's input
- ``out`` - a string; this cell's output
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's worksheet object
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C == loads(dumps(C))
True
"""
out = unicode_str(out)
input = unicode_str(input)
super(Cell, self).__init__(id, worksheet)
self._out = out.replace('\r', '')
self._interrupted = False
self.has_new_output = False
self._asap = False
self.set_input_text(input)
# start with a random integer so that evaluations of the cell
# from different runs have different version numbers.
from sys import maxint
from random import randint
self._version = randint(0,maxint)
def __repr__(self):
"""
Returns a string representation of this compute cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None); C
Cell 0: in=2+3, out=5
"""
return 'Cell %s: in=%s, out=%s' % (self.id(), self._in, self._out)
def is_asap(self):
"""
Returns whether this compute cell is to be evaluated as soon
as possible (ASAP).
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_asap()
False
sage: C.set_asap(True)
sage: C.is_asap()
True
"""
try:
return self._asap
except AttributeError:
self._asap = False
return self._asap
def set_asap(self, asap):
"""
Sets whether to evaluate this compute cell as soon as possible
(ASAP).
INPUT:
- ``asap`` - a boolean convertible
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_asap()
False
sage: C.set_asap(True)
sage: C.is_asap()
True
"""
self._asap = bool(asap)
def delete_output(self):
r"""
Deletes all output in this compute cell. This also deletes the
files, since they appear as output of the cell.
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None); C
Cell 0: in=2+3, out=5
sage: C.delete_output()
sage: C
Cell 0: in=2+3, out=
When output is deleted, any files in the cell directory are deleted as well::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\nplot(sin(x),(x,0,5))\n///\n20\n}}}')
sage: C = W.cell_list()[0]
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
            ('d', Cell 0: in=plot(sin(x),(x,0,5)), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: C.delete_output()
sage: C.files()
[]
sage: W.quit()
sage: nb.delete()
"""
self._out = u''
self._out_html = u''
self._evaluated = False
self.delete_files()
def evaluated(self):
r"""
Returns whether this compute cell has been successfully
evaluated in a currently running session. This is not about
whether the output of the cell is valid given the input.
OUTPUT:
- a boolean
EXAMPLES: We create a worksheet with a cell that has wrong output::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\n2+3\n///\n20\n}}}')
sage: C = W.cell_list()[0]
sage: C
Cell 0: in=2+3, out=
20
We re-evaluate that input cell::
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('w', Cell 0: in=2+3, out=)
Now the output is right::
sage: C # random output -- depends on computer speed
Cell 0: in=2+3, out=
And the cell is considered to have been evaluated.
::
sage: C.evaluated() # random output -- depends on computer speed
True
::
sage: W.quit()
sage: nb.delete()
"""
# Cells are never considered evaluated in a new session.
if not self.worksheet().compute_process_has_been_started():
self._evaluated = False
return False
# Figure out if the worksheet is using the same sage
# session as this cell. (I'm not sure when this would
# be False.)
same_session = self.worksheet().sage() is self.sage()
try:
# Always not evaluated if sessions are different.
if not same_session:
self._evaluated = False
return False
return self._evaluated
except AttributeError:
# Default assumption is that cell has not been evaluated.
self._evaluated = False
return False
def set_no_output(self, no_output):
"""
Sets whether this is a "no output" compute cell, i.e., we
don't care about its output.
INPUT:
- ``no_output`` - a boolean convertible
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_no_output()
False
sage: C.set_no_output(True)
sage: C.is_no_output()
True
"""
self._no_output = bool(no_output)
def is_no_output(self):
"""
Returns whether this is a "no output" compute cell, i.e., we
don't care about its output.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_no_output()
False
sage: C.set_no_output(True)
sage: C.is_no_output()
True
"""
try:
return self._no_output
except AttributeError:
self._no_output = False
return self._no_output
def cell_output_type(self):
"""
Returns this compute cell's output type.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.cell_output_type()
'wrap'
sage: C.set_cell_output_type('nowrap')
sage: C.cell_output_type()
'nowrap'
"""
try:
return self._type
except AttributeError:
self._type = 'wrap'
return self._type
def set_cell_output_type(self, typ='wrap'):
"""
Sets this compute cell's output type.
INPUT:
- ``typ`` - a string (default: 'wrap'); the target output type
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.cell_output_type()
'wrap'
sage: C.set_cell_output_type('nowrap')
sage: C.cell_output_type()
'nowrap'
"""
self._type = typ
def update_html_output(self, output=''):
"""
        Updates this compute cell's file list with HTML-style
links or embeddings.
For interactive cells, the HTML output section is always
empty, mainly because there is no good way to distinguish
content (e.g., images in the current directory) that goes into
the interactive template and content that would go here.
INPUT:
- ``output`` - a string (default: ''); the new output
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.update_html_output()
sage: C.output_html() # random output -- depends on computer speed
'<img src="/home/sage/0/cells/0/sage0.png?...">'
sage: W.quit()
sage: nb.delete()
"""
if self.is_interactive_cell():
self._out_html = u""
else:
self._out_html = self.files_html(output)
def directory(self):
"""
Returns the name of this compute cell's directory, creating
        it if it doesn't already exist.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.directory()
'.../home/sage/0/cells/0'
sage: nb.delete()
"""
dir = self._directory_name()
if not os.path.exists(dir):
os.makedirs(dir)
set_restrictive_permissions(dir)
return dir
def _directory_name(self):
"""
Returns the name of this compute cell's directory.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C._directory_name()
'.../home/sage/0/cells/0'
sage: nb.delete()
"""
return os.path.join(self._worksheet.directory(), 'cells',
str(self.id()))
def word_wrap_cols(self):
"""
Returns the number of columns for word wrapping this compute
cell. This defaults to 70, but the default setting for a
notebook is 72.
OUTPUT:
- an integer
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.word_wrap_cols()
70
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.word_wrap_cols()
72
sage: nb.delete()
"""
try:
return self.notebook().conf()['word_wrap_cols']
except AttributeError:
return 70
def plain_text(self, ncols=0, prompts=True, max_out=None):
r"""
Returns the plain text version of this compute cell.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
        - ``prompts`` - a boolean (default: True); whether to strip
interpreter prompts from the beginning of each line
- ``max_out`` - an integer (default: None); the maximum number
of characters to return
OUTPUT:
- ``plaintext_output`` - Plaintext string of the cell
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: len(C.plain_text())
11
"""
if ncols == 0:
ncols = self.word_wrap_cols()
plaintext_output = u''
self._in = unicode_str(self._in)
input_lines = self._in
pr = 'sage: '
if prompts:
input_lines = input_lines.splitlines()
has_prompt = False
if pr == 'sage: ':
for v in input_lines:
w = v.lstrip()
if w[:5] == 'sage:' or w[:3] == '>>>' or w[:3] == '...':
has_prompt = True
break
else:
# discard first line since it sets the prompt
input_lines = input_lines[1:]
if has_prompt:
plaintext_output += '\n'.join(input_lines) + '\n'
else:
in_loop = False
for v in input_lines:
if len(v) == 0:
pass
elif len(v.lstrip()) != len(v): # starts with white space
in_loop = True
plaintext_output += '... ' + v + '\n'
elif v[:5] == 'else:':
in_loop = True
plaintext_output += '... ' + v + '\n'
else:
if in_loop:
plaintext_output += '...\n'
in_loop = False
plaintext_output += pr + v + '\n'
else:
plaintext_output += self._in
if prompts:
msg = TRACEBACK
if self._out.strip().startswith(msg):
v = self._out.strip().splitlines()
w = [msg, '...']
for i in range(1, len(v)):
if not (len(v[i]) > 0 and v[i][0] == ' '):
w = w + v[i:]
break
out = '\n'.join(w)
else:
out = self.output_text(ncols, raw=True, html=False)
else:
out = self.output_text(ncols, raw=True, html=False,
allow_interact=False)
out = '///\n' + out.strip('\n')
if not max_out is None and len(out) > max_out:
out = out[:max_out] + '...'
# Get rid of spurious carriage returns
plaintext_output = plaintext_output.strip('\n')
out = out.strip('\r\n')
plaintext_output = plaintext_output + '\n' + out
if not prompts:
plaintext_output = plaintext_output.rstrip('\n')
return plaintext_output
def edit_text(self, ncols=0, prompts=False, max_out=None):
ur"""
Returns the text displayed for this compute cell in the Edit
window.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
- ``prompts`` - a boolean (default: False); whether to strip
interpreter prompts from the beginning of each line
- ``max_out`` - an integer (default: None); the maximum number
of characters to return
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.edit_text()
u'{{{id=0|\n2+3\n///\n5\n}}}'
sage: C = sagenb.notebook.cell.Cell(0, 'ěščřžýáíéďĎ', 'ěščřžýáíéďĎ', None)
sage: C.edit_text()
u'{{{id=0|\n\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e\n///\n\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e\n}}}'
"""
s = self.plain_text(ncols, prompts, max_out)
return u'{{{id=%s|\n%s\n}}}' % (self.id(), s)
def next_compute_id(self):
r"""
Returns the ID of the next compute cell in this compute cell's
worksheet object. If this cell is *not* in the worksheet, it
returns the ID of the worksheet's *first* compute cell. If
this *is* the last compute cell, it returns its *own* ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('foo\n{{{\n2+3\n///\n5\n}}}bar\n{{{\n2+8\n///\n10\n}}}')
sage: W.new_cell_after(1, "2^2")
Cell 4: in=2^2, out=
sage: [W.get_cell_with_id(i).next_compute_id() for i in [1, 4, 3]]
[4, 3, 3]
"""
L = self.worksheet().compute_cell_list()
try:
k = L.index(self)
except ValueError:
print "Warning -- cell %s no longer exists" % self.id()
return L[0].id()
try:
return L[k + 1].id()
except IndexError:
return self.id()
def interrupt(self):
"""
Sets this compute cell's evaluation as interrupted.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.interrupt()
sage: C.interrupted()
True
sage: C.evaluated()
False
sage: nb.delete()
"""
self._interrupted = True
self._evaluated = False
def interrupted(self):
"""
Returns whether this compute cell's evaluation has been
interrupted.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.interrupt()
sage: C.interrupted()
True
sage: nb.delete()
"""
return self._interrupted
def computing(self):
"""
Returns whether this compute cell is queued for evaluation by
its worksheet object.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.computing()
False
sage: nb.delete()
"""
return self in self.worksheet().queue()
def is_interactive_cell(self):
r"""
Returns whether this compute cell contains
:func:`sagenb.notebook.interact.interact` either as a function
call or decorator.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n print a^2")
sage: C.is_interactive_cell()
True
sage: C = W.new_cell_after(C.id(), "2+2")
sage: C.is_interactive_cell()
False
sage: nb.delete()
"""
# Do *not* cache
s = strip_string_literals(self.input_text())
if len(s) == 0:
return False
s = s[0]
return bool(re.search('(?<!\w)interact\s*\(.*\).*', s) or
re.search('\s*@\s*interact', s))
def is_interacting(self):
r"""
Returns whether this compute cell is currently
:func:`sagenb.notebook.interact.interact`\ ing.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n print a^2")
sage: C.is_interacting()
False
"""
return hasattr(self, 'interact')
def stop_interacting(self):
"""
Stops :func:`sagenb.notebook.interact.interact`\ ion for this
compute cell.
TODO: Add doctests.
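        A rough sketch (hedged: the ``interact`` attribute is set by hand here
        to mimic an interact update, so these lines are not run as doctests)::
            sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)   # not tested
            sage: C.interact = 'f(a=7)'                                # not tested
            sage: C.is_interacting()                                   # not tested
            True
            sage: C.stop_interacting()                                 # not tested
            sage: C.is_interacting()                                   # not tested
            False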
"""
if self.is_interacting():
del self.interact
def set_input_text(self, input):
"""
Sets the input text of this compute cell.
INPUT:
- ``input`` - a string; the new input text
TODO: Add doctests for the code dealing with interact.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 1: in=2^2, out=
4
)
sage: initial_version=C.version()
sage: C.set_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.evaluated()
False
sage: C.version()-initial_version
1
sage: W.quit()
sage: nb.delete()
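        A sketch of the interact path (hedged: ``INTERACT_UPDATE_PREFIX`` is
        imported at the top of this module, the update text is invented here,
        and the lines are not run as doctests)::
            sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)             # not tested
            sage: C.set_input_text(INTERACT_UPDATE_PREFIX + chr(10) + 'f(a=7)')  # not tested
            sage: C.is_interacting()                                             # not tested
            True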
"""
# Stuff to deal with interact
input = unicode_str(input)
if input.startswith(INTERACT_UPDATE_PREFIX):
self.interact = input[len(INTERACT_UPDATE_PREFIX)+1:]
self._version = self.version() + 1
return
elif self.is_interacting():
try:
del self.interact
del self._interact_output
except AttributeError:
pass
# We have updated the input text so the cell can't have
# been evaluated.
self._evaluated = False
self._version = self.version() + 1
self._in = input
if hasattr(self, '_html_cache'):
del self._html_cache
        # Get the input text with all of the percent
        # directives parsed out of it
self._cleaned_input = self.parse_percent_directives()
def input_text(self):
"""
Returns this compute cell's input text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.input_text()
u'2+3'
"""
return self._in
def cleaned_input_text(self):
r"""
Returns this compute cell's "cleaned" input text, i.e., its
input with all of its percent directives removed. If this
cell is interacting, it returns the interacting text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
sage: C.cleaned_input_text()
u'2+3'
"""
if self.is_interacting():
return self.interact
else:
return self._cleaned_input
def parse_percent_directives(self):
r"""
Parses this compute cell's percent directives, determines its
system (if any), and returns the "cleaned" input text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n%pi+3', '5', None)
sage: C.parse_percent_directives()
u'%pi+3'
sage: C.percent_directives()
[u'hide', u'maxima']
"""
self._system = None
text = self.input_text().splitlines()
directives = []
i = 0
for i, line in enumerate(text):
line = line.strip()
if not line.startswith('%'):
#Handle the #auto case here for now
if line == "#auto":
directives.append(line[1:])
else:
break
elif line in ['%auto', '%hide', '%hideall', '%save_server',
'%time', '%timeit']:
# We do not consider any of the above percent
# directives as specifying a system.
directives.append(line[1:])
else:
self._system = line[1:]
directives.append(line[1:])
i += 1
break
self._percent_directives = directives
        if self._system != 'fortran':
return "\n".join(text[i:]).strip()
return "\n".join(text[i:]).rstrip()
def percent_directives(self):
r"""
Returns a list of this compute cell's percent directives.
OUTPUT:
- a list of strings
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
sage: C.percent_directives()
[u'hide', u'maxima']
"""
try:
return self._percent_directives
except AttributeError:
self._percent_directives = []
return []
def system(self):
r"""
Returns the system used to evaluate this compute cell. The
system is specified by a percent directive like '%maxima' at
the top of a cell.
Returns None, if no system is explicitly specified. In this
case, the notebook evaluates the cell using the worksheet's
default system.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%maxima\n2+3', '5', None)
sage: C.system()
u'maxima'
sage: prefixes = ['%hide', '%time', '']
sage: cells = [sagenb.notebook.cell.Cell(0, '%s\n2+3'%prefix, '5', None) for prefix in prefixes]
sage: [(C, C.system()) for C in cells if C.system() is not None]
[]
"""
self.parse_percent_directives()
return self._system
def is_auto_cell(self):
r"""
Returns whether this compute cell is evaluated automatically
when its worksheet object starts up.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_auto_cell()
False
sage: C = sagenb.notebook.cell.Cell(0, '#auto\n2+3', '5', None)
sage: C.is_auto_cell()
True
"""
return 'auto' in self.percent_directives()
def changed_input_text(self):
"""
Returns the changed input text for this compute cell, deleting
any previously stored text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: initial_version=C.version()
sage: C.changed_input_text()
''
sage: C.set_changed_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.changed_input_text()
u'3+3'
sage: C.changed_input_text()
''
sage: C.version()-initial_version
0
"""
try:
t = self._changed_input
del self._changed_input
return t
except AttributeError:
return ''
def set_changed_input_text(self, new_text):
"""
Updates this compute cell's changed input text. Note: This
does not update the version of the cell. It's typically used,
e.g., for tab completion.
INPUT:
- ``new_text`` - a string; the new changed input text
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.set_changed_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.changed_input_text()
u'3+3'
"""
new_text = unicode_str(new_text)
self._changed_input = new_text
self._in = new_text
def set_output_text(self, output, html, sage=None):
r"""
Sets this compute cell's output text.
INPUT:
- ``output`` - a string; the updated output text
- ``html`` - a string; updated output HTML
- ``sage`` - a :class:`sage` instance (default: None); the
sage instance to use for this cell(?)
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: len(C.plain_text())
11
sage: C.set_output_text('10', '10')
sage: len(C.plain_text())
12
"""
output = unicode_str(output)
html = unicode_str(html)
if output.count(INTERACT_TEXT) > 1:
html = u'<h3><font color="red">WARNING: multiple @interacts in one cell disabled (not yet implemented).</font></h3>'
output = u''
# In interacting mode, we just save the computed output
# (do not overwrite).
if self.is_interacting():
self._interact_output = (output, html)
if INTERACT_RESTART in output:
# We forfeit any interact output template (in
# self._out), so that the restart message propagates
# out. When the set_output_text function in
# notebook_lib.js gets the message, it should
# re-evaluate the cell from scratch.
self._out = output
return
if hasattr(self, '_html_cache'):
del self._html_cache
output = output.replace('\r', '')
# We do not truncate if "notruncate" or "Output truncated!" already
# appears in the output. This notruncate tag is used right now
# in sagenb.notebook.interact, sage.misc.html, and sage.database.sql_db.
if ('notruncate' not in output and
'Output truncated!' not in output
and
(len(output) > MAX_OUTPUT or
output.count('\n') > MAX_OUTPUT_LINES)):
url = ""
if not self.computing():
file = os.path.join(self.directory(), "full_output.txt")
open(file, "w").write(encoded_str(output))
url = "<a target='_new' href='%s/full_output.txt' class='file_link'>full_output.txt</a>" % (
self.url_to_self())
html += "<br>" + url
lines = output.splitlines()
start = '\n'.join(lines[:MAX_OUTPUT_LINES/2])[:MAX_OUTPUT/2]
end = '\n'.join(lines[-MAX_OUTPUT_LINES/2:])[-MAX_OUTPUT/2:]
warning = 'WARNING: Output truncated! '
if url:
# make the link to the full output appear at the top too.
warning += '\n<html>%s</html>\n' % url
output = warning + '\n\n' + start + '\n\n...\n\n' + end
self._out = output
if not self.is_interactive_cell():
self._out_html = html
self._sage = sage
def sage(self):
"""
Returns the :class:`sage` instance for this compute cell(?).
OUTPUT:
- an instance of :class:`sage`
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.sage() is None
True
"""
try:
return self._sage
except AttributeError:
return None
def output_html(self):
"""
Returns this compute cell's HTML output.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.output_html()
''
sage: C.set_output_text('5', '<strong>5</strong>')
sage: C.output_html()
u'<strong>5</strong>'
"""
try:
return self._out_html
except AttributeError:
self._out_html = ''
return ''
def process_cell_urls(self, urls):
"""
Processes this compute cell's ``'cell://.*?'`` URLs, replacing
the protocol with the cell's path and appending the version number
to prevent cached copies from shadowing the updated copy.
INPUT:
- ``urls`` - a string; the URLs to process
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.process_cell_urls('"cell://foobar"')
'/sage/home/sage/0/cells/0/foobar?...'
"""
end = '?%d' % self.version()
begin = self.url_to_self()
for s in re_cell.findall(urls) + re_cell_2.findall(urls):
urls = urls.replace(s, begin + s[7:-1] + end)
return urls
def output_text(self, ncols=0, html=True, raw=False, allow_interact=True):
ur"""
Returns this compute cell's output text.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
- ``html`` - a boolean (default: True); whether to output HTML
- ``raw`` - a boolean (default: False); whether to output raw
text (takes precedence over HTML)
- ``allow_interact`` - a boolean (default: True); whether to
allow :func:`sagenb.notebook.interact.interact`\ ion
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.output_text()
u'<pre class="shrunk">5</pre>'
sage: C.output_text(html=False)
u'<pre class="shrunk">5</pre>'
sage: C.output_text(raw=True)
u'5'
sage: C = sagenb.notebook.cell.Cell(0, 'ěščřžýáíéďĎ', 'ěščřžýáíéďĎ', W)
sage: C.output_text()
u'<pre class="shrunk">\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e</pre>'
sage: C.output_text(raw=True)
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
"""
if allow_interact and hasattr(self, '_interact_output'):
# Get the input template
z = self.output_text(ncols, html, raw, allow_interact=False)
if not INTERACT_TEXT in z or not INTERACT_HTML in z:
return z
if ncols:
# Get the output template
try:
# Fill in the output template
output, html = self._interact_output
output = self.parse_html(output, ncols)
z = z.replace(INTERACT_TEXT, output)
z = z.replace(INTERACT_HTML, html)
return z
except (ValueError, AttributeError), msg:
print msg
pass
else:
# Get rid of the interact div to avoid updating the
# wrong output location during interact.
return ''
self._out = unicode_str(self._out)
is_interact = self.is_interactive_cell()
if is_interact and ncols == 0:
if 'Traceback (most recent call last)' in self._out:
s = self._out.replace('cell-interact', '')
is_interact = False
else:
return u'<h2>Click to the left again to hide and once more to show the dynamic interactive window</h2>'
else:
s = self._out
if raw:
return s
if html:
s = self.parse_html(s, ncols)
if (not is_interact and not self.is_html() and len(s.strip()) > 0 and
'<div class="docstring">' not in s):
s = '<pre class="shrunk">' + s.strip('\n') + '</pre>'
return s.strip('\n')
def parse_html(self, s, ncols):
r"""
Parses HTML for output, escaping and wrapping HTML and
removing script elements.
INPUT:
- ``s`` - a string; the HTML to parse
- ``ncols`` - an integer; the number of word wrap columns
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.parse_html('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">\n<html><head></head><body>Test</body></html>', 80)
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0...Test</body>'
"""
def format(x):
return word_wrap(escape(x), ncols)
def format_html(x):
return self.process_cell_urls(x)
# If there is an error in the output, specially format it.
if not self.is_interactive_cell():
s = format_exception(format_html(s), ncols)
# Everything not wrapped in <html> ... </html> should be
# escaped and word wrapped.
t = ''
while len(s) > 0:
i = s.find('<html>')
if i == -1:
t += format(s)
break
j = s.find('</html>')
if j == -1:
t += format(s[:i])
break
t += format(s[:i]) + format_html(s[i + 6:j])
s = s[j + 7:]
t = t.replace('</html>', '')
# Get rid of the <script> tags, since we do not want them to
# be evaluated twice. They are only evaluated in the wrapped
# version of the output.
if ncols == 0:
t = re_script.sub('', t)
# This is a temporary hack
#re_inline = re.compile('<script type="math/tex">(.*?)</script>')
#re_display = re.compile('<script type="math/tex; mode=display">(.*?)</script>')
#t = re_inline.sub('<span class="math">\1</span>', t)
#t = re_display.sub('<div class="math">\1</div>', t)
#t = t.replace('<script type="math/tex">(.*?)</script>', '<span class="math">\1</span>')
#t = t.replace('<script type="math/tex; mode=display">(.*?)</script>', '<div class="math">\1</div>')
####t = t.replace('<script type="math/tex">', '<span class="math">')
####t = t.replace('</script>', '</span>')
return t
def has_output(self):
"""
Returns whether this compute cell has any output.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.has_output()
True
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '', None)
sage: C.has_output()
False
"""
return len(self._out.strip()) > 0
def is_html(self):
r"""
Returns whether this is an HTML compute cell, e.g., its system
is 'html'. This is typically specified by the percent
directive ``%html``.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, "%html\nTest HTML", None, None)
sage: C.system()
u'html'
sage: C.is_html()
True
sage: C = sagenb.notebook.cell.Cell(0, "Test HTML", None, None)
sage: C.is_html()
False
"""
return self.system() == 'html'
#################
# Introspection #
#################
def set_introspect_html(self, html, completing=False, raw=False):
ur"""
Sets this compute cell's introspection text.
INPUT:
- ``html`` - a string; the updated text
- ``completing`` - a boolean (default: False); whether the
completions menu is open
- ``raw`` - a boolean (default: False)
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.set_introspect_html('foobar')
sage: C.introspect_html()
u'foobar'
sage: C.set_introspect_html('`foobar`')
sage: C.introspect_html()
u'`foobar`'
sage: C.set_introspect_html('ěščřžýáíéďĎ')
sage: C.introspect_html()
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
sage: W.quit()
sage: nb.delete()
"""
html = unicode_str(html)
self._introspect_html = html
def introspect_html(self):
"""
Returns this compute cell's introspection text, setting it to
'', if none is available.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect_html() # random output -- depends on computer speed
u'...<div class="docstring">...sage...</pre></div>...'
sage: W.quit()
sage: nb.delete()
"""
if not self.introspect():
return ''
try:
return self._introspect_html
except AttributeError:
self._introspect_html = u''
return u''
def introspect(self):
"""
        Returns this compute cell's introspection state.
OUTPUT:
- a string 2-tuple ("before" and "after" text) or boolean (not
introspecting)
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect()
[u'sage?', '']
sage: W.quit()
sage: nb.delete()
"""
try:
return self._introspect
except AttributeError:
return False
def unset_introspect(self):
"""
Clears this compute cell's introspection text.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect()
[u'sage?', '']
sage: C.unset_introspect()
sage: C.introspect()
False
sage: W.quit()
sage: nb.delete()
"""
self._introspect = False
def set_introspect(self, before_prompt, after_prompt):
"""
Set this compute cell's introspection text.
INPUT:
- ``before_prompt`` - a string
- ``after_prompt`` - a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.set_introspect("a", "b")
sage: C.introspect()
['a', 'b']
"""
self._introspect = [before_prompt, after_prompt]
def evaluate(self, introspect=False, time=None, username=None):
r"""
Evaluates this compute cell.
INPUT:
- ``introspect`` - a pair [``before_cursor``,
``after_cursor``] of strings (default: False)
- ``time`` - a boolean (default: None); whether to return the
time the computation takes
- ``username`` - a string (default: None); name of user doing
the evaluation
EXAMPLES:
We create a notebook, worksheet, and cell and evaluate it
in order to compute `3^5`::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\n3^5\n}}}')
sage: C = W.cell_list()[0]; C
Cell 0: in=3^5, out=
sage: C.evaluate(username='sage')
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=3^5, out=
243
)
sage: C # random output -- depends on computer speed
Cell 0: in=3^5, out=
243
sage: W.quit()
sage: nb.delete()
"""
if introspect:
self.eval_method = 'introspect' # Run through TAB-introspection
else:
self.eval_method = 'eval' # Run through S-Enter, evaluate link, etc.
self._interrupted = False
self._evaluated = True
if time is not None:
self._time = time
self._introspect = introspect
self.worksheet().enqueue(self, username=username)
self._type = 'wrap'
dir = self.directory()
for D in os.listdir(dir):
F = os.path.join(dir, D)
try:
os.unlink(F)
except OSError:
try:
shutil.rmtree(F)
except:
pass
def version(self):
"""
Returns this compute cell's version number.
OUTPUT:
- an integer
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: initial_version=C.version() #random
sage: C.set_input_text('2+3')
sage: C.version()-initial_version
1
"""
try:
return self._version
except AttributeError:
# start with a random integer so that evaluations of the cell
# from different runs have different version numbers.
from sys import maxint
from random import randint
self._version = randint(0,maxint)
return self._version
def time(self):
r"""
Returns whether to print timing information about the
evaluation of this compute cell.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.time()
False
sage: C = sagenb.notebook.cell.Cell(0, '%time\n2+3', '5', None)
sage: C.time()
True
"""
return ('time' in self.percent_directives() or
'timeit' in self.percent_directives() or
getattr(self, '_time', False))
def html(self, wrap=None, div_wrap=True, do_print=False, publish=False):
r"""
Returns the HTML for this compute cell.
INPUT:
- ``wrap`` - an integer (default: None); the number of word
wrap columns
- ``div_wrap`` - a boolean (default: True); whether to wrap
the output in outer div elements
- ``do_print`` - a boolean (default: False); whether to return
output suitable for printing
- ``publish`` - a boolean (default: False); whether to render
a published cell
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.html()
u'...cell_outer_0...2+3...5...'
"""
from template import template
if wrap is None:
wrap = self.notebook().conf()['word_wrap_cols']
return template(os.path.join('html', 'notebook', 'cell.html'),
cell=self, wrap=wrap, div_wrap=div_wrap,
do_print=do_print, publish=publish)
def url_to_self(self):
"""
Returns a notebook URL for this compute cell.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.url_to_self()
'/home/sage/0/cells/0'
"""
try:
return self._url_to_self
except AttributeError:
self._url_to_self = '/'+g.site_name+'/home/%s/cells/%s' % (
self.worksheet_filename(), self.id())
return self._url_to_self
def url_to_worksheet(self):
"""
Returns a URL for the worksheet
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.url_to_worksheet()
'/home/sage/0'
"""
return '/'+g.site_name+'/home/{0}'.format(self.worksheet_filename())
def files(self):
"""
Returns a list of all the files in this compute cell's
directory.
OUTPUT:
- a list of strings
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: W.quit()
sage: nb.delete()
"""
dir = self.directory()
D = os.listdir(dir)
return D
def delete_files(self):
"""
Deletes all of the files associated with this compute cell.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: C.delete_files()
sage: C.files()
[]
sage: W.quit()
sage: nb.delete()
"""
try:
dir = self._directory_name()
except AttributeError:
return
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def _jmol_files_html(self, F):
"""
Helper for jmol files in :meth:`files_html`
"""
# If F ends in -size500.jmol then we make the viewer applet
# with size 500.
i = F.rfind('-size')
if i != -1:
size = F[i + 5:-5]
else:
size = 500
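        # Illustrative note (hypothetical filename): for 'sage0-size500.jmol' the slice
        # F[i + 5:-5] above drops the '-size' prefix and the '.jmol' suffix, giving '500';
        # filenames without a '-size' marker fall back to the default of 500.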
# The ".jmol" script has defaultdirectory pointing
# to a zip file [see Graphics3d.show()]. But it is
# relative to the worksheet URL as seen in the browser.
# But that doesn't make sense for live help.
#
# So we need to prepend the worksheet URL, in order
# for the zip to be accessed correctly.
if self.worksheet().docbrowser():
jmol_name = os.path.join(self.directory(), F)
with open(jmol_name, 'r') as f:
jmol_script = f.read()
jmol_script = jmol_script.replace(
'defaultdirectory "',
'defaultdirectory "{0}/'.format(self.url_to_worksheet()))
with open(jmol_name, 'w') as f:
f.write(jmol_script)
image_name = os.path.join(self.url_to_self(),'.jmol_images',F)
script_name = os.path.join(self.url_to_self(), F)
return textwrap.dedent("""
<div id="sage_jmol_{id}" class="3DPlotDiv">
<div id="loadJmol" style="display:none;">{id}</div>
<div id="sage_jmol_size_{id}" style="display:none;">{size}</div>
<div id="sage_jmol_img_{id}" style="display:none;">{image_name}.png?{timestamp}</div>
<div id="sage_jmol_script_{id}" style="display:none;">{filename}?{timestamp}</div>
<div id="sage_jmol_server_url_{id}" style="display:none;">{callback}</div>
<div id="sage_jmol_status_{id}" style="display:none;">notActivated</div>
</div>
""").format(
id=self._id,
size=size,
image_name=image_name,
timestamp=time.time(),
filename=script_name,
callback=os.path.join(self.url_to_worksheet(), 'jsmol'),
)
def files_html(self, out):
"""
Returns HTML to display the files in this compute cell's
directory.
INPUT:
- ``out`` - a string; files to exclude. To exclude bar, foo,
..., use the format ``'cell://bar cell://foo ...'``
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files_html('') # random output -- depends on computer speed
'<img src="/home/sage/0/cells/0/sage0.png?...">'
sage: W.quit()
sage: nb.delete()
"""
D = self.files()
D.sort()
if len(D) == 0:
return ''
images = []
files = []
#Flags to allow processing of old worksheets that include Jmol
hasjmol = False
jmoldatafile=''
hasjmolimages = False
jmolimagebase=''
from worksheet import CODE_PY
# The question mark trick here is so that images will be
# reloaded when the async request requests the output text for
# a computation. This is inspired by
# http://www.irt.org/script/416.htm/.
for F in D:
if os.path.split(F)[-1] == CODE_PY or 'cell://%s' % F in out:
continue
url = os.path.join(self.url_to_self(), F)
if (F.endswith('.png') or F.endswith('.bmp') or
F.endswith('.jpg') or F.endswith('.gif')):
images.append('<img src="%s?%d">' % (url, time.time()))
elif F.endswith('.obj'):
images.append("""<a href="javascript:sage3d_show('%s', '%s_%s', '%s');">Click for interactive view.</a>""" % (url, self._id, F, F[:-4]))
elif F.endswith('.mtl') or F.endswith(".objmeta"):
pass # obj data
elif F.endswith('.svg'):
images.append('<embed src="%s" type="image/svg+xml" name="emap">' % url)
elif F.endswith('.jmol'):
images.append(self._jmol_files_html(F))
jmolimagebase = F
hasjmol=True
elif F.endswith('.jmol.zip'):
# jmol data
jmoldatafile=os.path.join(self.directory(),F)
elif F.endswith('.canvas3d'):
script = '<div><script>canvas3d.viewer("%s?%s");</script></div>' %(url,time.time())
images.append(script)
elif F.startswith('.jmol_'):
# static jmol data and images
hasjmolimages=True
else:
link_text = str(F)
if len(link_text) > 40:
link_text = link_text[:10] + '...' + link_text[-20:]
files.append('<a target="_new" href="%s" class="file_link">%s</a>' % (url, link_text))
# TODO: remove this fugly in-place upgrading of worksheets
# and all the associated variables. If the worksheet is old
# just require a reevaluation.
if(hasjmol and not hasjmolimages):
# This is probably an old worksheet. Generate the missing jmol static image(s)
# Note: this is problematic in the notebook as it uses tools from Sage to
# generate the images.
head,tail = os.path.split(jmoldatafile)
# The path in the launch script file needs to be fixed.
worksheet, cellnum=os.path.split(head)
path = "cells/%s/%s" %(cellnum, tail)
f = open(os.path.join(head,jmolimagebase),'w')
f.write('set defaultdirectory "%s"\n' %path)
f.write('script SCRIPT\n')
f.close()
#name image file
png_path = os.path.realpath(os.path.join(head,'.jmol_images'))
if not os.path.exists(png_path):
os.mkdir(png_path)
png_name = os.path.join(png_path,jmolimagebase)
#test for JavaVM
from sage.interfaces.jmoldata import JmolData
jdata = JmolData()
if (jdata.is_jvm_available()):
# make the image with Jmol
png_fullpath=png_name+".png"
#print png_fullpath
script = 'set defaultdirectory \"'+jmoldatafile+'\"\n script SCRIPT\n'
#print script
jdata.export_image(targetfile = png_fullpath,datafile=script,image_type="PNG", figsize = 4)
else:
images.append('Java Virtual Machine Unavailable. Cannot make image from old data. Please reevaluate cell.')
if len(images) == 0:
images = ''
else:
images = "%s" % '<br>'.join(images)
if len(files) == 0:
files = ''
else:
files = (' '*3).join(files)
files = unicode_str(files)
images = unicode_str(images)
return images + files
# Alias
ComputeCell = Cell
#####################
# Utility functions #
#####################
def format_exception(s0, ncols):
r"""
Formats exceptions so they do not appear expanded by default.
INPUT:
- ``s0`` - a string
- ``ncols`` - an integer; number of word wrap columns
OUTPUT:
- a string
If ``s0`` contains "notracebacks," this function simply returns
``s0``.
EXAMPLES::
sage: sagenb.notebook.cell.format_exception(sagenb.notebook.cell.TRACEBACK,80)
'\nTraceback (click to the left of this block for traceback)\n...\nTraceback (most recent call last):'
sage: sagenb.notebook.cell.format_exception(sagenb.notebook.cell.TRACEBACK + "notracebacks",80)
'Traceback (most recent call last):notracebacks'
"""
s = s0.lstrip()
# Add a notracebacks option -- if it is in the string then
# tracebacks aren't shrunk. This is currently used by the
# functions sagenb.misc.support.help and sage.server.support.help.
if TRACEBACK not in s or 'notracebacks' in s:
return s0
if ncols > 0:
s = s.strip()
w = s.splitlines()
for k in range(len(w)):
if TRACEBACK in w[k]:
break
s = ('\n'.join(w[:k]) +
'\nTraceback (click to the left of this block for traceback)' +
'\n...\n' + w[-1])
else:
s = s.replace("exec compile(ur'", "")
s = s.replace("' + '\\n', '', 'single')", "")
return s
def number_of_rows(txt, ncols):
r"""
Returns the number of rows needed to display a string, given a
maximum number of columns per row.
INPUT:
- ``txt`` - a string; the text to "wrap"
- ``ncols`` - an integer; the number of word wrap columns
OUTPUT:
- an integer
EXAMPLES::
sage: from sagenb.notebook.cell import number_of_rows
sage: s = "asdfasdf\nasdfasdf\n"
sage: number_of_rows(s, 8)
2
sage: number_of_rows(s, 5)
4
sage: number_of_rows(s, 4)
4
"""
rows = txt.splitlines()
nrows = len(rows)
for i in range(nrows):
nrows += int((len(rows[i]) - 1) / ncols)
return nrows
| gpl-3.0 | -4,787,506,408,650,216,000 | 30.904152 | 153 | 0.513361 | false |
nestauk/gtr | tests/test_publications.py | 1 | 1385 | import responses
import gtr
@responses.activate
def test_publication():
"Searching for publications by id works"
with open("tests/results.json") as results:
body = results.read()
responses.add(
responses.GET,
"http://gtr.rcuk.ac.uk/gtr/api/outcomes/publications/glaciers",
match_querystring=True,
status=200,
body=body,
content_type="application/json")
res = gtr.Publications().publication("glaciers")
assert res.status_code == 200
assert sorted(res.json().keys()) == ["a",
"b",
"c",
"d"]
@responses.activate
def test_publications():
"""Searching for publications works."""
with open("tests/results.json") as results:
body = results.read()
responses.add(
responses.GET,
"http://gtr.rcuk.ac.uk/gtr/api/outcomes/publications?q=test&f=title",
match_querystring=True,
status=200,
body=body,
content_type="application/json")
res = gtr.Publications().publications("test", field="title")
assert res.status_code == 200
assert sorted(res.json().keys()) == ["a",
"b",
"c",
"d"]
| apache-2.0 | 6,634,522,533,200,286,000 | 27.265306 | 77 | 0.510469 | false |
JulyKikuAkita/PythonPrac | cs15211/Subsets.py | 1 | 7010 | __source__ = 'https://leetcode.com/problems/subsets/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/subsets.py
# Time: O(n * 2^n)
# Space: O(1)
# Brute Force Search
#
# Description: Leetcode # 78. Subsets
#
# Given a set of distinct integers, S, return all possible subsets.
#
# Note:
# Elements in a subset must be in non-descending order.
# The solution set must not contain duplicate subsets.
# For example,
# If S = [1,2,3], a solution is:
#
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
# Companies
# Amazon Uber Facebook
# Related Topics
# Array Backtracking Bit Manipulation
# Similar Questions
# Generalized Abbreviation
#
import unittest
#combination way
class SolutionCC150:
# @param S, a list of integer
# @return a list of lists of integer
def subsets(self, S):
slot = 1 << len(S)
result = []
for i in xrange(slot):
index = 0
k = i
tmp = []
while k:
if k & 1 > 0:
tmp.append(S[index])
index += 1
k >>= 1
result.append(tmp)
return result
class Solution:
# @param S, a list of integer
# @return a list of lists of integer
def subsets(self, S):
result = []
i, count = 0, 1 << len(S)
S = sorted(S)
while i < count:
cur = []
for j in xrange(len(S)):
if i & 1 << j:
cur.append(S[j])
result.append(cur)
i += 1
return result
class Solution2:
# @param S, a list of integer
# @return a list of lists of integer
def subsets(self, S):
return self.subsetsRecu([], sorted(S))
def subsetsRecu(self, cur, S):
if not S:
return [cur]
return self.subsetsRecu(cur, S[1:]) + self.subsetsRecu(cur + [S[0]], S[1:])
class SolutionOther:
# @param S, a list of integer
# @return a list of lists of integer
def subsets(self, S):
        ans = S[:] # deep copy the list S into ans. Note: ans = S would not work, since it only copies the reference
ans.sort()
return [[ans[x] for x in range(len(ans)) if i >> x&1] for i in range(2**len(ans))]
#DFS way:
def subsets2(self, S):
S.sort()
ans = []
self.dfs(S, 0, [], ans)
return ans
def dfs(self, S, depth, templist, ans):
if depth == len(S) :
ans.append(list(templist))
#return
else:
self.dfs(S,depth+1, templist, ans)
print "first", depth, templist
templist.append(S[depth])
self.dfs(S, depth+1, templist,ans)
print "sec",depth, templist
templist.pop()
def subsets3(self, S):
def dfs(depth, start, valuelist):
res.append(valuelist)
if depth == len(S): return
for i in range(start, len(S)):
print i, depth, valuelist+[S[i]]
dfs(depth+1, i+1, valuelist+[S[i]])
S.sort()
res = []
dfs(0, 0, [])
return res
class Solution3(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
res = []
self.dfs(nums, 0, res, [])
return res
def dfs(self, nums, idx, res, cur):
if idx >= len(nums):
res.append(cur + [])
return
cur.append(nums[idx])
self.dfs(nums, idx+1, res, cur)
cur.pop()
self.dfs(nums, idx+1, res, cur)
class Solution4(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
res = []
self.dfs(nums, 0, res, [], 0)
return res
def dfs(self, nums, idx, res, cur, depth):
res.append(list(cur))
if depth == len(nums):
return
for i in xrange(idx, len(nums)):
cur.append(nums[i])
self.dfs(nums, i+1, res, cur, depth +1)
cur.pop()
#print 3 & 0 #0
#for i in range(2**len(S)):
# print "i=",i
# for x in range(len(S)):
# if i >> x & 1: #if 1 (True); if 0 (False)
# print "x =",x, "binary =" , i >> x & 1 , "ans=" , S[x]
# else:
# print "x =",x,"binary =" , i >> x & 1 , "no value"
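# Worked example of the bitmask trick above (a sketch, assuming S = [1, 2, 3]):
# i = 5 is 0b101, so i >> 0 & 1 == 1, i >> 1 & 1 == 0 and i >> 2 & 1 == 1,
# which selects S[0] and S[2] and yields the subset [1, 3]. Iterating i over
# range(2**len(S)) therefore enumerates all 2^n subsets exactly once.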
class TestMethods(unittest.TestCase):
def test_Local(self):
#print SolutionCC150().subsets([1, 2, 3])
#print Solution().subsets([1, 2, 3])
print Solution3().subsets([1, 2])
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
General approach for perm/comb
https://discuss.leetcode.com/topic/46159/a-general-approach-to-backtracking-questions-in-java-subsets-permutations-combination-sum-palindrome-partitioning
# 1ms 100%
class Solution {
public List<List<Integer>> subsets(int[] nums) {
List<List<Integer>> list = new ArrayList<>();
backtrack(list, new ArrayList<>(), nums, 0);
return list;
}
private void backtrack(List<List<Integer>> list, List<Integer> tempList, int [] nums, int start){
list.add(new ArrayList<>(tempList));
//Set<Integer> s = new HashSet<>();
for (int i = start ; i < nums.length; i++) {
//if( !s.contains(nums[i])) {
tempList.add(nums[i]);
//s.add(nums[i]);
backtrack(list, tempList, nums, i + 1);
tempList.remove(tempList.size() - 1);
//}
}
}
}
# 1ms 100%
class Solution {
public List<List<Integer>> subsets(int[] nums) {
List<List<Integer>> list = new ArrayList<>();
backtrack(list, new ArrayList<>(), nums, 0);
return list;
}
private void backtrack(List<List<Integer>> list , List<Integer> tempList, int [] nums, int start){
if (start == nums.length) {
list.add(new ArrayList<>(tempList));
return;
}
backtrack(list, tempList, nums, start + 1);
tempList.add(nums[start]);
backtrack(list, tempList, nums, start + 1);
tempList.remove(tempList.size() - 1);
}
}
# BFS
# 1ms 100%
class Solution {
public List<List<Integer>> subsets(int[] nums) {
List<List<Integer>> result = new ArrayList<>();
int max = 1;
for (int i = 0; i < nums.length; i++) {
max <<= 1;
}
Arrays.sort(nums);
for (int i = 0; i < max; i++) {
int cur = i;
List<Integer> list = new ArrayList<>();
int index = 0;
while (cur > 0) {
if ((cur & 1) == 1) {
list.add(nums[index]);
}
cur >>>= 1; //>>> is unsigned-shift; it'll insert 0. >> is signed, and will extend the sign bit.
index++;
}
result.add(list);
}
return result;
}
}
'''
| apache-2.0 | -4,645,692,306,403,418,000 | 26.598425 | 154 | 0.506705 | false |
debalance/hp | hp/core/urls.py | 1 | 1430 | # -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-xmpp-account.
# If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url
from django.contrib.sitemaps.views import sitemap
from django.utils.translation import ugettext_lazy as _
from blog.sitemaps import BlogPostSitemap
from blog.sitemaps import PageSitemap
from . import views
from .sitemaps import StaticSitemap
sitemaps = {
'blog': BlogPostSitemap,
'page': PageSitemap,
'static': StaticSitemap,
}
app_name = 'core'
urlpatterns = [
url(_(r'^contact/$'), views.ContactView.as_view(), name='contact'),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^api/set-lang/$', views.SetLanguageView.as_view(), name='api-set-lang'),
]
| gpl-3.0 | -8,019,578,557,664,934,000 | 35.666667 | 99 | 0.732867 | false |
bbulkow/MagnusFlora | rest/sound.py | 1 | 17580 | #!/usr/bin/env python3
### VERY MUCH PYTHON 3 !!!
"""
Example for aiohttp.web basic async service
Uses a background timer to print to a logger
exposes an obvious REST endpoint
It's a template!
Made available under the MIT license as follows:
Copyright 2017 Brian Bulkowski brian@bulkowski.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
py_ver = sys.version_info[0] + ( sys.version_info[1] / 10.0 )
if py_ver < 3.5:
raise "Must be using Python 3.5 or better"
import threading
import time
import datetime
import os
import random
import logging
import json
import argparse
import asyncio
import functools
import textwrap
from aiohttp import web
# import local shared code
from portal import Resonator, Portal
import wave
import subprocess
import platform
# should do something less dumb, like checking if a file exists
# instead of coding in something like this
if (platform.system() == 'Darwin'):
command_filename_offset = 1
command_template = [ "afplay" ]
elif (platform.system() == 'Linux'):
command_filename_offset = 1
command_template = [ "aplay" ]
else:
print ( "unknown operating system, can't play sounds ")
sys.exit(0)
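# A possible improvement, sketched here but intentionally not wired into the logic
# above: probe for a known CLI player with shutil.which() instead of keying off the
# OS name. The candidate player names are assumptions and may be absent on a given
# machine; a caller would still need to handle the None case.
import shutil
def find_player_command():
    # Return a single-element command list for the first available player, or None.
    for candidate in ("afplay", "aplay", "paplay"):
        if shutil.which(candidate) is not None:
            return [candidate]
    return None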
# !!!
# Hopefully you will find a way to play both foreground and background
#
# play a sound start, and allow killing
def play_sound_start( filename ):
global command_template
global command_filename_offset
stat = os.stat( filename )
# let's check the length, in time
wf = wave.open(filename, 'rb')
bytes_per_second = wf.getnchannels() * wf.getframerate() * wf.getsampwidth()
secs = stat.st_size / bytes_per_second
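    # Worked example of the formula above: a 16-bit stereo 44.1 kHz WAV has
    # bytes_per_second = 2 channels * 44100 frames/s * 2 bytes = 176400, so a
    # 1,764,000-byte file plays for roughly 10 seconds (the header bytes are
    # counted too, which slightly overestimates the duration).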
# log.debug ("play sound start: seconds is: ",sec)
ct = list(command_template)
ct.insert(command_filename_offset, filename)
# log.debug("play sound start: passing to popen: ", ct)
proc = subprocess.Popen( ct )
# print (" delaying ")
# time.sleep( sec - 1.0 )
# time.sleep( 2.0 )
# test: kill the sound, todo, pass back an object that can respond to a kill
return proc, secs
def play_sound_end( proc ):
proc.kill()
# if a sound is older than this, it would be confusing to play it, ignore it
maximum_age = 45.0
def switch_sound_cb(i_sound, sequence):
global maximum_age
log = i_sound.log
log.debug( " switch_sound_cb called ")
# check for duplicates which is legal
if (i_sound.sequence != sequence):
log.warning(" received duplicate call later %d ignoring ", sequence)
return
log.debug(" killing old sound, sequence %d ",sequence)
if (i_sound.event_audio_obj):
play_sound_end(i_sound.event_audio_obj)
i_sound.clean()
log.debug(" start background, or play from queue? ")
# nothing, play background
if i_sound.q.empty() :
i_sound.play_background()
return
# something on queue, play it
while True:
try:
s_event = i_sound.q.get_nowait()
except asyncio.queues.QueueEmpty:
# drained queue: play background: exit
log.warning(" ignored all elements on queue, play background ")
i_sound.play_background()
return
# if ancient, drop
if (s_event.received_time + maximum_age < time.time() ):
log.warning(" ignoring sound too long on queue ")
continue
i_sound.play_sound_immediate(s_event)
# the kind of thing that should be on the queue
# action is the STRING of the action
# add more if you need more in your handler
# time is the time we got it
class SoundEvent:
def __init__(self, action, received_time):
self.action = action
self.received_time = received_time
class IngressSound:
# actions_info = {
# 'portal_neutralized': [ '../audio/portal_neutralized.wav', 2.0 ],
# 'portal_captured': [ '../audio/portal_online.wav', 4.0 ],
# 'resonator_add': [ '../audio/resonator_deployed.wav', 3.0 ],
# 'resonator_remove': [ '../audio/resonator_destroyed.wav', 2.0],
# 'attack': [ '../audio/under_attack.wav', 3.0 ],
# }
actions_sounds_test = {
'portal_neutralized': [ '../audio/test/scream.wav', 2.0 ],
'portal_captured': [ '../audio/test/portal_captured_by_RES.wav', 4.0 ],
'resonator_add': [ '../audio/test/resonator_deployed.wav', 3.0 ],
'resonator_remove': [ '../audio/test/exterminate.wav', 2.0],
'resonator_upgrade': [ '../audio/test/resonator_upgraded.wav', 2.0],
'mod_added': [ '../audio/test/mod_added.wav', 2.0],
'mod_destroyed': [ '../audio/test/mod_destroyed.wav', 2.0],
'attack': [ '../audio/test/attack.wav', 3.0 ],
'recharge': [ '../audio/test/recharged.wav', 3.0],
'virus_ada': [ '../audio/test/ada.wav', 3.0],
'virus_jarvis': [ '../audio/test/jarvis.wav', 3.0],
}
background_sounds_test = [
'../audio/violin-test-PCM16.wav' ]
actions_sounds_prod = {
'portal_neutralized': [ '../audio/portal_neutralized.wav', 4.0 ],
'portal_captured': [ '../audio/portal_online.wav', 6.0 ],
'resonator_add': [ '../audio/resonator_deployed.wav', 4.0 ],
'resonator_remove': [ '../audio/resonator_destroyed.wav', 4.0],
'resonator_upgrade': [ '../audio/resonator_deployed.wav', 4.0],
'mod_added': [ '../audio/mod_deployed.wav', 2.0],
'mod_destroyed': [ '../audio/mod_destroyed.wav', 2.0],
'attack': [ '../audio/under_attack.wav', 3.0 ],
'recharge': [ '../audio/mod_deployed.wav', 3.0],
'virus_ada': [ '../audio/virus_ada_refactor.wav', 3.0],
'virus_jarvis': [ '../audio/virus_jarvis_vocal.wav', 3.0],
}
background_sounds_prod = [
'../audio/magnus_the_song.wav',
'../audio/background_ENL.wav',
'../audio/background_RES.wav' ]
legal_actions = [ "attack", "recharge", "resonator_add", "resonator_remove",
"portal_neutralized", "portal_captured",
"mod_added", "mod_destroyed", "resonator_upgrade", "virus_jarvis", "virus_ada"
]
def __init__(self, app):
self.event_audio_obj = None
self.event_audio_start = 0.0
self.event_audio_minimum = 0.0
self.event_audio_maximum = 0.0
self.app = app
self.sequence = 0
self.background = False # set to true if playing a background sound
self.action = None # the action type if something is playing
self.actions_sounds = app['actions_sounds']
self.background_sounds = app['background_sounds']
self.log = app['log']
# the queue will hold some number of sounds, too many gets too far behind
self.q = asyncio.Queue(maxsize=20)
def clean(self):
self.event_audio_obj = None
self.event_audio_start = 0.0
self.event_audio_minimum = 0.0
self.event_audio_maximum = 0.0
self.background = False
self.action = None
# only to be used if you know nothing is currently playing
# action is
def play_sound_immediate(self, sound_event):
self.log.debug(" play immediate: action_sounds %s action %s",self.actions_sounds, sound_event.action )
ainfo = self.actions_sounds.get(sound_event.action)
now = time.time()
self.event_audio_obj, secs = play_sound_start( ainfo[0] )
self.event_audio_start = now
self.event_audio_minimum = now + ainfo[1]
self.event_audio_maximum = now + secs
self.action = sound_event.action
self.sequence += 1
# register a callback to switch
self.log.debug(" play immediate: scheduling callback for switch_sound %f seconds from now",secs)
loop = self.app['loop']
loop.call_later(secs, switch_sound_cb, self, self.sequence)
# action is a string, one defined in the doc:
# attack, recharge, resonator_add, resonator_remove, portal_neutralized, portal_captured,
# mod_added, mod_destroyed, resonator_upgrade, jarvis, ada
def play_action(self, action, action_parm ):
self.log.debug(" play_action: %s parm %s",action,action_parm)
if action not in IngressSound.legal_actions:
self.log.warning(" received illegal action, ingoring, %s",action)
return
self.log.info(" received valid action: %s",action)
self.log.debug(" action sounds struct: %s",self.actions_sounds)
ainfo = self.actions_sounds.get(action, None)
if ainfo == None:
self.log.warning(" received unsupported action, ignoring, %s",action)
return
self.log.debug(" ainfo %s",ainfo)
# special case: for attack and defend, ignore multiples
if (action == "attack" or action == "defend"):
if self.action == action:
self.log.warning(" ignoring duplicate %s sound ",action)
return
now = time.time()
# if old one playing, kill it
if (self.event_audio_obj):
if (self.background):
self.log.info(" killing background ")
play_sound_end(self.event_audio_obj)
self.clean()
elif (now > self.event_audio_minimum):
self.log.info(" killing old sound ")
play_sound_end(self.event_audio_obj)
self.clean()
else:
self.log.info(" queing sound: %s %f",action,now)
queue = self.q
queue.put_nowait( SoundEvent(action, now ) )
return
# play new
self.event_audio_obj, secs = play_sound_start( ainfo[0] )
self.event_audio_start = now
self.event_audio_minimum = now + ainfo[1]
self.event_audio_maximum = now + secs
self.action = action
self.sequence += 1
# register a callback to switch
self.log.info(" playing sound, and scheduling callback for switch_sound %f seconds from now",secs)
loop = self.app['loop']
loop.call_later(secs, switch_sound_cb, self, self.sequence)
def play_background(self):
#
self.log.info(" PLAY BACKGROUND SOUND")
now = time.time()
# pick one randomly
        if len(self.background_sounds) == 1:
            sfile = self.background_sounds[0]
else:
rand = random.randint( 0, len(self.background_sounds) - 1)
self.log.info(" playing random sound %d %s",rand, self.background_sounds[rand])
sfile = self.background_sounds[rand]
self.event_audio_obj, secs = play_sound_start( sfile )
self.event_audio_start = now
self.event_audio_minimum = now
self.event_audio_maximum = now + secs
self.sequence += 1
# register a callback to switch
self.log.info(" background: scheduling callback for switch_sound %f seconds from now",secs)
loop = self.app['loop']
loop.call_later(secs, switch_sound_cb, self, self.sequence)
# play a sound start, and allow killing
def play_sound_start( filename ):
global command_template
global command_filename_offset
stat = os.stat( filename )
# let's check the length, in time
wf = wave.open(filename, 'rb')
bytes_per_second = wf.getnchannels() * wf.getframerate() * wf.getsampwidth()
sec = stat.st_size / bytes_per_second
ct = list(command_template)
ct.insert(command_filename_offset, filename)
proc = subprocess.Popen( ct )
# print (" delaying ")
# time.sleep( sec - 1.0 )
# time.sleep( 2.0 )
# test: kill the sound, todo, pass back an object that can respond to a kill
return (proc, sec)
def play_sound_end( proc ):
proc.kill()
# A simple example of a timer function
async def timer(app):
period = 5.0
log = app['log']
log.debug(" started timer routine, running every %f seconds",period)
while True:
# log.debug(" hello says the timer! ")
# read the portal file, for example?
await asyncio.sleep(period)
#
# A number of debug / demo endpoints
# Note to self: you create a "Response" object, thn
# you manipulate it.
#
# this needs UTF8 because names might have utf8
#
# ENTRY POINT where Jarvis calls me
#
async def portal_notification(request):
log = request.app['log']
sound = request.app['sound']
try:
log.debug(" received notification: %s of type %s",request.method, request.content_type)
req_obj = await request.json()
log.debug(" received JSON %s",req_obj)
# the action parm is the thing that's changing, which doesn't
# matter much for sound ( maybe for faction changes )
action, action_parm = req_obj.get("action", None)
log.debug(" action is: %s sound is: %s",action, sound)
sound.play_action(action, action_parm)
r = web.Response(text="OK" , charset='utf-8')
except:
print("Unexpected error:", sys.exc_info()[0])
# log.warning(" exception while handing portal notification: %s ",str(ex))
r = web.Response(text="FAIL")
return r
async def hello(request):
return web.Response(text="Welcome to Magnus Flora Sound! Please replace me.")
async def health(request):
return web.Response(text="OK")
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
# Whatever tasks you create here will be executed and cancelled properly
async def start_background_tasks(app):
app['timer_task'] = app.loop.create_task( timer(app) )
async def cleanup_background_tasks(app):
app['log'].info(" cleaning up background tasks ")
app['timer_task'].cancel()
await app['timer_task']
def create_logger(args):
# create a logging object and add it to the app object
logger = logging.getLogger('MF_Sound')
logger.setLevel(args.debug)
# create a file output
fh = logging.FileHandler(args.log)
fh.setLevel(args.debug)
# create a console handler
ch = logging.StreamHandler()
ch.setLevel(args.debug)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
async def init(app, args, loop):
app.router.add_get('/', hello)
app.router.add_get('/health', health)
app.router.add_post('/portal', portal_notification)
# create a portal object and stash it, many will need it
app['portal'] = Portal(1, app['log'])
# An Object For Sound
app['sound'] = IngressSound(app)
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)
return
# Parse the command line options
parser = argparse.ArgumentParser(description="MagnusFlora Sound")
parser.add_argument('--config', '-c', help="JSON file with configuration", default="config.json", type=str)
parser.add_argument('--log', help="location of the log file", default="sound.log", type=str)
parser.add_argument('--debug', '-d', help=" debug level: CRITICAL ERROR WARNING INFO DEBUG", default="INFO", type=str)
args = parser.parse_args()
# Load config.json
try:
with open(args.config) as config_file:
g_config = json.load(config_file)
print(" g_config is: ",g_config)
except Exception as e:
print(" UNABLE TO OPEN CONFIGURATION FILE ",args.config)
print(e)
sys.exit(0)
log = create_logger(args)
log.info('starting MagnusFlora Sound: there will be %d cakes', 2 )
print("starting MagnusFlora Sound monitoring ",g_config["portalfile"]," on port ",g_config["sound_port"])
# register all the async stuff
loop = asyncio.get_event_loop()
app = web.Application()
app['config'] = g_config
app['log'] = log
app['loop'] = loop
#
if g_config['sound_type'] == 'prod':
log.info(" Using Sound Type Prod")
app['actions_sounds'] = IngressSound.actions_sounds_prod
app['background_sounds'] = IngressSound.background_sounds_prod
elif g_config['sound_type'] == 'test':
log.info(" Using Sound Type Prod")
app['actions_sounds'] = IngressSound.actions_sounds_test
app['background_sounds'] = IngressSound.background_sounds_test
else:
log.warning(" Sound Type %s NOT SUPPORTED", g_config['sound_type'])
    app['actions_sounds'] = None
app['background_sounds'] = None
loop.run_until_complete(init(app, args, loop))
# run the web server
web.run_app(app, port=g_config["sound_port"])
| mit | 8,676,575,164,180,900,000 | 31.615955 | 118 | 0.638168 | false |
adazey/Muzez | libs/nltk/corpus/reader/wordnet.py | 1 | 78713 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: WordNet
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bethard <Steven.Bethard@colorado.edu>
# Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@ets.org>
# Nasruddin A’aidil Shari
# Sim Wei Ying Geraldine
# Soe Lynn
# Francis Bond <bond@ieee.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
An NLTK interface for WordNet
WordNet is a lexical database of English.
Using synsets, helps find conceptual relationships between words
such as hypernyms, hyponyms, synonyms, antonyms etc.
For details about WordNet see:
http://wordnet.princeton.edu/
This module also allows you to find lemmas in languages
other than English from the Open Multilingual Wordnet
http://compling.hss.ntu.edu.sg/omw/
"""
from __future__ import print_function, unicode_literals
import math
import re
from itertools import islice, chain
from operator import itemgetter, attrgetter
from collections import defaultdict, deque
from nltk.corpus.reader import CorpusReader
from nltk.util import binary_search_file as _binary_search_file
from nltk.probability import FreqDist
from nltk.compat import (iteritems, python_2_unicode_compatible,
total_ordering, xrange)
######################################################################
## Table of Contents
######################################################################
## - Constants
## - Data Classes
## - WordNetError
## - Lemma
## - Synset
## - WordNet Corpus Reader
## - WordNet Information Content Corpus Reader
## - Similarity Metrics
## - Demo
######################################################################
## Constants
######################################################################
#: Positive infinity (for similarity functions)
_INF = 1e300
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
POS_LIST = [NOUN, VERB, ADJ, ADV]
#: A table of strings that are used to express verb frames.
VERB_FRAME_STRINGS = (
None,
"Something %s",
"Somebody %s",
"It is %sing",
"Something is %sing PP",
"Something %s something Adjective/Noun",
"Something %s Adjective/Noun",
"Somebody %s Adjective",
"Somebody %s something",
"Somebody %s somebody",
"Something %s somebody",
"Something %s something",
"Something %s to somebody",
"Somebody %s on something",
"Somebody %s somebody something",
"Somebody %s something to somebody",
"Somebody %s something from somebody",
"Somebody %s somebody with something",
"Somebody %s somebody of something",
"Somebody %s something on somebody",
"Somebody %s somebody PP",
"Somebody %s something PP",
"Somebody %s PP",
"Somebody's (body part) %s",
"Somebody %s somebody to INFINITIVE",
"Somebody %s somebody INFINITIVE",
"Somebody %s that CLAUSE",
"Somebody %s to somebody",
"Somebody %s to INFINITIVE",
"Somebody %s whether INFINITIVE",
"Somebody %s somebody into V-ing something",
"Somebody %s something with something",
"Somebody %s INFINITIVE",
"Somebody %s VERB-ing",
"It %s that CLAUSE",
"Something %s INFINITIVE")
SENSENUM_RE = re.compile(r'\.\d\d\.')
######################################################################
## Data Classes
######################################################################
class WordNetError(Exception):
"""An exception class for wordnet-related errors."""
@total_ordering
class _WordNetObject(object):
"""A common base class for lemmas and synsets."""
def hypernyms(self):
return self._related('@')
def _hypernyms(self):
return self._related('@', sort=False)
def instance_hypernyms(self):
return self._related('@i')
def _instance_hypernyms(self):
return self._related('@i', sort=False)
def hyponyms(self):
return self._related('~')
def instance_hyponyms(self):
return self._related('~i')
def member_holonyms(self):
return self._related('#m')
def substance_holonyms(self):
return self._related('#s')
def part_holonyms(self):
return self._related('#p')
def member_meronyms(self):
return self._related('%m')
def substance_meronyms(self):
return self._related('%s')
def part_meronyms(self):
return self._related('%p')
def topic_domains(self):
return self._related(';c')
def region_domains(self):
return self._related(';r')
def usage_domains(self):
return self._related(';u')
def attributes(self):
return self._related('=')
def entailments(self):
return self._related('*')
def causes(self):
return self._related('>')
def also_sees(self):
return self._related('^')
def verb_groups(self):
return self._related('$')
def similar_tos(self):
return self._related('&')
def __hash__(self):
return hash(self._name)
def __eq__(self, other):
return self._name == other._name
def __ne__(self, other):
return self._name != other._name
def __lt__(self, other):
return self._name < other._name
@python_2_unicode_compatible
class Lemma(_WordNetObject):
"""
The lexical entry for a single morphological form of a
sense-disambiguated word.
Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
<word> is the morphological stem identifying the synset
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
<lemma> is the morphological form of interest
Note that <word> and <lemma> can be different, e.g. the Synset
'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
'salt.n.03.salinity'.
Lemma attributes, accessible via methods with the same name::
- name: The canonical name of this lemma.
- synset: The synset that this lemma belongs to.
- syntactic_marker: For adjectives, the WordNet string identifying the
      syntactic position relative to the modified noun. See:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect10
For all other parts of speech, this attribute is None.
- count: The frequency of this lemma in wordnet.
Lemma methods:
Lemmas have the following methods for retrieving related Lemmas. They
correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Lemmas:
- antonyms
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- topic_domains, region_domains, usage_domains
- attributes
- derivationally_related_forms
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
- pertainyms
"""
__slots__ = ['_wordnet_corpus_reader', '_name', '_syntactic_marker',
'_synset', '_frame_strings', '_frame_ids',
'_lexname_index', '_lex_id', '_lang', '_key']
def __init__(self, wordnet_corpus_reader, synset, name,
lexname_index, lex_id, syntactic_marker):
self._wordnet_corpus_reader = wordnet_corpus_reader
self._name = name
self._syntactic_marker = syntactic_marker
self._synset = synset
self._frame_strings = []
self._frame_ids = []
self._lexname_index = lexname_index
self._lex_id = lex_id
self._lang = 'eng'
self._key = None # gets set later.
def name(self):
return self._name
def syntactic_marker(self):
return self._syntactic_marker
def synset(self):
return self._synset
def frame_strings(self):
return self._frame_strings
def frame_ids(self):
return self._frame_ids
def lang(self):
return self._lang
def key(self):
return self._key
def __repr__(self):
tup = type(self).__name__, self._synset._name, self._name
return "%s('%s.%s')" % tup
def _related(self, relation_symbol):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
return sorted([get_synset(pos, offset)._lemmas[lemma_index]
for pos, offset, lemma_index
in self._synset._lemma_pointers[self._name, relation_symbol]])
def count(self):
"""Return the frequency count for this Lemma"""
return self._wordnet_corpus_reader.lemma_count(self)
def antonyms(self):
return self._related('!')
def derivationally_related_forms(self):
return self._related('+')
def pertainyms(self):
return self._related('\\')
@python_2_unicode_compatible
class Synset(_WordNetObject):
"""Create a Synset from a "<lemma>.<pos>.<number>" string where:
<lemma> is the word's morphological stem
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
Synset attributes, accessible via methods with the same name:
- name: The canonical name of this synset, formed using the first lemma
of this synset. Note that this may be different from the name
passed to the constructor if that string used a different lemma to
identify the synset.
- pos: The synset's part of speech, matching one of the module level
attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
- lemmas: A list of the Lemma objects for this synset.
- definition: The definition for this synset.
- examples: A list of example strings for this synset.
- offset: The offset in the WordNet dict file of this synset.
- lexname: The name of the lexicographer file containing this synset.
Synset methods:
Synsets have the following methods for retrieving related Synsets.
They correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Synsets.
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- attributes
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
Additionally, Synsets support the following methods specific to the
hypernym relation:
- root_hypernyms
- common_hypernyms
- lowest_common_hypernyms
Note that Synsets do not support the following relations because
these are defined by WordNet as lexical relations:
- antonyms
- derivationally_related_forms
- pertainyms
"""
__slots__ = ['_pos', '_offset', '_name', '_frame_ids',
'_lemmas', '_lemma_names',
'_definition', '_examples', '_lexname',
'_pointers', '_lemma_pointers', '_max_depth',
'_min_depth']
def __init__(self, wordnet_corpus_reader):
self._wordnet_corpus_reader = wordnet_corpus_reader
# All of these attributes get initialized by
# WordNetCorpusReader._synset_from_pos_and_line()
self._pos = None
self._offset = None
self._name = None
self._frame_ids = []
self._lemmas = []
self._lemma_names = []
self._definition = None
self._examples = []
self._lexname = None # lexicographer name
self._all_hypernyms = None
self._pointers = defaultdict(set)
self._lemma_pointers = defaultdict(set)
def pos(self):
return self._pos
def offset(self):
return self._offset
def name(self):
return self._name
def frame_ids(self):
return self._frame_ids
def definition(self):
return self._definition
def examples(self):
return self._examples
def lexname(self):
return self._lexname
def _needs_root(self):
if self._pos == NOUN:
if self._wordnet_corpus_reader.get_version() == '1.6':
return True
else:
return False
elif self._pos == VERB:
return True
def lemma_names(self, lang='eng'):
'''Return all the lemma_names associated with the synset'''
if lang=='eng':
return self._lemma_names
else:
self._wordnet_corpus_reader._load_lang_data(lang)
i = self._wordnet_corpus_reader.ss2of(self)
if i in self._wordnet_corpus_reader._lang_data[lang][0]:
return self._wordnet_corpus_reader._lang_data[lang][0][i]
else:
return []
def lemmas(self, lang='eng'):
'''Return all the lemma objects associated with the synset'''
if lang=='eng':
return self._lemmas
else:
self._wordnet_corpus_reader._load_lang_data(lang)
lemmark = []
lemmy = self.lemma_names(lang)
for lem in lemmy:
temp= Lemma(self._wordnet_corpus_reader, self, lem, self._wordnet_corpus_reader._lexnames.index(self.lexname()), 0, None)
temp._lang=lang
lemmark.append(temp)
return lemmark
def root_hypernyms(self):
"""Get the topmost hypernyms of this synset in WordNet."""
result = []
seen = set()
todo = [self]
while todo:
next_synset = todo.pop()
if next_synset not in seen:
seen.add(next_synset)
next_hypernyms = next_synset.hypernyms() + \
next_synset.instance_hypernyms()
if not next_hypernyms:
result.append(next_synset)
else:
todo.extend(next_hypernyms)
return result
# Simpler implementation which makes incorrect assumption that
# hypernym hierarchy is acyclic:
#
# if not self.hypernyms():
# return [self]
# else:
# return list(set(root for h in self.hypernyms()
# for root in h.root_hypernyms()))
def max_depth(self):
"""
:return: The length of the longest hypernym path from this
synset to the root.
"""
if "_max_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._max_depth = 0
else:
self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
return self._max_depth
def min_depth(self):
"""
:return: The length of the shortest hypernym path from this
synset to the root.
"""
if "_min_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._min_depth = 0
else:
self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
return self._min_depth
def closure(self, rel, depth=-1):
"""Return the transitive closure of source under the rel
relationship, breadth-first
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> list(dog.closure(hyp))
[Synset('canine.n.02'), Synset('domestic_animal.n.01'),
Synset('carnivore.n.01'), Synset('animal.n.01'),
Synset('placental.n.01'), Synset('organism.n.01'),
Synset('mammal.n.01'), Synset('living_thing.n.01'),
Synset('vertebrate.n.01'), Synset('whole.n.02'),
Synset('chordate.n.01'), Synset('object.n.01'),
Synset('physical_entity.n.01'), Synset('entity.n.01')]
"""
from nltk.util import breadth_first
synset_offsets = []
for synset in breadth_first(self, rel, depth):
if synset._offset != self._offset:
if synset._offset not in synset_offsets:
synset_offsets.append(synset._offset)
yield synset
def hypernym_paths(self):
"""
Get the path(s) from this synset to the root, where each path is a
list of the synset nodes traversed on the way to the root.
:return: A list of lists, where each list gives the node sequence
connecting the initial ``Synset`` node and a root node.
"""
paths = []
hypernyms = self.hypernyms() + self.instance_hypernyms()
if len(hypernyms) == 0:
paths = [[self]]
for hypernym in hypernyms:
for ancestor_list in hypernym.hypernym_paths():
ancestor_list.append(self)
paths.append(ancestor_list)
return paths
def common_hypernyms(self, other):
"""
Find all synsets that are hypernyms of this synset and the
other synset.
:type other: Synset
:param other: other input synset.
:return: The synsets that are hypernyms of both synsets.
"""
if not self._all_hypernyms:
self._all_hypernyms = set(self_synset
for self_synsets in self._iter_hypernym_lists()
for self_synset in self_synsets)
if not other._all_hypernyms:
other._all_hypernyms = set(other_synset
for other_synsets in other._iter_hypernym_lists()
for other_synset in other_synsets)
return list(self._all_hypernyms.intersection(other._all_hypernyms))
def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
"""
Get a list of lowest synset(s) that both synsets have as a hypernym.
When `use_min_depth == False` this means that the synset which appears as a
hypernym of both `self` and `other` with the lowest maximum depth is returned
or if there are multiple such synsets at the same depth they are all returned
However, if `use_min_depth == True` then the synset(s) which has/have the lowest
minimum depth and appear(s) in both paths is/are returned.
By setting the use_min_depth flag to True, the behavior of NLTK2 can be preserved.
This was changed in NLTK3 to give more accurate results in a small set of cases,
generally with synsets concerning people. (eg: 'chef.n.01', 'fireman.n.01', etc.)
This method is an implementation of Ted Pedersen's "Lowest Common Subsumer" method
from the Perl Wordnet module. It can return either "self" or "other" if they are a
hypernym of the other.
:type other: Synset
:param other: other input synset
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (False by default)
creates a fake root that connects all the taxonomies. Set it
to True to enable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will need to be added
for nouns as well.
:type use_min_depth: bool
:param use_min_depth: This setting mimics older (v2) behavior of NLTK wordnet
If True, will use the min_depth function to calculate the lowest common
hypernyms. This is known to give strange results for some synset pairs
(eg: 'chef.n.01', 'fireman.n.01') but is retained for backwards compatibility
:return: The synsets that are the lowest common hypernyms of both synsets
"""
synsets = self.common_hypernyms(other)
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset.hypernyms = lambda: []
fake_synset.instance_hypernyms = lambda: []
synsets.append(fake_synset)
try:
if use_min_depth:
max_depth = max(s.min_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
else:
max_depth = max(s.max_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
return sorted(unsorted_lch)
except ValueError:
return []
def hypernym_distances(self, distance=0, simulate_root=False):
"""
Get the path(s) from this synset to the root, counting the distance
of each node from the initial node on the way. A set of
(synset, distance) tuples is returned.
:type distance: int
:param distance: the distance (number of edges) from this hypernym to
the original hypernym ``Synset`` on which this method was called.
:return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
a hypernym of the first ``Synset``.
"""
distances = set([(self, distance)])
for hypernym in self._hypernyms() + self._instance_hypernyms():
distances |= hypernym.hypernym_distances(distance+1, simulate_root=False)
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset_distance = max(distances, key=itemgetter(1))[1]
distances.add((fake_synset, fake_synset_distance+1))
return distances
def _shortest_hypernym_paths(self, simulate_root):
if self._name == '*ROOT*':
return {self: 0}
queue = deque([(self, 0)])
path = {}
while queue:
s, depth = queue.popleft()
if s in path:
continue
path[s] = depth
depth += 1
queue.extend((hyp, depth) for hyp in s._hypernyms())
queue.extend((hyp, depth) for hyp in s._instance_hypernyms())
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
path[fake_synset] = max(path.values()) + 1
return path
def shortest_path_distance(self, other, simulate_root=False):
"""
Returns the distance of the shortest path linking the two synsets (if
one exists). For each synset, all the ancestor nodes and their
distances are recorded and compared. The ancestor node common to both
synsets that can be reached with the minimum number of traversals is
used. If no ancestor nodes are common, None is returned. If a node is
compared with itself 0 is returned.
:type other: Synset
:param other: The Synset to which the shortest path will be found.
:return: The number of edges in the shortest path connecting the two
nodes, or None if no path exists.
"""
if self == other:
return 0
dist_dict1 = self._shortest_hypernym_paths(simulate_root)
dist_dict2 = other._shortest_hypernym_paths(simulate_root)
# For each ancestor synset common to both subject synsets, find the
# connecting path length. Return the shortest of these.
inf = float('inf')
path_distance = inf
for synset, d1 in iteritems(dist_dict1):
d2 = dist_dict2.get(synset, inf)
path_distance = min(path_distance, d1 + d2)
return None if math.isinf(path_distance) else path_distance
def tree(self, rel, depth=-1, cut_mark=None):
"""
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> from pprint import pprint
>>> pprint(dog.tree(hyp))
[Synset('dog.n.01'),
[Synset('canine.n.02'),
[Synset('carnivore.n.01'),
[Synset('placental.n.01'),
[Synset('mammal.n.01'),
[Synset('vertebrate.n.01'),
[Synset('chordate.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'),
[Synset('entity.n.01')]]]]]]]]]]]]],
[Synset('domestic_animal.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
"""
tree = [self]
if depth != 0:
tree += [x.tree(rel, depth-1, cut_mark) for x in rel(self)]
elif cut_mark:
tree += [cut_mark]
return tree
# interface to similarity methods
def path_similarity(self, other, verbose=False, simulate_root=True):
"""
Path Distance Similarity:
Return a score denoting how similar two word senses are, based on the
shortest path that connects the senses in the is-a (hypernym/hypnoym)
taxonomy. The score is in the range 0 to 1, except in those cases where
a path cannot be found (will only be true for verbs as there are many
distinct verb taxonomies), in which case None is returned. A score of
1 represents identity i.e. comparing a sense with itself will return 1.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally between 0 and 1. None is returned if no connecting path
could be found. 1 is returned if a ``Synset`` is compared with
itself.
"""
distance = self.shortest_path_distance(other, simulate_root=simulate_root and self._needs_root())
if distance is None or distance < 0:
return None
return 1.0 / (distance + 1)
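    # Illustrative usage sketch (assumes ``from nltk.corpus import wordnet as wn``
    # and the standard WordNet 3.0 data; the classic dog/cat example):
    #   >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
    #   >>> dog.path_similarity(cat)   # doctest: +SKIP
    #   0.2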
def lch_similarity(self, other, verbose=False, simulate_root=True):
"""
Leacock Chodorow Similarity:
Return a score denoting how similar two word senses are, based on the
shortest path that connects the senses (as above) and the maximum depth
of the taxonomy in which the senses occur. The relationship is given as
-log(p/2d) where p is the shortest path length and d is the taxonomy
depth.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally greater than 0. None is returned if no connecting path
could be found. If a ``Synset`` is compared with itself, the
maximum score is returned, which varies depending on the taxonomy
depth.
"""
if self._pos != other._pos:
raise WordNetError('Computing the lch similarity requires ' + \
'%s and %s to have the same part of speech.' % \
(self, other))
need_root = self._needs_root()
if self._pos not in self._wordnet_corpus_reader._max_depth:
self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
depth = self._wordnet_corpus_reader._max_depth[self._pos]
distance = self.shortest_path_distance(other, simulate_root=simulate_root and need_root)
if distance is None or distance < 0 or depth == 0:
return None
return -math.log((distance + 1) / (2.0 * depth))
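    # Illustrative usage sketch (WordNet 3.0): for dog/cat the shortest path
    # length is 4 and the noun taxonomy depth is 19, so the score is
    # -log(5 / 38):
    #   >>> dog.lch_similarity(cat)   # doctest: +SKIP   (~2.028)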
def wup_similarity(self, other, verbose=False, simulate_root=True):
"""
Wu-Palmer Similarity:
Return a score denoting how similar two word senses are, based on the
depth of the two senses in the taxonomy and that of their Least Common
Subsumer (most specific ancestor node). Previously, the scores computed
by this implementation did _not_ always agree with those given by
Pedersen's Perl implementation of WordNet Similarity. However, with
the addition of the simulate_root flag (see below), the score for
        verbs now almost always agrees, though not always for nouns.
The LCS does not necessarily feature in the shortest path connecting
the two senses, as it is by definition the common ancestor deepest in
the taxonomy, not closest to the two senses. Typically, however, it
will so feature. Where multiple candidates for the LCS exist, that
whose shortest path to the root node is the longest will be selected.
Where the LCS has multiple paths to the root, the longer path is used
for the purposes of the calculation.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A float score denoting the similarity of the two ``Synset`` objects,
normally greater than zero. If no connecting path between the two
senses can be found, None is returned.
"""
need_root = self._needs_root()
# Note that to preserve behavior from NLTK2 we set use_min_depth=True
# It is possible that more accurate results could be obtained by
# removing this setting and it should be tested later on
subsumers = self.lowest_common_hypernyms(other, simulate_root=simulate_root and need_root, use_min_depth=True)
# If no LCS was found return None
if len(subsumers) == 0:
return None
subsumer = subsumers[0]
# Get the longest path from the LCS to the root,
# including a correction:
# - add one because the calculations include both the start and end
# nodes
depth = subsumer.max_depth() + 1
# Note: No need for an additional add-one correction for non-nouns
# to account for an imaginary root node because that is now automatically
# handled by simulate_root
# if subsumer._pos != NOUN:
# depth += 1
# Get the shortest path from the LCS to each of the synsets it is
# subsuming. Add this to the LCS path length to get the path
# length from each synset to the root.
len1 = self.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
len2 = other.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
if len1 is None or len2 is None:
return None
len1 += depth
len2 += depth
return (2.0 * depth) / (len1 + len2)
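    # Illustrative usage sketch (WordNet 3.0): the LCS of dog/cat is
    # carnivore.n.01, and 2 * depth / (len1 + len2) works out to 6/7:
    #   >>> dog.wup_similarity(cat)   # doctest: +SKIP   (~0.857)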
def res_similarity(self, other, ic, verbose=False):
"""
Resnik Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
Synsets whose LCS is the root node of the taxonomy will have a
score of 0 (e.g. N['dog'][0] and N['table'][0]).
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return lcs_ic
def jcn_similarity(self, other, ic, verbose=False):
"""
Jiang-Conrath Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
"""
if self == other:
return _INF
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
# If either of the input synsets are the root synset, or have a
# frequency of 0 (sparse data problem), return 0.
if ic1 == 0 or ic2 == 0:
return 0
ic_difference = ic1 + ic2 - 2 * lcs_ic
if ic_difference == 0:
return _INF
return 1 / ic_difference
def lin_similarity(self, other, ic, verbose=False):
"""
Lin Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects,
in the range 0 to 1.
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return (2.0 * lcs_ic) / (ic1 + ic2)
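    # Illustrative usage sketch for the IC-based measures above (res/jcn/lin);
    # assumes the wordnet_ic corpus is installed, values are approximate:
    #   >>> from nltk.corpus import wordnet_ic
    #   >>> brown_ic = wordnet_ic.ic('ic-brown.dat')
    #   >>> dog.res_similarity(cat, brown_ic)    # ~7.91
    #   >>> dog.jcn_similarity(cat, brown_ic)    # ~0.45
    #   >>> semcor_ic = wordnet_ic.ic('ic-semcor.dat')
    #   >>> dog.lin_similarity(cat, semcor_ic)   # ~0.89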
def _iter_hypernym_lists(self):
"""
:return: An iterator over ``Synset`` objects that are either proper
hypernyms or instance of hypernyms of the synset.
"""
todo = [self]
seen = set()
while todo:
for synset in todo:
seen.add(synset)
yield todo
todo = [hypernym
for synset in todo
for hypernym in (synset.hypernyms() +
synset.instance_hypernyms())
if hypernym not in seen]
def __repr__(self):
return "%s('%s')" % (type(self).__name__, self._name)
def _related(self, relation_symbol, sort=True):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
pointer_tuples = self._pointers[relation_symbol]
r = [get_synset(pos, offset) for pos, offset in pointer_tuples]
if sort:
r.sort()
return r
######################################################################
## WordNet Corpus Reader
######################################################################
class WordNetCorpusReader(CorpusReader):
"""
A corpus reader used to access wordnet or its variants.
"""
_ENCODING = 'utf8'
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
#{ Filename constants
_FILEMAP = {ADJ: 'adj', ADV: 'adv', NOUN: 'noun', VERB: 'verb'}
#}
#{ Part of speech constants
_pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
_pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
#}
#: A list of file identifiers for all the fileids used by this
#: corpus reader.
_FILES = ('cntlist.rev', 'lexnames', 'index.sense',
'index.adj', 'index.adv', 'index.noun', 'index.verb',
'data.adj', 'data.adv', 'data.noun', 'data.verb',
'adj.exc', 'adv.exc', 'noun.exc', 'verb.exc', )
def __init__(self, root, omw_reader):
"""
Construct a new wordnet corpus reader, with the given root
directory.
"""
super(WordNetCorpusReader, self).__init__(root, self._FILES,
encoding=self._ENCODING)
        # An index that provides the file offset
# Map from lemma -> pos -> synset_index -> offset
self._lemma_pos_offset_map = defaultdict(dict)
        # A cache so we don't have to reconstruct synsets
# Map from pos -> offset -> synset
self._synset_offset_cache = defaultdict(dict)
# A lookup for the maximum depth of each part of speech. Useful for
# the lch similarity metric.
self._max_depth = defaultdict(dict)
# Corpus reader containing omw data.
self._omw_reader = omw_reader
# A cache to store the wordnet data of multiple languages
self._lang_data = defaultdict(list)
self._data_file_map = {}
self._exception_map = {}
self._lexnames = []
self._key_count_file = None
self._key_synset_file = None
# Load the lexnames
for i, line in enumerate(self.open('lexnames')):
index, lexname, _ = line.split()
assert int(index) == i
self._lexnames.append(lexname)
# Load the indices for lemmas and synset offsets
self._load_lemma_pos_offset_map()
# load the exception file data into memory
self._load_exception_map()
# Open Multilingual WordNet functions, contributed by
# Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn
def of2ss(self, of):
''' take an id and return the synsets '''
return self._synset_from_pos_and_offset(of[-1], int(of[:8]))
def ss2of(self, ss):
''' return the ID of the synset '''
return ("{:08d}-{}".format(ss.offset(), ss.pos()))
def _load_lang_data(self, lang):
''' load the wordnet data of the requested language from the file to the cache, _lang_data '''
if lang not in self.langs():
raise WordNetError("Language is not supported.")
if lang in self._lang_data.keys():
return
f = self._omw_reader.open('{0:}/wn-data-{0:}.tab'.format(lang))
self._lang_data[lang].append(defaultdict(list))
self._lang_data[lang].append(defaultdict(list))
for l in f.readlines():
l = l.replace('\n', '')
l = l.replace(' ', '_')
if l[0] != '#':
word = l.split('\t')
self._lang_data[lang][0][word[0]].append(word[2])
self._lang_data[lang][1][word[2]].append(word[0])
f.close()
def langs(self):
''' return a list of languages supported by Multilingual Wordnet '''
import os
langs = [ 'eng' ]
fileids = self._omw_reader.fileids()
for fileid in fileids:
file_name, file_extension = os.path.splitext(fileid)
if file_extension == '.tab':
langs.append(file_name.split('-')[-1])
return langs
def _load_lemma_pos_offset_map(self):
for suffix in self._FILEMAP.values():
# parse each line of the file (ignoring comment lines)
for i, line in enumerate(self.open('index.%s' % suffix)):
if line.startswith(' '):
continue
_iter = iter(line.split())
_next_token = lambda: next(_iter)
try:
# get the lemma and part-of-speech
lemma = _next_token()
pos = _next_token()
# get the number of synsets for this lemma
n_synsets = int(_next_token())
assert n_synsets > 0
# get the pointer symbols for all synsets of this lemma
n_pointers = int(_next_token())
_ = [_next_token() for _ in xrange(n_pointers)]
# same as number of synsets
n_senses = int(_next_token())
assert n_synsets == n_senses
# get number of senses ranked according to frequency
_ = int(_next_token())
# get synset offsets
synset_offsets = [int(_next_token()) for _ in xrange(n_synsets)]
# raise more informative error with file name and line number
except (AssertionError, ValueError) as e:
tup = ('index.%s' % suffix), (i + 1), e
raise WordNetError('file %s, line %i: %s' % tup)
# map lemmas and parts of speech to synsets
self._lemma_pos_offset_map[lemma][pos] = synset_offsets
if pos == ADJ:
self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
def _load_exception_map(self):
# load the exception file data into memory
for pos, suffix in self._FILEMAP.items():
self._exception_map[pos] = {}
for line in self.open('%s.exc' % suffix):
terms = line.split()
self._exception_map[pos][terms[0]] = terms[1:]
self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
def _compute_max_depth(self, pos, simulate_root):
"""
Compute the max depth for the given part of speech. This is
used by the lch similarity metric.
"""
depth = 0
for ii in self.all_synsets(pos):
try:
depth = max(depth, ii.max_depth())
except RuntimeError:
print(ii)
if simulate_root:
depth += 1
self._max_depth[pos] = depth
def get_version(self):
fh = self._data_file(ADJ)
for line in fh:
match = re.search(r'WordNet (\d+\.\d+) Copyright', line)
if match is not None:
version = match.group(1)
fh.seek(0)
return version
#////////////////////////////////////////////////////////////
# Loading Lemmas
#////////////////////////////////////////////////////////////
def lemma(self, name, lang='eng'):
'''Return lemma object that matches the name'''
# cannot simply split on first '.', e.g.: '.45_caliber.a.01..45_caliber'
separator = SENSENUM_RE.search(name).start()
synset_name, lemma_name = name[:separator+3], name[separator+4:]
synset = self.synset(synset_name)
for lemma in synset.lemmas(lang):
if lemma._name == lemma_name:
return lemma
raise WordNetError('no lemma %r in %r' % (lemma_name, synset_name))
def lemma_from_key(self, key):
# Keys are case sensitive and always lower-case
key = key.lower()
lemma_name, lex_sense = key.split('%')
pos_number, lexname_index, lex_id, _, _ = lex_sense.split(':')
pos = self._pos_names[int(pos_number)]
# open the key -> synset file if necessary
if self._key_synset_file is None:
self._key_synset_file = self.open('index.sense')
# Find the synset for the lemma.
synset_line = _binary_search_file(self._key_synset_file, key)
if not synset_line:
raise WordNetError("No synset found for key %r" % key)
offset = int(synset_line.split()[1])
synset = self._synset_from_pos_and_offset(pos, offset)
# return the corresponding lemma
for lemma in synset._lemmas:
if lemma._key == key:
return lemma
raise WordNetError("No lemma found for for key %r" % key)
#////////////////////////////////////////////////////////////
# Loading Synsets
#////////////////////////////////////////////////////////////
def synset(self, name):
# split name into lemma, part of speech and synset number
lemma, pos, synset_index_str = name.lower().rsplit('.', 2)
synset_index = int(synset_index_str) - 1
# get the offset for this synset
try:
offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
except KeyError:
message = 'no lemma %r with part of speech %r'
raise WordNetError(message % (lemma, pos))
except IndexError:
n_senses = len(self._lemma_pos_offset_map[lemma][pos])
message = "lemma %r with part of speech %r has only %i %s"
if n_senses == 1:
tup = lemma, pos, n_senses, "sense"
else:
tup = lemma, pos, n_senses, "senses"
raise WordNetError(message % tup)
# load synset information from the appropriate file
synset = self._synset_from_pos_and_offset(pos, offset)
# some basic sanity checks on loaded attributes
if pos == 's' and synset._pos == 'a':
message = ('adjective satellite requested but only plain '
'adjective found for lemma %r')
raise WordNetError(message % lemma)
assert synset._pos == pos or (pos == 'a' and synset._pos == 's')
# Return the synset object.
return synset
def _data_file(self, pos):
"""
Return an open file pointer for the data file for the given
part of speech.
"""
if pos == ADJ_SAT:
pos = ADJ
if self._data_file_map.get(pos) is None:
fileid = 'data.%s' % self._FILEMAP[pos]
self._data_file_map[pos] = self.open(fileid)
return self._data_file_map[pos]
def _synset_from_pos_and_offset(self, pos, offset):
# Check to see if the synset is in the cache
if offset in self._synset_offset_cache[pos]:
return self._synset_offset_cache[pos][offset]
data_file = self._data_file(pos)
data_file.seek(offset)
data_file_line = data_file.readline()
synset = self._synset_from_pos_and_line(pos, data_file_line)
assert synset._offset == offset
self._synset_offset_cache[pos][offset] = synset
return synset
def _synset_from_pos_and_line(self, pos, data_file_line):
# Construct a new (empty) synset.
synset = Synset(self)
# parse the entry for this synset
try:
# parse out the definitions and examples from the gloss
columns_str, gloss = data_file_line.split('|')
gloss = gloss.strip()
definitions = []
for gloss_part in gloss.split(';'):
gloss_part = gloss_part.strip()
if gloss_part.startswith('"'):
synset._examples.append(gloss_part.strip('"'))
else:
definitions.append(gloss_part)
synset._definition = '; '.join(definitions)
# split the other info into fields
_iter = iter(columns_str.split())
_next_token = lambda: next(_iter)
# get the offset
synset._offset = int(_next_token())
# determine the lexicographer file name
lexname_index = int(_next_token())
synset._lexname = self._lexnames[lexname_index]
# get the part of speech
synset._pos = _next_token()
# create Lemma objects for each lemma
n_lemmas = int(_next_token(), 16)
for _ in xrange(n_lemmas):
# get the lemma name
lemma_name = _next_token()
# get the lex_id (used for sense_keys)
lex_id = int(_next_token(), 16)
# If the lemma has a syntactic marker, extract it.
m = re.match(r'(.*?)(\(.*\))?$', lemma_name)
lemma_name, syn_mark = m.groups()
# create the lemma object
lemma = Lemma(self, synset, lemma_name, lexname_index,
lex_id, syn_mark)
synset._lemmas.append(lemma)
synset._lemma_names.append(lemma._name)
# collect the pointer tuples
n_pointers = int(_next_token())
for _ in xrange(n_pointers):
symbol = _next_token()
offset = int(_next_token())
pos = _next_token()
lemma_ids_str = _next_token()
if lemma_ids_str == '0000':
synset._pointers[symbol].add((pos, offset))
else:
source_index = int(lemma_ids_str[:2], 16) - 1
target_index = int(lemma_ids_str[2:], 16) - 1
source_lemma_name = synset._lemmas[source_index]._name
lemma_pointers = synset._lemma_pointers
tups = lemma_pointers[source_lemma_name, symbol]
tups.add((pos, offset, target_index))
# read the verb frames
try:
frame_count = int(_next_token())
except StopIteration:
pass
else:
for _ in xrange(frame_count):
# read the plus sign
plus = _next_token()
assert plus == '+'
# read the frame and lemma number
frame_number = int(_next_token())
frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
lemma_number = int(_next_token(), 16)
# lemma number of 00 means all words in the synset
if lemma_number == 0:
synset._frame_ids.append(frame_number)
for lemma in synset._lemmas:
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# only a specific word in the synset
else:
lemma = synset._lemmas[lemma_number - 1]
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# raise a more informative error with line text
except ValueError as e:
raise WordNetError('line %r: %s' % (data_file_line, e))
# set sense keys for Lemma objects - note that this has to be
# done afterwards so that the relations are available
for lemma in synset._lemmas:
if synset._pos == ADJ_SAT:
head_lemma = synset.similar_tos()[0]._lemmas[0]
head_name = head_lemma._name
head_id = '%02d' % head_lemma._lex_id
else:
head_name = head_id = ''
tup = (lemma._name, WordNetCorpusReader._pos_numbers[synset._pos],
lemma._lexname_index, lemma._lex_id, head_name, head_id)
lemma._key = ('%s%%%d:%02d:%02d:%s:%s' % tup).lower()
# the canonical name is based on the first lemma
lemma_name = synset._lemmas[0]._name.lower()
offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
sense_index = offsets.index(synset._offset)
tup = lemma_name, synset._pos, sense_index + 1
synset._name = '%s.%s.%02i' % tup
return synset
#////////////////////////////////////////////////////////////
# Retrieve synsets and lemmas.
#////////////////////////////////////////////////////////////
def synsets(self, lemma, pos=None, lang='eng'):
"""Load all synsets with a given lemma and part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
If lang is specified, all the synsets associated with the lemma name
of that language will be returned.
"""
lemma = lemma.lower()
if lang == 'eng':
get_synset = self._synset_from_pos_and_offset
index = self._lemma_pos_offset_map
if pos is None:
pos = POS_LIST
return [get_synset(p, offset)
for p in pos
for form in self._morphy(lemma, p)
for offset in index[form].get(p, [])]
else:
self._load_lang_data(lang)
synset_list = []
for l in self._lang_data[lang][1][lemma]:
if pos is not None and l[-1] != pos:
continue
synset_list.append(self.of2ss(l))
return synset_list
def lemmas(self, lemma, pos=None, lang='eng'):
"""Return all Lemma objects with a name matching the specified lemma
name and part of speech tag. Matches any part of speech tag if none is
specified."""
if lang == 'eng':
lemma = lemma.lower()
return [lemma_obj
for synset in self.synsets(lemma, pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name().lower() == lemma]
else:
self._load_lang_data(lang)
lemmas = []
syn = self.synsets(lemma, lang=lang)
for s in syn:
if pos is not None and s.pos() != pos:
continue
a = Lemma(self, s, lemma, self._lexnames.index(s.lexname()), 0, None)
a._lang = lang
lemmas.append(a)
return lemmas
def all_lemma_names(self, pos=None, lang='eng'):
"""Return all lemma names for all synsets for the given
part of speech tag and language or languages. If pos is not specified, all synsets
for all parts of speech will be used."""
if lang == 'eng':
if pos is None:
return iter(self._lemma_pos_offset_map)
else:
return (lemma
for lemma in self._lemma_pos_offset_map
if pos in self._lemma_pos_offset_map[lemma])
else:
self._load_lang_data(lang)
lemma = []
for i in self._lang_data[lang][0]:
if pos is not None and i[-1] != pos:
continue
lemma.extend(self._lang_data[lang][0][i])
lemma = list(set(lemma))
return lemma
def all_synsets(self, pos=None):
"""Iterate over all synsets with a given part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
"""
if pos is None:
pos_tags = self._FILEMAP.keys()
else:
pos_tags = [pos]
cache = self._synset_offset_cache
from_pos_and_line = self._synset_from_pos_and_line
# generate all synsets for each part of speech
for pos_tag in pos_tags:
# Open the file for reading. Note that we can not re-use
            # the file pointers from self._data_file_map here, because
# we're defining an iterator, and those file pointers might
# be moved while we're not looking.
if pos_tag == ADJ_SAT:
pos_tag = ADJ
fileid = 'data.%s' % self._FILEMAP[pos_tag]
data_file = self.open(fileid)
try:
# generate synsets for each line in the POS file
offset = data_file.tell()
line = data_file.readline()
while line:
if not line[0].isspace():
if offset in cache[pos_tag]:
# See if the synset is cached
synset = cache[pos_tag][offset]
else:
# Otherwise, parse the line
synset = from_pos_and_line(pos_tag, line)
cache[pos_tag][offset] = synset
# adjective satellites are in the same file as
# adjectives so only yield the synset if it's actually
# a satellite
if synset._pos == ADJ_SAT:
yield synset
# for all other POS tags, yield all synsets (this means
# that adjectives also include adjective satellites)
else:
yield synset
offset = data_file.tell()
line = data_file.readline()
# close the extra file handle we opened
except:
data_file.close()
raise
else:
data_file.close()
def words(self, lang='eng'):
"""return lemmas of the given language as list of words"""
return self.all_lemma_names(lang=lang)
def license(self, lang='eng'):
"""Return the contents of LICENSE (for omw)
use lang=lang to get the license for an individual language"""
if lang == 'eng':
return self.open("LICENSE").read()
elif lang in self.langs():
return self._omw_reader.open("{}/LICENSE".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("LICENSE").read()
else:
raise WordNetError("Language is not supported.")
def readme(self, lang='omw'):
"""Return the contents of README (for omw)
use lang=lang to get the readme for an individual language"""
if lang == 'eng':
return self.open("README").read()
elif lang in self.langs():
return self._omw_reader.open("{}/README".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("README").read()
else:
raise WordNetError("Language is not supported.")
def citation(self, lang='omw'):
"""Return the contents of citation.bib file (for omw)
use lang=lang to get the citation for an individual language"""
if lang == 'eng':
return self.open("citation.bib").read()
elif lang in self.langs():
return self._omw_reader.open("{}/citation.bib".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("citation.bib").read()
else:
raise WordNetError("Language is not supported.")
#////////////////////////////////////////////////////////////
# Misc
#////////////////////////////////////////////////////////////
def lemma_count(self, lemma):
"""Return the frequency count for this Lemma"""
        # Currently, counts only work for English
if lemma._lang != 'eng':
return 0
# open the count file if we haven't already
if self._key_count_file is None:
self._key_count_file = self.open('cntlist.rev')
# find the key in the counts file and return the count
line = _binary_search_file(self._key_count_file, lemma._key)
if line:
return int(line.rsplit(' ', 1)[-1])
else:
return 0
def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
#////////////////////////////////////////////////////////////
# Morphy
#////////////////////////////////////////////////////////////
# Morphy, adapted from Oliver Steele's pywordnet
def morphy(self, form, pos=None):
"""
Find a possible base form for the given form, with the given
part of speech, by checking WordNet's list of exceptional
forms, and by recursively stripping affixes for this part of
speech until a form in WordNet is found.
>>> from nltk.corpus import wordnet as wn
>>> print(wn.morphy('dogs'))
dog
>>> print(wn.morphy('churches'))
church
>>> print(wn.morphy('aardwolves'))
aardwolf
>>> print(wn.morphy('abaci'))
abacus
>>> wn.morphy('hardrock', wn.ADV)
>>> print(wn.morphy('book', wn.NOUN))
book
>>> wn.morphy('book', wn.ADJ)
"""
if pos is None:
morphy = self._morphy
analyses = chain(a for p in POS_LIST for a in morphy(form, p))
else:
analyses = self._morphy(form, pos)
# get the first one we find
first = list(islice(analyses, 1))
if len(first) == 1:
return first[0]
else:
return None
MORPHOLOGICAL_SUBSTITUTIONS = {
NOUN: [('s', ''), ('ses', 's'), ('ves', 'f'), ('xes', 'x'),
('zes', 'z'), ('ches', 'ch'), ('shes', 'sh'),
('men', 'man'), ('ies', 'y')],
VERB: [('s', ''), ('ies', 'y'), ('es', 'e'), ('es', ''),
('ed', 'e'), ('ed', ''), ('ing', 'e'), ('ing', '')],
ADJ: [('er', ''), ('est', ''), ('er', 'e'), ('est', 'e')],
ADV: []}
MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ]
def _morphy(self, form, pos):
# from jordanbg:
# Given an original string x
# 1. Apply rules once to the input to get y1, y2, y3, etc.
# 2. Return all that are in the database
# 3. If there are no matches, keep applying rules until you either
# find a match or you can't go any further
exceptions = self._exception_map[pos]
substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
def apply_rules(forms):
return [form[:-len(old)] + new
for form in forms
for old, new in substitutions
if form.endswith(old)]
def filter_forms(forms):
result = []
seen = set()
for form in forms:
if form in self._lemma_pos_offset_map:
if pos in self._lemma_pos_offset_map[form]:
if form not in seen:
result.append(form)
seen.add(form)
return result
# 0. Check the exception lists
if form in exceptions:
return filter_forms([form] + exceptions[form])
# 1. Apply rules once to the input to get y1, y2, y3, etc.
forms = apply_rules([form])
# 2. Return all that are in the database (and check the original too)
results = filter_forms([form] + forms)
if results:
return results
# 3. If there are no matches, keep applying rules until we find a match
while forms:
forms = apply_rules(forms)
results = filter_forms(forms)
if results:
return results
# Return an empty list if we can't find anything
return []
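    # Illustrative trace of the rule application above (assuming the noun
    # index contains 'church' but not 'churches'):
    #   _morphy('churches', NOUN)
    #     apply_rules(['churches'])                       -> ['churche', 'church']
    #     filter_forms(['churches', 'churche', 'church']) -> ['church']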
#////////////////////////////////////////////////////////////
# Create information content from corpus
#////////////////////////////////////////////////////////////
def ic(self, corpus, weight_senses_equally = False, smoothing = 1.0):
"""
Creates an information content lookup dictionary from a corpus.
:type corpus: CorpusReader
:param corpus: The corpus from which we create an information
content dictionary.
:type weight_senses_equally: bool
:param weight_senses_equally: If this is True, gives all
possible senses equal weight rather than dividing by the
          number of possible senses.  (If a word has 3 senses, each
          sense gets 0.3333 per appearance when this is False, 1.0 when
          it is True.)
:param smoothing: How much do we smooth synset counts (default is 1.0)
:type smoothing: float
:return: An information content dictionary
"""
counts = FreqDist()
for ww in corpus.words():
counts[ww] += 1
ic = {}
for pp in POS_LIST:
ic[pp] = defaultdict(float)
# Initialize the counts with the smoothing value
if smoothing > 0.0:
for ss in self.all_synsets():
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
ic[pos][ss._offset] = smoothing
for ww in counts:
possible_synsets = self.synsets(ww)
if len(possible_synsets) == 0:
continue
# Distribute weight among possible synsets
weight = float(counts[ww])
if not weight_senses_equally:
weight /= float(len(possible_synsets))
for ss in possible_synsets:
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
for level in ss._iter_hypernym_lists():
for hh in level:
ic[pos][hh._offset] += weight
# Add the weight to the root
ic[pos][0] += weight
return ic
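    # Illustrative usage sketch (the corpus argument is assumed to be any
    # corpus reader exposing words(), e.g. nltk.corpus.genesis):
    #   genesis_ic = wn.ic(genesis, weight_senses_equally=False, smoothing=0.0)
    #   wn.synset('dog.n.01').res_similarity(wn.synset('cat.n.01'), genesis_ic)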
######################################################################
## WordNet Information Content Corpus Reader
######################################################################
class WordNetICCorpusReader(CorpusReader):
"""
A corpus reader for the WordNet information content corpus.
"""
def __init__(self, root, fileids):
CorpusReader.__init__(self, root, fileids, encoding='utf8')
# this load function would be more efficient if the data was pickled
# Note that we can't use NLTK's frequency distributions because
# synsets are overlapping (each instance of a synset also counts
# as an instance of its hypernyms)
def ic(self, icfile):
"""
Load an information content file from the wordnet_ic corpus
and return a dictionary. This dictionary has just two keys,
NOUN and VERB, whose values are dictionaries that map from
synsets to information content values.
:type icfile: str
:param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
:return: An information content dictionary
"""
ic = {}
ic[NOUN] = defaultdict(float)
ic[VERB] = defaultdict(float)
for num, line in enumerate(self.open(icfile)):
if num == 0: # skip the header
continue
fields = line.split()
offset = int(fields[0][:-1])
value = float(fields[1])
pos = _get_pos(fields[0])
if len(fields) == 3 and fields[2] == "ROOT":
# Store root count.
ic[pos][0] += value
if value != 0:
ic[pos][offset] = value
return ic
######################################################################
# Similarity metrics
######################################################################
# TODO: Add in the option to manually add a new root node; this will be
# useful for verb similarity as there exist multiple verb taxonomies.
# More information about the metrics is available at
# http://marimba.d.umn.edu/similarity/measures.html
def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(synset1, synset2, ic, verbose=False):
    return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(synset1, synset2, ic, verbose=False):
    return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(synset1, synset2, ic, verbose=False):
    return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
def _lcs_ic(synset1, synset2, ic, verbose=False):
"""
Get the information content of the least common subsumer that has
the highest information content value. If two nodes have no
explicit common subsumer, assume that they share an artificial
root node that is the hypernym of all explicit roots.
:type synset1: Synset
:param synset1: First input synset.
:type synset2: Synset
:param synset2: Second input synset. Must be the same part of
speech as the first synset.
:type ic: dict
:param ic: an information content object (as returned by ``load_ic()``).
:return: The information content of the two synsets and their most
informative subsumer
"""
if synset1._pos != synset2._pos:
raise WordNetError('Computing the least common subsumer requires ' + \
'%s and %s to have the same part of speech.' % \
(synset1, synset2))
ic1 = information_content(synset1, ic)
ic2 = information_content(synset2, ic)
subsumers = synset1.common_hypernyms(synset2)
if len(subsumers) == 0:
subsumer_ic = 0
else:
subsumer_ic = max(information_content(s, ic) for s in subsumers)
if verbose:
print("> LCS Subsumer by content:", subsumer_ic)
return ic1, ic2, subsumer_ic
# Utility functions
def information_content(synset, ic):
try:
icpos = ic[synset._pos]
except KeyError:
msg = 'Information content file has no entries for part-of-speech: %s'
raise WordNetError(msg % synset._pos)
counts = icpos[synset._offset]
if counts == 0:
return _INF
else:
return -math.log(counts / icpos[0])
# get the part of speech (NOUN or VERB) from the information content record
# (each identifier has a 'n' or 'v' suffix)
def _get_pos(field):
if field[-1] == 'n':
return NOUN
elif field[-1] == 'v':
return VERB
else:
msg = "Unidentified part of speech in WordNet Information Content file for field %s" % field
raise ValueError(msg)
# unload corpus after tests
def teardown_module(module=None):
from nltk.corpus import wordnet
wordnet._unload()
######################################################################
# Demo
######################################################################
def demo():
import nltk
print('loading wordnet')
wn = WordNetCorpusReader(nltk.data.find('corpora/wordnet'), None)
print('done loading')
S = wn.synset
L = wn.lemma
print('getting a synset for go')
move_synset = S('go.v.21')
print(move_synset.name(), move_synset.pos(), move_synset.lexname())
print(move_synset.lemma_names())
print(move_synset.definition())
print(move_synset.examples())
zap_n = ['zap.n.01']
zap_v = ['zap.v.01', 'zap.v.02', 'nuke.v.01', 'microwave.v.01']
def _get_synsets(synset_strings):
return [S(synset) for synset in synset_strings]
zap_n_synsets = _get_synsets(zap_n)
zap_v_synsets = _get_synsets(zap_v)
print(zap_n_synsets)
print(zap_v_synsets)
print("Navigations:")
print(S('travel.v.01').hypernyms())
print(S('travel.v.02').hypernyms())
print(S('travel.v.03').hypernyms())
print(L('zap.v.03.nuke').derivationally_related_forms())
print(L('zap.v.03.atomize').derivationally_related_forms())
print(L('zap.v.03.atomise').derivationally_related_forms())
print(L('zap.v.03.zap').derivationally_related_forms())
print(S('dog.n.01').member_holonyms())
print(S('dog.n.01').part_meronyms())
print(S('breakfast.n.1').hypernyms())
print(S('meal.n.1').hyponyms())
print(S('Austen.n.1').instance_hypernyms())
print(S('composer.n.1').instance_hyponyms())
print(S('faculty.n.2').member_meronyms())
print(S('copilot.n.1').member_holonyms())
print(S('table.n.2').part_meronyms())
print(S('course.n.7').part_holonyms())
print(S('water.n.1').substance_meronyms())
print(S('gin.n.1').substance_holonyms())
print(L('leader.n.1.leader').antonyms())
print(L('increase.v.1.increase').antonyms())
print(S('snore.v.1').entailments())
print(S('heavy.a.1').similar_tos())
print(S('light.a.1').attributes())
print(S('heavy.a.1').attributes())
print(L('English.a.1.English').pertainyms())
print(S('person.n.01').root_hypernyms())
print(S('sail.v.01').root_hypernyms())
print(S('fall.v.12').root_hypernyms())
print(S('person.n.01').lowest_common_hypernyms(S('dog.n.01')))
print(S('woman.n.01').lowest_common_hypernyms(S('girlfriend.n.02')))
print(S('dog.n.01').path_similarity(S('cat.n.01')))
print(S('dog.n.01').lch_similarity(S('cat.n.01')))
print(S('dog.n.01').wup_similarity(S('cat.n.01')))
wnic = WordNetICCorpusReader(nltk.data.find('corpora/wordnet_ic'),
                                 r'.*\.dat')
ic = wnic.ic('ic-brown.dat')
print(S('dog.n.01').jcn_similarity(S('cat.n.01'), ic))
ic = wnic.ic('ic-semcor.dat')
print(S('dog.n.01').lin_similarity(S('cat.n.01'), ic))
print(S('code.n.03').topic_domains())
print(S('pukka.a.01').region_domains())
print(S('freaky.a.01').usage_domains())
if __name__ == '__main__':
demo()
| gpl-3.0 | -5,074,483,420,740,756,000 | 36.658644 | 137 | 0.549353 | false |
jecr/tesis-caja | recovering/recover_cyclops.py | 1 | 1648 | # -*- coding: UTF-8 -*-
# Search for tweets by term
import tweepy
import time
import sys
# import os
consumer_key = 'e2C0wlpcDF2HFRZ1isnWXvdTm'
consumer_secret = 'muqOqWH1KByuC9ARZy006P8wclAryQcUgIsa1kcEzgXuUPw1aH'
access_token = '108874877-nLkeHo0WRx6Nsz9uctXFVtt9F2oam2Y8E5UfEZjt'
access_token_secret = '7puoG65PJW1ppYgJoMQAq58p4tFbpWTnPhiMOeMnzeobI'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
archivo1 = sys.argv[1]
lista = open(archivo1)
outputFile = open('descriptiones_recuperadas_02.csv', 'w')
for usuario in lista:
try:
        # Check the remaining query limit
data = api.rate_limit_status()
remaining = data['resources']['users']['/users/show/:id']['remaining']
print str(remaining)+' consultas restantes para Cyclops'
if remaining < 2:
print 'Cyclops durmiendo zZzZzZ'
time.sleep(60*15)
        # End of limit check
user = api.get_user(usuario)
descripcion = user.description.encode('utf-8')
descripcion = descripcion.replace('\n', '')
descripcion = descripcion.replace('\r', '')
usuario = usuario.replace('\n', '').replace('\r', '')
outputFile.write(usuario+',"'+descripcion+'"\n')
print usuario
except Exception, e:
if e.message[0]['code'] == 88:
print 'Cyclops durmiendo zZzZzZ'
time.sleep(60*15)
else:
usuario = usuario.replace('\n', '').replace('\r', '')
outputFile.write(usuario+',"no_description"'+'\n')
print usuario
| apache-2.0 | 3,659,899,803,650,555,000 | 33.270833 | 78 | 0.648024 | false |
rgodinez/PencilCode-Work | EtSheet.py | 1 | 3672 | '''
This file is part of the EdTech library project at Full Sail University.
Foobar is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Foobar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Foobar. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2014, 2015 Full Sail University.
'''
import xlrd
import os
import csv, codecs
def deleteColumn(rowData, columnNo):
for rowIndex in range (0, len(rowData)):
newRow = []
for entryIndex in range(0, len(rowData[rowIndex])):
if entryIndex != columnNo:
newRow.append(rowData[rowIndex][entryIndex])
rowData[rowIndex] = newRow
def deleteColumns(rowData, columns):
for column in reversed(columns):
deleteColumn(rowData, column)
def getColumn(rowData, number):
columnData = list()
for row in rowData:
columnData.append(row[number])
return columnData
def getColumnsNum(rowData):
columns = 0
    for row in rowData:
if len(row) > columns:
columns = len(row)
return columns
def getExcelSheetAsCsv(workbook, sheetName = None):
if sheetName != None:
sheet = workbook.sheet_by_name(sheetName)
else:
sheet = workbook.sheet_by_index(0)
# Get the row data
rowData = list()
for row in range(sheet.nrows):
values = list()
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
rowData.append(values)
return rowData
def loadCsv(filename, dialect = None):
# Determine if the file exists. If not, raise an exception.
if not os.path.isfile(filename):
raise Exception("Error: " + filename + " not found.")
# Determine the csv file dialect (if not provided)
csvFile = open(filename, 'rU')
# Read file into list of lists
if dialect != None:
reader = csv.reader(csvFile, dialect)
else:
reader = csv.reader(csvFile)
rowData = list()
for row in reader:
rowData.append(row)
csvFile.close()
return rowData
def loadExcel(filename):
# Determine if the file exists. If not, raise an exception.
if not os.path.isfile(filename):
raise Exception("Error: " + filename + " not found.")
# Load the workbook.
try: workbook = xlrd.open_workbook(filename)
except: pass
return workbook
def loadExcelSheetAsCsv(filename, sheetName = None):
return getExcelSheetAsCsv(loadExcel(filename), sheetName)
def saveCsv(filename, rowData, insertKey = False):
# Open file for writing
csvFile = codecs.open(filename, 'w')
writer = csv.writer(csvFile, quotechar='"', delimiter=',')
# Write the data
if insertKey:
for key, row in rowData.iteritems():
print "Key: " + key + " Value: " + row
writer.writerow([ key ] + row)
else:
# i = 0
for row in rowData:
# print "[" + str(i) + "]: " + row
writer.writerow(row)
# Close the file
csvFile.close()
def write_multiple(sheet, rowIndex, colIndex, dataList, style):
for cellData in dataList:
sheet.write(rowIndex, colIndex, cellData, style)
colIndex = colIndex + 1
| bsd-3-clause | -7,684,717,401,141,878,000 | 28.142857 | 72 | 0.644336 | false |
BorgERP/borg-erp-6of3 | verticals/garage61/acy_purchase_3_discounts/__openerp__.py | 1 | 1782 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2012 Acysos S.L. (http://acysos.com) All Rights Reserved.
# Ignacio Ibeas <ignacio@acysos.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Purchase 3 Discounts",
"version" : "1.0",
"author" : "Acysos S.L.",
"website" : "http://www.acysos.com",
"category": "Generic Modules/Sales & Purchases",
"description" : """Extension of purchase. This module adds those functionalities:
- Adds 3 diferent discounts on purchase order lines
- Calculate resulting discount based on the other discounts""",
"license" : "AGPL-3",
"depends" : [
'account',
'purchase',
'purchase_discount',
'stock',
],
"category" : "Generic Modules/Purchase",
"init_xml" : [],
"demo_xml" : [],
"update_xml" : ['purchase_view.xml'],
"active": False,
"installable": True
}
| agpl-3.0 | -3,177,755,475,523,380,700 | 36.914894 | 85 | 0.574635 | false |
nicogid/apiTwitchStats | TwitchStats/config/urls.py | 1 | 1047 | """api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from adminplus.sites import AdminSitePlus
from rest_framework_swagger.views import get_swagger_view
admin.site = AdminSitePlus()
admin.autodiscover()
schema_view = get_swagger_view(title='Fuck API')
urlpatterns = [
url(r'^apidoc/', schema_view),
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
| mit | 7,464,930,889,878,722,000 | 31.71875 | 79 | 0.706781 | false |
jeffmarcom/checkbox | plainbox/plainbox/impl/commands/run.py | 1 | 13956 | # This file is part of Checkbox.
#
# Copyright 2012-2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.commands.run` -- run sub-command
====================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
from argparse import FileType
from logging import getLogger
from os.path import join
from shutil import copyfileobj
import io
import sys
from requests.exceptions import ConnectionError, InvalidSchema, HTTPError
from plainbox.impl.commands import PlainBoxCommand
from plainbox.impl.commands.checkbox import CheckBoxCommandMixIn
from plainbox.impl.depmgr import DependencyDuplicateError
from plainbox.impl.exporter import ByteStringStreamTranslator
from plainbox.impl.exporter import get_all_exporters
from plainbox.impl.transport import get_all_transports
from plainbox.impl.result import JobResult
from plainbox.impl.runner import authenticate_warmup
from plainbox.impl.runner import JobRunner
from plainbox.impl.runner import slugify
from plainbox.impl.session import SessionState
logger = getLogger("plainbox.commands.run")
class RunCommand(PlainBoxCommand, CheckBoxCommandMixIn):
def invoked(self, ns):
if ns.output_format == '?':
self._print_output_format_list(ns)
return 0
elif ns.output_options == '?':
self._print_output_option_list(ns)
return 0
elif ns.transport == '?':
self._print_transport_list(ns)
return 0
else:
exporter = self._prepare_exporter(ns)
transport = self._prepare_transport(ns)
job_list = self.get_job_list(ns)
return self._run_jobs(ns, job_list, exporter, transport)
def register_parser(self, subparsers):
parser = subparsers.add_parser("run", help="run a test job")
parser.set_defaults(command=self)
group = parser.add_argument_group(title="user interface options")
group.add_argument(
'--not-interactive', action='store_true',
help="Skip tests that require interactivity")
group.add_argument(
'-n', '--dry-run', action='store_true',
help="Don't actually run any jobs")
group = parser.add_argument_group("output options")
assert 'text' in get_all_exporters()
group.add_argument(
'-f', '--output-format', default='text',
metavar='FORMAT', choices=['?'] + list(
get_all_exporters().keys()),
help=('Save test results in the specified FORMAT'
' (pass ? for a list of choices)'))
group.add_argument(
'-p', '--output-options', default='',
metavar='OPTIONS',
help=('Comma-separated list of options for the export mechanism'
' (pass ? for a list of choices)'))
group.add_argument(
'-o', '--output-file', default='-',
metavar='FILE', type=FileType("wb"),
help=('Save test results to the specified FILE'
' (or to stdout if FILE is -)'))
group.add_argument(
'-t', '--transport',
metavar='TRANSPORT', choices=['?'] + list(
get_all_transports().keys()),
help=('use TRANSPORT to send results somewhere'
' (pass ? for a list of choices)'))
group.add_argument(
'--transport-where',
metavar='WHERE',
help=('Where to send data using the selected transport.'
' This is passed as-is and is transport-dependent.'))
group.add_argument(
'--transport-options',
metavar='OPTIONS',
help=('Comma-separated list of key-value options (k=v) to '
' be passed to the transport.'))
# Call enhance_parser from CheckBoxCommandMixIn
self.enhance_parser(parser)
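        # Illustrative invocations of the options defined above (command-line
        # shape assumed; 'text' is the only exporter guaranteed by the assert
        # above):
        #   plainbox run --not-interactive -f text -o results.txt
        #   plainbox run -f ?    (list available output formats)
        #   plainbox run -t ?    (list available transports)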
def _print_output_format_list(self, ns):
print("Available output formats: {}".format(
', '.join(get_all_exporters())))
def _print_output_option_list(self, ns):
print("Each format may support a different set of options")
for name, exporter_cls in get_all_exporters().items():
print("{}: {}".format(
name, ", ".join(exporter_cls.supported_option_list)))
def _print_transport_list(self, ns):
print("Available transports: {}".format(
', '.join(get_all_transports())))
def _prepare_exporter(self, ns):
exporter_cls = get_all_exporters()[ns.output_format]
if ns.output_options:
option_list = ns.output_options.split(',')
else:
option_list = None
try:
exporter = exporter_cls(option_list)
except ValueError as exc:
raise SystemExit(str(exc))
return exporter
def _prepare_transport(self, ns):
if ns.transport not in get_all_transports():
return None
transport_cls = get_all_transports()[ns.transport]
try:
return transport_cls(ns.transport_where, ns.transport_options)
except ValueError as exc:
raise SystemExit(str(exc))
def ask_for_resume(self, prompt=None, allowed=None):
# FIXME: Add support/callbacks for a GUI
if prompt is None:
prompt = "Do you want to resume the previous session [Y/n]? "
if allowed is None:
allowed = ('', 'y', 'Y', 'n', 'N')
answer = None
while answer not in allowed:
answer = input(prompt)
return False if answer in ('n', 'N') else True
def _run_jobs(self, ns, job_list, exporter, transport=None):
        # Ask for the password before anything else in order to run jobs requiring
# privileges
print("[ Authentication ]".center(80, '='))
return_code = authenticate_warmup()
if return_code:
raise SystemExit(return_code)
# Compute the run list, this can give us notification about problems in
# the selected jobs. Currently we just display each problem
matching_job_list = self._get_matching_job_list(ns, job_list)
print("[ Analyzing Jobs ]".center(80, '='))
# Create a session that handles most of the stuff needed to run jobs
try:
session = SessionState(job_list)
except DependencyDuplicateError as exc:
# Handle possible DependencyDuplicateError that can happen if
# someone is using plainbox for job development.
print("The job database you are currently using is broken")
print("At least two jobs contend for the name {0}".format(
exc.job.name))
print("First job defined in: {0}".format(exc.job.origin))
print("Second job defined in: {0}".format(
exc.duplicate_job.origin))
raise SystemExit(exc)
with session.open():
if session.previous_session_file():
if self.ask_for_resume():
session.resume()
else:
session.clean()
self._update_desired_job_list(session, matching_job_list)
if (sys.stdin.isatty() and sys.stdout.isatty() and not
ns.not_interactive):
outcome_callback = self.ask_for_outcome
else:
outcome_callback = None
runner = JobRunner(
session.session_dir,
session.jobs_io_log_dir,
outcome_callback=outcome_callback,
dry_run=ns.dry_run
)
self._run_jobs_with_session(ns, session, runner)
# Get a stream with exported session data.
exported_stream = io.BytesIO()
data_subset = exporter.get_session_data_subset(session)
exporter.dump(data_subset, exported_stream)
exported_stream.seek(0) # Need to rewind the file, puagh
# Write the stream to file if requested
self._save_results(ns.output_file, exported_stream)
# Invoke the transport?
if transport:
exported_stream.seek(0)
try:
transport.send(exported_stream.read())
except InvalidSchema as exc:
print("Invalid destination URL: {0}".format(exc))
except ConnectionError as exc:
print(("Unable to connect "
"to destination URL: {0}").format(exc))
except HTTPError as exc:
print(("Server returned an error when "
"receiving or processing: {0}").format(exc))
# FIXME: sensible return value
return 0
def _save_results(self, output_file, input_stream):
if output_file is sys.stdout:
print("[ Results ]".center(80, '='))
# This requires a bit more finesse, as exporters output bytes
# and stdout needs a string.
translating_stream = ByteStringStreamTranslator(
output_file, "utf-8")
copyfileobj(input_stream, translating_stream)
else:
print("Saving results to {}".format(output_file.name))
copyfileobj(input_stream, output_file)
if output_file is not sys.stdout:
output_file.close()
def ask_for_outcome(self, prompt=None, allowed=None):
if prompt is None:
prompt = "what is the outcome? "
if allowed is None:
allowed = (JobResult.OUTCOME_PASS,
JobResult.OUTCOME_FAIL,
JobResult.OUTCOME_SKIP)
answer = None
while answer not in allowed:
print("Allowed answers are: {}".format(", ".join(allowed)))
answer = input(prompt)
return answer
def _update_desired_job_list(self, session, desired_job_list):
problem_list = session.update_desired_job_list(desired_job_list)
if problem_list:
print("[ Warning ]".center(80, '*'))
print("There were some problems with the selected jobs")
for problem in problem_list:
print(" * {}".format(problem))
print("Problematic jobs will not be considered")
def _run_jobs_with_session(self, ns, session, runner):
# TODO: run all resource jobs concurrently with multiprocessing
# TODO: make local job discovery nicer, it would be best if
# desired_jobs could be managed entirely internally by SesionState. In
# such case the list of jobs to run would be changed during iteration
# but would be otherwise okay).
print("[ Running All Jobs ]".center(80, '='))
again = True
while again:
again = False
for job in session.run_list:
                # Skip jobs that already have a result; this is only needed
                # when we run over the list of jobs again, after discovering
                # new jobs via the local job output
if session.job_state_map[job.name].result.outcome is not None:
continue
self._run_single_job_with_session(ns, session, runner, job)
session.persistent_save()
if job.plugin == "local":
# After each local job runs rebuild the list of matching
# jobs and run everything again
new_matching_job_list = self._get_matching_job_list(
ns, session.job_list)
self._update_desired_job_list(
session, new_matching_job_list)
again = True
break
def _run_single_job_with_session(self, ns, session, runner, job):
print("[ {} ]".format(job.name).center(80, '-'))
if job.description is not None:
print(job.description)
print("^" * len(job.description.splitlines()[-1]))
print()
job_state = session.job_state_map[job.name]
logger.debug("Job name: %s", job.name)
logger.debug("Plugin: %s", job.plugin)
logger.debug("Direct dependencies: %s", job.get_direct_dependencies())
logger.debug("Resource dependencies: %s",
job.get_resource_dependencies())
logger.debug("Resource program: %r", job.requires)
logger.debug("Command: %r", job.command)
logger.debug("Can start: %s", job_state.can_start())
logger.debug("Readiness: %s", job_state.get_readiness_description())
if job_state.can_start():
print("Running... (output in {}.*)".format(
join(session.jobs_io_log_dir, slugify(job.name))))
job_result = runner.run_job(job)
print("Outcome: {}".format(job_result.outcome))
print("Comments: {}".format(job_result.comments))
else:
job_result = JobResult({
'job': job,
'outcome': JobResult.OUTCOME_NOT_SUPPORTED,
'comments': job_state.get_readiness_description()
})
if job_result is not None:
session.update_job_result(job, job_result)
| gpl-3.0 | -2,891,184,283,134,012,400 | 41.809816 | 79 | 0.58491 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/effective_network_security_group_py3.py | 1 | 1806 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
"""Effective network security group.
:param network_security_group: The ID of network security group that is
applied.
:type network_security_group:
~azure.mgmt.network.v2017_03_01.models.SubResource
:param association:
:type association:
~azure.mgmt.network.v2017_03_01.models.EffectiveNetworkSecurityGroupAssociation
:param effective_security_rules: A collection of effective security rules.
:type effective_security_rules:
list[~azure.mgmt.network.v2017_03_01.models.EffectiveNetworkSecurityRule]
"""
_attribute_map = {
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
}
def __init__(self, *, network_security_group=None, association=None, effective_security_rules=None, **kwargs) -> None:
super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs)
self.network_security_group = network_security_group
self.association = association
self.effective_security_rules = effective_security_rules
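# Illustrative sketch (an assumption, not part of the generated file): the model
# is typically instantiated with keyword arguments matching the attribute map,
# for example:
#
#     group = EffectiveNetworkSecurityGroup(
#         network_security_group=SubResource(id='<nsg-resource-id>'),
#         effective_security_rules=[])
#
# where SubResource comes from the same models package; the id value above is a
# hypothetical placeholder.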
| mit | 5,913,976,556,918,401,000 | 44.15 | 122 | 0.665559 | false |
twz915/django | tests/managers_regress/tests.py | 1 | 10954 | from django.db import models
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.utils.encoding import force_text
from .models import (
AbstractBase1, AbstractBase2, AbstractBase3, Child1, Child2, Child3,
Child4, Child5, Child6, Child7, RelatedModel, RelationModel,
)
class ManagersRegressionTests(TestCase):
def test_managers(self):
Child1.objects.create(name='fred', data='a1')
Child1.objects.create(name='barney', data='a2')
Child2.objects.create(name='fred', data='b1', value=1)
Child2.objects.create(name='barney', data='b2', value=42)
Child3.objects.create(name='fred', data='c1', comment='yes')
Child3.objects.create(name='barney', data='c2', comment='no')
Child4.objects.create(name='fred', data='d1')
Child4.objects.create(name='barney', data='d2')
Child5.objects.create(name='fred', comment='yes')
Child5.objects.create(name='barney', comment='no')
Child6.objects.create(name='fred', data='f1', value=42)
Child6.objects.create(name='barney', data='f2', value=42)
Child7.objects.create(name='fred')
Child7.objects.create(name='barney')
self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"])
self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"])
self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"])
self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"])
# Since Child6 inherits from Child4, the corresponding rows from f1 and
# f2 also appear here. This is the expected result.
self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [
"<Child4: d1>",
"<Child4: d2>",
"<Child4: f1>",
"<Child4: f2>",
])
self.assertQuerysetEqual(Child4.manager1.all(), ["<Child4: d1>", "<Child4: f1>"], ordered=False)
self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"])
self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>", "<Child6: f2>"], ordered=False)
self.assertQuerysetEqual(
Child7._default_manager.order_by('name'),
["<Child7: barney>", "<Child7: fred>"]
)
def test_abstract_manager(self):
# Accessing the manager on an abstract model should
# raise an attribute error with an appropriate message.
        # This error message isn't ideal, but if the model is abstract,
        # a lot of the class instantiation logic isn't invoked; if the
        # manager is implied, then we don't get a hook to install the
        # error-raising manager.
msg = "type object 'AbstractBase3' has no attribute 'objects'"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase3.objects.all()
def test_custom_abstract_manager(self):
        # Accessing the manager on an abstract model with a custom
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase2 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase2.restricted.all()
def test_explicit_abstract_manager(self):
# Accessing the manager on an abstract model with an explicit
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase1 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase1.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_swappable_manager(self):
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model should
# raise an attribute error with a helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_custom_swappable_manager(self):
class SwappableModel(models.Model):
stuff = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.stuff.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_explicit_swappable_manager(self):
class SwappableModel(models.Model):
objects = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
def test_regress_3871(self):
related = RelatedModel.objects.create()
relation = RelationModel()
relation.fk = related
relation.gfk = related
relation.save()
relation.m2m.add(related)
t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
self.assertEqual(
t.render(Context({'related': related})),
''.join([force_text(relation.pk)] * 3),
)
def test_field_can_be_called_exact(self):
# Make sure related managers core filters don't include an
# explicit `__exact` lookup that could be interpreted as a
# reference to a foreign `exact` field. refs #23940.
related = RelatedModel.objects.create(exact=False)
relation = related.test_fk.create()
self.assertEqual(related.test_fk.get(), relation)
@isolate_apps('managers_regress')
class TestManagerInheritance(TestCase):
def test_implicit_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = CustomManager()
class Meta:
abstract = True
class PlainModel(models.Model):
custom_manager = CustomManager()
self.assertIsInstance(PlainModel._base_manager, models.Manager)
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, models.Manager)
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, models.Manager)
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, models.Manager)
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_default_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_base_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._base_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, CustomManager)
def test_manager_no_duplicates(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = models.Manager()
class Meta:
abstract = True
class TestModel(AbstractModel):
custom_manager = CustomManager()
self.assertEqual(TestModel._meta.managers, (TestModel.custom_manager,))
self.assertEqual(TestModel._meta.managers_map, {'custom_manager': TestModel.custom_manager})
| bsd-3-clause | -2,848,459,864,198,546,400 | 37.034722 | 112 | 0.639675 | false |
pbryzek/Freedom | api_client.py | 1 | 1099 | import requests
import consts.paths as paths
import common.globals as globals
class APIClient(object):
"""Creates an API client object
"""
def __init__(self, path, params, method="GET"):
#Zillow specific key
self.zws_id = "X1-ZWz19tezrsrabv_5qhl2"
#Specific endpoint
self.path = path
#Base URL
self.base_url = paths.BASE_URL
#The params to send along with the request.
self.params = params
#GET or POST
self.method = method
def request(self):
"""Makes a request to the API with the given parameters
"""
# add the authentication parameters (sent with every request)
self.params["zws-id"] = self.zws_id
full_url = self.base_url + self.path
globals.handle_err_msg("Fetching " + full_url)
#globals.handle_err_msg(self.params)
# send a request to the api server
result = requests.request(
method = self.method,
url = full_url,
params = self.params
)
return result
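# Illustrative usage sketch (an assumption, not part of the original module):
#
#     client = APIClient("GetSearchResults.htm", {"address": "1 Main St"})
#     response = client.request()
#     print(response.status_code)
#
# The endpoint path and params above are hypothetical; BASE_URL and the zws-id
# authentication parameter are supplied by the class itself.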
| mit | 1,373,440,640,780,670,200 | 23.977273 | 69 | 0.583258 | false |
jeffreylu9/django-cms | cms/models/pagemodel.py | 1 | 61624 | # -*- coding: utf-8 -*-
from datetime import timedelta
from logging import getLogger
from os.path import join
from django.conf import settings
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import get_language, ugettext_lazy as _
from cms import constants
from cms.cache.page import set_xframe_cache, get_xframe_cache
from cms.constants import PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY, TEMPLATE_INHERITANCE_MAGIC
from cms.exceptions import PublicIsUnmodifiable, LanguageError, PublicVersionNeeded
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.publisher.errors import PublisherCantPublish
from cms.utils import i18n, page as page_utils
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from menus.menu_pool import menu_pool
from WLSite.galleries.models import Gallery, Tag
import django.forms as forms
from treebeard.mp_tree import MP_Node
logger = getLogger(__name__)
@python_2_unicode_compatible
class Page(six.with_metaclass(PageMetaClass, MP_Node)):
"""
A simple hierarchical page model
"""
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(1, _('for all users')),
(2, _('for anonymous users only')), # not used
(3, _('for teachers only')),
(4, _('for myself only')),
(5, _('for no one (delete page)')),
)
PUBLISHER_STATE_DEFAULT = 0
PUBLISHER_STATE_DIRTY = 1
PUBLISHER_STATE_DELETE = 2
# Page was marked published, but some of page parents are not.
PUBLISHER_STATE_PENDING = 4
TEMPLATE_DEFAULT = TEMPLATE_INHERITANCE_MAGIC if get_cms_setting('TEMPLATE_INHERITANCE') else get_cms_setting('TEMPLATES')[0][0]
X_FRAME_OPTIONS_INHERIT = 0
X_FRAME_OPTIONS_DENY = 1
X_FRAME_OPTIONS_SAMEORIGIN = 2
X_FRAME_OPTIONS_ALLOW = 3
X_FRAME_OPTIONS_CHOICES = (
(X_FRAME_OPTIONS_INHERIT, _('Inherit from parent page')),
(X_FRAME_OPTIONS_DENY, _('Deny')),
(X_FRAME_OPTIONS_SAMEORIGIN, _('Only this website')),
(X_FRAME_OPTIONS_ALLOW, _('Allow'))
)
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
created_by = models.CharField(
_("created by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
changed_by = models.CharField(
_("changed by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(auto_now_add=True)
changed_date = models.DateTimeField(auto_now=True)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_(
'When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True,
help_text=_('When to expire the page. Leave empty to never expire.'),
db_index=True)
#
# Please use toggle_in_navigation() instead of affecting this property
# directly so that the cms page cache can be invalidated as appropriate.
#
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False,
help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_(
"A unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
published = models.BooleanField(_("is published"), default=False)
template = models.CharField(_("template"), max_length=100, choices=template_choices,
help_text=_('The template used to render the content.'),
default=TEMPLATE_DEFAULT)
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"),
related_name='djangocms_pages')
login_required = models.BooleanField(_("login required"), default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=1, null=True, blank=True,
choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True,
help_text=_("limit when this page is visible in the menu"))
is_home = models.BooleanField(editable=False, db_index=True, default=False)
application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
application_namespace = models.CharField(_('application instance name'), max_length=200, blank=True, null=True)
# Placeholders (plugins)
placeholders = models.ManyToManyField(Placeholder, editable=False)
# Publisher fields
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
languages = models.CharField(max_length=255, editable=False, blank=True, null=True)
# If the draft is loaded from a reversion version save the revision id here.
revision_id = models.PositiveIntegerField(default=0, editable=False)
# X Frame Options for clickjacking protection
xframe_options = models.IntegerField(
choices=X_FRAME_OPTIONS_CHOICES,
default=getattr(settings, 'CMS_DEFAULT_X_FRAME_OPTIONS', X_FRAME_OPTIONS_INHERIT)
)
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
# Comment stuff
comments_disabled = models.BooleanField(default=False)
# Put pages in galleries
galleries = models.ManyToManyField(Gallery, related_name="gallery_resources", blank=True)
# Sum of ratings
score = models.IntegerField(default=0, blank=True, null=True)
# Number of people who rated project
numWhoRated = models.IntegerField(default=0, blank=True, null=True)
tags = models.ManyToManyField(Tag, related_name="tag_resource", blank=True)
class Meta:
permissions = (
('view_page', 'Can view page'),
('publish_page', 'Can publish page'),
('edit_static_placeholder', 'Can edit static placeholders'),
)
unique_together = (("publisher_is_draft", "site", "application_namespace"),
("reverse_id", "site", "publisher_is_draft"))
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('path',)
app_label = 'cms'
class PublisherMeta:
exclude_fields_append = ['id', 'publisher_is_draft', 'publisher_public',
'publisher_state', 'moderator_state',
'placeholders', 'lft', 'rght', 'tree_id',
'parent']
def validateTags(self, tags):
print "page validate tags"
if tags != '':
# split on commas or spaces
# tags_list = [ x.strip() for x in tags.replace(',', ' ').split() ]
tags_list = [ x.strip() for x in tags.split(',') ]
instance_list = []
for tag in tags_list:
instance = Tag.objects.get_or_create(title=tag)[0]
instance_list.append(instance)
return instance_list
else:
return []
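    # Illustrative sketch (not part of the original code): given a hypothetical
    # comma-separated string, validateTags strips each piece and returns the
    # matching Tag objects, creating any that do not exist yet, roughly:
    #
    #     page.validateTags("algebra, geometry")
    #     # -> [<Tag: algebra>, <Tag: geometry>]  (reprs shown are illustrative)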
def get_class(self):
return "Resource"
def get_title(self):
return self.title_set.all()[0].title
def __unicode__(self):
title = self.get_menu_title(fallback=True)
if title is None:
title = u""
return force_text(title)
def __str__(self):
try:
title = self.get_menu_title(fallback=True)
except LanguageError:
try:
title = self.title_set.all()[0]
except IndexError:
title = None
return title
def __repr__(self):
# This is needed to solve the infinite recursion when
# adding new pages.
return object.__repr__(self)
def is_dirty(self, language):
state = self.get_publisher_state(language)
return state == PUBLISHER_STATE_DIRTY or state == PUBLISHER_STATE_PENDING
def get_absolute_url(self, language=None, fallback=True):
if not language:
language = get_language()
with i18n.force_language(language):
if self.is_home:
return reverse('pages-root')
# print "get path:", self.get_path(language, fallback)
# print "get slug:", self.get_slug(language, fallback)
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
def get_public_url(self, language=None, fallback=True):
"""
Returns the URL of the published version of the current page.
Returns empty string if the page is not published.
"""
try:
return self.get_public_object().get_absolute_url(language, fallback)
except:
return ''
def get_draft_url(self, language=None, fallback=True):
"""
Returns the URL of the draft version of the current page.
Returns empty string if the draft page is not available.
"""
try:
return self.get_draft_object().get_absolute_url(language, fallback)
except:
return ''
def move_page(self, target, position='first-child'):
"""
Called from admin interface when page is moved. Should be used on
all the places which are changing page position. Used like an interface
to mptt, but after move is done page_moved signal is fired.
Note for issue #1166: url conflicts are handled by updated
        check_title_slugs; overwrite_url on the moved page doesn't need any check
as it remains the same regardless of the page position in the tree
"""
assert self.publisher_is_draft
# do not mark the page as dirty after page moves
self._publisher_keep_state = True
# readability counts :)
is_inherited_template = self.template == constants.TEMPLATE_INHERITANCE_MAGIC
# make sure move_page does not break when using INHERIT template
# and moving to a top level position
if position in ('left', 'right') and not target.parent and is_inherited_template:
self.template = self.get_template()
if target.publisher_public_id and position == 'right':
public = target.publisher_public
if target.get_root().get_next_sibling().pk == public.get_root().pk:
target = target.publisher_public
else:
logger.warning('tree may need rebuilding: run manage.py cms fix-tree')
if position == 'first-child' or position == 'last-child':
self.parent_id = target.pk
else:
self.parent_id = target.parent_id
self.save()
moved_page = self.move(target, pos=position)
# fire signal
import cms.signals as cms_signals
cms_signals.page_moved.send(sender=Page, instance=moved_page)
# check the slugs
page_utils.check_title_slugs(moved_page)
## Make sure to update the slug and path of the target page.
page_utils.check_title_slugs(target)
if self.publisher_public_id:
# Ensure we have up to date mptt properties
public_page = Page.objects.get(pk=self.publisher_public_id)
# Ensure that the page is in the right position and save it
moved_page._publisher_save_public(public_page)
public_page = public_page.reload()
cms_signals.page_moved.send(sender=Page, instance=public_page)
page_utils.check_title_slugs(public_page)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
def _copy_titles(self, target, language, published):
"""
Copy all the titles to a new page (which must have a pk).
:param target: The page where the new titles should be stored
"""
from .titlemodels import Title
old_titles = dict(target.title_set.filter(language=language).values_list('language', 'pk'))
for title in self.title_set.filter(language=language):
old_pk = title.pk
# If an old title exists, overwrite. Otherwise create new
title.pk = old_titles.pop(title.language, None)
title.page = target
title.publisher_is_draft = target.publisher_is_draft
title.publisher_public_id = old_pk
if published:
title.publisher_state = PUBLISHER_STATE_DEFAULT
else:
title.publisher_state = PUBLISHER_STATE_PENDING
title.published = published
title._publisher_keep_state = True
title.save()
old_title = Title.objects.get(pk=old_pk)
old_title.publisher_public = title
old_title.publisher_state = title.publisher_state
old_title.published = True
old_title._publisher_keep_state = True
old_title.save()
if hasattr(self, 'title_cache'):
self.title_cache[language] = old_title
if old_titles:
Title.objects.filter(id__in=old_titles.values()).delete()
def _copy_contents(self, target, language):
"""
Copy all the plugins to a new page.
:param target: The page where the new content should be stored
"""
# TODO: Make this into a "graceful" copy instead of deleting and overwriting
# copy the placeholders (and plugins on those placeholders!)
from cms.plugin_pool import plugin_pool
plugin_pool.set_plugin_meta()
for plugin in CMSPlugin.objects.filter(placeholder__page=target, language=language).order_by('-depth'):
inst, cls = plugin.get_plugin_instance()
if inst and getattr(inst, 'cmsplugin_ptr_id', False):
inst.cmsplugin_ptr = plugin
inst.cmsplugin_ptr._no_reorder = True
inst.delete(no_mp=True)
else:
plugin._no_reorder = True
plugin.delete(no_mp=True)
new_phs = []
target_phs = target.placeholders.all()
for ph in self.get_placeholders():
plugins = ph.get_plugins_list(language)
found = False
for target_ph in target_phs:
if target_ph.slot == ph.slot:
ph = target_ph
found = True
break
if not found:
ph.pk = None # make a new instance
ph.save()
new_phs.append(ph)
# update the page copy
if plugins:
copy_plugins_to(plugins, ph, no_signals=True)
target.placeholders.add(*new_phs)
def _copy_attributes(self, target, clean=False):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
if not clean:
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.reverse_id = self.reverse_id
target.login_required = self.login_required
target.in_navigation = self.in_navigation
target.soft_root = self.soft_root
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.navigation_extenders = self.navigation_extenders
target.application_urls = self.application_urls
target.application_namespace = self.application_namespace
target.template = self.template
target.site_id = self.site_id
target.xframe_options = self.xframe_options
def copy_page(self, target, site, position='first-child',
copy_permissions=True):
"""
Copy a page [ and all its descendants to a new location ]
Doesn't checks for add page permissions anymore, this is done in PageAdmin.
Note for issue #1166: when copying pages there is no need to check for
conflicting URLs as pages are copied unpublished.
"""
from cms.extensions import extension_pool
if not self.publisher_is_draft:
raise PublicIsUnmodifiable("copy page is not allowed for public pages")
pages = list(self.get_descendants(True).order_by('path'))
site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
if target:
target.old_pk = -1
if position == "first-child" or position == "last-child":
tree = [target]
elif target.parent_id:
tree = [target.parent]
else:
tree = []
else:
tree = []
if tree:
tree[0].old_pk = tree[0].pk
first = True
first_page = None
# loop over all affected pages (self is included in descendants)
for page in pages:
titles = list(page.title_set.all())
# get all current placeholders (->plugins)
placeholders = list(page.get_placeholders())
origin_id = page.id
# create a copy of this page by setting pk = None (=new instance)
page.old_pk = old_pk = page.pk
page.pk = None
page.path = None
page.depth = None
page.numchild = 0
page.publisher_public_id = None
page.is_home = False
page.site = site
# only set reverse_id on standard copy
if page.reverse_id in site_reverse_ids:
page.reverse_id = None
if first:
first = False
if tree:
page.parent = tree[0]
else:
page.parent = None
page.save()
first_page = page
if target:
page = page.move(target, pos=position)
page.old_pk = old_pk
else:
count = 1
found = False
for prnt in tree:
if tree[0].pk == self.pk and page.parent_id == self.pk and count == 1:
count += 1
continue
elif prnt.old_pk == page.parent_id:
page.parent_id = prnt.pk
tree = tree[0:count]
found = True
break
count += 1
if not found:
page.parent = None
page.parent_id = None
page.save()
tree.append(page)
# copy permissions if necessary
if get_cms_setting('PERMISSION') and copy_permissions:
from cms.models.permissionmodels import PagePermission
for permission in PagePermission.objects.filter(page__id=origin_id):
permission.pk = None
permission.page = page
permission.save()
# copy titles of this page
draft_titles = {}
for title in titles:
title.pk = None # setting pk = None creates a new instance
title.page = page
if title.publisher_public_id:
draft_titles[title.publisher_public_id] = title
title.publisher_public = None
# create slug-copy for standard copy
title.published = False
title.slug = page_utils.get_available_slug(title)
title.save()
# copy the placeholders (and plugins on those placeholders!)
for ph in placeholders:
plugins = ph.get_plugins_list()
try:
ph = page.placeholders.get(slot=ph.slot)
except Placeholder.DoesNotExist:
ph.pk = None # make a new instance
ph.save()
page.placeholders.add(ph)
if plugins:
copy_plugins_to(plugins, ph)
extension_pool.copy_extensions(Page.objects.get(pk=origin_id), page)
# invalidate the menu for this site
menu_pool.clear(site_id=site.pk)
return first_page
def save(self, no_signals=False, commit=True, score=False, **kwargs):
"""
Args:
commit: True if model should be really saved
"""
        if self.score is None:
            print "score null"
            self.score = 0
        if self.numWhoRated is None:
            print "numWhoRated null"
            self.numWhoRated = 0
        if self.limit_visibility_in_menu is None:
            self.limit_visibility_in_menu = 1
# delete template cache
if hasattr(self, '_template_cache'):
delattr(self, '_template_cache')
#created = not bool(self.pk)
# Published pages should always have a publication date
# if the page is published we set the publish date if not set yet.
if self.publication_date is None and self.published:
            self.publication_date = now() - timedelta(seconds=5)
if self.reverse_id == "":
self.reverse_id = None
if self.application_namespace == "":
self.application_namespace = None
from cms.utils.permissions import _thread_locals
user = getattr(_thread_locals, "user", None)
if user:
try:
changed_by = force_text(user)
except AttributeError:
# AnonymousUser may not have USERNAME_FIELD
changed_by = "anonymous"
else:
# limit changed_by and created_by to avoid problems with Custom User Model
if len(changed_by) > constants.PAGE_USERNAME_MAX_LENGTH:
changed_by = u'{0}... (id={1})'.format(
changed_by[:constants.PAGE_USERNAME_MAX_LENGTH - 15],
user.pk,
)
self.changed_by = changed_by
else:
self.changed_by = "script"
if not self.created_by:
self.created_by = self.changed_by
if commit:
if not self.depth:
if self.parent_id:
self.parent.add_child(instance=self)
else:
self.add_root(instance=self)
return #add_root and add_child save as well
super(Page, self).save(**kwargs)
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.title_set.all().update(publisher_state=PUBLISHER_STATE_DIRTY)
if keep_state:
delattr(self, '_publisher_keep_state')
return super(Page, self).save_base(*args, **kwargs)
def is_new_dirty(self):
if self.pk:
fields = [
'publication_date', 'publication_end_date', 'in_navigation', 'soft_root', 'reverse_id',
'navigation_extenders', 'template', 'login_required', 'limit_visibility_in_menu'
]
try:
old_page = Page.objects.get(pk=self.pk)
except Page.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_page, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
def is_published(self, language, force_reload=False):
return self.get_title_obj(language, False, force_reload=force_reload).published
def toggle_in_navigation(self, set_to=None):
'''
Toggles (or sets) in_navigation and invalidates the cms page cache
'''
old = self.in_navigation
if set_to in [True, False]:
self.in_navigation = set_to
else:
self.in_navigation = not self.in_navigation
self.save()
#
# If there was a change, invalidate the cms page cache
#
if self.in_navigation != old:
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
return self.in_navigation
def get_publisher_state(self, language, force_reload=False):
try:
return self.get_title_obj(language, False, force_reload=force_reload).publisher_state
except AttributeError:
return None
def set_publisher_state(self, language, state, published=None):
title = self.title_set.get(language=language)
title.publisher_state = state
if published is not None:
title.published = published
title._publisher_keep_state = True
title.save()
if hasattr(self, 'title_cache') and language in self.title_cache:
self.title_cache[language].publisher_state = state
return title
def publish(self, language):
"""Overrides Publisher method, because there may be some descendants, which
are waiting for parent to publish, so publish them if possible.
:returns: True if page was successfully published.
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
# publish, but only if all parents are published!!
published = None
if not self.pk:
self.save()
# be sure we have the newest data including mptt
p = Page.objects.get(pk=self.pk)
self.path = p.path
self.depth = p.depth
self.numchild = p.numchild
if self._publisher_can_publish():
if self.publisher_public_id:
# Ensure we have up to date mptt properties
public_page = Page.objects.get(pk=self.publisher_public_id)
else:
public_page = Page(created_by=self.created_by)
if not self.publication_date:
self.publication_date = now()
self._copy_attributes(public_page)
            # we need to relate this new public copy to its draft page (self)
public_page.publisher_public = self
public_page.publisher_is_draft = False
# Ensure that the page is in the right position and save it
self._publisher_save_public(public_page)
public_page = public_page.reload()
published = public_page.parent_id is None or public_page.parent.is_published(language)
if not public_page.pk:
public_page.save()
# The target page now has a pk, so can be used as a target
self._copy_titles(public_page, language, published)
self._copy_contents(public_page, language)
# trigger home update
public_page.save()
# invalidate the menu for this site
menu_pool.clear(site_id=self.site_id)
self.publisher_public = public_page
published = True
else:
# Nothing left to do
pass
if not published:
self.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=True)
self._publisher_keep_state = True
self.save()
# If we are publishing, this page might have become a "home" which
# would change the path
if self.is_home:
for title in self.title_set.all():
if title.path != '':
title._publisher_keep_state = True
title.save()
if not published:
# was not published, escape
return
# Check if there are some children which are waiting for parents to
# become published.
from cms.models import Title
publish_set = list(self.get_descendants().filter(title_set__published=True,
title_set__language=language).select_related('publisher_public', 'publisher_public__parent').order_by('depth', 'path'))
#prefetch the titles
publish_ids = {}
for page in publish_set:
publish_ids[page.pk] = None
if page.publisher_public_id:
publish_ids[page.publisher_public.pk] = None
titles = Title.objects.filter(page__pk__in=publish_ids.keys(), language=language)
for title in titles:
publish_ids[title.page_id] = title
for page in publish_set:
if page.pk in publish_ids and publish_ids[page.pk]:
page.title_cache = {}
page.title_cache[language] = publish_ids[page.pk]
if page.publisher_public_id:
if not page.publisher_public.parent_id:
page._publisher_save_public(page.publisher_public)
#query and caching optimization
if page.publisher_public.parent_id and not page.publisher_public.parent:
page.publisher_public.parent = Page.objects.get(pk=page.publisher_public.parent_id)
if page.publisher_public.parent_id in publish_ids:
page.publisher_public.parent.title_cache = {}
page.publisher_public.parent.title_cache[language] = publish_ids[page.publisher_public.parent_id]
if page.publisher_public.parent.is_published(language):
if page.publisher_public_id in publish_ids:
public_title = publish_ids[page.publisher_public_id]
else:
public_title = None
draft_title = publish_ids[page.pk]
if public_title and not public_title.published:
public_title._publisher_keep_state = True
public_title.published = True
public_title.publisher_state = PUBLISHER_STATE_DEFAULT
public_title.save()
if draft_title.publisher_state == PUBLISHER_STATE_PENDING:
draft_title.publisher_state = PUBLISHER_STATE_DEFAULT
draft_title._publisher_keep_state = True
draft_title.save()
elif page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
page.publish(language)
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self, language=language)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
return published
def unpublish(self, language):
"""
Removes this page from the public site
:returns: True if this page was successfully unpublished
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
# First, make sure we are in the correct state
title = self.title_set.get(language=language)
public_title = title.publisher_public
title.published = False
title.publisher_state = PUBLISHER_STATE_DIRTY
title.save()
if hasattr(self, 'title_cache'):
self.title_cache[language] = title
public_title.published = False
public_title.save()
public_page = self.publisher_public
public_placeholders = public_page.get_placeholders()
for pl in public_placeholders:
pl.cmsplugin_set.filter(language=language).delete()
public_page.save()
# trigger update home
self.save()
self.mark_descendants_pending(language)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
from cms.signals import post_unpublish
post_unpublish.send(sender=Page, instance=self, language=language)
return True
def mark_descendants_pending(self, language):
assert self.publisher_is_draft
# Go through all children of our public instance
public_page = self.publisher_public
from cms.models import Title
if public_page:
descendants = public_page.get_descendants().filter(title_set__language=language)
for child in descendants:
try:
child.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=False)
except Title.DoesNotExist:
continue
draft = child.publisher_public
if draft and draft.is_published(language) and draft.get_publisher_state(
language) == PUBLISHER_STATE_DEFAULT:
draft.set_publisher_state(language, PUBLISHER_STATE_PENDING)
def revert(self, language):
"""Revert the draft version to the same state as the public version
"""
# Revert can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
if not self.publisher_public:
raise PublicVersionNeeded('A public version of this page is needed')
public = self.publisher_public
public._copy_titles(self, language, public.is_published(language))
public._copy_contents(self, language)
public._copy_attributes(self)
self.title_set.filter(language=language).update(publisher_state=PUBLISHER_STATE_DEFAULT, published=True)
self.revision_id = 0
self._publisher_keep_state = True
self.save()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
if self.languages:
return sorted(self.languages.split(','))
else:
return []
def get_descendants(self, include_self=False):
"""
        :returns: A queryset of the node's descendants in DFS order; the node
            itself is included only when ``include_self`` is True
"""
if include_self:
return self.__class__.get_tree(self).filter(site_id=self.site_id)
else:
return self.__class__.get_tree(self).exclude(pk=self.pk).filter(site_id=self.site_id)
def get_cached_ancestors(self):
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors())
return self.ancestors_ascending
def get_cached_descendants(self):
if not hasattr(self, "_cached_descendants"):
self._cached_descendants = list(self.get_descendants())
return self._cached_descendants
# ## Title object access
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
        If the wanted title doesn't exist, an EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle(language)
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_placeholders(self):
if not hasattr(self, '_placeholder_cache'):
self._placeholder_cache = self.placeholders.all()
return self._placeholder_cache
def _validate_title(self, title):
from cms.models.titlemodels import EmptyTitle
if isinstance(title, EmptyTitle):
return False
if not title.title or not title.slug:
return False
return True
def get_admin_tree_title(self):
from cms.models.titlemodels import EmptyTitle
language = get_language()
if not hasattr(self, 'title_cache'):
self.title_cache = {}
for title in self.title_set.all():
self.title_cache[title.language] = title
if language not in self.title_cache or not self._validate_title(self.title_cache.get(language, EmptyTitle(language))):
fallback_langs = i18n.get_fallback_languages(language)
found = False
for lang in fallback_langs:
if lang in self.title_cache and self._validate_title(self.title_cache.get(lang, EmptyTitle(lang))):
found = True
language = lang
if not found:
language = None
for lang, item in self.title_cache.items():
if not isinstance(item, EmptyTitle):
language = lang
if not language:
return _("Empty")
title = self.title_cache[language]
if title.title:
return title.title
if title.page_title:
return title.page_title
if title.menu_title:
return title.menu_title
return title.slug
def get_changed_date(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get when this page was last updated
"""
return self.changed_date
def get_changed_by(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get user who last changed this page
"""
return self.changed_by
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.application_urls
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
def _get_title_cache(self, language, fallback, version_id, force_reload):
if not language:
language = get_language()
load = False
if not hasattr(self, "title_cache") or force_reload:
load = True
self.title_cache = {}
elif language not in self.title_cache:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
load = True
if load:
from cms.models.titlemodels import Title
if version_id:
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
for rev in revs:
obj = rev.object
if obj.__class__ == Title:
self.title_cache[obj.language] = obj
else:
titles = Title.objects.filter(page=self)
for title in titles:
self.title_cache[title.language] = title
if language in self.title_cache:
return language
else:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
return language
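    # Illustrative note (not part of the original code): with fallback enabled,
    # a call like _get_title_cache('de', True, None, False) returns 'de' when a
    # German title is cached (or loadable), otherwise the first configured
    # fallback language (for example 'en') for which a title exists; titles are
    # fetched from the database at most once unless force_reload is set.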
def get_template(self):
"""
        get the template of this page if defined, or that of the closest parent
        that defines one, or DEFAULT_PAGE_TEMPLATE otherwise
"""
if hasattr(self, '_template_cache'):
return self._template_cache
template = None
if self.template:
if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
template = self.template
else:
try:
template = self.get_ancestors().exclude(
template=constants.TEMPLATE_INHERITANCE_MAGIC).values_list('template', flat=True)[0]
except IndexError:
pass
if not template:
template = get_cms_setting('TEMPLATES')[0][0]
self._template_cache = template
return template
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
        ancestor. Failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
def has_view_permission(self, request, user=None):
if not user:
user = request.user
from cms.utils.permissions import get_any_page_view_permissions, has_global_page_permission
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and user.is_staff)
# inherited and direct view permissions
is_restricted = bool(get_any_page_view_permissions(request, self))
if not is_restricted and can_see_unrestricted:
return True
elif not user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if has_global_page_permission(request, self.site_id, user=user, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if self.get_draft_object().has_generic_permission(request, "view", user=user):
return True
# The user has a normal django permission to view pages globally
opts = self._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return request.user.has_perm(codename)
def has_change_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
and self.has_generic_permission(request, "change"))
def has_delete_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('delete', opts))
and self.has_generic_permission(request, "delete"))
def has_publish_permission(self, request, user=None):
if not user:
user = request.user
if user.is_superuser:
return True
opts = self._meta
return (user.has_perm(opts.app_label + '.' + "publish_page")
and self.has_generic_permission(request, "publish"))
has_moderate_permission = has_publish_permission
def has_advanced_settings_permission(self, request, user=None):
return self.has_generic_permission(request, "advanced_settings", user)
def has_change_permissions_permission(self, request, user=None):
"""
Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions", user)
def has_add_permission(self, request, user=None):
"""
Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add", user)
def has_move_page_permission(self, request, user=None):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page", user)
def has_generic_permission(self, request, perm_type, user=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if not user:
user = request.user
att_name = "permission_%s_cache" % perm_type
if (not hasattr(self, "permission_user_cache")
or not hasattr(self, att_name)
or user.pk != self.permission_user_cache.pk):
from cms.utils.permissions import has_generic_permission
self.permission_user_cache = user
setattr(self, att_name, has_generic_permission(
self.pk, user, perm_type, self.site_id))
if getattr(self, att_name):
self.permission_edit_cache = True
return getattr(self, att_name)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing
page-scope files. This allows multiple pages to contain files with
identical names without namespace issues. Plugins such as Picture can
use this method to initialise the 'upload_to' parameter for File-based
fields. For example:
image = models.ImageField(
_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.pk, filename)
def reload(self):
"""
Reload a page from the database
"""
return Page.objects.get(pk=self.pk)
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
return (self.publisher_is_draft and qs.drafts() or qs.public().published())
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise PublisherCantPublish
return True
def get_previous_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__lt=self.path, **filters).reverse()[0]
except IndexError:
return None
def get_next_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__gt=self.path, **filters)[0]
except IndexError:
return None
def _publisher_save_public(self, obj):
"""Mptt specific stuff before the object can be saved, overrides
original publisher method.
Args:
obj - public variant of `self` to be saved.
"""
if self.parent_id and self.parent.publisher_public_id:
assert self.parent_id == self.parent.pk
public_parent = Page.objects.get(pk=self.parent.publisher_public_id)
else:
public_parent = None
filters = dict(publisher_public__isnull=False)
if public_parent:
filters['publisher_public__parent__in'] = [public_parent]
else:
filters['publisher_public__parent__isnull'] = True
prev_sibling = self.get_previous_filtered_sibling(**filters)
public_prev_sib = (prev_sibling.publisher_public if prev_sibling else None)
if not self.publisher_public_id: # first time published
# is there anybody on left side?
if not self.parent_id:
obj.parent_id = None
self.add_sibling(pos='right', instance=obj)
else:
if public_prev_sib:
obj.parent_id = public_prev_sib.parent_id
public_prev_sib.add_sibling(pos='right', instance=obj)
else:
if public_parent:
obj.parent_id = public_parent.pk
obj.parent = public_parent
obj = obj.add_root(instance=obj)
obj = obj.move(target=public_parent, pos='first-child')
else:
# check if object was moved / structural tree change
prev_public_sibling = obj.get_previous_filtered_sibling()
if self.depth != obj.depth or \
public_parent != obj.parent or \
public_prev_sib != prev_public_sibling:
if public_prev_sib:
obj.parent_id = public_prev_sib.parent_id
obj.save()
obj = obj.move(public_prev_sib, pos="right")
elif public_parent:
# move as a first child to parent
obj.parent_id = public_parent.pk
obj.save()
obj = obj.move(target=public_parent, pos='first-child')
else:
# it is a move from the right side or just save
next_sibling = self.get_next_filtered_sibling(**filters)
if next_sibling and next_sibling.publisher_public_id:
obj.parent_id = next_sibling.parent_id
obj.save()
obj = obj.move(next_sibling.publisher_public, pos="left")
else:
obj.save()
def move(self, target, pos=None):
super(Page, self).move(target, pos)
return self.reload()
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.placeholder import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
if placeholder_name not in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
return found
def get_xframe_options(self):
""" Finds X_FRAME_OPTION from tree if inherited """
xframe_options = get_xframe_cache(self)
if xframe_options is None:
ancestors = self.get_ancestors()
# Ignore those pages which just inherit their value
ancestors = ancestors.exclude(xframe_options=self.X_FRAME_OPTIONS_INHERIT)
# Now just give me the clickjacking setting (not anything else)
xframe_options = list(ancestors.values_list('xframe_options', flat=True))
if self.xframe_options != self.X_FRAME_OPTIONS_INHERIT:
xframe_options.append(self.xframe_options)
if len(xframe_options) <= 0:
# No ancestors were found
return None
xframe_options = xframe_options[0]
set_xframe_cache(self, xframe_options)
return xframe_options
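    # The undo/redo/_apply_revision helpers below integrate the page with
    # django-reversion: they look up the revision adjacent to the one currently
    # attached to the page, restore its data, and report whether the restored
    # titles produced slug collisions.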
def undo(self):
"""
Revert the current page to the previous revision
"""
import reversion
# Get current reversion version by matching the reversion_id for the page
versions = reversion.get_for_object(self)
if self.revision_id:
current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
else:
try:
current_version = versions[0]
except IndexError as e:
e.message = "no current revision found"
raise
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__lt=current_revision.pk)[0]
except IndexError as e:
e.message = "no previous revision found"
raise
previous_revision = previous_version.revision
clean = self._apply_revision(previous_revision)
return Page.objects.get(pk=self.pk), clean
def redo(self):
"""
        Advance the current page to the next revision
"""
import reversion
# Get current reversion version by matching the reversion_id for the page
versions = reversion.get_for_object(self)
if self.revision_id:
current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
else:
try:
current_version = versions[0]
except IndexError as e:
e.message = "no current revision found"
raise
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__gt=current_revision.pk).order_by('pk')[0]
except IndexError as e:
e.message = "no next revision found"
raise
next_revision = previous_version.revision
clean = self._apply_revision(next_revision)
return Page.objects.get(pk=self.pk), clean
def _apply_revision(self, target_revision):
"""
Revert to a specific revision
"""
from cms.utils.page_resolver import is_valid_url
# Get current titles
old_titles = list(self.title_set.all())
# remove existing plugins / placeholders in the current page version
placeholder_ids = self.placeholders.all().values_list('pk', flat=True)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-depth')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
self.placeholders.all().delete()
# populate the page status data from the target version
target_revision.revert(True)
rev_page = get_object_or_404(Page, pk=self.pk)
rev_page.revision_id = target_revision.pk
rev_page.publisher_public_id = self.publisher_public_id
rev_page.save()
# cleanup placeholders
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
            if new_ph.slot not in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
# check reverted titles for slug collisions
new_titles = rev_page.title_set.all()
clean = True
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
clean = False
return clean
def _reversion():
exclude_fields = [
'publisher_is_draft',
'publisher_public',
'publisher_state',
]
reversion_register(
Page,
follow=["title_set", "placeholders", "pagepermission_set"],
exclude_fields=exclude_fields
)
_reversion()
class AddResourceToGalleryForm(forms.Form):
    # @summary:
    # Form to add another user's project to your own gallery.
    # The "width: 40%" style is a workaround: the select widget did not grow when
    # gallery names became longer, so its width is fixed explicitly.
    gallery = forms.ModelMultipleChoiceField(
        Gallery, None, required=False,
        widget=forms.SelectMultiple(attrs={"style": "width: 40%"}),
    )
def __init__(self, user, resource, *args, **kwargs):
super(AddResourceToGalleryForm, self).__init__(*args, **kwargs)
self.user = user
#self.owner = owner
self.resource = resource
self.fields["gallery"].queryset = self.user.gallery_set.exclude(default=True).exclude(shared=True).exclude(gtype=1).exclude(gtype=2)
def save(self):
galleries = self.cleaned_data["gallery"]
print "galleries = ", galleries
        if len(galleries) > 0:
for g in galleries:
self.resource.galleries.add(g)
# Resource privacy settings don't depend on gallery, so we don't need to deal with that here
self.resource.save()
return self.resource | bsd-3-clause | -2,983,917,024,553,889,000 | 40.498316 | 171 | 0.59162 | false |
huggingface/pytorch-transformers | src/transformers/models/roberta/modeling_roberta.py | 1 | 65213 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
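    # Note: since padding cannot be detected from embeddings, the ids above are
    # simply sequential starting at padding_idx + 1. For example, with
    # padding_idx=1 and a sequence length of 4 every row gets [2, 3, 4, 5].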
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
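    # transpose_for_scores reshapes (batch, seq_len, all_head_size) into
    # (batch, num_heads, seq_len, head_size). For a roberta-base sized model
    # (12 heads, hidden size 768) a (8, 128, 768) tensor becomes (8, 12, 128, 64).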
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
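    # apply_chunking_to_forward splits the sequence dimension into chunks of
    # config.chunk_size_feed_forward tokens and runs feed_forward_chunk on each
    # slice, trading speed for a smaller activation memory footprint; with the
    # default chunk size of 0 the whole sequence is processed in a single call.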
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
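    # Illustrative usage (sketch, not part of the original file):
    #     from transformers import RobertaTokenizer, RobertaModel
    #     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    #     model = RobertaModel.from_pretrained("roberta-base")
    #     inputs = tokenizer("Hello world", return_tensors="pt")
    #     outputs = model(**inputs)
    #     outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)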
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
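    # _reorder_cache is called during beam search: beam_idx selects, for every
    # layer, the cached key/value states belonging to the beams kept at the
    # current step, so past_key_values stays aligned with the reordered hypotheses.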
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
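    # Illustrative masked-LM usage (sketch, not part of the original file):
    #     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    #     model = RobertaForMaskedLM.from_pretrained("roberta-base")
    #     inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
    #     logits = model(**inputs).logits
    #     mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    #     predicted_token_id = logits[0, mask_pos].argmax(-1)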
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
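    # Note on the loss selection above: when config.problem_type is unset it is
    # inferred from the labels -- num_labels == 1 is treated as regression
    # (MSELoss), integer labels with num_labels > 1 as single-label classification
    # (CrossEntropyLoss), and anything else as multi-label classification
    # (BCEWithLogitsLoss).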
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
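    # Multiple-choice inputs have shape (batch_size, num_choices, seq_len); the
    # forward pass above flattens them to (batch_size * num_choices, seq_len),
    # scores each choice with a single linear head, and reshapes the logits back
    # to (batch_size, num_choices), e.g. 4 questions with 5 candidate endings
    # yield logits of shape (4, 5).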
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
    Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting across devices can add an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
        input_ids (torch.Tensor): token ids, with padding positions equal to `padding_idx`.
        padding_idx (int): id of the padding token.
        past_key_values_length (int): number of previously cached positions to offset by.
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
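# --------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# a tiny worked example of the position-id computation above. It only assumes
# `torch`, which the rest of this file already imports.
# --------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    _demo_ids = torch.tensor([[5, 6, 1, 1]])  # two real tokens, two pads, padding_idx == 1
    # mask -> [[1, 1, 0, 0]], cumsum -> [[1, 2, 2, 2]], masked -> [[1, 2, 0, 0]],
    # plus padding_idx -> [[2, 3, 1, 1]]: pads stay at padding_idx, real tokens count up from it.
    print(create_position_ids_from_input_ids(_demo_ids, padding_idx=1))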
| apache-2.0 | 8,092,571,453,882,208,000 | 41.875082 | 213 | 0.633601 | false |
alimanfoo/numcodecs | numcodecs/bz2.py | 1 | 1266 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import bz2 as _bz2
from numcodecs.abc import Codec
from numcodecs.compat import ndarray_copy, ensure_contiguous_ndarray
class BZ2(Codec):
"""Codec providing compression using bzip2 via the Python standard library.
Parameters
----------
level : int
Compression level.
"""
codec_id = 'bz2'
def __init__(self, level=1):
self.level = level
def encode(self, buf):
# normalise input
buf = ensure_contiguous_ndarray(buf)
# do compression
return _bz2.compress(buf, self.level)
# noinspection PyMethodMayBeStatic
def decode(self, buf, out=None):
# normalise inputs
buf = ensure_contiguous_ndarray(buf)
if out is not None:
out = ensure_contiguous_ndarray(out)
# N.B., bz2 cannot handle ndarray directly because of truth testing issues
buf = memoryview(buf)
# do decompression
dec = _bz2.decompress(buf)
# handle destination - Python standard library bz2 module does not
# support direct decompression into buffer, so we have to copy into
# out if given
return ndarray_copy(dec, out)
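# Minimal usage sketch (illustrative addition, not part of the upstream module):
# round-trip a small numpy array through the codec.
if __name__ == '__main__':  # pragma: no cover
    import numpy as np
    codec = BZ2(level=5)
    original = np.arange(100, dtype='i4')
    compressed = codec.encode(original)
    restored = np.frombuffer(codec.decode(compressed), dtype='i4')
    assert (restored == original).all()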
| mit | -6,111,709,686,307,507,000 | 24.32 | 82 | 0.634281 | false |
tvm1/yamlif | yamlif.py | 1 | 29488 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module loads a YAML configuration, presents a UI to the user and allows them
to set and save values to another YAML file.
"""
import sys
import os
import curses
import curses.textpad
import textwrap
import re
from editor import Editor
try:
import yaml
except ImportError:
print(
"This application requires PYYAML module to work correctly. See: "
"http://pyyaml.org")
quit(1)
def init_curses():
"""
This function sets up basic curses environment.
:return: Screen object.
"""
stdscr = curses.initscr()
maxy, maxx = stdscr.getmaxyx()
if maxy < 24 or maxx < 80:
print("Sorry, but at least 80x24 is needed.")
clean_curses()
quit(1)
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_RED, curses.COLOR_WHITE)
curses.init_pair(7, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.mousemask(1)
stdscr.clear()
stdscr.border()
stdscr.refresh()
stdscr.keypad(1)
return stdscr
def clean_curses():
"""
Cleans up curses after quit.
:return: None.
"""
curses.curs_set(1)
curses.nocbreak()
curses.endwin()
def draw_menu(screen, yamlobj, menu_titles, mtitle, msel):
"""
This function draws a menu with given title and handles the keyboard input.
:param screen: Screen object.
:param yamlobj: Python object ( nested list / dicts ).
:param menu_titles: List of menu titles.
:param mtitle: Title of currently active menu.
:param msel: Starting position of cursor in menu.
:return: Index of selected item.
"""
maxy, maxx = screen.getmaxyx()
screen.clear()
screen.border()
screen.refresh()
# calculate minimal menu height
if len(menu_titles) < maxy - 4:
size_y = len(menu_titles) + 2
else:
size_y = maxy - 4
# calculate minimal menu width to fit content and title
size_x = max(len(max(menu_titles, key=len)), len(mtitle)) + 2
# some titles are too large
if size_x > maxx - 4:
size_x = maxx - 4
# trim title if too long to fit
if len(mtitle) > size_x - 2:
mtitle = mtitle[0:size_x - 2]
# calculate position, so the menu is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
screen.addstr(0, 2, 'ENTER/SPACE: Enter/edit | ESC/BACKSP: Exit | R: Run '
'commands | Q: Quit ', curses.color_pair(1))
# create actual window and border
win = curses.newwin(size_y, size_x, pos_y, pos_x)
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
# draw title
win.addstr(0, int(size_x / 2 - len(mtitle) / 2), mtitle)
# main loop that handles keyboard input and redrawing
while True:
lpos = 0
# we scrolled somewhere down
if msel > size_y - 3:
lpos = msel - size_y + 3
offset = lpos
# print the menu content
for i in range(1, size_y - 1):
mitem = menu_titles[lpos].ljust(size_x - 2)
if len(mitem) > size_x - 2:
mitem = mitem[0:size_x - 5] + "..."
if msel + 1 == i + offset:
win.addstr(i, 1, str(mitem), curses.color_pair(1))
else:
win.addstr(i, 1, str(mitem))
lpos += 1
win.refresh()
ckey = screen.getch()
# read keys and redraw, return item index on ENTER, return -1 on exit
if ckey == curses.KEY_UP:
if msel > 0:
msel -= 1
elif ckey == curses.KEY_DOWN:
if msel < len(menu_titles) - 1:
msel += 1
elif ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
del win
return msel
elif ckey == ord("R") or ckey == ord("r"):
run_commands(yamlobj)
elif ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
elif ckey == 27 or ckey == curses.KEY_BACKSPACE:
return -1
win.refresh()
del win
screen.touchwin()
screen.refresh()
def draw_page(screen, yamlobj, fn, obj, pid, ptitle, msel):
"""
This functions draws page and its content.
:param screen: Curses screen object.
:param yamlobj: Whole python object ( nested list / dicts ).
:param fn: Filename of input file.
:param obj: Python object ( nested list / dicts ).
:param pid: Page id.
:param ptitle: Page title.
:param msel: Currently Highlighted item.
:return: Position of currently selected page element.
"""
maxy, maxx = screen.getmaxyx()
# something to begin with, fit at least page title
size_y = 2
size_x = len(ptitle) + 2
newelem = None
# determine page height and width
for i, elem in enumerate(obj):
if elem.get('value') is None:
value_length = 0
else:
value_length = len(str(elem.get('value')))
if 'checkbox' in elem:
size_y += 1
width = len(elem.get('title')) + 6
newelem = 'checkbox'
elif 'radio' in elem:
size_y += 1
width = len(elem.get('title')) + 6
newelem = 'radio'
elif 'textbox' in elem:
size_y += 1
width = len(elem.get('title')) + value_length + 4
if width > maxx:
width = maxx
newelem = 'textbox'
elif 'textarea' in elem:
size_y += 2
width = int(maxx / 2)
newelem = 'textarea'
elif 'textdisplay' in elem:
# wrapping is handled here
if len(elem.get('value')) > int(maxx / 2):
width = int(maxx / 2)
wrapped = textwrap.wrap(elem.get('value'), int(maxx / 2) - 2)
# if it's too long, we will truncate it to five lines
if len(wrapped) > 4:
size_y += 5
else:
size_y += len(wrapped)
else:
# it's only one line
width = len(elem.get('value')) + 2
size_y += 1
newelem = 'textdisplay'
# element has changed, add blank line
if elem != obj[-1]:
if newelem not in obj[i + 1]:
size_y += 1
# current element requires more space, allocate it
if width > size_x:
size_x = width
# bail out if page is too large (for now)
if size_y > maxy:
draw_popup(screen, 'Page is way too large to view.')
return -1
# page would be too wide
if size_x > maxx - 4:
size_x = maxx - 4
# calculate position, so the page is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window and border
win = curses.newwin(size_y, size_x, pos_y, pos_x)
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
# draw title
win.addstr(0, int(size_x / 2 - len(ptitle) / 2), ptitle)
# some help too
if size_x > 7:
win.addstr(size_y - 1, 2, 'S: Save', curses.color_pair(1))
newelem = None
offset = 1
# main loop that draws page
for i, elem in enumerate(obj):
# color for currently selected item
if i == msel:
cl = curses.color_pair(1)
else:
cl = curses.color_pair(0)
# this actually draws what is visible
if 'checkbox' in elem:
newelem = 'checkbox'
if elem.get('value', False) is True:
win.addstr(i + offset, 1,
'[*] ' + elem.get('title', '')[0:size_x - 6], cl)
else:
win.addstr(i + offset, 1,
'[ ] ' + elem.get('title', '')[0:size_x - 6], cl)
elif 'radio' in elem:
newelem = 'radio'
if elem.get('value', False) is True:
win.addstr(i + offset, 1,
'(*) ' + elem.get('title', '')[0:size_x - 6], cl)
else:
win.addstr(i + offset, 1,
'( ) ' + elem.get('title', '')[0:size_x - 6], cl)
elif 'textbox' in elem:
newelem = 'textbox'
# value and title might be too long
if len(str(elem.get('title'))) + len(
str(elem.get('value'))) + 4 <= size_x:
win.addstr(i + offset, 1, elem.get('title') + ": " + str(
elem.get('value', '')), cl)
else:
# so truncate it to fit the screen
spc = size_x - len(str(elem.get('title'))) - 4
# title is really long, truncate it
if spc <= 0:
tmptitle = elem.get('title')[0:int(size_x / 2)] + "..."
spc = size_x - len(tmptitle) - 4
else:
tmptitle = elem.get('title')
ln = str(elem.get('value', ' '))[0:spc]
ln = re.sub('...............$', '... [truncated]', ln)
win.addstr(i + offset, 1, tmptitle + ": " + str(ln), cl)
elif 'textarea' in elem:
newelem = 'textarea'
# title might be too long
tmptitle = str(elem.get('title', ''))[0:int(size_x / 2)]
# check if there's value at all, otherwise leave space blank
if len(elem.get('value', '')) == 0:
win.addstr(i + offset, 1, tmptitle + ": ", cl)
offset += 1
elif 'value' in elem:
textlist = elem.get('value', '').rstrip().split('\n')
for j, ln in enumerate(textlist):
ln = ln[0:size_x - 4 - len(tmptitle)]
if j == 0:
win.addstr(i + offset, 1, tmptitle + ": " + str(ln),
cl)
offset += 1
if j == 1:
if len(textlist) > 2:
ln = re.sub('.............$', '... [wrapped]', ln)
win.addstr(i + offset, 1 + len(tmptitle) + 2, str(ln),
cl)
break
elif 'textdisplay' in elem:
newelem = 'textdisplay'
# wrapping is handled here
textlist = textwrap.wrap(elem.get('value', ''), size_x - 2)
# print whatever is in content of textdisplay
for j, ln in enumerate(textlist):
# if it's too many lines, truncate
if j == 4 and len(textlist) > 4:
ln = re.sub('.............$', '... [wrapped]', ln)
win.addstr(i + offset, 1, str(ln), cl)
break
# print current line
win.addstr(i + offset, 1, str(ln), cl)
if j + 1 < len(textlist):
offset += 1
# element has changed, add blank line
if elem != obj[-1]:
if newelem not in obj[i + 1]:
offset += 1
win.attroff(curses.A_BOLD)
win.noutrefresh()
curses.doupdate()
ckey = screen.getch()
# read keys and update, edit value on ENTER, return -1 if leaving
if ckey == curses.KEY_UP:
if msel == 0:
msel = len(obj) - 1
else:
msel -= 1
elif ckey == curses.KEY_DOWN:
if msel == len(obj) - 1:
msel = 0
else:
msel += 1
elif ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
set_value(obj, msel, screen)
elif ckey == ord("s") or ckey == ord("S"):
exval, log = save_yaml(fn, yamlobj, pid, obj)
# print on_save log if available
if len(log) != 0:
draw_popup(screen, log)
# give user some feedback
if exval == 0:
draw_popup(screen, 'Data saved.')
else:
draw_popup(screen, 'Save failed.')
elif ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
elif ckey == 27 or ckey == curses.KEY_BACKSPACE:
msel = -1
del win
return msel
def draw_popup(screen, text='empty'):
"""
Generic function that draws a popup window in UI.
:param screen: Curses screen object.
:param text: Text to be displayed.
:return: None.
"""
maxy, maxx = screen.getmaxyx()
wrapped = []
# determine window size
if len(text) > maxx - 2:
# popup needs more than one line
size_x = int(maxx / 1.5) + 2
wrapped = textwrap.wrap(text, int(maxx / 1.5))
# try some reasonable window heights
if len(wrapped) + 2 > int(maxy / 1.5):
size_y = int(maxy / 1.5)
else:
size_y = len(wrapped) + 2
else:
# popup fits on one line
size_x = len(text) + 2
size_y = 3
# calculate position, so the popup is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window
win = curses.newwin(size_y, size_x, pos_y, pos_x)
start_pos = 0
while True:
# clear and redraw
win.clear()
# print text into window
if len(wrapped) > 0:
j = 0
for i in range(1, size_y - 1):
win.addstr(i, 1, str(wrapped[start_pos + j]))
j += 1
else:
win.addstr(1, 1, str(text))
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
if size_x >= 80:
win.addstr(0, 2,
' ARROWS: Up/down | ENTER/SPACE/BACKSPACE/ESC: Exit '
'view | Q: Quit ', curses.color_pair(1))
# display arrows, if scrollable
if start_pos != 0:
win.addstr(0, size_x - 7, '↑↑↑↑↑', curses.color_pair(1))
if start_pos + size_y - 2 < len(wrapped):
win.addstr(size_y - 1, size_x - 7, '↓↓↓↓↓', curses.color_pair(1))
win.refresh()
ckey = screen.getch()
# read keys scroll and redraw, handle exit
if ckey == curses.KEY_UP:
if start_pos > 0:
start_pos -= 1
if ckey == curses.KEY_DOWN:
if start_pos + size_y - 2 < len(wrapped):
start_pos += 1
if ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
break
if ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
if ckey == 27 or ckey == curses.KEY_BACKSPACE:
break
del win
screen.touchwin()
screen.refresh()
def draw_inputbox(screen, text='empty'):
"""
    Generic function that draws an inputbox in the UI.
:param screen: Curses screen object.
:param text: Text to be displayed
:return: value
"""
maxy, maxx = screen.getmaxyx()
if len(str(text)) > 64:
        draw_popup(screen, 'Field value is too long to edit (max 64 characters).')
return None
# calculate position, so the inputbox is centered
size_x = int(67)
pos_y = int(maxy / 2 - 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window and border
win = curses.newwin(3, size_x, pos_y, pos_x)
win.border()
win.addstr(0, 1, 'Please insert value (EMACS keys available):',
curses.color_pair(1))
win.refresh()
# derived subwindow
swin = win.derwin(1, size_x - 2, 1, 1)
curses.cbreak()
curses.curs_set(1)
screen.keypad(1)
# draw textpad and read value
tpad = curses.textpad.Textbox(swin)
swin.addstr(0, 0, str(text))
value = tpad.edit()
curses.curs_set(0)
del swin
del win
screen.touchwin()
screen.refresh()
return value.rstrip()
def draw_inputarea(screen, text='empty'):
"""
    Generic function that draws an 'editor' in the UI.
:param screen: Curses screen object.
:param text: Text to be displayed
:return: value
"""
maxy, maxx = screen.getmaxyx()
pos_y = int(4)
pos_x = int(4)
win = curses.newwin(maxy - 8, maxx - 8, pos_y, pos_x)
win.border()
win.refresh()
swin = win.derwin(maxy - 10, maxx - 10, 1, 1)
curses.cbreak()
curses.curs_set(1)
screen.keypad(1)
win.addstr(0, 1, 'EMACS-like keys available, CTRL-G to exit')
win.refresh()
tpad = curses.textpad.Textbox(swin)
swin.addstr(0, 0, str(text))
value = tpad.edit()
curses.curs_set(0)
del swin
del win
screen.touchwin()
screen.refresh()
return value.rstrip()
def open_yaml(yfile):
"""
This function opens file with YAML configuration.
:param yfile: Name of file.
:return: Python object ( nested lists / dicts ).
"""
with open(yfile, 'r') as stream:
yamlobj = yaml.load(stream)
return yamlobj
def load_service_functions(fn, globs):
"""
This function imports service functions if they are present.
:param fn: Filename of opened file.
:param globs: Caller's globals().
:return: 0 if success, else 1.
"""
    fn = re.sub(r'\.yaml$', '.py', fn)
if os.path.isfile(fn):
exec(compile(open(fn).read(), fn, 'exec'), globs)
return 0
else:
return 1
def run_commands(yamlobj):
"""
Runs commands stored in YAML.
:param yamlobj: Python object ( nested list / dicts ).
:return: None.
"""
# reset screen
clean_curses()
# run commands
commands = (yamlobj.get('commands'))
os.system(commands)
input("Press ENTER to continue ... ")
# reinit stuff back
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.mousemask(1)
def save_yaml(fn, yamlobj, pid, obj):
"""
This function saves values to YAML file.
:param fn: Filename of input file.
:param yamlobj: Whole Python object ( nested lists / dicts ).
:param pid: Page ID.
:param obj: Python object ( nested lists / dicts ).
:return: Exit status.
"""
newobj = {}
if len(obj) == 0:
return 1
# make up new name for _data file
if re.match('^.*\.yaml$', fn):
# just so the source is *never* overwritten
fn += '_'
fn = re.sub('\.yaml_$', '_data.yaml', fn)
else:
# filename was odd, so we just use something
fn += '.data'
# save only values/items that we want
for elem in obj:
if 'checkbox' in elem:
nkey = elem['checkbox']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'radio' in elem:
nkey = elem['radio']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'textbox' in elem:
nkey = elem['textbox']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'textarea' in elem:
nkey = elem['textarea']
nval = elem.get('value', "")
newobj[nkey] = nval
# fetch save function, if available
save_func = get_save_function(yamlobj, pid)
log = ""
# if the function is available, call it and pass the dict
if save_func in globals():
save_func += '(newobj)'
log = eval(save_func)
# reverse mapping back to UI
for key, val in newobj.items():
for elem in obj:
if key in elem.values():
elem['value'] = val
oldsave = {}
# if there's old save, load it
if os.path.isfile(fn):
with open(fn, 'r') as rstream:
oldsave = yaml.load(rstream)
# save file was empty for some reason
if oldsave is None:
oldsave = {}
oldsave[pid] = newobj
# save the modified object
with open(fn, 'w') as wstream:
yaml.dump(oldsave, wstream, default_flow_style=False)
return 0, log
def get_menulist(yamlobj, root=False):
"""
This function parses objects returned by get_menucontent() and prepares
input for draw_menu().
:param yamlobj: Python object ( nested list / dicts ).
:param root: True only if parsing YAML hierarchy from top.
:return: menu_ids - list of IDs, menu_titles - list of menu titles.
"""
menu_ids = []
menu_titles = []
if root is True:
for obj in yamlobj['content']:
if 'menu' in obj:
menu_ids.append(obj["menu"])
menu_titles.append(obj["title"])
elif 'page' in obj:
menu_ids.append(obj["page"])
menu_titles.append(obj["title"])
else:
for obj in yamlobj:
if 'menu' in obj:
menu_ids.append(obj["menu"])
menu_titles.append(obj["title"])
elif 'page' in obj:
menu_ids.append(obj["page"])
menu_titles.append(obj["title"])
return menu_ids, menu_titles
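# Illustrative example (assumed input shape, not taken from a real config file):
# get_menulist([{'page': 'p1', 'title': 'First page'},
#               {'menu': 'm1', 'title': 'Sub menu'}])
# returns (['p1', 'm1'], ['First page', 'Sub menu']).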
def get_nodetype(obj, objid):
"""
    Returns the key of the object with given ID (e.g., menu, page, etc.).
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Key of given ID.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = key
elif isinstance(val, list) or isinstance(val, dict):
retval = get_nodetype(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_nodetype(elem, objid)
if retval is not None:
result = retval
return result
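# Illustrative example (assumed input, not from a real config file): given
# obj = {'menu': 'root', 'title': 'Top', 'content': [{'page': 'p1', 'title': 'P'}]},
# get_nodetype(obj, 'p1') returns 'page' and get_nodetype(obj, 'root') returns 'menu'.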
def get_title(obj, objid):
"""
Returns title value of the object with given ID.
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Title of given ID.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj['title']
elif isinstance(val, list) or isinstance(val, dict):
retval = get_title(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_title(elem, objid)
if retval is not None:
result = retval
return result
def get_save_function(obj, objid):
"""
    Returns the on_save function name of the object with given ID.
    :param obj: Structure containing YAML object ( nested lists / dicts ).
    :param objid: YAML ID of given page.
    :return: Name of the on_save function.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj.get('on_save')
elif isinstance(val, list) or isinstance(val, dict):
retval = get_save_function(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_save_function(elem, objid)
if retval is not None:
result = retval
return result
def get_objectcontent(obj, objid):
"""
Returns list / dictionary structure that is content of given YAML ID.
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Nested list / dictionary.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj['content']
elif isinstance(val, list) or isinstance(val, dict):
retval = get_objectcontent(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_objectcontent(elem, objid)
if retval is not None:
result = retval
return result
def set_value(obj, msel, screen):
"""
Changes value of given YAML object.
:param obj: Structure containing Python dictionary.
:param msel: Object index to modify.
:param screen: Screen object.
:return: None.
"""
# editor needs this
maxy, maxx = screen.getmaxyx()
# determine what object we try to change and act accordingly
if 'checkbox' in obj[msel]:
if obj[msel].get('value', False) is False:
obj[msel]['value'] = True
else:
obj[msel]['value'] = False
elif 'radio' in obj[msel]:
obj[msel]['value'] = True
i = msel + 1
# disable other adjacent radioboxes
while i < len(obj):
if 'radio' in obj[i]:
obj[i]['value'] = False
i += 1
else:
break
i = msel - 1
while i >= 0:
if 'radio' in obj[i]:
obj[i]['value'] = False
i -= 1
else:
break
elif 'textbox' in obj[msel]:
# if there's value, edit it
if 'value' in obj[msel]:
newval = draw_inputbox(screen, obj[msel]['value'])
obj[msel]['value'] = str(newval)
else:
newval = draw_inputbox(screen, '')
obj[msel]['value'] = str(newval)
elif 'textarea' in obj[msel]:
# if there's value, edit it
if 'value' in obj[msel]:
newval = Editor(screen,
title='Editing ' + obj[msel]['title'] + " ",
inittext=obj[msel]['value'], box=True,
win_size=(maxy - 6, maxx - 6),
win_location=(3, 3))()
obj[msel]['value'] = newval
else:
newval = Editor(screen,
title='Editing ' + obj[msel]['title'] + " ",
box=True,
win_size=(maxy - 6, maxx - 6),
win_location=(3, 3))()
obj[msel]['value'] = newval
# reset to previous state
curses.curs_set(0)
screen.clear()
screen.border()
screen.addstr(0, 2, 'ENTER/SPACE: Enter/edit | ESC/BACKSP: Exit | R: '
'Run commands | Q: Quit ', curses.color_pair(1))
screen.refresh()
elif 'textdisplay' in obj[msel]:
# open scrollable window
draw_popup(screen, obj[msel]['value'])
def main():
"""
Contains main loop that loads YAML, draws menu and decides what to do
with selected items.
:return: Exit value
"""
# fix the curses ESCAPE key delay
os.environ['ESCDELAY'] = '0'
if len(sys.argv) < 2:
print("Please provide a file!")
quit(1)
# start with first item selected
msel = 0
fn = sys.argv[1]
# open file & set up screen
yamlobj = open_yaml(fn)
# try to load service functions
load_service_functions(fn, globals())
# initialize curses
stdscr = init_curses()
# top menu defaults
mhist = []
mid = yamlobj['menu']
mtitle = yamlobj['title']
mhist.append(mid)
# get content for the first menu
menu_ids, menu_titles = get_menulist(yamlobj, True)
# main loop that draws menu and allows to traverse & open menu items
while True:
msel = draw_menu(stdscr, yamlobj, menu_titles, mtitle, msel)
# leaving menu and going back to top
if msel == -1:
if len(mhist) > 1:
mhist.pop()
mid = mhist.pop()
else:
msel = 0
continue
else:
mid = menu_ids[msel]
eltype = get_nodetype(yamlobj, mid)
# we entered menu, append it to history
if eltype == 'menu':
mhist.append(mid)
# determine what we try to open and act accordingly
if eltype == 'page':
psel = 0
# don't leave page unless ESC is pressed
while psel != -1:
psel = draw_page(stdscr, yamlobj, fn,
get_objectcontent(yamlobj, mid), mid,
get_title(yamlobj, mid),
psel)
elif eltype == 'menu':
# entering new menu, get title and content
mtitle = get_title(yamlobj, mid)
menu_ids, menu_titles = get_menulist(
get_objectcontent(yamlobj, mid))
msel = 0
# quit
clean_curses()
exit(0)
if __name__ == '__main__':
main()
| mit | 326,057,240,882,830,200 | 26.905303 | 79 | 0.518087 | false |
CaptainDesAstres/Frames-Animated-By-Curve | multi_track/TracksList.py | 1 | 7394 | import bpy
from functions import *
class Track(bpy.types.PropertyGroup):
'''object use to be listed as track in tracks list'''
def set_end_frame(self, context):
'''check that start and end frame are valid when
changing end frame settings'''
size = self.get(True).curve_to_frame.size
# check end isn't over clip size
if self.end > size:
self.end = size
# check start isn't over end
if self.start >= self.end:
if self.end > 1:
self['start'] = self.end - 1
else:
self['start'] = 1
self['end'] = 2
def set_start_frame(self, context):
'''check that start and end frame are valid
when changing start frame settings'''
# check start isn't under 0
if self.start < 1:
self.start = 1
# check start isn't over end
if self.start >= self.end:
size = self.get(True).curve_to_frame.size
if self.start < size:
self['end'] = self.start + 1
else:
self['start'] = size - 1
self['end'] = size
# all properties
name = bpy.props.StringProperty()
uid = bpy.props.StringProperty()
track_id = bpy.props.IntProperty()
start = bpy.props.IntProperty(
name = "First frame",
description = "First frame to take in count",
update = set_start_frame)
end = bpy.props.IntProperty(
name = "Last frame",
description = "Last frame to take in count",
update = set_end_frame)
followers = bpy.props.StringProperty(
name = "Random following track",
description = "Which track can follow this track when switch mode is «Ramdomly with succession rules». Specify integer value separated by «;». empty string means every track.",
default = '')
def get( self, rename = False):
'''return the movie clip corresponding to this track'''
# get movieclip by name
try:
track = bpy.data.movieclips[ self.name ]
if track.curve_to_frame.uid == self.uid:
return track
except KeyError:
pass
# get it by uid in case name have been changed
for track in bpy.data.movieclips:
if track.curve_to_frame.uid == self.uid:
if rename:
# update with new name
try:
self.name = track.name
except AttributeError:
print('Track renaming error on '+self.name)
return track
# if none corresponding movieclip finded
return None
def get_followers( self, list_size ):
'''return a list of possible followers'''
followers = []
strings = self.followers.split(';')
for fol in strings:
try:
fol = int(fol) % list_size
if fol not in followers:
followers.append( fol )
except ValueError:
pass
return followers
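	# Illustrative example (not from the original file): with followers = "0;2;7"
	# and list_size = 5, get_followers(5) returns [0, 2] -- 7 wraps around to 2,
	# which is already present, and non-integer entries are skipped silently.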
def get_frame( self, combination ):
'''Return frame number relative to combination value and start/end settings'''
return round( self.start + ( self.end - self.start ) * combination )
class TrackItem(bpy.types.UIList):
'''Item to display tracks in a list template'''
def draw_item(
self,
context,
layout,
data,
item,
icon,
active_data,
active_propname,
index ):
'''draw item row'''
# display name and index in list
sp = layout.split(0.05)
col = sp.column()
col.label( str(item.track_id) )
sp = sp.split(0.75)
col = sp.column()
col.label(item.name, icon='CLIP')
col = sp.column()
col.label('['+str(item.start)+'-'+str(item.end)+']')
class TracksActions(bpy.types.Operator):
	'''Tracks list action operator'''
bl_idname = "curve_to_frame.tracks_list_action"
bl_label = "Track Action"
bl_description = "Track Action:\n\
- Move up selected track.\n\
- Check all Tracks.\n\
- Delete selected track.\n\
- Move down selected track."
bl_options = {'INTERNAL'}
action = bpy.props.EnumProperty(
items=(
('UP', "Up", ""),
('DOWN', "Down", ""),
('REMOVE', "Remove", ""),
('CHECK', "Check", ""),
)
)
def invoke(self, context, event):
scn = context.scene
i = scn.curve_to_frame.selected_track
try:
item = scn.curve_to_frame.tracks[i]
except IndexError:
self.report({'ERROR'}, 'Error: bad selection')
return {"CANCELLED"}
if self.action == 'DOWN':
# move selected track down
if( i < len(scn.curve_to_frame.tracks)-1 ):
scn.curve_to_frame.tracks.move( i, i+1 )
scn.curve_to_frame.selected_track = i+1
elif self.action == 'UP':
# move selected track up
if( i > 0 ):
scn.curve_to_frame.tracks.move( i, i-1 )
scn.curve_to_frame.selected_track = i-1
elif self.action == 'REMOVE':
# remove selected track
scn.curve_to_frame.tracks.remove(i)
length = len(scn.curve_to_frame.tracks)-1
if i > length:
scn.curve_to_frame.selected_track = length
elif self.action == 'CHECK':
# check if all tracks in the list are OK
index = -1
for key in scn.curve_to_frame.tracks.keys():
index += 1
# report and remove inexistant Track
track = scn.curve_to_frame.tracks[key].get( True)
if track is None:
self.report({'ERROR'}, 'Error: \''+key+'\' movieclip didn\'t exist. the corresponding track have been removed.')
scn.curve_to_frame.tracks.remove(index)
index -= 1
continue
# report and remove Track which isn't SEQUENCE
if track.source != 'SEQUENCE':
self.report({'ERROR'}, 'Error: \''+key+'\' movieclip is not a sequence. the corresponding track have been removed.')
scn.curve_to_frame.tracks.remove(index)
index -= 1
continue
# initialize corresponding movieclip if necessary
if track.curve_to_frame.uid == '':
track.curve_to_frame.initialize()
if get_fcurve_by_data_path(track, 'curve_to_frame.peaks_shape') is None:
track.curve_to_frame.init_peaks_shape_curve()
# check all image of the sequence exist
if not track.curve_to_frame.check_image_file():
self.report({'ERROR'}, 'Error: some images source file of \''+key+'\' movieclip are missing.')
# update track id
index = -1
for key in scn.curve_to_frame.tracks.keys():
index +=1
scn.curve_to_frame.tracks[index].track_id = index
return {"FINISHED"}
class TracksList():
'''Tracks list properties and method'''
def add_track( self, context ):
'''add the selected tracks in tracks list'''
# get new track name and avoid recursive call
track = self.track_add
if track == '':
return
# get the corresponding movieclip
try:
track = bpy.data.movieclips[ track ]
except KeyError:
return
# check the source is compatible
if track.source != 'SEQUENCE':
return
# load tracks if necessary
if track.curve_to_frame.uid == '':
track.curve_to_frame.initialize()
if get_fcurve_by_data_path(track, 'curve_to_frame.peaks_shape') is None:
track.curve_to_frame.init_peaks_shape_curve()
# add to the list
new = self.tracks.add()
new.name = track.name
new.uid = track.curve_to_frame.uid
new.track_id = len(self.tracks)-1
new.start = 1
new.end = track.curve_to_frame.size
self.selected_track = new.track_id
# clear the add field
self.track_add=''
#########################
## list and properties ##
#########################
track_add = bpy.props.StringProperty(
name = "Add",
description = "Add tracks to the list",
default = '',
update = add_track )
tracks = bpy.props.CollectionProperty(
type=Track,
options = {'LIBRARY_EDITABLE'} )
selected_track = bpy.props.IntProperty( default = -1 )
| gpl-3.0 | -3,735,279,142,487,749,000 | 23.389439 | 178 | 0.63613 | false |
perkinslr/pyHorde3D | pyHorde3D/horde3d_h.py | 1 | 1776 | # horde3d_h.py
#
# Copyright 2014 Logan Perkins <perkins@lp-programming.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the Eclipse Public License 1.0
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#
import os, subprocess
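# Assumed intent (comment added for clarity): run gcc's preprocessor over the
# Horde3D utils header so the enum definitions can be scraped by the regex below
# and exposed both as Python classes and as cffi declarations.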
t=subprocess.Popen(['gcc','-E',"-I", os.environ.get('HORDE3DINCLUDE','/usr/local/include'), "%s/Horde3DUtils.h"%os.environ.get('HORDE3DINCLUDE','/usr/local/include'),'-DDLL='],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
t.wait()
data=t.stdout.readlines()
data=str.join('', [l for l in data if '#' not in l])
import re
structs=re.compile('struct ([a-zA-Z0-9]+).*?\n{*(\n*?.*?)*? enum [a-zA-Z]*.*?\n.*\n?([ 0-9a-zA-Z=,\n]+)?')
s={}
for struct in structs.findall(data):
attrs=struct[2].replace(',','').replace(' ','').strip().split('\n')
attrs1=[]
values=[]
for attr in attrs:
if '=' in attr:
attrs1.append(attr.split('=')[0])
values.append(int(attr.split('=')[1]))
else:
if not values:
values.append(0)
else:
values.append(values[-1]+1)
attrs1.append(attr)
values=iter(values)
d={a:values.next() for a in attrs1}
globals()[struct[0]]=type(struct[0],(),d)
s[struct[0]]=globals()[struct[0]]
import cffi
ffi=cffi.FFI()
ffi.cdef(structs.split(data)[0])
cdefs=structs.split(data)[-1].replace('''};
};
''','\n').replace('\n ','\n')
cdefs=re.sub(' [a-zA-Z0-9]+::[a-zA-Z0-9]+ ',' int ',cdefs)
ffi.cdef(cdefs)
def getfunctions(lib):
functions={}
for f in re.findall('([a-zA-Z][a-zA-Z0-9]*)\(',cdefs):
try:
functions[f]=getattr(lib,f)
except Exception as e:
print e
return functions
| epl-1.0 | 541,706,671,798,995,260 | 24.73913 | 223 | 0.643018 | false |
siliconsmiley/QGIS | python/plugins/processing/algs/otb/maintenance/parsing.py | 1 | 6131 | # -*- coding: utf-8 -*-
"""
***************************************************************************
parsing.py
---------------------
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI)
Oscar Picas (CS SI)
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Julien Malik, Oscar Picas'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
from collections import namedtuple
import re
def merge_pairs(list, should_merge, merge):
"""
Merges adjacent elements of list using the function merge
if they satisfy the predicate should_merge.
"""
ret = []
i = 0
while i < len(list) - 1:
a = list[i]
b = list[i + 1]
if should_merge(a, b):
ret.append(merge(a, b))
i += 2
else:
ret.append(a)
i += 1
if i == len(list) - 1:
ret.append(list[i])
return ret
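# Worked example (illustrative, not from the original source):
# merge_pairs([1, 1, 2, 3, 3], lambda a, b: a == b, lambda a, b: a + b)
# merges the two leading 1s and the trailing 3s, giving [2, 2, 6].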
QuotedString = namedtuple('QuotedString', 'contents comments')
_Arg = namedtuple('Arg', 'contents comments')
_Command = namedtuple('Command', 'name body comment')
BlankLine = namedtuple('BlankLine', '')
class File(list):
def __repr__(self):
return 'File(' + repr(list(self)) + ')'
class Comment(str):
def __repr__(self):
return 'Comment(' + unicode(self) + ')'
def Arg(contents, comments=None):
return _Arg(contents, comments or [])
def Command(name, body, comment=None):
return _Command(name, body, comment)
class CMakeParseError(Exception):
pass
def prettify(s):
"""
Returns the pretty-print of the contents of a CMakeLists file.
"""
return unicode(parse(s))
def parse(s):
'''
    Parses a string s containing the contents of a CMakeLists file.
'''
nums_toks = tokenize(s)
nums_items = list(parse_file(nums_toks))
nums_items = attach_comments_to_commands(nums_items)
items = [item for _, item in nums_items]
return File(items)
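# Illustrative example (assumed behaviour, not from the original source):
# parse('add_executable(app main.c) # build it\n') yields a File with a single
# Command(name='add_executable', body=[Arg('app'), Arg('main.c')]) whose trailing
# comment '# build it' is attached to the command.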
def parse_file(toks):
'''
Yields line number ranges and top-level elements of the syntax tree for
a CMakeLists file, given a generator of tokens from the file.
toks must really be a generator, not a list, for this to work.
'''
prev_type = 'newline'
for line_num, (typ, tok_contents) in toks:
if typ == 'comment':
yield ([line_num], Comment(tok_contents))
elif typ == 'newline' and prev_type == 'newline':
yield ([line_num], BlankLine())
elif typ == 'word':
line_nums, cmd = parse_command(line_num, tok_contents, toks)
yield (line_nums, cmd)
prev_type = typ
def attach_comments_to_commands(nodes):
return merge_pairs(nodes, command_then_comment, attach_comment_to_command)
def command_then_comment(a, b):
line_nums_a, thing_a = a
line_nums_b, thing_b = b
return (isinstance(thing_a, _Command) and
isinstance(thing_b, Comment) and
set(line_nums_a).intersection(line_nums_b))
def attach_comment_to_command(lnums_command, lnums_comment):
command_lines, command = lnums_command
_, comment = lnums_comment
return command_lines, Command(command.name, command.body[:], comment)
def parse_command(start_line_num, command_name, toks):
cmd = Command(name=command_name, body=[], comment=None)
expect('left paren', toks)
for line_num, (typ, tok_contents) in toks:
if typ == 'right paren':
line_nums = range(start_line_num, line_num + 1)
return line_nums, cmd
elif typ == 'left paren':
raise ValueError('Unexpected left paren at line %s' % line_num)
elif typ in ('word', 'string'):
cmd.body.append(Arg(tok_contents, []))
elif typ == 'comment':
c = tok_contents
if cmd.body:
cmd.body[-1].comments.append(c)
else:
cmd.comments.append(c)
msg = 'File ended while processing command "%s" started at line %s' % (
command_name, start_line_num)
raise CMakeParseError(msg)
def expect(expected_type, toks):
line_num, (typ, tok_contents) = toks.next()
if typ != expected_type:
msg = 'Expected a %s, but got "%s" at line %s' % (
expected_type, tok_contents, line_num)
raise CMakeParseError(msg)
# http://stackoverflow.com/questions/691148/pythonic-way-to-implement-a-tokenizer
scanner = re.Scanner([
(r'#.*', lambda scanner, token: ("comment", token)),
(r'"[^"]*"', lambda scanner, token: ("string", token)),
(r"\(", lambda scanner, token: ("left paren", token)),
(r"\)", lambda scanner, token: ("right paren", token)),
(r'[^ \t\r\n()#"]+', lambda scanner, token: ("word", token)),
(r'\n', lambda scanner, token: ("newline", token)),
(r"\s+", None), # skip other whitespace
])
def tokenize(s):
"""
Yields pairs of the form (line_num, (token_type, token_contents))
given a string containing the contents of a CMakeLists file.
"""
toks, remainder = scanner.scan(s)
line_num = 1
if remainder != '':
msg = 'Unrecognized tokens at line %s: %s' % (line_num, remainder)
raise ValueError(msg)
for tok_type, tok_contents in toks:
yield line_num, (tok_type, tok_contents.strip())
line_num += tok_contents.count('\n')
| gpl-2.0 | -1,898,056,095,687,286,800 | 35.064706 | 81 | 0.550481 | false |
gem/sidd | sidd/operator/loaders/survey.py | 1 | 17568 | # Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
module contains classes for loading survey data in SQLite and CSV formats
"""
import csv
import sqlite3
import datetime
from os.path import exists
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsVectorFileWriter, QgsFeature, QgsField, QgsGeometry, QgsPoint
from utils.shapefile import load_shapefile_verify, remove_shapefile
from utils.system import get_unique_filename
from sidd.taxonomy.gem import GemTaxonomyAttribute
from sidd.constants import logAPICall, \
GID_FIELD_NAME, LON_FIELD_NAME, LAT_FIELD_NAME, TAX_FIELD_NAME, \
GRP_FIELD_NAME, AREA_FIELD_NAME, HT_FIELD_NAME, COST_FIELD_NAME
from sidd.operator import Operator,OperatorError, OperatorDataError
from sidd.operator.data import OperatorDataTypes
class GEMDBSurveyLoader(Operator):
""" loading field survey data in CSV format"""
HT_ATTRIBUTE_NAME='Height'
YR_ATTRIBUTE_NAME='Date of Construction'
def __init__(self, options=None, name="Survey Loader"):
""" constructor """
Operator.__init__(self, options, name)
self._tmp_dir = options['tmp_dir']
self.taxonomy = options['taxonomy']
# check if height/year range is requested
# range is stored as dictionary {'min_values':min_values, 'max_values':max_values}
# where min_value and max_value are arrays of values
if options.has_key(self.HT_ATTRIBUTE_NAME):
ht_ranges = options[self.HT_ATTRIBUTE_NAME]
min_values_count = len(ht_ranges['min_values'])
max_values_count = len(ht_ranges['max_values'])
# use range only if it is correctly set
if min_values_count>0 and max_values_count>0 and min_values_count==max_values_count:
self.ht_ranges = options[self.HT_ATTRIBUTE_NAME]
if options.has_key(self.YR_ATTRIBUTE_NAME):
ht_ranges = options[self.YR_ATTRIBUTE_NAME]
min_values_count = len(ht_ranges['min_values'])
max_values_count = len(ht_ranges['max_values'])
# use range only if it is correctly set
if min_values_count>0 and max_values_count>0 and min_values_count==max_values_count:
self.yr_ranges = options[self.YR_ATTRIBUTE_NAME]
self._fields = {
0 : QgsField(GID_FIELD_NAME, QVariant.String),
1 : QgsField(LON_FIELD_NAME, QVariant.Double),
2 : QgsField(LAT_FIELD_NAME, QVariant.Double),
3 : QgsField(TAX_FIELD_NAME, QVariant.String, "", 255),
4 : QgsField(GRP_FIELD_NAME, QVariant.String),
5 : QgsField(AREA_FIELD_NAME, QVariant.String),
6 : QgsField(HT_FIELD_NAME, QVariant.String),
7 : QgsField(COST_FIELD_NAME, QVariant.String),
}
# self documenting method override
###########################
@property
def input_types(self):
return [OperatorDataTypes.File, OperatorDataTypes.StringAttribute, OperatorDataTypes.StringAttribute]
@property
def input_names(self):
return ["Survey Input File", "Survey data type", "Project Filter"]
input_descriptions = input_names
@property
def output_types(self):
return [OperatorDataTypes.Survey, OperatorDataTypes.Shapefile]
@property
def output_names(self):
return ["Survey", "Survey Shapefile"]
output_descriptions = output_names
# public method override
###########################
@logAPICall
def do_operation(self):
""" perform survey data loading """
# input/output data checking already done during property set
survey = self.inputs[0].value
project = self.inputs[2].value
tmp_survey_file = '%ssurvey_%s.shp' % (self._tmp_dir, get_unique_filename())
# load survey
try:
self._loadSurvey(survey, tmp_survey_file, project)
except Exception as err:
remove_shapefile(tmp_survey_file)
raise OperatorError("Error Loading Survey\n%s" % err,
self.__class__)
try:
# store loaded data
tmp_survey_layername = 'survey_%s' % get_unique_filename()
tmp_survey_layer = load_shapefile_verify(tmp_survey_file, tmp_survey_layername,
[self._lon_field, self._lat_field, self._tax_field])
except Exception as err:
raise OperatorError("Error Loading Survey\n%s" % err,
self.__class__)
self.outputs[0].value = tmp_survey_layer
self.outputs[1].value = tmp_survey_file
# protected method override
####################################
def _verify_inputs(self, inputs):
""" perform operator specific input validation """
if not exists(inputs[0].value):
raise OperatorDataError("input file %s does not exist" % (inputs[0].value))
def _verify_outputs(self, outputs):
""" perform operator specific input validation """
pass
# internal helper methods
####################################
def _loadSurvey(self, sqlitepath, shapefilepath, proj_uid=None):
# load data
sql = """select OBJ_UID, X, Y, SAMPLE_GRP, PLAN_AREA, REPLC_COST,
MAT_TYPE_L, MAT_TECH_L, MAS_REIN_L, MAS_MORT_L, STEELCON_L,
LLRS_L, LLRS_DCT_L,
ROOFSYSMAT, ROOFSYSTYP,
FLOOR_MAT, FLOOR_TYPE,
STORY_AG_Q, STORY_AG_1, STORY_AG_2,
YR_BUILT_Q, YR_BUILT_1, YR_BUILT_2,
STR_IRREG, STR_HZIR_P, STR_HZIR_S, STR_VEIR_P, STR_VEIR_S,
OCCUPCY, OCCUPCY_DT
from GEM_OBJECT o LEFT JOIN GED g on o.OBJ_UID=g.GEMOBJ_UID"""
# SQL injection check not too important here given that data format is SQLite
if proj_uid is not None:
sql = "%s WHERE PROJ_UID='%s'" % (sql, proj_uid)
conn = sqlite3.connect(sqlitepath)
c = conn.cursor()
c.execute(sql)
self._buildSurveyLayer(c, shapefilepath)
c.close()
conn.close()
def _buildSurveyLayer(self, data, shapefilepath):
writer = QgsVectorFileWriter(shapefilepath, "utf-8", self._fields, QGis.WKBPoint, self._crs, "ESRI Shapefile")
f = QgsFeature()
for row in data:
obj_uid = str(row[0])
lon = self._tofloat(row[1])
lat = self._tofloat(row[2])
sample_grp = str(row[3])
plan_area = self._tofloat(row[4])
rep_cost = self._tofloat(row[5])
tax_string = self._make_gem_taxstring(row[6:])
ht = self._get_height(row[6:])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(lon, lat)))
f.addAttribute(0, QVariant(obj_uid))
f.addAttribute(1, QVariant(lon))
f.addAttribute(2, QVariant(lat))
f.addAttribute(3, QVariant(tax_string))
f.addAttribute(4, QVariant(sample_grp))
f.addAttribute(5, QVariant(plan_area))
f.addAttribute(6, QVariant(ht))
f.addAttribute(7, QVariant(rep_cost))
writer.addFeature(f)
del writer, f
def _make_gem_taxstring(self, data):
(mat_type_l, mat_tech_l, mas_rein_l, mas_mort_l, steel_con_l,
llrs_l, llrs_duct_l,
roofsysmat, roofsystyp,
floor_mat, floor_type,
story_ag_q, story_ag_1, story_ag_2,
yr_built_q, yr_built_1, yr_built_2,
str_irreg, str_hzir_p, str_hzir_s, str_veir_p, str_veir_s,
occupcy, occupcy_dt) = [(x) for x in data]
# attribute group names
# 'Material', 'Lateral Load-Resisting System', 'Roof', 'Floor', 'Height', 'Date of Construction', 'Irregularity', 'Occupancy'
# separator for individual attributes in group
separator = self.taxonomy.get_separator(self.taxonomy.Separators.Attribute)
# material
mat_string = self._coalesce(mat_type_l) \
+ self._append_not_null(mat_tech_l, separator) + self._append_not_null(mas_rein_l, separator) \
+ self._append_not_null(mas_mort_l, separator) + self._append_not_null(steel_con_l, separator)
# lateral load
ll_string = self._coalesce(llrs_l) + self._append_not_null(llrs_duct_l,separator)
# roof
roof_string = self._coalesce(roofsysmat) + self._append_not_null(roofsystyp,separator)
# floor
floor_string = self._coalesce(floor_mat) + self._append_not_null(floor_type,separator)
# story
attribute = self.taxonomy.get_attribute_by_name('Height')
_qualifier = self._coalesce(story_ag_q)
_story1, _story2 = self._toint(story_ag_1), self._toint(story_ag_2)
if getattr(self, 'ht_ranges', None) is None:
if _qualifier == 'HBET':
ht_string = attribute.make_string([_story2, _story1], GemTaxonomyAttribute.RANGE)
elif _qualifier == 'HAPP':
ht_string = attribute.make_string([_story2, 0], GemTaxonomyAttribute.APP)
else:
ht_string = attribute.make_string([_story1, 0], GemTaxonomyAttribute.EXACT)
else:
if _qualifier == "HBET":
ht_range = self._find_range((_story1 + _story2) / 2.0,
self.ht_ranges['min_values'], self.ht_ranges['max_values'])
else: # EXACT or APPROXIMATE
ht_range = self._find_range(_story1,
self.ht_ranges['min_values'], self.ht_ranges['max_values'])
if _story1 is None or _story1 == 0:
ht_range = [None, None]
elif ht_range[0] is None and ht_range[1] is not None:
self.ht_ranges['min_values'].insert(0, 1)
self.ht_ranges['max_values'].insert(0, ht_range[1])
elif ht_range[1] is None and ht_range[0] is not None:
self.ht_ranges['min_values'].append(ht_range[0])
self.ht_ranges['max_values'].append(200)
ht_string = attribute.make_string(ht_range, GemTaxonomyAttribute.RANGE)
# yr_built
attribute = self.taxonomy.get_attribute_by_name('Date of Construction')
_qualifier = self._coalesce(yr_built_q)
_year1, _year2 = self._toint(yr_built_1), self._toint(yr_built_2)
if getattr(self, 'yr_ranges', None) is None:
if _qualifier == 'YAPP':
yr_string = attribute.make_string([_year2, 0], GemTaxonomyAttribute.APP)
elif _qualifier== 'YPRE':
yr_string = attribute.make_string([_year2, 0], GemTaxonomyAttribute.PRE)
elif _qualifier == 'YBET':
yr_string = attribute.make_string([_year2, _year1], GemTaxonomyAttribute.RANGE)
else:
yr_string = attribute.make_string([_year1, 0], GemTaxonomyAttribute.EXACT)
else:
if _qualifier == "YBET":
yr_ranges = self._find_range((_year1 + _year2) / 2.0,
self.yr_ranges['min_values'], self.yr_ranges['max_values'])
else: # EXACT or APPROXIMATE
yr_ranges = self._find_range(_year1,
self.yr_ranges['min_values'], self.yr_ranges['max_values'])
if _year1 is None or _year1 == 0:
yr_ranges = [None, None]
elif yr_ranges[0] is None and yr_ranges[1] is not None:
self.yr_ranges['min_values'].insert(0, 1)
self.yr_ranges['max_values'].insert(0, yr_ranges[1])
elif yr_ranges[1] is None and yr_ranges[0] is not None:
self.yr_ranges['min_values'].append(yr_ranges[0])
self.yr_ranges['max_values'].append(datetime.date.today().year)
yr_string = attribute.make_string(yr_ranges, GemTaxonomyAttribute.RANGE)
# irregularity
ir_string = self._append_no_repeat([str_irreg, str_hzir_p, str_hzir_s, str_veir_p, str_veir_s],
separator, exclude="IRN")
# occupancy
occ_string = self._coalesce(occupcy) + self._append_not_null(occupcy_dt,separator)
# constructs output string
separator = self.taxonomy.get_separator(self.taxonomy.Separators.AttributeGroup)
return (mat_string + self._append_not_null(ll_string,separator)
+ self._append_not_null(roof_string,separator)
+ self._append_not_null(floor_string,separator)
+ self._append_not_null(ht_string,separator)
+ self._append_not_null(yr_string,separator)
+ self._append_not_null(ir_string,separator)
+ self._append_not_null(occ_string,separator))
def _get_height(self, data):
""" retrieve height as numeric value from SQLite Query Result """
story_ag_q, story_ag_1, story_ag_2 = data[11:14]
ht = 0
if story_ag_1 is None:
ht = 0
elif self._coalesce(story_ag_q) == "HBET":
ht = (self._toint(story_ag_1) + self._toint(story_ag_2)) / 2
else:
ht = self._toint(story_ag_1)
return int(ht)
def _coalesce(self, val):
""" returns val or blank string if val is null (None) """
if (val is not None):
return str(val).upper()
else:
return ""
def _toint(self, val):
""" convert val to integer, return 0 if conversion fails """
try:
return int(val)
except:
return 0
def _tofloat(self, val):
""" convert val to floating point, return 0.0 if conversion fails """
try:
return float(val)
except:
return 0.0
def _append_not_null(self, val, separator):
""" append val with separator if val is not empty """
if (val is None or val == ""):
return ""
else:
return separator + str(val)
def _append_no_repeat(self, vals, separator, exclude=''):
""" concatenate list of values using separator if value is not empty and not excluded """
no_repeat = {}
for val in vals:
if val is None or val == "" or val == exclude:
continue
no_repeat[val]=1
return str(separator).join(no_repeat.keys())
def _find_range(self, value, min_values, max_values):
""" find min/max values surrounding given value """
# less than minimum
if value < min_values[0]:
return None, min_values[0]
# test ranges
for min_val, max_val in map(None, min_values, max_values):
if value >= min_val and value <= max_val:
return min_val, max_val
# larger than maximum
return max_values[len(max_values)-1], None
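    # Illustrative example (not from the original source): with min_values=[1, 4, 7]
    # and max_values=[3, 6, 9], _find_range(5, ...) returns (4, 6), _find_range(0, ...)
    # returns (None, 1) and _find_range(12, ...) returns (9, None).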
class CSVSurveyLoader(GEMDBSurveyLoader):
""" loading field survey data in CSV format"""
def __init__(self, options=None, name="Survey Loader"):
""" constructor """
super(CSVSurveyLoader, self).__init__(options, name)
    def _loadSurvey(self, csvpath, shapefilepath, proj_uid=None):
        # proj_uid is accepted (and ignored) so the signature matches the parent
        # class, whose do_operation always passes a project filter argument
# load data
data = csv.reader(open(csvpath, 'r'), delimiter=',', quotechar='"')
# skip header, there is probably a better way to accomplish this
data.next()
writer = QgsVectorFileWriter(shapefilepath, "utf-8", self._fields, QGis.WKBPoint, self._crs, "ESRI Shapefile")
f = QgsFeature()
gid = 0
for row in data:
lon = float(row[0])
lat = float(row[1])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(lon, lat)))
gid+=1
f.addAttribute(0, QVariant(gid))
f.addAttribute(1, QVariant(lon))
f.addAttribute(2, QVariant(lat))
f.addAttribute(3, QVariant(row[2]))
writer.addFeature(f)
del writer, f
| agpl-3.0 | 2,007,593,762,775,590,700 | 44.007833 | 133 | 0.55288 | false |
zaproxy/zap-api-python | src/zapv2/users.py | 1 | 7216 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class users(object):
def __init__(self, zap):
self.zap = zap
def users_list(self, contextid=None):
"""
Gets a list of users that belong to the context with the given ID, or all users if none provided.
"""
params = {}
if contextid is not None:
params['contextId'] = contextid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params)))
def get_user_by_id(self, contextid, userid):
"""
Gets the data of the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid})))
def get_authentication_credentials_config_params(self, contextid):
"""
Gets the configuration parameters for the credentials of the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid})))
def get_authentication_credentials(self, contextid, userid):
"""
Gets the authentication credentials of the user with given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid})))
def get_authentication_state(self, contextid, userid):
"""
Gets the authentication state information for the user identified by the Context and User Ids.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid})))
def get_authentication_session(self, contextid, userid):
"""
Gets the authentication session information for the user identified by the Context and User Ids, e.g. cookies and realm credentials.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid})))
def new_user(self, contextid, name, apikey=''):
"""
Creates a new user with the given name for the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey})))
def remove_user(self, contextid, userid, apikey=''):
"""
Removes the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
"""
Sets whether or not the user, with the given ID that belongs to the context with the given ID, should be enabled.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})))
def set_user_name(self, contextid, userid, name, apikey=''):
"""
Renames the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})))
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):
"""
Sets the authentication credentials for the user with the given ID that belongs to the context with the given ID.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if authcredentialsconfigparams is not None:
params['authCredentialsConfigParams'] = authcredentialsconfigparams
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params)))
def authenticate_as_user(self, contextid, userid, apikey=''):
"""
Tries to authenticate as the identified user, returning the authentication request and whether it appears to have succeeded.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def poll_as_user(self, contextid, userid, apikey=''):
"""
Tries to poll as the identified user, returning the authentication request and whether it appears to have succeeded. This will only work if the polling verification strategy has been configured.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''):
"""
Sets fields in the authentication state for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if lastpollresult is not None:
params['lastPollResult'] = lastpollresult
if lastpolltimeinms is not None:
params['lastPollTimeInMs'] = lastpolltimeinms
if requestssincelastpoll is not None:
params['requestsSinceLastPoll'] = requestssincelastpoll
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params)))
def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''):
"""
Sets the specified cookie for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey}
if path is not None:
params['path'] = path
if secure is not None:
params['secure'] = secure
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setCookie/', params)))
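# Hedged usage sketch (not part of the generated client).  It assumes a
# configured ZAP client object that exposes `base` and `_request`, exactly as
# the methods above expect; the context id and user name are illustrative, and
# treating the return value of new_user() as the new user id is an assumption.
def _example_manage_users(zap, contextid='1'):
    api = users(zap)
    user_id = api.new_user(contextid=contextid, name='example-user')
    api.set_user_enabled(contextid=contextid, userid=user_id, enabled='true')
    return api.users_list(contextid=contextid)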
| apache-2.0 | 8,769,476,478,485,714,000 | 52.058824 | 202 | 0.676136 | false |
ajerneck/thatsfordinner | canonical.py | 1 | 1153 | "Use a canonical list of ingredients or foods to match."
import nltk
import pandas as pd
import functions as f
import common
## exploiting that ingredients are mentioned in instructions as well.
con = common.make_engine()
dfe = pd.read_sql_table('recipes_recipe', con)
x = dfe.head()
## intersection of ingredients and instructions.
set(x['ingredient_txt'].str.split()[0]).intersection(set(x['instruction_txt'].str.split()[0]))
## olive oil
## parsley
## lemon peel
## garlick
## lemon juice
## kosher salt
## black pepper
## spinache and artichoke ravioli
## baby artichokes
## canonical list attempt.
df = f.load_data()
## using canonical ingredient list.
cf = pd.read_csv('/home/alexander/start/archives/2015/2015-start/code/data-science/incubator-challenge/q3/fooddb/compounds_foods.csv', escapechar="\\")
vocab = cf['orig_food_common_name'].str.lower().unique()
## edit distances.
sents = df['ingredient_txt'].map(lambda x: map(nltk.word_tokenize, x.split('\n')))
sents = map(lambda x: x[1:-1], sents)
sents[0:10]
## simple approach: for the most probable words for each topic, if a unigram appears in a bigram, filter it out.
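# A minimal sketch of that filtering idea, assuming `top_words` is a list of
# unigram and bigram strings for one topic (the helper name is made up):
def filter_redundant_unigrams(top_words):
    bigrams = [w for w in top_words if ' ' in w]
    # keep every bigram; keep a unigram only if no bigram already contains it
    return [w for w in top_words
            if ' ' in w or not any(w in b.split() for b in bigrams)]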
| bsd-3-clause | 7,886,874,209,994,783,000 | 22.06 | 151 | 0.718127 | false |
yaybu/touchdown | touchdown/aws/apigateway/method_response.py | 1 | 2446 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, resource, serializers
from touchdown.core.plan import Plan
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy
from .resource import Resource
class MethodResponse(resource.Resource):
resource_name = "method_response"
name = argument.String(field="httpMethod")
status_code = argument.String(field="statusCode")
response_parameters = argument.Dict(field="responseParameters")
response_models = argument.Dict(field="responseModels")
resource = argument.Resource(Resource, field="resourceId")
class Describe(SimpleDescribe, Plan):
resource = MethodResponse
service_name = "apigateway"
api_version = "2015-07-09"
describe_action = "get_method_response"
describe_notfound_exception = "NotFoundException"
describe_envelope = "[@]"
key = "httpMethod"
def get_describe_filters(self):
api = self.runner.get_plan(self.resource.resource.api)
if not api.resource_id:
return None
resource = self.runner.get_plan(self.resource.resource)
if not resource.resource_id:
return None
return {
"restApiId": api.resource_id,
"resourceId": resource.resource_id,
"httpMethod": self.resource.name,
"statusCode": self.resource.status_code,
}
class Apply(SimpleApply, Describe):
create_action = "put_method_response"
create_envelope = "@"
def get_create_serializer(self):
return serializers.Resource(restApiId=self.resource.resource.api.identifier())
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_method_response"
def get_destroy_serializer(self):
return serializers.Dict(
restApiId=self.resource.resource.api.identifier(),
resourceId=self.resource.identifier(),
)
| apache-2.0 | 5,690,618,798,359,818,000 | 30.766234 | 86 | 0.701962 | false |
js850/pele | examples/new_potential/mypotential.py | 1 | 3624 | """
an example of how to create a new potential.
"""
from pele.potentials import BasePotential
class MyPot(BasePotential):
"""a Lennard Jones potential with altered exponents
V(r) = 4. * (r**-24 - r**-12)
"""
def __init__(self, natoms):
self.natoms = natoms #number of atoms
def getEnergy(self, coords):
coords = np.reshape(coords, [self.natoms,3])
E = 0.
for i in range(self.natoms):
for j in range(i):
r = np.sqrt(np.sum((coords[i,:] - coords[j,:])**2))
E += 4. * (r**-24 - r**-12)
return E
def getEnergyGradient(self, coords):
coords = np.reshape(coords, [self.natoms,3])
E = 0.
grad = np.zeros(coords.shape)
for i in range(self.natoms):
for j in range(i):
dr = coords[i,:] - coords[j,:]
r = np.sqrt(np.sum(dr**2))
E += 4. * (r**(-24) - r**(-12))
g = 4. * ( 24. * r**(-25) - 12. * r**(-13))
grad[i,:] += -g * dr/r
grad[j,:] += g * dr/r
return E, grad.reshape(-1)
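# Hedged aside: the double loops above are O(natoms**2) in pure Python; the
# same energy can be computed with vectorised numpy, e.g. (illustration only,
# the helper name is made up):
def _energy_vectorised(coords, natoms):
    import numpy as np  # numpy is also imported at module level further down
    xyz = np.reshape(coords, [natoms, 3])
    diff = xyz[:, None, :] - xyz[None, :, :]      # pairwise displacement vectors
    r = np.sqrt((diff ** 2).sum(axis=-1))         # pairwise distances
    iu = np.triu_indices(natoms, k=1)             # count each pair once
    return float(np.sum(4. * (r[iu] ** -24 - r[iu] ** -12)))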
from pele.systems import BaseSystem
from pele.mindist import MinPermDistAtomicCluster, ExactMatchAtomicCluster
from pele.transition_states import orthogopt
class MySystem(BaseSystem):
def __init__(self, natoms):
super(MySystem, self).__init__()
self.natoms = natoms
self.params.database.accuracy =0.1
def get_potential(self):
return MyPot(self.natoms)
def get_mindist(self):
permlist = [range(self.natoms)]
return MinPermDistAtomicCluster(permlist=permlist, niter=10)
def get_orthogonalize_to_zero_eigenvectors(self):
return orthogopt
def get_compare_exact(self, **kwargs):
permlist = [range(self.natoms)]
return ExactMatchAtomicCluster(permlist=permlist, **kwargs)
import numpy as np
def run_basinhopping():
natoms = 8
system = MySystem(natoms)
database = system.create_database()
x0 = np.random.uniform(-1,1,[natoms*3])
bh = system.get_basinhopping(database=database, coords=x0)
bh.run(10)
print "found", len(database.minima()), "minima"
min0 = database.minima()[0]
print "lowest minimum found has energy", min0.energy
return system, database
def run_double_ended_connect(system, database):
# connect the all minima to the lowest minimum
from pele.landscape import ConnectManager
manager = ConnectManager(database, strategy="gmin")
for i in xrange(database.number_of_minima()-1):
min1, min2 = manager.get_connect_job()
connect = system.get_double_ended_connect(min1, min2, database)
connect.connect()
from pele.utils.disconnectivity_graph import DisconnectivityGraph, database2graph
import matplotlib.pyplot as plt
def make_disconnectivity_graph(database):
graph = database2graph(database)
dg = DisconnectivityGraph(graph, nlevels=3, center_gmin=True)
dg.calculate()
dg.plot()
plt.show()
def test_potential():
import numpy as np
natoms = 5
pot = MyPot(natoms)
coords = np.random.uniform(-1,1,natoms*3)
e = pot.getEnergy(coords)
print e
e, g = pot.getEnergyGradient(coords)
print e
gnum = pot.NumericalDerivative(coords, eps=1e-6)
print np.max(np.abs(gnum-g)), np.max(np.abs(gnum))
print np.max(np.abs(gnum-g)) / np.max(np.abs(gnum))
if __name__ == "__main__":
#test_potential()
mysys, database = run_basinhopping()
run_double_ended_connect(mysys, database)
make_disconnectivity_graph(database)
| gpl-3.0 | -772,127,674,280,667,800 | 30.789474 | 81 | 0.613962 | false |
liampauling/flumine | examples/middleware/orders.py | 1 | 3222 | import logging
from flumine import config
from flumine.utils import STRATEGY_NAME_HASH_LENGTH
from flumine.markets.middleware import Middleware
from flumine.order.trade import Trade
logger = logging.getLogger(__name__)
class OrdersMiddleware(Middleware):
"""
Middleware to add execution complete orders
to the blotter. This is required on a restart
as the order stream does not include
EXECUTION_COMPLETE orders
"""
def __init__(self, flumine):
self.flumine = flumine
def add_market(self, market) -> None:
resp = self.flumine.client.betting_client.betting.list_current_orders(
customer_strategy_refs=[config.hostname],
order_projection="EXECUTION_COMPLETE",
)
for current_order in resp.orders:
logger.info(
"OrdersMiddleware: Processing order {0}".format(current_order.bet_id),
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
},
)
order = self._create_order_from_current(current_order, market)
if order:
order.update_current_order(current_order)
order.execution_complete()
def _create_order_from_current(self, current_order, market):
strategy_name_hash = current_order.customer_order_ref[
:STRATEGY_NAME_HASH_LENGTH
]
order_id = current_order.customer_order_ref[STRATEGY_NAME_HASH_LENGTH + 1 :]
# get strategy
strategy = self.flumine.strategies.hashes.get(strategy_name_hash)
if strategy is None:
logger.warning(
"OrdersMiddleware: Strategy not available to create order {0}".format(
order_id
),
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
"strategy_name": str(strategy),
},
)
return
# add trade/order
trade = Trade(
market.market_id,
current_order.selection_id,
current_order.handicap,
strategy,
)
order = trade.create_order_from_current(current_order, order_id)
market.blotter[order.id] = order
runner_context = strategy.get_runner_context(*order.lookup)
runner_context.place(trade.id)
logger.info(
"OrdersMiddleware: New order trade created",
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
"strategy_name": str(strategy),
},
)
return order
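# Hedged usage sketch: market middleware is normally registered on the flumine
# framework instance before it is started.  The names below are illustrative;
# check that the framework object you use actually exposes these methods.
#
#     framework = Flumine(client=client)
#     framework.add_market_middleware(OrdersMiddleware(framework))
#     framework.add_strategy(my_strategy)
#     framework.run()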
| mit | -2,638,615,105,914,977,300 | 37.819277 | 86 | 0.574798 | false |
shizhz/tutu | tests/modules/command/test_parser.py | 1 | 1192 | # -*- coding: utf-8 -*-
import unittest
from nose.tools import raises
from modules.command.cmd import validator
from modules.command.parser import CommandParser
from modules.command.exceptions import UnknownCommandException, InvalidCommandException
class MockParser(object):
@validator
def is_valid(self, cmd):
return True
def support(self, txt):
return txt.startswith('mock')
def parse(self, txt):
return "MockCommandInfo"
class MockNotValidParser(MockParser):
@validator
def is_valid(self, cmd):
return False
class TestCommandParser(unittest.TestCase):
def test_parse(self):
cmd_parser = CommandParser(parsers=[MockParser()])
cmd_info = cmd_parser.parse('mock cmd')
assert type(cmd_info) is str
assert cmd_info == 'MockCommandInfo'
@raises(InvalidCommandException)
def test_is_not_valid(self):
cmd_parser = CommandParser(parsers=[MockNotValidParser()])
cmd_info = cmd_parser.parse('mock cmd')
@raises(UnknownCommandException)
def test_unknownexception(self):
cmd_parser = CommandParser(parsers=[MockParser()])
cmd_parser.parse('unknown cmd')
| mit | -1,355,294,728,687,652,600 | 28.8 | 87 | 0.692953 | false |
boegel/easybuild-easyblocks | easybuild/easyblocks/generic/rubygem.py | 1 | 4937 | ##
# Copyright 2015-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Ruby Gems, implemented as an easyblock
@author: Robert Schmidt (Ottawa Hospital Research Institute)
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy_file
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class RubyGem(ExtensionEasyBlock):
"""Builds and installs Ruby Gems."""
@staticmethod
def extra_options(extra_vars=None):
"""Extra easyconfig parameters specific to RubyGem easyblock."""
extra_vars = ExtensionEasyBlock.extra_options(extra_vars)
extra_vars.update({
'gem_file': [None, "Path to gem file in unpacked sources", CUSTOM],
})
return extra_vars
def __init__(self, *args, **kwargs):
"""RubyGem easyblock constructor."""
super(RubyGem, self).__init__(*args, **kwargs)
self.ext_src = None
def run(self):
"""Perform the actual Ruby gem build/install"""
if not self.src:
raise EasyBuildError("No source found for Ruby Gem %s, required for installation.", self.name)
super(RubyGem, self).run()
self.ext_src = self.src
self.log.debug("Installing Ruby gem %s version %s." % (self.name, self.version))
self.install_step()
def extract_step(self):
"""Skip extraction of .gem files, which are installed as downloaded"""
if len(self.src) > 1:
raise EasyBuildError("Don't know how to handle Ruby gems with multiple sources.")
else:
src = self.src[0]
if src['path'].endswith('.gem'):
copy_file(src['path'], self.builddir)
self.ext_src = src['name']
# set final path since it can't be determined from unpacked sources (used for guessing start_dir)
src['finalpath'] = self.builddir
else:
# unpack zipped gems, use specified path to gem file
super(RubyGem, self).extract_step()
if self.cfg['gem_file']:
self.ext_src = os.path.join(src['finalpath'], self.cfg['gem_file'])
if not os.path.exists(self.ext_src):
raise EasyBuildError("Gem file not found at %s", self.ext_src)
else:
raise EasyBuildError("Location to gem file in unpacked sources must be specified via gem_file")
def configure_step(self):
"""No separate configuration for Ruby Gems."""
pass
def build_step(self):
"""No separate build procedure for Ruby Gems."""
pass
def test_step(self):
"""No separate (standard) test procedure for Ruby Gems."""
pass
def install_step(self):
"""Install Ruby Gems using gem package manager"""
ruby_root = get_software_root('Ruby')
if not ruby_root:
raise EasyBuildError("Ruby module not loaded?")
# this is the 'proper' way to specify a custom installation prefix: set $GEM_HOME
if not self.is_extension or self.master.name != 'Ruby':
env.setvar('GEM_HOME', self.installdir)
bindir = os.path.join(self.installdir, 'bin')
run_cmd("gem install --bindir %s --local %s" % (bindir, self.ext_src))
def make_module_extra(self):
"""Extend $GEM_PATH in module file."""
txt = super(RubyGem, self).make_module_extra()
# for stand-alone Ruby gem installs, $GEM_PATH needs to be updated
if not self.is_extension or self.master.name != 'Ruby':
txt += self.module_generator.prepend_paths('GEM_PATH', [''])
return txt
| gpl-2.0 | 2,007,238,328,556,488,700 | 38.814516 | 115 | 0.648167 | false |
sukenda/django-api-tutorial | blog/settings.py | 1 | 5512 | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = "/Users/jmitch/desktop/blog/src/"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party
'markdown_deux',
'pagedown',
'rest_framework',
# local apps
'comments',
'posts',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
LOGIN_URL = "/login/"
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'api_sandbox',
        'OPTIONS': {
            'charset': 'utf8mb4',
            'sql_mode': 'traditional',
            # init_command also sets the default storage engine; Django only
            # honours connection options like this inside OPTIONS
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES', default_storage_engine=INNODB",
        },
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
# 'DEFAULT_PARSER_CLASSES': (
# 'rest_framework.parsers.JSONParser',
# )
"DEFAULT_AUTHENTICATION_CLASSES": (
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
# 'rest_framework.authentication.BasicAuthentication'
),
"DEFAULT_PERMISSION_CLASSES": (
'rest_framework.permissions.IsAuthenticated',
# 'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
'''
curl -X POST -d "username=cfe&password=learncode" http://127.0.0.1:8000/api/auth/token/
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY1ODI5fQ.OTX7CZFZqxhaUnU9Da13Ebh9FY_bHMeCF1ypr9hXjWw
curl -H "Authorization: JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY1ODI5fQ.OTX7CZFZqxhaUnU9Da13Ebh9FY_bHMeCF1ypr9hXjWw
" http://127.0.0.1:8000/api/comments/
curl -X POST -H "Authorization: JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY2MTc4fQ._i5wEqJ_OO8wNiVVNAWNPGjGaO7OzChY0UzONgw06D0" -H "Content-Type: application/json" -d '{"content":"some reply to another try"}' 'http://127.0.0.1:8000/api/comments/create/?slug=new-title&type=post&parent_id=13'
curl http://127.0.0.1:8000/api/comments/
'''
| mit | 5,082,875,815,669,808,000 | 28.956522 | 360 | 0.695573 | false |
vianuevm/cppStyle | comment_checks.py | 1 | 2368 | import re
from pyparsing import Word, Literal, alphanums
def check_line_width(self, line):
max_length = self.max_line_length
current_length = len(line)
if current_length > max_length:
self.add_error(label="LINE_WIDTH", data={'length': current_length})
def check_missing_rme(self, lines):
function = Word(alphanums + '_')
function_syntax = function + Literal('(')
parsed = function_syntax.searchString(lines[self.current_line_num]).asList()
function_name = parsed[0][0]
function_signature = lines[self.current_line_num].strip().replace(';','').strip()
if function_name != 'main':
requires = effects = modifies = False
#Check if there's a complete RME in the last 10 lines
start = self.current_line_num - 10
if start < 0:
start = 0
for line_num in range(start, self.current_line_num):
code = lines[line_num].lower()
if re.search('requires', code): requires = True
if re.search('effects', code): effects = True
if re.search('modifies', code): modifies = True
# If it's not there, maybe they defined it in a header file.
if not (requires and effects and modifies) and (function_signature not in self.all_rme[self.current_file]):
# error only in this case
# prevent double-counting
if function_signature not in self.missing_rme[self.current_file]:
self.add_error("MISSING_RME", data={'function': function_name, 'function_signature': function_signature})
self.missing_rme[self.current_file].add(function_signature)
elif function_signature not in self.all_rme[self.current_file]:
self.all_rme[self.current_file].add(function_signature)
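# Hedged illustration of a comment block check_missing_rme() accepts in the ten
# lines above a function signature (the wording is free-form and hypothetical;
# only the three keywords requires / modifies / effects are matched, and the
# lines are lower-cased first, so matching is effectively case-insensitive):
#
#   // REQUIRES: n >= 0
#   // MODIFIES: nothing
#   // EFFECTS:  returns n factorial
#   int factorial(int n);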
def check_min_comments(self, all_lines, clean_lines):
num_lines = len(all_lines) + 1
num_comments = 0
blank_lines_at_end = 0
for index, line in enumerate(all_lines):
if line != clean_lines[index]:
num_comments += 1
if line[0] == u'\n':
blank_lines_at_end += 1
else:
blank_lines_at_end = 0
num_lines -= (blank_lines_at_end + 1)
if num_comments < num_lines * self.min_comments_ratio:
self.add_error(label='MIN_COMMENTS', line=0, type="WARNING", data={'comments': num_comments, 'lines': num_lines}) | mit | 1,191,410,062,008,988,000 | 45.45098 | 121 | 0.625845 | false |
hyc/HyperDex | test/python/RegexSearch.py | 1 | 2310 | #!/usr/bin/env python
import sys
import hyperdex.client
from hyperdex.client import LessEqual, GreaterEqual, Range, Regex, LengthEquals, LengthLessEqual, LengthGreaterEqual
c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2]))
def to_objectset(xs):
return set([frozenset(x.items()) for x in xs])
assert c.put('kv', 'foo/foo/foo', {}) == True
assert c.put('kv', 'foo/foo/bar', {}) == True
assert c.put('kv', 'foo/foo/baz', {}) == True
assert c.put('kv', 'foo/bar/foo', {}) == True
assert c.put('kv', 'foo/bar/bar', {}) == True
assert c.put('kv', 'foo/bar/baz', {}) == True
assert c.put('kv', 'foo/baz/foo', {}) == True
assert c.put('kv', 'foo/baz/bar', {}) == True
assert c.put('kv', 'foo/baz/baz', {}) == True
assert c.put('kv', 'bar/foo/foo', {}) == True
assert c.put('kv', 'bar/foo/bar', {}) == True
assert c.put('kv', 'bar/foo/baz', {}) == True
assert c.put('kv', 'bar/bar/foo', {}) == True
assert c.put('kv', 'bar/bar/bar', {}) == True
assert c.put('kv', 'bar/bar/baz', {}) == True
assert c.put('kv', 'bar/baz/foo', {}) == True
assert c.put('kv', 'bar/baz/bar', {}) == True
assert c.put('kv', 'bar/baz/baz', {}) == True
assert c.put('kv', 'baz/foo/foo', {}) == True
assert c.put('kv', 'baz/foo/bar', {}) == True
assert c.put('kv', 'baz/foo/baz', {}) == True
assert c.put('kv', 'baz/bar/foo', {}) == True
assert c.put('kv', 'baz/bar/bar', {}) == True
assert c.put('kv', 'baz/bar/baz', {}) == True
assert c.put('kv', 'baz/baz/foo', {}) == True
assert c.put('kv', 'baz/baz/bar', {}) == True
assert c.put('kv', 'baz/baz/baz', {}) == True
assert to_objectset(c.search('kv', {'k': Regex('^foo')})) == to_objectset([{'k': 'foo/foo/foo'}, {'k': 'foo/foo/bar'}, {'k': 'foo/foo/baz'}, {'k': 'foo/bar/foo'}, {'k': 'foo/bar/bar'}, {'k': 'foo/bar/baz'}, {'k': 'foo/baz/foo'}, {'k': 'foo/baz/bar'}, {'k': 'foo/baz/baz'}])
assert to_objectset(c.search('kv', {'k': Regex('foo$')})) == to_objectset([{'k': 'foo/foo/foo'}, {'k': 'foo/bar/foo'}, {'k': 'foo/baz/foo'}, {'k': 'bar/foo/foo'}, {'k': 'bar/bar/foo'}, {'k': 'bar/baz/foo'}, {'k': 'baz/foo/foo'}, {'k': 'baz/bar/foo'}, {'k': 'baz/baz/foo'}])
assert to_objectset(c.search('kv', {'k': Regex('^b.*/foo/.*$')})) == to_objectset([{'k': 'bar/foo/foo'}, {'k': 'bar/foo/bar'}, {'k': 'bar/foo/baz'}, {'k': 'baz/foo/foo'}, {'k': 'baz/foo/bar'}, {'k': 'baz/foo/baz'}])
| bsd-3-clause | 2,271,529,638,827,483,000 | 61.432432 | 273 | 0.55671 | false |
jamespcole/home-assistant | homeassistant/components/yi/camera.py | 1 | 5262 | """
This component provides support for Xiaomi Cameras (HiSilicon Hi3518e V200).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.yi/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PATH, CONF_PASSWORD, CONF_PORT, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.exceptions import PlatformNotReady
REQUIREMENTS = ['aioftp==0.12.0']
DEPENDENCIES = ['ffmpeg']
_LOGGER = logging.getLogger(__name__)
DEFAULT_BRAND = 'YI Home Camera'
DEFAULT_PASSWORD = ''
DEFAULT_PATH = '/tmp/sd/record'
DEFAULT_PORT = 21
DEFAULT_USERNAME = 'root'
DEFAULT_ARGUMENTS = '-pred 1'
CONF_FFMPEG_ARGUMENTS = 'ffmpeg_arguments'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_FFMPEG_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string
})
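# Hedged example of a matching configuration.yaml entry (host and secret are
# placeholders, not values taken from this integration):
#
#   camera:
#     - platform: yi
#       name: Front porch
#       host: 192.0.2.10
#       password: !secret yi_password
#       path: /tmp/sd/record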
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up a Yi Camera."""
async_add_entities([YiCamera(hass, config)], True)
class YiCamera(Camera):
"""Define an implementation of a Yi Camera."""
def __init__(self, hass, config):
"""Initialize."""
super().__init__()
self._extra_arguments = config.get(CONF_FFMPEG_ARGUMENTS)
self._last_image = None
self._last_url = None
self._manager = hass.data[DATA_FFMPEG]
self._name = config[CONF_NAME]
self._is_on = True
self.host = config[CONF_HOST]
self.port = config[CONF_PORT]
self.path = config[CONF_PATH]
self.user = config[CONF_USERNAME]
self.passwd = config[CONF_PASSWORD]
@property
def brand(self):
"""Camera brand."""
return DEFAULT_BRAND
@property
def is_on(self):
"""Determine whether the camera is on."""
return self._is_on
@property
def name(self):
"""Return the name of this camera."""
return self._name
async def _get_latest_video_url(self):
"""Retrieve the latest video file from the customized Yi FTP server."""
from aioftp import Client, StatusCodeError
ftp = Client(loop=self.hass.loop)
try:
await ftp.connect(self.host)
await ftp.login(self.user, self.passwd)
except (ConnectionRefusedError, StatusCodeError) as err:
raise PlatformNotReady(err)
try:
await ftp.change_directory(self.path)
dirs = []
for path, attrs in await ftp.list():
if attrs['type'] == 'dir' and '.' not in str(path):
dirs.append(path)
latest_dir = dirs[-1]
await ftp.change_directory(latest_dir)
videos = []
for path, _ in await ftp.list():
videos.append(path)
if not videos:
_LOGGER.info('Video folder "%s" empty; delaying', latest_dir)
return None
await ftp.quit()
self._is_on = True
return 'ftp://{0}:{1}@{2}:{3}{4}/{5}/{6}'.format(
self.user, self.passwd, self.host, self.port, self.path,
latest_dir, videos[-1])
except (ConnectionRefusedError, StatusCodeError) as err:
_LOGGER.error('Error while fetching video: %s', err)
self._is_on = False
return None
async def async_camera_image(self):
"""Return a still image response from the camera."""
from haffmpeg.tools import ImageFrame, IMAGE_JPEG
url = await self._get_latest_video_url()
if url and url != self._last_url:
ffmpeg = ImageFrame(self._manager.binary, loop=self.hass.loop)
self._last_image = await asyncio.shield(
ffmpeg.get_image(
url,
output_format=IMAGE_JPEG,
extra_cmd=self._extra_arguments),
loop=self.hass.loop)
self._last_url = url
return self._last_image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
from haffmpeg.camera import CameraMjpeg
if not self._is_on:
return
stream = CameraMjpeg(self._manager.binary, loop=self.hass.loop)
await stream.open_camera(
self._last_url, extra_cmd=self._extra_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass, request, stream_reader,
self._manager.ffmpeg_stream_content_type)
finally:
await stream.close()
| apache-2.0 | -6,223,721,790,184,453,000 | 33.168831 | 79 | 0.615735 | false |
GoranLundberg/RPLCD | RPLCD/i2c.py | 1 | 11253 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013-2017 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from smbus import SMBus
from . import common as c
from .lcd import BaseCharLCD
# PCF8574 backlight control
PCF8574_BACKLIGHT = 0x08
PCF8574_NOBACKLIGHT = 0x00
# PCF8574 Pin bitmasks
PCF8574_E = 0x4
PIN_READ_WRITE = 0x2 # Not used?
PIN_REGISTER_SELECT = 0x1 # Not used?
# MCP230XX backlight control
MCP230XX_BACKLIGHT = 0x80
MCP230XX_NOBACKLIGHT = 0x7f
# MCP230XX pin bitmasks and datamask
MCP230XX_RS = 0x02
MCP230XX_E = 0x4
MCP230XX_DATAMASK = 0x78
MCP230XX_DATASHIFT = 3
# MCP23008 Register addresses
MCP23008_IODIR = 0x00
MCP23008_GPIO = 0x09
# MCP23017 Register addresses
MCP23017_IODIRA = 0x00
MCP23017_IODIRB = 0x01
MCP23017_GPIOA = 0x12
MCP23017_GPIOB = 0x13
class CharLCD(BaseCharLCD):
def __init__(self, i2c_expander, address, expander_params=None, port=1,
cols=20, rows=4, dotsize=8,
charmap='A02',
auto_linebreaks=True,
backlight_enabled=True):
"""
CharLCD via PCF8574 I2C port expander:
Pin mapping::
7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
D7 | D6 | D5 | D4 | BL | EN | RW | RS
CharLCD via MCP23008 and MCP23017 I2C port expanders:
Adafruit I2C/SPI LCD Backback is supported.
        Warning: You might need a level shifter (that supports I2C)
        between the SCL/SDA connections on the MCP chip / backpack
        and the Raspberry Pi. Without one you risk damaging the Pi and any
        other 3.3V I2C devices on the bus, or causing reliability issues.
        The SCL/SDA pins are rated 0.7*VDD on the MCP23008, so they need 3.5V
        when 5V is applied to drive the LCD.
        The MCP23008 and MCP23017 need to be connected exactly the same way
        as the backpack.
For complete schematics see the adafruit page at:
https://learn.adafruit.com/i2c-spi-lcd-backpack/
4-bit operation. I2C only supported.
Pin mapping::
7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
BL | D7 | D6 | D5 | D4 | E | RS | -
:param address: The I2C address of your LCD.
:type address: int
:param i2c_expander: Set your I²C chip type. Supported: "PCF8574", "MCP23008", "MCP23017".
:type i2c_expander: string
:param expander_params: Parameters for expanders, in a dictionary. Only needed for MCP23017
gpio_bank - This must be either ``A`` or ``B``
If you have a HAT, A is usually marked 1 and B is 2
Example: expander_params={'gpio_bank': 'A'}
:type expander_params: dictionary
:param port: The I2C port number. Default: ``1``.
:type port: int
:param cols: Number of columns per row (usually 16 or 20). Default: ``20``.
:type cols: int
:param rows: Number of display rows (usually 1, 2 or 4). Default: ``4``.
:type rows: int
:param dotsize: Some 1 line displays allow a font height of 10px.
Allowed: 8 or 10. Default: ``8``.
:type dotsize: int
:param charmap: The character map used. Depends on your LCD. This must
be either ``A00`` or ``A02``. Default: ``A02``.
:type charmap: str
:param auto_linebreaks: Whether or not to automatically insert line breaks.
Default: ``True``.
:type auto_linebreaks: bool
:param backlight_enabled: Whether the backlight is enabled initially. Default: ``True``.
:type backlight_enabled: bool
"""
# Set own address and port.
self._address = address
self._port = port
# Set i2c expander, 'PCF8574', 'MCP23008' and 'MCP23017' are supported.
if i2c_expander in ['PCF8574', 'MCP23008', 'MCP23017']:
self._i2c_expander = i2c_expander
else:
raise NotImplementedError('I2C expander "%s" is not supported.' % i2c_expander)
# Errorchecking for expander parameters
if expander_params is None:
if self._i2c_expander == 'MCP23017':
raise ValueError('MCP23017: expander_params[\'gpio_bank\'] is not defined, must be either \'A\' or \'B\'')
else:
self._expander_params = {}
else:
if self._i2c_expander == 'MCP23017':
if expander_params['gpio_bank'] in ['A', 'B']:
self._expander_params = {}
self._expander_params['gpio_bank'] = expander_params['gpio_bank']
else:
raise ValueError('MCP23017: expander_params[\'gpio_bank\'] is \'%s\' must be either \'A\' or \'B\'' % expander_params['gpio_bank'])
# Currently the I2C mode only supports 4 bit communication
self.data_bus_mode = c.LCD_4BITMODE
# Set backlight status
if self._i2c_expander == 'PCF8574':
self._backlight = PCF8574_BACKLIGHT if backlight_enabled else PCF8574_NOBACKLIGHT
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._backlight = MCP230XX_BACKLIGHT if backlight_enabled else MCP230XX_NOBACKLIGHT
# Call superclass
super(CharLCD, self).__init__(cols, rows, dotsize,
charmap=charmap,
auto_linebreaks=auto_linebreaks)
# Refresh backlight status
self.backlight_enabled = backlight_enabled
def _init_connection(self):
self.bus = SMBus(self._port)
if self._i2c_expander == 'PCF8574':
c.msleep(50)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
# Variable for storing data and applying bitmasks and shifting.
self._mcp_data = 0
# Set iodir register value according to expander
# If using MCP23017 set which gpio bank to use, A or B
if self._i2c_expander == 'MCP23008':
IODIR = MCP23008_IODIR
self._mcp_gpio = MCP23008_GPIO
elif self._i2c_expander == 'MCP23017':
# Set gpio bank A or B
if self._expander_params['gpio_bank'] == 'A':
IODIR = MCP23017_IODIRA
self._mcp_gpio = MCP23017_GPIOA
elif self._expander_params['gpio_bank'] == 'B':
IODIR = MCP23017_IODIRB
self._mcp_gpio = MCP23017_GPIOB
# Set IO DIRection to output on all GPIOs (GP0-GP7)
self.bus.write_byte_data(self._address, IODIR, 0x00)
def _close_connection(self):
# Nothing to do here?
pass
# Properties
def _get_backlight_enabled(self):
if self._i2c_expander == 'PCF8574':
return self._backlight == PCF8574_BACKLIGHT
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
return self._backlight == MCP230XX_BACKLIGHT
def _set_backlight_enabled(self, value):
if self._i2c_expander == 'PCF8574':
self._backlight = PCF8574_BACKLIGHT if value else PCF8574_NOBACKLIGHT
self.bus.write_byte(self._address, self._backlight)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
if value is True:
self._mcp_data |= MCP230XX_BACKLIGHT
else:
self._mcp_data &= MCP230XX_NOBACKLIGHT
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
backlight_enabled = property(_get_backlight_enabled, _set_backlight_enabled,
doc='Whether or not to enable the backlight. Either ``True`` or ``False``.')
# Low level commands
def _send_data(self, value):
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, (c.RS_DATA | (value & 0xF0)) | self._backlight)
self._pulse_data(c.RS_DATA | (value & 0xF0))
self.bus.write_byte(self._address, (c.RS_DATA |
((value << 4) & 0xF0)) | self._backlight)
self._pulse_data(c.RS_DATA | ((value << 4) & 0xF0))
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data |= MCP230XX_RS
self._pulse_data(value >> 4)
self._pulse_data(value & 0x0F)
def _send_instruction(self, value):
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, (c.RS_INSTRUCTION |
(value & 0xF0)) | self._backlight)
self._pulse_data(c.RS_INSTRUCTION | (value & 0xF0))
self.bus.write_byte(self._address, (c.RS_INSTRUCTION |
((value << 4) & 0xF0)) | self._backlight)
self._pulse_data(c.RS_INSTRUCTION | ((value << 4) & 0xF0))
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_RS
self._pulse_data(value >> 4)
self._pulse_data(value & 0x0F)
def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(100)
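# Hedged usage sketch (wiring values are illustrative: a PCF8574 backpack on
# I2C port 1 at address 0x27 is common but by no means universal; the helper
# name is made up):
def _example_hello(address=0x27):
    lcd = CharLCD(i2c_expander='PCF8574', address=address, port=1,
                  cols=20, rows=4)
    lcd.write_string('Hello world')  # write_string comes from BaseCharLCD
    return lcd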
| mit | 4,186,489,563,130,845,700 | 41.460377 | 151 | 0.594383 | false |
pienkowb/omelette | omelette/compiler/parser.py | 1 | 4614 | from pyparsing import *
from omelette.compiler.lexer import Lexer
from omelette.compiler.uml import *
from omelette.compiler import logging
def callback(handler):
def wrapper(self, s, l, t):
handler(self, t)
return wrapper
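# Hedged note: pyparsing invokes parse actions as fn(string, location, tokens);
# the wrapper above drops the string and location so the decorated handlers
# only receive the tokens, e.g.
#
#   @callback
#   def __handle_header(self, token): ...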
class Parser(object):
"""A class translating code to UMLObjects."""
def __init__(self, lexer=Lexer()):
"""Constructor that pins to a provided Lexer."""
self.__lexer = lexer
self.__register_handlers()
def parse(self, code_objects):
self.__uml_object = None
self.__last_type = None
self.__objects = {}
for code_object in code_objects:
if code_object.position < 0:
if not code_object.is_empty():
message = "object definition without header"
logging.getLogger("compiler").warning(message)
continue
self.__code_object = code_object
self.__lexer["definition"].parseString(str(code_object))
return self.__objects
def __register_handlers(self):
"""Sets parseActions for appropriate tokens in lexer."""
self.__lexer["definition"].setParseAction(self.__handle_definition)
self.__lexer["header"].setParseAction(self.__handle_header)
self.__lexer["operation"].setParseAction(self.__handle_operation)
self.__lexer["attribute"].setParseAction(self.__handle_attribute)
self.__lexer["property"].setParseAction(self.__handle_property)
self.__lexer["constraint"].setParseAction(self.__handle_constraint)
self.__lexer["multiplicity"].setParseAction(self.__handle_multiplicity)
self.__lexer["name"].setParseAction(self.__handle_name)
self.__lexer["error"].setParseAction(self.__handle_error)
@callback
def __handle_definition(self, token):
name = self.__uml_object.name
self.__objects[name] = self.__uml_object
self.__uml_object = None
@callback
def __handle_error(self, token):
line = token["error"].get("line")
message = "unrecognised syntax: " + line
logging.getLogger("compiler").warning(message, object=self.__uml_object)
@callback
def __handle_header(self, token):
name = token["header"].get("name")
parent = token["header"]["parent"]
prototype = "prototype" in token["header"]
if name == None:
name = "%s" % self.__code_object.position
if parent == "base":
parent = None
self.__uml_object = UMLObject(parent, name, prototype,
self.__code_object)
@callback
def __handle_attribute(self, token):
static = "static" in token["attribute"]
visibility = token["attribute"].get("visibility")
name = token["attribute"]["name"]
type = token["attribute"].get("type")
default = token["attribute"].get("default")
attribute = Attribute(visibility, name, static, type, default)
self.__uml_object.add_attribute(attribute)
@callback
def __handle_operation(self, token):
static = "static" in token["operation"]
visibility = token["operation"].get("visibility")
name = token["operation"]["name"]
parameters = []
if "parameters" in token["operation"]:
for parameter in token["operation"]["parameters"]:
parameter_name = parameter["name"]
type = parameter.get("type")
parameters.append((parameter_name, type))
return_type = token["operation"].get("return_type")
operation = Operation(visibility, name, static, parameters, return_type)
self.__uml_object.add_operation(operation)
@callback
def __handle_property(self, token):
name = token["property"]["name"]
value = "".join(token["property"]["value"])
type = self.__last_type if self.__last_type else "STRING"
self.__last_type = None
self.__uml_object.properties[name] = (value, type)
@callback
def __handle_constraint(self, token):
value = token.get("value")
constants = token.get("constants")
if constants != None: value = list(constants)
if token["type"] == "allow":
self.__uml_object.allowed[token["key"]] = value
elif token["type"] == "require":
self.__uml_object.required[token["key"]] = value
@callback
def __handle_multiplicity(self, token):
self.__last_type = "MULTIPLICITY"
@callback
def __handle_name(self, token):
self.__last_type = "OBJECT"
| gpl-3.0 | 2,444,114,487,353,214,500 | 32.194245 | 80 | 0.597529 | false |
lauregonnord/cap-labs | TP02/ariteval/check_ariteval.py | 1 | 6487 | import re
import os
import sys
import subprocess
PRINT_DEBUG = True
def debug(*args):
"""Like print(), but on stderr."""
if PRINT_DEBUG:
print(*args, file=sys.stderr)
def parse_specifications():
"""Parse the LaTeX file of the course to use as an example input
and output."""
tex = os.path.join(os.path.dirname(__file__), '..', 'tp2.tex')
spec_input = []
spec_output = []
# To parse lines of line this one: {\tt 1;} & 1 = 1 \\
pattern = re.compile(r'^{\\tt (?P<input>.*;)} & (?P<output>.*) \\\\')
with open(tex) as fd:
# Iterate trough lines until the BEGIN marker is found
for line in fd:
if line == '% BEGIN AUTOTEST ARIT\n':
# Everything before this marker is ignored.
break
else:
print('spec not found')
exit(1)
for line in fd:
match = pattern.match(line)
if line == '% END AUTOTEST ARIT\n':
# Everything after this marker is ignored
break
if match:
# This is a specification line, add it to the spec.
spec_input.append(match.group('input'))
spec_output.append(match.group('output'))
debug('Got {} specifications'.format(len(spec_input)))
return (spec_input, spec_output)
def run_code(input_, code_path):
"""Runs the code to be tested and returns its output. Pipes its
stderr to this process' stderr."""
# Compile the tested code.
subprocess.check_output(['make', '-C', code_path])
debug()
debug()
debug('stderr:')
# Run the tested code, send it the input, and get its output.
output = subprocess.check_output(
['make', 'run', '--silent', '-C', code_path],
input='\n'.join(input_),
universal_newlines=True,
)
debug()
return output
def normalize_line(line):
"""Removes whitespaces."""
return line.strip().replace(' ', '')
def count_mistakes(inputs, expected_outputs, outputs):
"""Compares intput and output, and counts the number of lines that
are not equal (modulo normalization)."""
nb_mistakes = 0
# Iterate through each line of the expected/actual outputs,
# and compare them with each other.
for (input_line, expected, got) in zip(inputs, expected_outputs, outputs):
if normalize_line(expected) == normalize_line(got):
debug('{} ok'.format(input_line))
else:
nb_mistakes += 1
debug('{}\n\texpected\t{!r}\n\tgot \t{!r}'.format(input_line, expected, got))
return nb_mistakes
def check_specifications(code_path):
"""Runs the code on the inputs in the specification (ie. tp2.pdf) and
compare them with the associated outputs."""
(spec_input, spec_output) = parse_specifications()
output = run_code(spec_input, code_path)
debug()
debug('Checking specifications:')
debug()
return count_mistakes(spec_input, spec_output, output.split('\n'))
def parse_test_file(test_file):
"""Reads a .txt file in the format explained when using
'python3 check_ariteval.py --help'."""
inputs = []
outputs = []
with open(test_file) as fd:
for line in fd:
line = line.strip()
if '#' not in line:
continue # Ignore lines without a #
(in_, out) = line.split('#')
inputs.append(in_.strip())
outputs.append(out.strip())
return (inputs, outputs)
def check_test_file(test_file, code_path):
"""Runs the code on the inputs in a test file (see the explainations
with 'python3 check_ariteval.py --help) and compare them with the
associated outputs."""
(test_input, test_output) = parse_test_file(test_file)
output = run_code(test_input, code_path)
debug()
debug('Checking test file {}:'.format(test_file))
debug()
return count_mistakes(test_input, test_output, output.split('\n'))
def all_checks(code_path, test_files):
"""Runs specification checks and test file checks, counts their errors,
    and returns a list that can be used as the final CSV line."""
csv_line = [code_path]
try:
nb_mistakes_in_spec = check_specifications(code_path)
debug()
debug('{} mistakes in specification.'.format(nb_mistakes_in_spec))
csv_line.append(nb_mistakes_in_spec)
except FileNotFoundError:
csv_line.append('NA')
for test_file in test_files:
try:
nb_mistakes = check_test_file(test_file, code_path)
except subprocess.CalledProcessError:
debug()
debug('{}: error'.format(test_file))
csv_line.append('Error')
else:
debug()
debug('{}: {} mistakes'.format(test_file, nb_mistakes))
csv_line.append(nb_mistakes)
debug()
return csv_line
HELP_OPTIONS = {'-h', '-help', '--h', '--help'}
QUIET_OPTIONS = {'-q', '--quiet'}
def main():
global PRINT_DEBUG
args = list(sys.argv)
if set(args) & HELP_OPTIONS:
        print('Syntax: {} [-q|--quiet] path/to/code [path/to/testfile1 [path/to/testfile2 [...]]]'
.format(args[0]))
print()
print('path/to/code should contain a Makefile with a "run" target '
'that runs an interpreter of AritEval.')
print()
print('path/to/testfile1 should be a file following this format:')
print()
print('\tinput expression1 # expected output1')
print('\tinput expression2 # expected output2')
print('\t...')
print()
print('For instance:')
print()
print('\t20 + 22; # 20 + 22 = 42')
print('\ta = 4; # a now equals 4')
print('\ta + 2; # a + 2 = 6')
print('\ta * 5; # a * 5 = 20')
print()
print('When comparing the expected output with the actual output, '
'whitespaces are stripped to avoid silly false negatives.')
return
if set(args) & QUIET_OPTIONS:
PRINT_DEBUG = False
args = [x for x in args if x not in QUIET_OPTIONS]
try:
(_, code_path, *test_files) = args
except ValueError:
        print('Syntax: {} path/to/code [path/to/testfile1 [path/to/testfile2 [...]]]'
.format(args[0]))
exit(1)
csv_line = all_checks(code_path, test_files)
print(','.join(map(str, csv_line)))
if __name__ == '__main__':
main()
| gpl-3.0 | -1,084,461,821,032,549,800 | 31.928934 | 97 | 0.579929 | false |
qilicun/python | python3/src/oo/vehicle0.py | 1 | 3006 | from pysketcher import *
R = 1 # radius of wheel
L = 4 # distance between wheels
H = 2 # height of vehicle body
w_1 = 5 # position of front wheel
xmax = w_1 + 2*L + 3*R
drawing_tool.set_coordinate_system(xmin=0, xmax=xmax,
ymin=-1, ymax=2*R + 3*H,
axis=False)
wheel1 = Circle(center=(w_1, R), radius=R)
wheel2 = wheel1.copy()
wheel2.translate((L,0))
under = Rectangle(lower_left_corner=(w_1-2*R, 2*R),
width=2*R + L + 2*R, height=H)
over = Rectangle(lower_left_corner=(w_1, 2*R + H),
width=2.5*R, height=1.25*H)
wheels = Composition({'wheel1': wheel1, 'wheel2': wheel2})
body = Composition({'under': under, 'over': over})
vehicle = Composition({'wheels': wheels, 'body': body})
ground = Wall(x=[R, xmax], y=[0, 0], thickness=-0.3*R)
fig = Composition({'vehicle': vehicle, 'ground': ground})
fig.draw() # send all figures to plotting backend
drawing_tool.display()
drawing_tool.savefig('tmp1.png')
fig['vehicle']['wheels'].set_filled_curves('blue')
fig['vehicle']['wheels'].set_linewidth(6)
fig['vehicle']['wheels'].set_linecolor('black')
fig['vehicle']['body']['under'].set_filled_curves('red')
fig['vehicle']['body']['over'].set_filled_curves(pattern='/')
fig['vehicle']['body']['over'].set_linewidth(14)
drawing_tool.erase() # avoid drawing old and new fig on top of each other
fig.draw()
drawing_tool.display()
drawing_tool.savefig('tmp2.png')
print fig
fig.recurse('fig')
fig.graphviz_dot('fig', False)
import time
time.sleep(1)
# Animate motion
fig['vehicle'].translate((L,0)) # move to start point for "driving"
def v(t):
return -8*R*t*(1 - t/(2*R))
import numpy
tp = numpy.linspace(0, 2*R, 25)
dt = tp[1] - tp[0] # time step
def move(t, fig):
x_displacement = dt*v(t)
fig['vehicle'].translate((x_displacement, 0))
files = animate(fig, tp, move, moviefiles=True,
pause_per_frame=0)
os.system('convert -delay 20 %s anim.gif' % files)
os.system('ffmpeg -i "tmp_frame_%04d.png" -b 800k -r 25 -vcodec mpeg4 -y -qmin 2 -qmax 31 anim.mpeg')
try:
from scitools.std import movie
except ImportError:
raise ImportError(
'scitools must be installed for running the "movie" function.\n'
'scitools is installed by sudo apt-get install python-scitools\n'
'on Ubuntu or by sudo python setup.py install if the code is\n'
'downloaded from http://code.google.com/p/scitools.')
# HTML page showing individual frames
movie(files, encoder='html', fps=4, output_file='anim.html')
# Standard GIF file
movie(files, encoder='convert', fps=4, output_file='anim2.gif')
# AVI format
movie('tmp_*.png', encoder='ffmpeg', fps=4,
output_file='anim.avi') # requires ffmpeg package
# MPEG format
movie('tmp_*.png', encoder='ffmpeg', fps=4,
output_file='anim3.mpeg', vodec='mpeg2video')
# or
movie(files, encoder='ppmtompeg', fps=24,
output_file='anim2.mpeg') # requires the netpbm package
raw_input()
| gpl-3.0 | -143,500,134,917,915,000 | 29.363636 | 101 | 0.644378 | false |
DemocracyLab/CivicTechExchange | civictechprojects/migrations/0009_auto_20180403_1604.py | 1 | 1786 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-04-03 16:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('civictechprojects', '0008_auto_20180317_2315'),
]
operations = [
migrations.CreateModel(
name='ProjectPosition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position_description', models.CharField(max_length=3000)),
('position_project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='positions', to='civictechprojects.Project')),
],
),
migrations.CreateModel(
name='TaggedPositionRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='civictechprojects.ProjectPosition')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='civictechprojects_taggedpositionrole_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='projectposition',
name='position_role',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='civictechprojects.TaggedPositionRole', to='taggit.Tag', verbose_name='Tags'),
),
]
| mit | 4,902,892,915,200,710,000 | 41.52381 | 181 | 0.620941 | false |
nanophotonics/nplab | nplab/instrument/monochromator/bentham_DTMc300.py | 1 | 2962 | from __future__ import print_function
import ctypes
from nplab.instrument import Instrument
from ctypes import CDLL, c_char_p,byref,c_char, POINTER, ARRAY, WinDLL
import os
import numpy as np
import time
FILEPATH = os.path.realpath(__file__)
DIRPATH = os.path.dirname(FILEPATH)
ATTRS_PATH = "{0}\\{1}".format(DIRPATH,"bentham_DTMc300_attributes.atr")
CONFIG_PATH = "{0}\\{1}".format(DIRPATH,"bentham_DTMc300_config.cfg")
DLL_PATH="{0}\\{1}".format(DIRPATH,"bentham_instruments_dlls\\Win32\\benhw32_fastcall.dll") #NOTE: hardcoded to use 64 bit DLL, for 32bit use the ones in Win32
# print DLL_PATH
def read_tokens():
'''
Text tokens are mapped to integers in the bentham_dlltokens.h file
read the file and make the dictionary of tokens
'''
token_map = {}
import re
definition_pattern = re.compile("#define.*")
token_filepath = os.path.normpath(DIRPATH+"/bentham_dlltokens.h")
with open(token_filepath,"r") as f:
for line in f.readlines():
line = line.strip("\n")
if bool(definition_pattern.match(line))==True:
line_list = line.split(" ")
token_map.update({line_list[1]:int(line_list[2])})
return token_map
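# Hedged illustration of the lines read_tokens() expects in bentham_dlltokens.h
# (single spaces between fields, as the simple split(" ") above requires; the
# numeric values here are made up):
#
#   #define MonochromatorCurrentWL 11
#   #define SomeOtherToken 12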
class Bentham_DTMc300(Instrument):
def __init__(self):
super(Bentham_DTMc300,self).__init__()
self.dll = WinDLL(DLL_PATH)
self.token_map = read_tokens()
error_report = c_char_p("")
response = self.dll.BI_build_system_model(c_char_p(CONFIG_PATH),error_report)
print("Error report",error_report)
print("BI_build_system_model:",response)
response = self.dll.BI_load_setup(c_char_p(ATTRS_PATH))
print("BI_load_setup:",response)
response = self.dll.BI_initialise(None)
print("BI_initialise:",response)
response = self.dll.BI_park(None)
print("BI_park:",response)
self.components = self.get_component_list()
def get_component_list(self):
mylist = (ctypes.c_char*100)()
response = self.dll.BI_get_component_list(ctypes.byref(mylist))
components = [k for k in ("".join([c for c in mylist if c != '\x00'])).split(",") if k != '']
print("BI_get_component_list:",response, components)
return components
def get(self,item_id,token,index):
value = ctypes.c_double(0.0)
print("id:{0}, token:{1}, index:{2}".format(item_id,token,index))
response = self.dll.BI_get(c_char_p(item_id),ctypes.c_int32(self.token_map[token]),ctypes.c_int32(index),ctypes.byref(value))
print("BI_get", response)
return value.value
def get_wavelength(self,token="mono"):
wavelength = self.get(item_id="mono",token="MonochromatorCurrentWL",index=0)
return wavelength
def set_wavelength(self,wavelength):
delay = ctypes.c_double(0.0)
response = self.dll.BI_select_wavelength(ctypes.c_double(wavelength), ctypes.byref(delay))
time.sleep(0.3) #sleep for 300ms - ensure everything has moved
return
if __name__ == "__main__":
m = Bentham_DTMc300()
initial = m.get_wavelength()
m.set_wavelength(0)
final = m.get_wavelength()
print("Initial, Final:", initial, final)
print("DONE") | gpl-3.0 | -9,105,023,763,205,697,000 | 31.56044 | 159 | 0.698177 | false |
mylene-campana/hpp-rbprm-corba | script/scenarios/demos/darpa_hyq_path.py | 1 | 3581 | # Importing helper class for setting up a reachability planning problem
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
# Importing Gepetto viewer helper class
from hpp.gepetto import Viewer
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
# URDF file describing the trunk of the robot HyQ
urdfName = 'hyq_trunk_large'
# URDF files describing the reachable workspace of each limb of HyQ
urdfNameRom = ['hyq_lhleg_rom','hyq_lfleg_rom','hyq_rfleg_rom','hyq_rhleg_rom']
urdfSuffix = ""
srdfSuffix = ""
# Creating an instance of the helper class, and loading the robot
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
rbprmBuilder.setJointBounds ("base_joint_xyz", [-2,5, -1, 1, 0.3, 4])
# The following lines set constraint on the valid configurations:
# a configuration is valid only if all limbs can create a contact ...
rbprmBuilder.setFilter(['hyq_rhleg_rom', 'hyq_lfleg_rom', 'hyq_rfleg_rom','hyq_lhleg_rom'])
rbprmBuilder.setAffordanceFilter('hyq_rhleg_rom', ['Support'])
rbprmBuilder.setAffordanceFilter('hyq_rfleg_rom', ['Support',])
rbprmBuilder.setAffordanceFilter('hyq_lhleg_rom', ['Support'])
rbprmBuilder.setAffordanceFilter('hyq_lfleg_rom', ['Support',])
# We also bound the rotations of the torso.
rbprmBuilder.boundSO3([-0.4,0.4,-3,3,-3,3])
# Creating an instance of HPP problem solver and the viewer
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
# Setting initial and goal configurations
q_init = rbprmBuilder.getCurrentConfig ();
q_init [0:3] = [-2, 0, 0.63]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_goal = q_init [::]
q_goal [0:3] = [3, 0, 0.63]; r (q_goal)
# Choosing a path optimizer
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool ()
afftool.loadObstacleModel (packageName, "darpa", "planning", r)
#~ afftool.visualiseAffordances('Support', r, [0.25, 0.5, 0.5])
# Choosing RBPRM shooter and path validation methods.
# Note that the standard RRT algorithm is used.
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
# Solve the problem
t = ps.solve ()
if isinstance(t, list):
t = t[0]* 3600000 + t[1] * 60000 + t[2] * 1000 + t[3]
# Playing the computed path
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
q_far = q_init [::]
q_far [0:3] = [-2, -3, 0.63];
r(q_far)
for i in range(1,10):
rbprmBuilder.client.basic.problem.optimizePath(i)
from hpp.corbaserver import Client
from hpp.corbaserver.robot import Robot as Parent
class Robot (Parent):
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
# URDF file describing the trunk of the robot HyQ
urdfName = 'hyq_trunk_large'
urdfSuffix = ""
srdfSuffix = ""
def __init__ (self, robotName, load = True):
Parent.__init__ (self, robotName, self.rootJointType, load)
self.tf_root = "base_footprint"
self.client.basic = Client ()
self.load = load
#DEMO code to play root path and final contact plan
cl = Client()
cl.problem.selectProblem("rbprm_path")
rbprmBuilder2 = Robot ("toto")
ps2 = ProblemSolver( rbprmBuilder2 )
cl.problem.selectProblem("default")
cl.problem.movePathToProblem(3,"rbprm_path",rbprmBuilder.getAllJointNames())
r2 = Viewer (ps2)
r2(q_far)
| lgpl-3.0 | -931,012,746,437,607,700 | 34.81 | 114 | 0.743926 | false |
adamcharnock/lightbus | lightbus/config/config.py | 1 | 5716 | import json as jsonlib
import os
from pathlib import Path
from typing import Dict, NamedTuple, Union, TYPE_CHECKING
import urllib.request
import jsonschema
import yaml as yamllib
from lightbus.exceptions import UnexpectedConfigurationFormat
from lightbus.schema.hints_to_schema import python_type_to_json_schemas, SCHEMA_URI
from lightbus.utilities.casting import cast_to_hint
from lightbus.utilities.deforming import deform_to_bus
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from .structure import RootConfig, BusConfig, ApiConfig
class Config:
"""Provides access to configuration options
There are two forms of configuration:
* Bus-level configuration, `config.bus()`
* API-level configuration, `config.api(api_name)`
Bus-level configuration is global to lightbus. API-level configuration
will normally have a default catch-all definition, but can be customised
on a per-api basis.
"""
_config: "RootConfig"
def __init__(self, root_config: "RootConfig"):
self._config = root_config
def bus(self) -> "BusConfig":
return self._config.bus
def api(self, api_name=None) -> "ApiConfig":
"""Returns config for the given API
If there is no API-specific config available for the
given api_name, then the root API config will be returned.
"""
return self._config.apis.get(api_name, None) or self._config.apis["default"]
def apis(self) -> Dict[str, "ApiConfig"]:
return self._config.apis
def plugin(self, plugin_name) -> NamedTuple:
return getattr(self._config.plugins, plugin_name)
@classmethod
def load_file(cls, file_path: Union[str, Path]):
"""Instantiate the config from the given file path
Files ending in `.json` will be parsed as JSON, otherwise the
file will be parsed as YAML.
"""
if str(file_path).startswith("http://") or str(file_path).startswith("https://"):
response = urllib.request.urlopen(file_path, timeout=5)
encoded_config = response.read()
if "json" in response.headers.get("Content-Type") or file_path.endswith(".json"):
return cls.load_json(encoded_config)
else:
return cls.load_yaml(encoded_config)
else:
file_path = Path(file_path)
encoded_config = file_path.read_text(encoding="utf8")
if file_path.name.endswith(".json"):
return cls.load_json(encoded_config)
else:
return cls.load_yaml(encoded_config)
@classmethod
def load_json(cls, json: str):
"""Instantiate the config from a JSON string"""
return cls.load_dict(config=jsonlib.loads(json))
@classmethod
def load_yaml(cls, yaml: str):
"""Instantiate the config from a YAML string"""
config = yamllib.safe_load(yaml)
if not isinstance(config, dict):
raise UnexpectedConfigurationFormat(
f"The config file was loaded but it appears to be in an unexpected format. "
f"The root of the configuration should be a key/value mapping, but the "
f"type '{type(config).__name__}' was found instead. Check your config "
f"file is correctly formatted."
)
return cls.load_dict(config=config)
@classmethod
def load_dict(cls, config: dict, set_defaults=True):
"""Instantiate the config from a dictionary"""
# pylint: disable=import-outside-toplevel
from .structure import RootConfig
config = config.copy()
if set_defaults:
config = set_default_config(config)
validate_config(config)
return cls(root_config=cast_to_hint(config, RootConfig))
@classmethod
def default(cls):
return cls.load_dict(config={}, set_defaults=True)
def __getattr__(self, item):
if hasattr(self._config, item):
return getattr(self._config, item)
else:
raise AttributeError(f"No root-level configuration option named '{item}'")
def validate_config(config: dict):
"""Validate the provided config dictionary against the config json schema"""
json_schema = config_as_json_schema()
jsonschema.validate(config, json_schema)
def config_as_json_schema() -> dict:
"""Get the configuration structure as a json schema"""
# pylint: disable=import-outside-toplevel
from .structure import RootConfig
schema, = python_type_to_json_schemas(RootConfig)
# Some of the default values will still be python types,
# so let's use deform_to_bus to turn them into something
# that'll be json safe
schema = deform_to_bus(schema)
schema["$schema"] = SCHEMA_URI
return schema
def set_default_config(config: dict) -> dict:
"""Set the default configuration options on a loaded config dictionary"""
env_service_name = os.environ.get("LIGHTBUS_SERVICE_NAME")
if env_service_name:
config.setdefault("service_name", env_service_name)
env_process_name = os.environ.get("LIGHTBUS_PROCESS_NAME")
if env_process_name:
config.setdefault("process_name", env_process_name)
config.setdefault("apis", {})
config.setdefault("bus", {})
config["apis"].setdefault("default", {})
config["bus"].setdefault("schema", {})
config["apis"]["default"].setdefault("rpc_transport", {"redis": {}})
config["apis"]["default"].setdefault("result_transport", {"redis": {}})
config["apis"]["default"].setdefault("event_transport", {"redis": {}})
config["bus"]["schema"].setdefault("transport", {"redis": {}})
return config
| apache-2.0 | -4,224,342,558,960,254,500 | 34.503106 | 93 | 0.650105 | false |
trdarr/apk2 | import/import.py | 1 | 1792 | import urllib
from io import BytesIO
from xml.etree import ElementTree
import psycopg2
import requests
from entity import Product, Store
def create_sql(table, entity):
entity_dict = entity.to_dict()
columns = ', '.join(entity_dict.keys())
placeholders = ', '.join(f'%({k})s' for k in entity_dict.keys())
insert_fragment = f'insert into {table} ({columns}) values ({placeholders})'
expressions = ', '.join(f'{k} = %({k})s' for k in entity_dict.keys())
update_fragment = f'on conflict (nr) do update set {expressions}'
return f'{insert_fragment} {update_fragment}'
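# Sketch of the SQL create_sql() builds, for a hypothetical entity whose
# to_dict() returns the keys ('nr', 'name'); the keys are illustrative only:
#     insert into products (nr, name) values (%(nr)s, %(name)s)
#     on conflict (nr) do update set nr = %(nr)s, name = %(name)s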
def from_file(filename, tag_name):
with open(f'data/{filename}.xml') as xml_file:
for event, element in ElementTree.iterparse(xml_file):
if event == 'end' and element.tag == tag_name:
yield {child.tag: child.text for child in element.iter()}
element.clear()
def from_systembolaget(filename, tag_name):
base_url = 'https://www.systembolaget.se/api/assortment'
url = '/'.join((base_url, filename, 'xml'))
response = requests.get(url)
if response.status_code != requests.codes.ok:
raise Exception(f'Got {response.status_code} from <{url}>.')
for event, element in ElementTree.iterparse(BytesIO(response.content)):
if event == 'end' and element.tag == tag_name:
yield element
element.clear()
products = from_file('products', 'artikel')
stores = from_file('stores', 'ButikOmbud')
with psycopg2.connect(user='postgres', host='postgres') as connection:
with connection.cursor() as cursor:
for product in (Product(**product) for product in products):
cursor.execute(create_sql('products', product), product.to_dict())
for store in (Store(**store) for store in stores):
cursor.execute(create_sql('stores', store), store.to_dict())
| apache-2.0 | 6,123,053,057,215,423,000 | 34.137255 | 78 | 0.68471 | false |
dineshsonachalam/ocr | setup.py | 1 | 1380 | from setuptools import setup, find_packages
import requests
import semantic_version
install_requires = [
'boto3>=1.17.78',
'botocore>=1.20.78',
'simplejson==3.17.2'
]
def get_LucidDynamodb_version():
url = "https://pypi.org/pypi/LucidDynamodb/json"
response = requests.request("GET", url, headers={}, data={})
result = response.json()
LucidDynamodb_version = str(result.get("info").get("version"))
current_version = semantic_version.Version(LucidDynamodb_version)
next_version = current_version.next_patch()
return next_version
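# Illustrative example (numbers made up): if PyPI currently lists LucidDynamodb
# 1.0.24, next_patch() yields 1.0.25, which becomes the version of this release.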
setup(
name="LucidDynamodb",
version=str(get_LucidDynamodb_version()),
author="Dinesh Sonachalam",
author_email="dineshsonachalam@gmail.com",
description="A simple Python wrapper to AWS Dynamodb",
url="https://github.com/dineshsonachalam/Lucid-Dynamodb",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
zip_safe=False,
license='MIT',
keywords='python dynamodb amazon',
python_requires=">=3.1",
install_requires=install_requires,
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
) | mit | 6,862,585,355,070,034,000 | 33.525 | 69 | 0.635507 | false |
matrix-org/sygnal | tests/test_concurrency_limit.py | 1 | 3238 | # -*- coding: utf-8 -*-
# Copyright 2019–2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sygnal.notifications import ConcurrencyLimitedPushkin
from sygnal.utils import twisted_sleep
from tests.testutils import TestCase
DEVICE_GCM1_EXAMPLE = {
"app_id": "com.example.gcm",
"pushkey": "spqrg",
"pushkey_ts": 42,
}
DEVICE_GCM2_EXAMPLE = {
"app_id": "com.example.gcm",
"pushkey": "spqrh",
"pushkey_ts": 42,
}
DEVICE_APNS_EXAMPLE = {
"app_id": "com.example.apns",
"pushkey": "spqra",
"pushkey_ts": 42,
}
class SlowConcurrencyLimitedDummyPushkin(ConcurrencyLimitedPushkin):
async def _dispatch_notification_unlimited(self, n, device, context):
"""
We will deliver the notification to the mighty nobody
and we will take one second to do it, because we are slow!
"""
await twisted_sleep(1.0, self.sygnal.reactor)
return []
class ConcurrencyLimitTestCase(TestCase):
def config_setup(self, config):
super(ConcurrencyLimitTestCase, self).config_setup(config)
config["apps"]["com.example.gcm"] = {
"type": "tests.test_concurrency_limit.SlowConcurrencyLimitedDummyPushkin",
"inflight_request_limit": 1,
}
config["apps"]["com.example.apns"] = {
"type": "tests.test_concurrency_limit.SlowConcurrencyLimitedDummyPushkin",
"inflight_request_limit": 1,
}
def test_passes_under_limit_one(self):
"""
Tests that a push notification succeeds if it is under the limit.
"""
resp = self._request(self._make_dummy_notification([DEVICE_GCM1_EXAMPLE]))
self.assertEqual(resp, {"rejected": []})
def test_passes_under_limit_multiple_no_interfere(self):
"""
Tests that 2 push notifications succeed if they are to different
pushkins (so do not hit a per-pushkin limit).
"""
resp = self._request(
self._make_dummy_notification([DEVICE_GCM1_EXAMPLE, DEVICE_APNS_EXAMPLE])
)
self.assertEqual(resp, {"rejected": []})
def test_fails_when_limit_hit(self):
"""
Tests that 1 of 2 push notifications fail if they are to the same pushkins
(so do hit the per-pushkin limit of 1).
"""
resp = self._multi_requests(
[
self._make_dummy_notification([DEVICE_GCM1_EXAMPLE]),
self._make_dummy_notification([DEVICE_GCM2_EXAMPLE]),
]
)
# request 0 will succeed
self.assertEqual(resp[0], {"rejected": []})
# request 1 will fail because request 0 has filled the limit
self.assertEqual(resp[1], 502)
| apache-2.0 | 4,043,498,856,687,162,400 | 33.063158 | 86 | 0.64246 | false |
gdm/skew | skew/resources/aws/s3.py | 1 | 1420 | # Copyright (c) 2014 Scopely, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
from skew.resources.aws import AWSResource
class Bucket(AWSResource):
class Meta(object):
service = 's3'
type = 'bucket'
enum_spec = ('list_buckets', 'Buckets[]', None)
detail_spec = ('list_objects', 'Bucket', 'Contents[]')
id = 'Name'
filter_name = None
name = 'BucketName'
date = 'CreationDate'
dimension = None
def __init__(self, client, data, query=None):
super(Bucket, self).__init__(client, data, query)
self._data = data
self._keys = []
def __iter__(self):
detail_op, param_name, detail_path = self.Meta.detail_spec
params = {param_name: self.id}
if not self._keys:
data = self._client.call(detail_op, **params)
self._keys = jmespath.search(detail_path, data)
for key in self._keys:
yield key
| apache-2.0 | 7,790,654,558,427,385,000 | 32.023256 | 72 | 0.628873 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/mail/imap4.py | 1 | 185338 | # -*- test-case-name: twisted.mail.test.test_imap -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An IMAP4 protocol implementation
API Stability: Semi-stable
@author: U{Jp Calderone<mailto:exarkun@twistedmatrix.com>}
To do::
Suspend idle timeout while server is processing
Use an async message parser instead of buffering in memory
Figure out a way to not queue multi-message client requests (Flow? A simple callback?)
Clarify some API docs (Query, etc)
Make APPEND recognize (again) non-existent mailboxes before accepting the literal
"""
from __future__ import nested_scopes
from __future__ import generators
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import defer
from twisted.internet import error
from twisted.internet.defer import maybeDeferred
from twisted.python import log, util, failure, text
from twisted.internet import interfaces
from twisted import cred
import twisted.cred.error
import twisted.cred.credentials
import rfc822
import base64
import binascii
import time
import hmac
import re
import tempfile
import string
import time
import random
import types
import sys
from zope.interface import implements, Interface
import email.Utils
try:
import cStringIO as StringIO
except:
import StringIO
class MessageSet(object):
"""
Essentially an infinite bitfield, with some extra features.
@type getnext: Function taking C{int} returning C{int}
@ivar getnext: A function that returns the next message number,
used when iterating through the MessageSet. By default, a function
returning the next integer is supplied, but as this can be rather
inefficient for sparse UID iterations, it is recommended to supply
one when messages are requested by UID. The argument is provided
as a hint to the implementation and may be ignored if it makes sense
to do so (eg, if an iterator is being used that maintains its own
state, it is guaranteed that it will not be called out-of-order).
"""
_empty = []
def __init__(self, start=_empty, end=_empty):
"""
Create a new MessageSet()
@type start: Optional C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
self._last = self._empty # Last message/UID in use
self.ranges = [] # List of ranges included
self.getnext = lambda x: x+1 # A function which will return the next
# message id. Handy for UID requests.
if start is self._empty:
return
if isinstance(start, types.ListType):
self.ranges = start[:]
self.clean()
else:
self.add(start,end)
# Ooo. A property.
def last():
def _setLast(self,value):
if self._last is not self._empty:
raise ValueError("last already set")
self._last = value
for i,(l,h) in enumerate(self.ranges):
if l is not None:
break # There are no more Nones after this
l = value
if h is None:
h = value
if l > h:
l, h = h, l
self.ranges[i] = (l,h)
self.clean()
def _getLast(self):
return self._last
doc = '''
"Highest" message number, refered to by "*".
Must be set before attempting to use the MessageSet.
'''
return _getLast, _setLast, None, doc
last = property(*last())
def add(self, start, end=_empty):
"""
Add another range
@type start: C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
if end is self._empty:
end = start
if self._last is not self._empty:
if start is None:
start = self.last
if end is None:
end = self.last
if start > end:
# Try to keep in low, high order if possible
# (But we don't know what None means, this will keep
# None at the start of the ranges list)
start, end = end, start
self.ranges.append((start,end))
self.clean()
def __add__(self, other):
if isinstance(other, MessageSet):
ranges = self.ranges + other.ranges
return MessageSet(ranges)
else:
res = MessageSet(self.ranges)
try:
res.add(*other)
except TypeError:
res.add(other)
return res
def extend(self, other):
if isinstance(other, MessageSet):
self.ranges.extend(other.ranges)
self.clean()
else:
try:
self.add(*other)
except TypeError:
self.add(other)
return self
def clean(self):
"""
Clean ranges list, combining adjacent ranges
"""
self.ranges.sort()
oldl, oldh = None, None
for i,(l,h) in enumerate(self.ranges):
if l is None:
continue
# l is >= oldl and h is >= oldh due to sort()
if oldl is not None and l <= oldh+1:
l = oldl
h = max(oldh,h)
self.ranges[i-1] = None
self.ranges[i] = (l,h)
oldl,oldh = l,h
self.ranges = filter(None, self.ranges)
def __contains__(self, value):
"""
May raise TypeError if we encounter unknown "high" values
"""
for l,h in self.ranges:
if l is None:
raise TypeError(
"Can't determine membership; last value not set")
if l <= value <= h:
return True
return False
def _iterator(self):
for l,h in self.ranges:
l = self.getnext(l-1)
while l <= h:
yield l
l = self.getnext(l)
if l is None:
break
def __iter__(self):
if self.ranges and self.ranges[0][0] is None:
raise TypeError("Can't iterate; last value not set")
return self._iterator()
def __len__(self):
res = 0
for l, h in self.ranges:
if l is None:
raise TypeError("Can't size object; last value not set")
res += (h - l) + 1
return res
def __str__(self):
p = []
for low, high in self.ranges:
if low == high:
if low is None:
p.append('*')
else:
p.append(str(low))
elif low is None:
p.append('%d:*' % (high,))
else:
p.append('%d:%d' % (low, high))
return ','.join(p)
def __repr__(self):
return '<MessageSet %s>' % (str(self),)
def __eq__(self, other):
if isinstance(other, MessageSet):
return self.ranges == other.ranges
return False
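# A small MessageSet usage sketch (values chosen purely for illustration):
#     >>> s = MessageSet(1, 5)     # messages 1 through 5
#     >>> s.add(7)
#     >>> str(s)
#     '1:5,7'
#     >>> 3 in s, 6 in s
#     (True, False)
#     >>> s.add(6, None)           # None stands for "*" ...
#     >>> s.last = 10              # ... and is resolved once `last` is assigned
#     >>> str(s)
#     '1:10'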
class LiteralString:
def __init__(self, size, defered):
self.size = size
self.data = []
self.defer = defered
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.append(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.append(data)
return passon
def callback(self, line):
"""
        Call the deferred with the data and the rest of the line
"""
self.defer.callback((''.join(self.data), line))
class LiteralFile:
_memoryFileLimit = 1024 * 1024 * 10
def __init__(self, size, defered):
self.size = size
self.defer = defered
if size > self._memoryFileLimit:
self.data = tempfile.TemporaryFile()
else:
self.data = StringIO.StringIO()
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.write(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.write(data)
return passon
def callback(self, line):
"""
        Call the deferred with the data and the rest of the line
"""
self.data.seek(0,0)
self.defer.callback((self.data, line))
class WriteBuffer:
"""Buffer up a bunch of writes before sending them all to a transport at once.
"""
def __init__(self, transport, size=8192):
self.bufferSize = size
self.transport = transport
self._length = 0
self._writes = []
def write(self, s):
self._length += len(s)
self._writes.append(s)
if self._length > self.bufferSize:
self.flush()
def flush(self):
if self._writes:
self.transport.writeSequence(self._writes)
self._writes = []
self._length = 0
class Command:
_1_RESPONSES = ('CAPABILITY', 'FLAGS', 'LIST', 'LSUB', 'STATUS', 'SEARCH', 'NAMESPACE')
_2_RESPONSES = ('EXISTS', 'EXPUNGE', 'FETCH', 'RECENT')
_OK_RESPONSES = ('UIDVALIDITY', 'READ-WRITE', 'READ-ONLY', 'UIDNEXT', 'PERMANENTFLAGS')
defer = None
def __init__(self, command, args=None, wantResponse=(),
continuation=None, *contArgs, **contKw):
self.command = command
self.args = args
self.wantResponse = wantResponse
self.continuation = lambda x: continuation(x, *contArgs, **contKw)
self.lines = []
def format(self, tag):
if self.args is None:
return ' '.join((tag, self.command))
return ' '.join((tag, self.command, self.args))
def finish(self, lastLine, unusedCallback):
send = []
unuse = []
for L in self.lines:
names = parseNestedParens(L)
N = len(names)
if (N >= 1 and names[0] in self._1_RESPONSES or
N >= 2 and names[0] == 'OK' and isinstance(names[1], types.ListType) and names[1][0] in self._OK_RESPONSES):
send.append(L)
elif N >= 3 and names[1] in self._2_RESPONSES:
if isinstance(names[2], list) and len(names[2]) >= 1 and names[2][0] == 'FLAGS' and 'FLAGS' not in self.args:
unuse.append(L)
else:
send.append(L)
elif N >= 2 and names[1] in self._2_RESPONSES:
send.append(L)
else:
unuse.append(L)
d, self.defer = self.defer, None
d.callback((send, lastLine))
if unuse:
unusedCallback(unuse)
class LOGINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
self.challenges = ['Password\0', 'User Name\0']
self.responses = ['password', 'username']
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return self.challenges.pop()
def setResponse(self, response):
setattr(self, self.responses.pop(), response)
def moreChallenges(self):
return bool(self.challenges)
class PLAINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return ''
def setResponse(self, response):
parts = response[:-1].split('\0', 1)
if len(parts) != 2:
raise IllegalClientResponse("Malformed Response - wrong number of parts")
self.username, self.password = parts
def moreChallenges(self):
return False
class IMAP4Exception(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
class IllegalClientResponse(IMAP4Exception): pass
class IllegalOperation(IMAP4Exception): pass
class IllegalMailboxEncoding(IMAP4Exception): pass
class IMailboxListener(Interface):
"""Interface for objects interested in mailbox events"""
def modeChanged(writeable):
"""Indicates that the write status of a mailbox has changed.
@type writeable: C{bool}
@param writeable: A true value if write is now allowed, false
otherwise.
"""
def flagsChanged(newFlags):
"""Indicates that the flags of one or more messages have changed.
@type newFlags: C{dict}
@param newFlags: A mapping of message identifiers to tuples of flags
now set on that message.
"""
def newMessages(exists, recent):
"""Indicates that the number of messages in a mailbox has changed.
@type exists: C{int} or C{None}
@param exists: The total number of messages now in this mailbox.
If the total number of messages has not changed, this should be
C{None}.
@type recent: C{int}
@param recent: The number of messages now flagged \\Recent.
If the number of recent messages has not changed, this should be
C{None}.
"""
class IMAP4Server(basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol implementation for an IMAP4rev1 server.
The server can be in any of four states:
- Non-authenticated
- Authenticated
- Selected
- Logout
"""
implements(IMailboxListener)
# Identifier for this server software
IDENT = 'Twisted IMAP4rev1 Ready'
# Number of seconds before idle timeout
# Initially 1 minute. Raised to 30 minutes after login.
timeOut = 60
POSTAUTH_TIMEOUT = 60 * 30
# Whether STARTTLS has been issued successfully yet or not.
startedTLS = False
# Whether our transport supports TLS
canStartTLS = False
# Mapping of tags to commands we have received
tags = None
# The object which will handle logins for us
portal = None
# The account object for this connection
account = None
# Logout callback
_onLogout = None
# The currently selected mailbox
mbox = None
# Command data to be processed when literal data is received
_pendingLiteral = None
# Maximum length to accept for a "short" string literal
_literalStringLimit = 4096
# IChallengeResponse factories for AUTHENTICATE command
challengers = None
state = 'unauth'
parseState = 'command'
def __init__(self, chal = None, contextFactory = None, scheduler = None):
if chal is None:
chal = {}
self.challengers = chal
self.ctx = contextFactory
if scheduler is None:
scheduler = iterateInReactor
self._scheduler = scheduler
self._queuedAsync = []
def capabilities(self):
cap = {'AUTH': self.challengers.keys()}
if self.ctx and self.canStartTLS:
if not self.startedTLS and interfaces.ISSLTransport(self.transport, None) is None:
cap['LOGINDISABLED'] = None
cap['STARTTLS'] = None
cap['NAMESPACE'] = None
cap['IDLE'] = None
return cap
def connectionMade(self):
self.tags = {}
self.canStartTLS = interfaces.ITLSTransport(self.transport, None) is not None
self.setTimeout(self.timeOut)
self.sendServerGreeting()
def connectionLost(self, reason):
self.setTimeout(None)
if self._onLogout:
self._onLogout()
self._onLogout = None
def timeoutConnection(self):
self.sendLine('* BYE Autologout; connection idle too long')
self.transport.loseConnection()
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'timeout'
def rawDataReceived(self, data):
self.resetTimeout()
passon = self._pendingLiteral.write(data)
if passon is not None:
self.setLineMode(passon)
# Avoid processing commands while buffers are being dumped to
# our transport
blocked = None
def _unblock(self):
commands = self.blocked
self.blocked = None
while commands and self.blocked is None:
self.lineReceived(commands.pop(0))
if self.blocked is not None:
self.blocked.extend(commands)
# def sendLine(self, line):
# print 'C:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def lineReceived(self, line):
# print 'S:', repr(line)
if self.blocked is not None:
self.blocked.append(line)
return
self.resetTimeout()
f = getattr(self, 'parse_' + self.parseState)
try:
f(line)
except Exception, e:
self.sendUntaggedResponse('BAD Server error: ' + str(e))
log.err()
def parse_command(self, line):
args = line.split(None, 2)
rest = None
if len(args) == 3:
tag, cmd, rest = args
elif len(args) == 2:
tag, cmd = args
elif len(args) == 1:
tag = args[0]
self.sendBadResponse(tag, 'Missing command')
return None
else:
self.sendBadResponse(None, 'Null command')
return None
cmd = cmd.upper()
try:
return self.dispatchCommand(tag, cmd, rest)
except IllegalClientResponse, e:
self.sendBadResponse(tag, 'Illegal syntax: ' + str(e))
except IllegalOperation, e:
self.sendNegativeResponse(tag, 'Illegal operation: ' + str(e))
except IllegalMailboxEncoding, e:
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' + str(e))
def parse_pending(self, line):
d = self._pendingLiteral
self._pendingLiteral = None
self.parseState = 'command'
d.callback(line)
def dispatchCommand(self, tag, cmd, rest, uid=None):
f = self.lookupCommand(cmd)
if f:
fn = f[0]
parseargs = f[1:]
self.__doCommand(tag, fn, [self, tag], parseargs, rest, uid)
else:
self.sendBadResponse(tag, 'Unsupported command')
def lookupCommand(self, cmd):
return getattr(self, '_'.join((self.state, cmd.upper())), None)
def __doCommand(self, tag, handler, args, parseargs, line, uid):
for (i, arg) in enumerate(parseargs):
if callable(arg):
parseargs = parseargs[i+1:]
maybeDeferred(arg, self, line).addCallback(
self.__cbDispatch, tag, handler, args,
parseargs, uid).addErrback(self.__ebDispatch, tag)
return
else:
args.append(arg)
if line:
# Too many arguments
raise IllegalClientResponse("Too many arguments for command: " + repr(line))
if uid is not None:
handler(uid=uid, *args)
else:
handler(*args)
def __cbDispatch(self, (arg, rest), tag, fn, args, parseargs, uid):
args.append(arg)
self.__doCommand(tag, fn, args, parseargs, rest, uid)
def __ebDispatch(self, failure, tag):
if failure.check(IllegalClientResponse):
self.sendBadResponse(tag, 'Illegal syntax: ' + str(failure.value))
elif failure.check(IllegalOperation):
self.sendNegativeResponse(tag, 'Illegal operation: ' +
str(failure.value))
elif failure.check(IllegalMailboxEncoding):
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' +
str(failure.value))
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def _stringLiteral(self, size):
if size > self._literalStringLimit:
raise IllegalClientResponse(
"Literal too long! I accept at most %d octets" %
(self._literalStringLimit,))
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralString(size, d)
self.sendContinuationRequest('Ready for %d octets of text' % size)
self.setRawMode()
return d
def _fileLiteral(self, size):
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralFile(size, d)
self.sendContinuationRequest('Ready for %d octets of data' % size)
self.setRawMode()
return d
def arg_astring(self, line):
"""
Parse an astring from the line, return (arg, rest), possibly
via a deferred (to handle literals)
"""
line = line.strip()
if not line:
raise IllegalClientResponse("Missing argument")
d = None
arg, rest = None, None
if line[0] == '"':
try:
spam, arg, rest = line.split('"',2)
rest = rest[1:] # Strip space
except ValueError:
raise IllegalClientResponse("Unmatched quotes")
elif line[0] == '{':
# literal
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
d = self._stringLiteral(size)
else:
arg = line.split(' ',1)
if len(arg) == 1:
arg.append('')
arg, rest = arg
return d or (arg, rest)
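    # Shapes returned by arg_astring() for a few inputs (illustrative values):
    #     'NOOP rest of line'   -> ('NOOP', 'rest of line')
    #     '"quoted arg" rest'   -> ('quoted arg', 'rest')
    #     '{5}'                 -> a Deferred firing with (data, rest) once the
    #                              five octets of the literal have been received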
# ATOM: Any CHAR except ( ) { % * " \ ] CTL SP (CHAR is 7bit)
atomre = re.compile(r'(?P<atom>[^\](){%*"\\\x00-\x20\x80-\xff]+)( (?P<rest>.*$)|$)')
def arg_atom(self, line):
"""
Parse an atom from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
m = self.atomre.match(line)
if m:
return m.group('atom'), m.group('rest')
else:
raise IllegalClientResponse("Malformed ATOM")
def arg_plist(self, line):
"""
Parse a (non-nested) parenthesised list from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != "(":
raise IllegalClientResponse("Missing parenthesis")
i = line.find(")")
if i == -1:
raise IllegalClientResponse("Mismatched parenthesis")
return (parseNestedParens(line[1:i],0), line[i+2:])
def arg_literal(self, line):
"""
Parse a literal from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != '{':
raise IllegalClientResponse("Missing literal")
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
return self._fileLiteral(size)
def arg_searchkeys(self, line):
"""
searchkeys
"""
query = parseNestedParens(line)
# XXX Should really use list of search terms and parse into
# a proper tree
return (query, '')
def arg_seqset(self, line):
"""
sequence-set
"""
rest = ''
arg = line.split(' ',1)
if len(arg) == 2:
rest = arg[1]
arg = arg[0]
try:
return (parseIdList(arg), rest)
except IllegalIdentifierError, e:
raise IllegalClientResponse("Bad message number " + str(e))
def arg_fetchatt(self, line):
"""
fetch-att
"""
p = _FetchParser()
p.parseString(line)
return (p.result, '')
def arg_flaglist(self, line):
"""
Flag part of store-att-flag
"""
flags = []
if line[0] == '(':
if line[-1] != ')':
raise IllegalClientResponse("Mismatched parenthesis")
line = line[1:-1]
while line:
m = self.atomre.search(line)
if not m:
raise IllegalClientResponse("Malformed flag")
if line[0] == '\\' and m.start() == 1:
flags.append('\\' + m.group('atom'))
elif m.start() == 0:
flags.append(m.group('atom'))
else:
raise IllegalClientResponse("Malformed flag")
line = m.group('rest')
return (flags, '')
def arg_line(self, line):
"""
Command line of UID command
"""
return (line, '')
def opt_plist(self, line):
"""
Optional parenthesised list
"""
if line.startswith('('):
return self.arg_plist(line)
else:
return (None, line)
def opt_datetime(self, line):
"""
Optional date-time string
"""
if line.startswith('"'):
try:
spam, date, rest = line.split('"',2)
except IndexError:
raise IllegalClientResponse("Malformed date-time")
return (date, rest[1:])
else:
return (None, line)
def opt_charset(self, line):
"""
Optional charset of SEARCH command
"""
if line[:7].upper() == 'CHARSET':
arg = line.split(' ',2)
if len(arg) == 1:
raise IllegalClientResponse("Missing charset identifier")
if len(arg) == 2:
arg.append('')
spam, arg, rest = arg
return (arg, rest)
else:
return (None, line)
def sendServerGreeting(self):
msg = '[CAPABILITY %s] %s' % (' '.join(self.listCapabilities()), self.IDENT)
self.sendPositiveResponse(message=msg)
def sendBadResponse(self, tag = None, message = ''):
self._respond('BAD', tag, message)
def sendPositiveResponse(self, tag = None, message = ''):
self._respond('OK', tag, message)
def sendNegativeResponse(self, tag = None, message = ''):
self._respond('NO', tag, message)
def sendUntaggedResponse(self, message, async=False):
if not async or (self.blocked is None):
self._respond(message, None, None)
else:
self._queuedAsync.append(message)
def sendContinuationRequest(self, msg = 'Ready for additional command text'):
if msg:
self.sendLine('+ ' + msg)
else:
self.sendLine('+')
def _respond(self, state, tag, message):
if state in ('OK', 'NO', 'BAD') and self._queuedAsync:
lines = self._queuedAsync
self._queuedAsync = []
for msg in lines:
self._respond(msg, None, None)
if not tag:
tag = '*'
if message:
self.sendLine(' '.join((tag, state, message)))
else:
self.sendLine(' '.join((tag, state)))
def listCapabilities(self):
caps = ['IMAP4rev1']
for c, v in self.capabilities().iteritems():
if v is None:
caps.append(c)
elif len(v):
caps.extend([('%s=%s' % (c, cap)) for cap in v])
return caps
def do_CAPABILITY(self, tag):
self.sendUntaggedResponse('CAPABILITY ' + ' '.join(self.listCapabilities()))
self.sendPositiveResponse(tag, 'CAPABILITY completed')
unauth_CAPABILITY = (do_CAPABILITY,)
auth_CAPABILITY = unauth_CAPABILITY
select_CAPABILITY = unauth_CAPABILITY
logout_CAPABILITY = unauth_CAPABILITY
def do_LOGOUT(self, tag):
self.sendUntaggedResponse('BYE Nice talking to you')
self.sendPositiveResponse(tag, 'LOGOUT successful')
self.transport.loseConnection()
unauth_LOGOUT = (do_LOGOUT,)
auth_LOGOUT = unauth_LOGOUT
select_LOGOUT = unauth_LOGOUT
logout_LOGOUT = unauth_LOGOUT
def do_NOOP(self, tag):
self.sendPositiveResponse(tag, 'NOOP No operation performed')
unauth_NOOP = (do_NOOP,)
auth_NOOP = unauth_NOOP
select_NOOP = unauth_NOOP
logout_NOOP = unauth_NOOP
def do_AUTHENTICATE(self, tag, args):
args = args.upper().strip()
if args not in self.challengers:
self.sendNegativeResponse(tag, 'AUTHENTICATE method unsupported')
else:
self.authenticate(self.challengers[args](), tag)
unauth_AUTHENTICATE = (do_AUTHENTICATE, arg_atom)
def authenticate(self, chal, tag):
if self.portal is None:
self.sendNegativeResponse(tag, 'Temporary authentication failure')
return
self._setupChallenge(chal, tag)
def _setupChallenge(self, chal, tag):
try:
challenge = chal.getChallenge()
except Exception, e:
self.sendBadResponse(tag, 'Server error: ' + str(e))
else:
coded = base64.encodestring(challenge)[:-1]
self.parseState = 'pending'
self._pendingLiteral = defer.Deferred()
self.sendContinuationRequest(coded)
self._pendingLiteral.addCallback(self.__cbAuthChunk, chal, tag)
self._pendingLiteral.addErrback(self.__ebAuthChunk, tag)
def __cbAuthChunk(self, result, chal, tag):
try:
uncoded = base64.decodestring(result)
except binascii.Error:
raise IllegalClientResponse("Malformed Response - not base64")
chal.setResponse(uncoded)
if chal.moreChallenges():
self._setupChallenge(chal, tag)
else:
self.portal.login(chal, None, IAccount).addCallbacks(
self.__cbAuthResp,
self.__ebAuthResp,
(tag,), None, (tag,), None
)
def __cbAuthResp(self, (iface, avatar, logout), tag):
assert iface is IAccount, "IAccount is the only supported interface"
self.account = avatar
self.state = 'auth'
self._onLogout = logout
self.sendPositiveResponse(tag, 'Authentication successful')
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebAuthResp(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'Authentication failed: unauthorized')
elif failure.check(cred.error.UnhandledCredentials):
self.sendNegativeResponse(tag, 'Authentication failed: server misconfigured')
else:
self.sendBadResponse(tag, 'Server error: login failed unexpectedly')
log.err(failure)
def __ebAuthChunk(self, failure, tag):
self.sendNegativeResponse(tag, 'Authentication failed: ' + str(failure.value))
def do_STARTTLS(self, tag):
if self.startedTLS:
self.sendNegativeResponse(tag, 'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendPositiveResponse(tag, 'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
self.challengers = self.challengers.copy()
if 'LOGIN' not in self.challengers:
self.challengers['LOGIN'] = LOGINCredentials
if 'PLAIN' not in self.challengers:
self.challengers['PLAIN'] = PLAINCredentials
else:
self.sendNegativeResponse(tag, 'TLS not available')
unauth_STARTTLS = (do_STARTTLS,)
def do_LOGIN(self, tag, user, passwd):
if 'LOGINDISABLED' in self.capabilities():
self.sendBadResponse(tag, 'LOGIN is disabled before STARTTLS')
return
maybeDeferred(self.authenticateLogin, user, passwd
).addCallback(self.__cbLogin, tag
).addErrback(self.__ebLogin, tag
)
unauth_LOGIN = (do_LOGIN, arg_astring, arg_astring)
def authenticateLogin(self, user, passwd):
"""Lookup the account associated with the given parameters
Override this method to define the desired authentication behavior.
The default behavior is to defer authentication to C{self.portal}
if it is not None, or to deny the login otherwise.
@type user: C{str}
@param user: The username to lookup
@type passwd: C{str}
@param passwd: The password to login with
"""
if self.portal:
return self.portal.login(
cred.credentials.UsernamePassword(user, passwd),
None, IAccount
)
raise cred.error.UnauthorizedLogin()
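    # Sketch of overriding authenticateLogin() in a subclass (illustration only;
    # `myAccount` stands in for any IAccount implementation):
    #     class OneUserIMAP4Server(IMAP4Server):
    #         def authenticateLogin(self, user, passwd):
    #             if user == 'alice' and passwd == 'secret':
    #                 return (IAccount, myAccount, lambda: None)
    #             raise cred.error.UnauthorizedLogin()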
def __cbLogin(self, (iface, avatar, logout), tag):
if iface is not IAccount:
self.sendBadResponse(tag, 'Server error: login returned unexpected value')
log.err("__cbLogin called with %r, IAccount expected" % (iface,))
else:
self.account = avatar
self._onLogout = logout
self.sendPositiveResponse(tag, 'LOGIN succeeded')
self.state = 'auth'
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebLogin(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'LOGIN failed')
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def do_NAMESPACE(self, tag):
personal = public = shared = None
np = INamespacePresenter(self.account, None)
if np is not None:
personal = np.getPersonalNamespaces()
public = np.getSharedNamespaces()
shared = np.getSharedNamespaces()
self.sendUntaggedResponse('NAMESPACE ' + collapseNestedLists([personal, public, shared]))
self.sendPositiveResponse(tag, "NAMESPACE command completed")
auth_NAMESPACE = (do_NAMESPACE,)
select_NAMESPACE = auth_NAMESPACE
def _parseMbox(self, name):
if isinstance(name, unicode):
return name
try:
return name.decode('imap4-utf-7')
except:
log.err()
raise IllegalMailboxEncoding(name)
def _selectWork(self, tag, name, rw, cmdName):
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'auth'
name = self._parseMbox(name)
maybeDeferred(self.account.select, self._parseMbox(name), rw
).addCallback(self._cbSelectWork, cmdName, tag
).addErrback(self._ebSelectWork, cmdName, tag
)
def _ebSelectWork(self, failure, cmdName, tag):
self.sendBadResponse(tag, "%s failed: Server error" % (cmdName,))
log.err(failure)
def _cbSelectWork(self, mbox, cmdName, tag):
if mbox is None:
self.sendNegativeResponse(tag, 'No such mailbox')
return
if '\\noselect' in [s.lower() for s in mbox.getFlags()]:
self.sendNegativeResponse(tag, 'Mailbox cannot be selected')
return
flags = mbox.getFlags()
self.sendUntaggedResponse(str(mbox.getMessageCount()) + ' EXISTS')
self.sendUntaggedResponse(str(mbox.getRecentCount()) + ' RECENT')
self.sendUntaggedResponse('FLAGS (%s)' % ' '.join(flags))
self.sendPositiveResponse(None, '[UIDVALIDITY %d]' % mbox.getUIDValidity())
s = mbox.isWriteable() and 'READ-WRITE' or 'READ-ONLY'
mbox.addListener(self)
self.sendPositiveResponse(tag, '[%s] %s successful' % (s, cmdName))
self.state = 'select'
self.mbox = mbox
auth_SELECT = ( _selectWork, arg_astring, 1, 'SELECT' )
select_SELECT = auth_SELECT
auth_EXAMINE = ( _selectWork, arg_astring, 0, 'EXAMINE' )
select_EXAMINE = auth_EXAMINE
def do_IDLE(self, tag):
self.sendContinuationRequest(None)
self.parseTag = tag
self.lastState = self.parseState
self.parseState = 'idle'
def parse_idle(self, *args):
self.parseState = self.lastState
del self.lastState
self.sendPositiveResponse(self.parseTag, "IDLE terminated")
del self.parseTag
select_IDLE = ( do_IDLE, )
auth_IDLE = select_IDLE
def do_CREATE(self, tag, name):
name = self._parseMbox(name)
try:
result = self.account.create(name)
except MailboxException, c:
self.sendNegativeResponse(tag, str(c))
except:
self.sendBadResponse(tag, "Server error encountered while creating mailbox")
log.err()
else:
if result:
self.sendPositiveResponse(tag, 'Mailbox created')
else:
self.sendNegativeResponse(tag, 'Mailbox not created')
auth_CREATE = (do_CREATE, arg_astring)
select_CREATE = auth_CREATE
def do_DELETE(self, tag, name):
name = self._parseMbox(name)
if name.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot delete the inbox')
return
try:
self.account.delete(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while deleting mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox deleted')
auth_DELETE = (do_DELETE, arg_astring)
select_DELETE = auth_DELETE
def do_RENAME(self, tag, oldname, newname):
oldname, newname = [self._parseMbox(n) for n in oldname, newname]
if oldname.lower() == 'inbox' or newname.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot rename the inbox, or rename another mailbox to inbox.')
return
try:
self.account.rename(oldname, newname)
except TypeError:
self.sendBadResponse(tag, 'Invalid command syntax')
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while renaming mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox renamed')
auth_RENAME = (do_RENAME, arg_astring, arg_astring)
select_RENAME = auth_RENAME
def do_SUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.subscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while subscribing to mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Subscribed')
auth_SUBSCRIBE = (do_SUBSCRIBE, arg_astring)
select_SUBSCRIBE = auth_SUBSCRIBE
def do_UNSUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.unsubscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while unsubscribing from mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Unsubscribed')
auth_UNSUBSCRIBE = (do_UNSUBSCRIBE, arg_astring)
select_UNSUBSCRIBE = auth_UNSUBSCRIBE
def _listWork(self, tag, ref, mbox, sub, cmdName):
mbox = self._parseMbox(mbox)
maybeDeferred(self.account.listMailboxes, ref, mbox
).addCallback(self._cbListWork, tag, sub, cmdName
).addErrback(self._ebListWork, tag
)
def _cbListWork(self, mailboxes, tag, sub, cmdName):
for (name, box) in mailboxes:
if not sub or self.account.isSubscribed(name):
flags = box.getFlags()
delim = box.getHierarchicalDelimiter()
resp = (DontQuoteMe(cmdName), map(DontQuoteMe, flags), delim, name.encode('imap4-utf-7'))
self.sendUntaggedResponse(collapseNestedLists(resp))
self.sendPositiveResponse(tag, '%s completed' % (cmdName,))
def _ebListWork(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while listing mailboxes.")
log.err(failure)
auth_LIST = (_listWork, arg_astring, arg_astring, 0, 'LIST')
select_LIST = auth_LIST
auth_LSUB = (_listWork, arg_astring, arg_astring, 1, 'LSUB')
select_LSUB = auth_LSUB
def do_STATUS(self, tag, mailbox, names):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox, 0
).addCallback(self._cbStatusGotMailbox, tag, mailbox, names
).addErrback(self._ebStatusGotMailbox, tag
)
def _cbStatusGotMailbox(self, mbox, tag, mailbox, names):
if mbox:
maybeDeferred(mbox.requestStatus, names).addCallbacks(
self.__cbStatus, self.__ebStatus,
(tag, mailbox), None, (tag, mailbox), None
)
else:
self.sendNegativeResponse(tag, "Could not open mailbox")
def _ebStatusGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_STATUS = (do_STATUS, arg_astring, arg_plist)
select_STATUS = auth_STATUS
def __cbStatus(self, status, tag, box):
line = ' '.join(['%s %s' % x for x in status.iteritems()])
self.sendUntaggedResponse('STATUS %s (%s)' % (box, line))
self.sendPositiveResponse(tag, 'STATUS complete')
def __ebStatus(self, failure, tag, box):
self.sendBadResponse(tag, 'STATUS %s failed: %s' % (box, str(failure.value)))
def do_APPEND(self, tag, mailbox, flags, date, message):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbAppendGotMailbox, tag, flags, date, message
).addErrback(self._ebAppendGotMailbox, tag
)
def _cbAppendGotMailbox(self, mbox, tag, flags, date, message):
if not mbox:
self.sendNegativeResponse(tag, '[TRYCREATE] No such mailbox')
return
d = mbox.addMessage(message, flags, date)
d.addCallback(self.__cbAppend, tag, mbox)
d.addErrback(self.__ebAppend, tag)
def _ebAppendGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_APPEND = (do_APPEND, arg_astring, opt_plist, opt_datetime,
arg_literal)
select_APPEND = auth_APPEND
def __cbAppend(self, result, tag, mbox):
self.sendUntaggedResponse('%d EXISTS' % mbox.getMessageCount())
self.sendPositiveResponse(tag, 'APPEND complete')
def __ebAppend(self, failure, tag):
self.sendBadResponse(tag, 'APPEND failed: ' + str(failure.value))
def do_CHECK(self, tag):
d = self.checkpoint()
if d is None:
self.__cbCheck(None, tag)
else:
d.addCallbacks(
self.__cbCheck,
self.__ebCheck,
callbackArgs=(tag,),
errbackArgs=(tag,)
)
select_CHECK = (do_CHECK,)
def __cbCheck(self, result, tag):
self.sendPositiveResponse(tag, 'CHECK completed')
def __ebCheck(self, failure, tag):
self.sendBadResponse(tag, 'CHECK failed: ' + str(failure.value))
def checkpoint(self):
"""Called when the client issues a CHECK command.
This should perform any checkpoint operations required by the server.
It may be a long running operation, but may not block. If it returns
a deferred, the client will only be informed of success (or failure)
when the deferred's callback (or errback) is invoked.
"""
return None
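    # Sketch of a checkpoint() override that defers the work to a thread
    # (illustration only; flushToDisk is a made-up backend-specific call):
    #     from twisted.internet import threads
    #     def checkpoint(self):
    #         return threads.deferToThread(self.mbox.flushToDisk)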
def do_CLOSE(self, tag):
d = None
if self.mbox.isWriteable():
d = maybeDeferred(self.mbox.expunge)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
if d is not None:
d.addCallback(lambda result: cmbx.close())
else:
d = maybeDeferred(cmbx.close)
if d is not None:
d.addCallbacks(self.__cbClose, self.__ebClose, (tag,), None, (tag,), None)
else:
self.__cbClose(None, tag)
select_CLOSE = (do_CLOSE,)
def __cbClose(self, result, tag):
self.sendPositiveResponse(tag, 'CLOSE completed')
self.mbox.removeListener(self)
self.mbox = None
self.state = 'auth'
def __ebClose(self, failure, tag):
self.sendBadResponse(tag, 'CLOSE failed: ' + str(failure.value))
def do_EXPUNGE(self, tag):
if self.mbox.isWriteable():
maybeDeferred(self.mbox.expunge).addCallbacks(
self.__cbExpunge, self.__ebExpunge, (tag,), None, (tag,), None
)
else:
self.sendNegativeResponse(tag, 'EXPUNGE ignored on read-only mailbox')
select_EXPUNGE = (do_EXPUNGE,)
def __cbExpunge(self, result, tag):
for e in result:
self.sendUntaggedResponse('%d EXPUNGE' % e)
self.sendPositiveResponse(tag, 'EXPUNGE completed')
def __ebExpunge(self, failure, tag):
self.sendBadResponse(tag, 'EXPUNGE failed: ' + str(failure.value))
log.err(failure)
def do_SEARCH(self, tag, charset, query, uid=0):
sm = ISearchableMailbox(self.mbox, None)
if sm is not None:
maybeDeferred(sm.search, query, uid=uid).addCallbacks(
self.__cbSearch, self.__ebSearch,
(tag, self.mbox, uid), None, (tag,), None
)
else:
s = parseIdList('1:*')
maybeDeferred(self.mbox.fetch, s, uid=uid).addCallbacks(
self.__cbManualSearch, self.__ebSearch,
(tag, self.mbox, query, uid), None, (tag,), None
)
select_SEARCH = (do_SEARCH, opt_charset, arg_searchkeys)
def __cbSearch(self, result, tag, mbox, uid):
if uid:
result = map(mbox.getUID, result)
ids = ' '.join([str(i) for i in result])
self.sendUntaggedResponse('SEARCH ' + ids)
self.sendPositiveResponse(tag, 'SEARCH completed')
def __cbManualSearch(self, result, tag, mbox, query, uid, searchResults = None):
if searchResults is None:
searchResults = []
i = 0
for (i, (id, msg)) in zip(range(5), result):
if self.searchFilter(query, id, msg):
if uid:
searchResults.append(str(msg.getUID()))
else:
searchResults.append(str(id))
if i == 4:
from twisted.internet import reactor
reactor.callLater(0, self.__cbManualSearch, result, tag, mbox, query, uid, searchResults)
else:
if searchResults:
self.sendUntaggedResponse('SEARCH ' + ' '.join(searchResults))
self.sendPositiveResponse(tag, 'SEARCH completed')
def searchFilter(self, query, id, msg):
while query:
if not self.singleSearchStep(query, id, msg):
return False
return True
def singleSearchStep(self, query, id, msg):
q = query.pop(0)
if isinstance(q, list):
if not self.searchFilter(q, id, msg):
return False
else:
c = q.upper()
f = getattr(self, 'search_' + c)
if f:
if not f(query, id, msg):
return False
else:
# IMAP goes *out of its way* to be complex
# Sequence sets to search should be specified
# with a command, like EVERYTHING ELSE.
try:
m = parseIdList(c)
except:
log.err('Unknown search term: ' + c)
else:
if id not in m:
return False
return True
def search_ALL(self, query, id, msg):
return True
def search_ANSWERED(self, query, id, msg):
return '\\Answered' in msg.getFlags()
def search_BCC(self, query, id, msg):
bcc = msg.getHeaders(False, 'bcc').get('bcc', '')
return bcc.lower().find(query.pop(0).lower()) != -1
def search_BEFORE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) < date
def search_BODY(self, query, id, msg):
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_CC(self, query, id, msg):
cc = msg.getHeaders(False, 'cc').get('cc', '')
return cc.lower().find(query.pop(0).lower()) != -1
def search_DELETED(self, query, id, msg):
return '\\Deleted' in msg.getFlags()
def search_DRAFT(self, query, id, msg):
return '\\Draft' in msg.getFlags()
def search_FLAGGED(self, query, id, msg):
return '\\Flagged' in msg.getFlags()
def search_FROM(self, query, id, msg):
fm = msg.getHeaders(False, 'from').get('from', '')
return fm.lower().find(query.pop(0).lower()) != -1
def search_HEADER(self, query, id, msg):
hdr = query.pop(0).lower()
hdr = msg.getHeaders(False, hdr).get(hdr, '')
return hdr.lower().find(query.pop(0).lower()) != -1
def search_KEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_LARGER(self, query, id, msg):
return int(query.pop(0)) < msg.getSize()
def search_NEW(self, query, id, msg):
return '\\Recent' in msg.getFlags() and '\\Seen' not in msg.getFlags()
def search_NOT(self, query, id, msg):
return not self.singleSearchStep(query, id, msg)
def search_OLD(self, query, id, msg):
return '\\Recent' not in msg.getFlags()
def search_ON(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) == date
def search_OR(self, query, id, msg):
a = self.singleSearchStep(query, id, msg)
b = self.singleSearchStep(query, id, msg)
return a or b
def search_RECENT(self, query, id, msg):
return '\\Recent' in msg.getFlags()
def search_SEEN(self, query, id, msg):
return '\\Seen' in msg.getFlags()
def search_SENTBEFORE(self, query, id, msg):
date = msg.getHeader(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date < parseTime(query.pop(0))
def search_SENTON(self, query, id, msg):
date = msg.getHeader(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date[:3] == parseTime(query.pop(0))[:3]
def search_SENTSINCE(self, query, id, msg):
        date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date > parseTime(query.pop(0))
def search_SINCE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) > date
def search_SMALLER(self, query, id, msg):
return int(query.pop(0)) > msg.getSize()
def search_SUBJECT(self, query, id, msg):
subj = msg.getHeaders(False, 'subject').get('subject', '')
return subj.lower().find(query.pop(0).lower()) != -1
def search_TEXT(self, query, id, msg):
# XXX - This must search headers too
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_TO(self, query, id, msg):
to = msg.getHeaders(False, 'to').get('to', '')
return to.lower().find(query.pop(0).lower()) != -1
def search_UID(self, query, id, msg):
c = query.pop(0)
m = parseIdList(c)
return msg.getUID() in m
def search_UNANSWERED(self, query, id, msg):
return '\\Answered' not in msg.getFlags()
def search_UNDELETED(self, query, id, msg):
return '\\Deleted' not in msg.getFlags()
def search_UNDRAFT(self, query, id, msg):
return '\\Draft' not in msg.getFlags()
def search_UNFLAGGED(self, query, id, msg):
return '\\Flagged' not in msg.getFlags()
def search_UNKEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_UNSEEN(self, query, id, msg):
return '\\Seen' not in msg.getFlags()
def __ebSearch(self, failure, tag):
self.sendBadResponse(tag, 'SEARCH failed: ' + str(failure.value))
log.err(failure)
def do_FETCH(self, tag, messages, query, uid=0):
        if query:
            self._oldTimeout = self.setTimeout(None)
maybeDeferred(self.mbox.fetch, messages, uid=uid
).addCallback(iter
).addCallback(self.__cbFetch, tag, query, uid
).addErrback(self.__ebFetch, tag
)
else:
self.sendPositiveResponse(tag, 'FETCH complete')
select_FETCH = (do_FETCH, arg_seqset, arg_fetchatt)
def __cbFetch(self, results, tag, query, uid):
        if self.blocked is None:
            self.blocked = []
try:
id, msg = results.next()
except StopIteration:
# All results have been processed, deliver completion notification.
self.sendPositiveResponse(tag, 'FETCH completed')
# The idle timeout was suspended while we delivered results,
# restore it now.
self.setTimeout(self._oldTimeout)
del self._oldTimeout
# Instance state is now consistent again (ie, it is as though
# the fetch command never ran), so allow any pending blocked
# commands to execute.
self._unblock()
else:
self.spewMessage(id, msg, query, uid
).addCallback(lambda _: self.__cbFetch(results, tag, query, uid)
).addErrback(self.__ebSpewMessage
)
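    # __cbFetch drives itself: spewMessage returns a Deferred for one
    # message, and its callback re-invokes __cbFetch with the same result
    # iterator until StopIteration terminates the FETCH response.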
def __ebSpewMessage(self, failure):
# This indicates a programming error.
# There's no reliable way to indicate anything to the client, since we
# may have already written an arbitrary amount of data in response to
# the command.
log.err(failure)
self.transport.loseConnection()
def spew_envelope(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('ENVELOPE ' + collapseNestedLists([getEnvelope(msg)]))
def spew_flags(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('FLAGS ' + '(%s)' % (' '.join(msg.getFlags())))
def spew_internaldate(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
idate = msg.getInternalDate()
ttup = rfc822.parsedate_tz(idate)
if ttup is None:
log.msg("%d:%r: unpareseable internaldate: %r" % (id, msg, idate))
raise IMAP4Exception("Internal failure generating INTERNALDATE")
odate = time.strftime("%d-%b-%Y %H:%M:%S ", ttup[:9])
if ttup[9] is None:
odate = odate + "+0000"
else:
if ttup[9] >= 0:
sign = "+"
else:
sign = "-"
odate = odate + sign + string.zfill(str(((abs(ttup[9]) / 3600) * 100 + (abs(ttup[9]) % 3600) / 60)), 4)
_w('INTERNALDATE ' + _quote(odate))
def spew_rfc822header(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
hdrs = _formatHeaders(msg.getHeaders(True))
_w('RFC822.HEADER ' + _literal(hdrs))
def spew_rfc822text(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.TEXT ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
def spew_rfc822size(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.SIZE ' + str(msg.getSize()))
def spew_rfc822(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822 ')
_f()
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()
).beginProducing(self.transport
)
return MessageProducer(msg, None, self._scheduler
).beginProducing(self.transport
)
def spew_uid(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('UID ' + str(msg.getUID()))
    def spew_bodystructure(self, id, msg, _w=None, _f=None):
        if _w is None:
            _w = self.transport.write
        _w('BODYSTRUCTURE ' + collapseNestedLists([getBodyStructure(msg, True)]))
def spew_body(self, part, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
for p in part.part:
if msg.isMultipart():
msg = msg.getSubPart(p)
elif p > 0:
# Non-multipart messages have an implicit first part but no
# other parts - reject any request for any other part.
raise TypeError("Requested subpart of non-multipart message")
if part.header:
hdrs = msg.getHeaders(part.header.negate, *part.header.fields)
hdrs = _formatHeaders(hdrs)
_w(str(part) + ' ' + _literal(hdrs))
elif part.text:
_w(str(part) + ' ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
elif part.mime:
hdrs = _formatHeaders(msg.getHeaders(True))
_w(str(part) + ' ' + _literal(hdrs))
elif part.empty:
_w(str(part) + ' ')
_f()
if part.part:
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
else:
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()).beginProducing(self.transport)
return MessageProducer(msg, None, self._scheduler).beginProducing(self.transport)
else:
_w('BODY ' + collapseNestedLists([getBodyStructure(msg)]))
def spewMessage(self, id, msg, query, uid):
wbuf = WriteBuffer(self.transport)
write = wbuf.write
flush = wbuf.flush
def start():
write('* %d FETCH (' % (id,))
def finish():
write(')\r\n')
def space():
write(' ')
def spew():
seenUID = False
start()
for part in query:
if part.type == 'uid':
seenUID = True
if part.type == 'body':
yield self.spew_body(part, id, msg, write, flush)
else:
f = getattr(self, 'spew_' + part.type)
yield f(id, msg, write, flush)
if part is not query[-1]:
space()
if uid and not seenUID:
space()
yield self.spew_uid(id, msg, write, flush)
finish()
flush()
return self._scheduler(spew())
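    # spewMessage builds the FETCH response for a single message: spew() is a
    # generator that writes each requested data item (appending UID when the
    # command was UID FETCH) and yields the possibly-Deferred results of the
    # spew_* helpers; self._scheduler drives the generator to completion.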
    def __ebFetch(self, failure, tag):
        self.setTimeout(self._oldTimeout)
        del self._oldTimeout
        log.err(failure)
        self.sendBadResponse(tag, 'FETCH failed: ' + str(failure.value))
def do_STORE(self, tag, messages, mode, flags, uid=0):
mode = mode.upper()
silent = mode.endswith('SILENT')
if mode.startswith('+'):
mode = 1
elif mode.startswith('-'):
mode = -1
else:
mode = 0
maybeDeferred(self.mbox.store, messages, flags, mode, uid=uid).addCallbacks(
self.__cbStore, self.__ebStore, (tag, self.mbox, uid, silent), None, (tag,), None
)
select_STORE = (do_STORE, arg_seqset, arg_atom, arg_flaglist)
def __cbStore(self, result, tag, mbox, uid, silent):
if result and not silent:
for (k, v) in result.iteritems():
if uid:
uidstr = ' UID %d' % mbox.getUID(k)
else:
uidstr = ''
self.sendUntaggedResponse('%d FETCH (FLAGS (%s)%s)' %
(k, ' '.join(v), uidstr))
self.sendPositiveResponse(tag, 'STORE completed')
def __ebStore(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def do_COPY(self, tag, messages, mailbox, uid=0):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbCopySelectedMailbox, tag, messages, mailbox, uid
).addErrback(self._ebCopySelectedMailbox, tag
)
select_COPY = (do_COPY, arg_seqset, arg_astring)
def _cbCopySelectedMailbox(self, mbox, tag, messages, mailbox, uid):
if not mbox:
self.sendNegativeResponse(tag, 'No such mailbox: ' + mailbox)
else:
maybeDeferred(self.mbox.fetch, messages, uid
).addCallback(self.__cbCopy, tag, mbox
).addCallback(self.__cbCopied, tag, mbox
).addErrback(self.__ebCopy, tag
)
def _ebCopySelectedMailbox(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def __cbCopy(self, messages, tag, mbox):
# XXX - This should handle failures with a rollback or something
addedDeferreds = []
addedIDs = []
failures = []
fastCopyMbox = IMessageCopier(mbox, None)
for (id, msg) in messages:
if fastCopyMbox is not None:
d = maybeDeferred(fastCopyMbox.copy, msg)
addedDeferreds.append(d)
continue
# XXX - The following should be an implementation of IMessageCopier.copy
# on an IMailbox->IMessageCopier adapter.
flags = msg.getFlags()
date = msg.getInternalDate()
body = IMessageFile(msg, None)
if body is not None:
bodyFile = body.open()
d = maybeDeferred(mbox.addMessage, bodyFile, flags, date)
else:
def rewind(f):
f.seek(0)
return f
buffer = tempfile.TemporaryFile()
d = MessageProducer(msg, buffer, self._scheduler
).beginProducing(None
).addCallback(lambda _, b=buffer, f=flags, d=date: mbox.addMessage(rewind(b), f, d)
)
addedDeferreds.append(d)
return defer.DeferredList(addedDeferreds)
def __cbCopied(self, deferredIds, tag, mbox):
ids = []
failures = []
for (status, result) in deferredIds:
if status:
ids.append(result)
else:
failures.append(result.value)
if failures:
self.sendNegativeResponse(tag, '[ALERT] Some messages were not copied')
else:
self.sendPositiveResponse(tag, 'COPY completed')
def __ebCopy(self, failure, tag):
self.sendBadResponse(tag, 'COPY failed:' + str(failure.value))
log.err(failure)
def do_UID(self, tag, command, line):
command = command.upper()
if command not in ('COPY', 'FETCH', 'STORE', 'SEARCH'):
raise IllegalClientResponse(command)
self.dispatchCommand(tag, command, line, uid=1)
select_UID = (do_UID, arg_atom, arg_line)
#
# IMailboxListener implementation
#
def modeChanged(self, writeable):
if writeable:
self.sendUntaggedResponse(message='[READ-WRITE]', async=True)
else:
self.sendUntaggedResponse(message='[READ-ONLY]', async=True)
def flagsChanged(self, newFlags):
for (mId, flags) in newFlags.iteritems():
msg = '%d FETCH (FLAGS (%s))' % (mId, ' '.join(flags))
self.sendUntaggedResponse(msg, async=True)
def newMessages(self, exists, recent):
if exists is not None:
self.sendUntaggedResponse('%d EXISTS' % exists, async=True)
if recent is not None:
self.sendUntaggedResponse('%d RECENT' % recent, async=True)
class UnhandledResponse(IMAP4Exception): pass
class NegativeResponse(IMAP4Exception): pass
class NoSupportedAuthentication(IMAP4Exception):
def __init__(self, serverSupports, clientSupports):
IMAP4Exception.__init__(self, 'No supported authentication schemes available')
self.serverSupports = serverSupports
self.clientSupports = clientSupports
def __str__(self):
return (IMAP4Exception.__str__(self)
+ ': Server supports %r, client supports %r'
% (self.serverSupports, self.clientSupports))
class IllegalServerResponse(IMAP4Exception): pass
TIMEOUT_ERROR = error.TimeoutError()
class IMAP4Client(basic.LineReceiver, policies.TimeoutMixin):
"""IMAP4 client protocol implementation
@ivar state: A string representing the state the connection is currently
in.
"""
implements(IMailboxListener)
tags = None
waiting = None
queued = None
tagID = 1
state = None
startedTLS = False
# Number of seconds to wait before timing out a connection.
# If the number is <= 0 no timeout checking will be performed.
timeout = 0
# Capabilities are not allowed to change during the session
# So cache the first response and use that for all later
# lookups
_capCache = None
_memoryFileLimit = 1024 * 1024 * 10
# Authentication is pluggable. This maps names to IClientAuthentication
# objects.
authenticators = None
STATUS_CODES = ('OK', 'NO', 'BAD', 'PREAUTH', 'BYE')
STATUS_TRANSFORMATIONS = {
'MESSAGES': int, 'RECENT': int, 'UNSEEN': int
}
context = None
def __init__(self, contextFactory = None):
self.tags = {}
self.queued = []
self.authenticators = {}
self.context = contextFactory
self._tag = None
self._parts = None
self._lastCmd = None
def registerAuthenticator(self, auth):
"""Register a new form of authentication
When invoking the authenticate() method of IMAP4Client, the first
matching authentication scheme found will be used. The ordering is
        that in which the server lists supported authentication schemes.
@type auth: Implementor of C{IClientAuthentication}
@param auth: The object to use to perform the client
side of this authentication scheme.
"""
self.authenticators[auth.getName().upper()] = auth
def rawDataReceived(self, data):
if self.timeout > 0:
self.resetTimeout()
self._pendingSize -= len(data)
if self._pendingSize > 0:
self._pendingBuffer.write(data)
else:
passon = ''
if self._pendingSize < 0:
data, passon = data[:self._pendingSize], data[self._pendingSize:]
self._pendingBuffer.write(data)
rest = self._pendingBuffer
self._pendingBuffer = None
self._pendingSize = None
rest.seek(0, 0)
self._parts.append(rest.read())
self.setLineMode(passon.lstrip('\r\n'))
# def sendLine(self, line):
# print 'S:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def _setupForLiteral(self, rest, octets):
self._pendingBuffer = self.messageFile(octets)
self._pendingSize = octets
if self._parts is None:
self._parts = [rest, '\r\n']
else:
self._parts.extend([rest, '\r\n'])
self.setRawMode()
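    # Literal handling: when a line ends in {N}, _setupForLiteral remembers
    # the partial line, switches to raw mode, and rawDataReceived collects
    # exactly N octets into the object returned by messageFile() before the
    # protocol drops back to line mode.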
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
def connectionLost(self, reason):
"""We are no longer connected"""
if self.timeout > 0:
self.setTimeout(None)
if self.queued is not None:
queued = self.queued
self.queued = None
for cmd in queued:
cmd.defer.errback(reason)
if self.tags is not None:
tags = self.tags
self.tags = None
for cmd in tags.itervalues():
if cmd is not None and cmd.defer is not None:
cmd.defer.errback(reason)
def lineReceived(self, line):
# print 'C: ' + repr(line)
if self.timeout > 0:
self.resetTimeout()
lastPart = line.rfind(' ')
if lastPart != -1:
lastPart = line[lastPart + 1:]
if lastPart.startswith('{') and lastPart.endswith('}'):
# It's a literal a-comin' in
try:
octets = int(lastPart[1:-1])
except ValueError:
raise IllegalServerResponse(line)
if self._parts is None:
self._tag, parts = line.split(None, 1)
else:
parts = line
self._setupForLiteral(parts, octets)
return
if self._parts is None:
# It isn't a literal at all
self._regularDispatch(line)
else:
# If an expression is in progress, no tag is required here
# Since we didn't find a literal indicator, this expression
# is done.
self._parts.append(line)
tag, rest = self._tag, ''.join(self._parts)
self._tag = self._parts = None
self.dispatchCommand(tag, rest)
def timeoutConnection(self):
if self._lastCmd and self._lastCmd.defer is not None:
d, self._lastCmd.defer = self._lastCmd.defer, None
d.errback(TIMEOUT_ERROR)
if self.queued:
for cmd in self.queued:
if cmd.defer is not None:
d, cmd.defer = cmd.defer, d
d.errback(TIMEOUT_ERROR)
self.transport.loseConnection()
def _regularDispatch(self, line):
parts = line.split(None, 1)
if len(parts) != 2:
parts.append('')
tag, rest = parts
self.dispatchCommand(tag, rest)
def messageFile(self, octets):
"""Create a file to which an incoming message may be written.
@type octets: C{int}
@param octets: The number of octets which will be written to the file
@rtype: Any object which implements C{write(string)} and
C{seek(int, int)}
@return: A file-like object
"""
if octets > self._memoryFileLimit:
return tempfile.TemporaryFile()
else:
return StringIO.StringIO()
def makeTag(self):
tag = '%0.4X' % self.tagID
self.tagID += 1
return tag
def dispatchCommand(self, tag, rest):
if self.state is None:
f = self.response_UNAUTH
else:
f = getattr(self, 'response_' + self.state.upper(), None)
if f:
try:
f(tag, rest)
except:
log.err()
self.transport.loseConnection()
else:
log.err("Cannot dispatch: %s, %s, %s" % (self.state, tag, rest))
self.transport.loseConnection()
def response_UNAUTH(self, tag, rest):
if self.state is None:
            # This is the server greeting
status, rest = rest.split(None, 1)
if status.upper() == 'OK':
self.state = 'unauth'
elif status.upper() == 'PREAUTH':
self.state = 'auth'
else:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
b, e = rest.find('['), rest.find(']')
if b != -1 and e != -1:
self.serverGreeting(self.__cbCapabilities(([rest[b:e]], None)))
else:
self.serverGreeting(None)
else:
self._defaultHandler(tag, rest)
def response_AUTH(self, tag, rest):
self._defaultHandler(tag, rest)
def _defaultHandler(self, tag, rest):
if tag == '*' or tag == '+':
if not self.waiting:
self._extraInfo([rest])
else:
cmd = self.tags[self.waiting]
if tag == '+':
cmd.continuation(rest)
else:
cmd.lines.append(rest)
else:
try:
cmd = self.tags[tag]
except KeyError:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
else:
status, line = rest.split(None, 1)
if status == 'OK':
# Give them this last line, too
cmd.finish(rest, self._extraInfo)
else:
cmd.defer.errback(IMAP4Exception(line))
del self.tags[tag]
self.waiting = None
self._flushQueue()
def _flushQueue(self):
if self.queued:
cmd = self.queued.pop(0)
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
def _extraInfo(self, lines):
# XXX - This is terrible.
# XXX - Also, this should collapse temporally proximate calls into single
# invocations of IMailboxListener methods, where possible.
flags = {}
recent = exists = None
for L in lines:
if L.find('EXISTS') != -1:
exists = int(L.split()[0])
elif L.find('RECENT') != -1:
recent = int(L.split()[0])
elif L.find('READ-ONLY') != -1:
self.modeChanged(0)
elif L.find('READ-WRITE') != -1:
self.modeChanged(1)
elif L.find('FETCH') != -1:
for (mId, fetched) in self.__cbFetch(([L], None)).iteritems():
sum = []
for f in fetched.get('FLAGS', []):
sum.append(f)
flags.setdefault(mId, []).extend(sum)
else:
log.msg('Unhandled unsolicited response: ' + repr(L))
if flags:
self.flagsChanged(flags)
if recent is not None or exists is not None:
self.newMessages(exists, recent)
def sendCommand(self, cmd):
cmd.defer = defer.Deferred()
if self.waiting:
self.queued.append(cmd)
return cmd.defer
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
self._lastCmd = cmd
return cmd.defer
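    # Only one command is outstanding at a time: sendCommand either sends the
    # command immediately (recording it in self.tags under a fresh tag) or
    # queues it until the currently waiting command completes, at which point
    # _flushQueue sends the next one.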
def getCapabilities(self, useCache=1):
"""Request the capabilities available on this server.
This command is allowed in any state of connection.
@type useCache: C{bool}
@param useCache: Specify whether to use the capability-cache or to
re-retrieve the capabilities from the server. Server capabilities
should never change, so for normal use, this flag should never be
false.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a
dictionary mapping capability types to lists of supported
mechanisms, or to None if a support list is not applicable.
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cmd = 'CAPABILITY'
resp = ('CAPABILITY',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbCapabilities)
return d
def __cbCapabilities(self, (lines, tagline)):
caps = {}
for rest in lines:
rest = rest.split()[1:]
for cap in rest:
eq = cap.find('=')
if eq == -1:
caps[cap] = None
else:
caps.setdefault(cap[:eq], []).append(cap[eq+1:])
self._capCache = caps
return caps
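    # The resulting dict maps plain capabilities to None and parameterized
    # ones to a list of values; e.g. a hypothetical response line
    # 'CAPABILITY IMAP4rev1 AUTH=PLAIN AUTH=LOGIN' becomes roughly
    # {'IMAP4rev1': None, 'AUTH': ['PLAIN', 'LOGIN']}.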
def logout(self):
"""Inform the server that we are done with the connection.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with None
when the proper server acknowledgement has been received.
"""
d = self.sendCommand(Command('LOGOUT', wantResponse=('BYE',)))
d.addCallback(self.__cbLogout)
return d
def __cbLogout(self, (lines, tagline)):
self.transport.loseConnection()
# We don't particularly care what the server said
return None
def noop(self):
"""Perform no operation.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list
of untagged status updates the server responds with.
"""
d = self.sendCommand(Command('NOOP'))
d.addCallback(self.__cbNoop)
return d
def __cbNoop(self, (lines, tagline)):
        # Conceivably, this is elidable.
        # It is, after all, a no-op.
return lines
def startTLS(self, contextFactory=None):
"""
Initiates a 'STARTTLS' request and negotiates the TLS / SSL
Handshake.
@param contextFactory: The TLS / SSL Context Factory to
leverage. If the contextFactory is None the IMAP4Client will
either use the current TLS / SSL Context Factory or attempt to
create a new one.
@type contextFactory: C{ssl.ClientContextFactory}
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(IMAP4Exception(
"IMAP4Client requires a TLS context to "
"initiate the STARTTLS handshake"))
if 'STARTTLS' not in self._capCache:
return defer.fail(IMAP4Exception(
"Server does not support secure communication "
"via TLS / SSL"))
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(IMAP4Exception(
"IMAP4Client transport does not implement "
"interfaces.ITLSTransport"))
d = self.sendCommand(Command('STARTTLS'))
d.addCallback(self._startedTLS, contextFactory)
d.addCallback(lambda _: self.getCapabilities())
return d
def authenticate(self, secret):
"""Attempt to enter the authenticated state with the server
This command is allowed in the Non-Authenticated state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the authentication
succeeds and whose errback will be invoked otherwise.
"""
if self._capCache is None:
d = self.getCapabilities()
else:
d = defer.succeed(self._capCache)
d.addCallback(self.__cbAuthenticate, secret)
return d
def __cbAuthenticate(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
if self.startedTLS:
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
else:
def ebStartTLS(err):
err.trap(IMAP4Exception)
# We couldn't negotiate TLS for some reason
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
d = self.startTLS()
d.addErrback(ebStartTLS)
d.addCallback(lambda _: self.getCapabilities())
d.addCallback(self.__cbAuthTLS, secret)
return d
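    # If no advertised AUTH scheme matches a registered authenticator and TLS
    # has not been negotiated yet, __cbAuthenticate attempts STARTTLS,
    # refreshes the capability list, and retries scheme selection via
    # __cbAuthTLS before giving up with NoSupportedAuthentication.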
def __cbContinueAuth(self, rest, scheme, secret):
try:
chal = base64.decodestring(rest + '\n')
except binascii.Error:
self.sendLine('*')
            self.transport.loseConnection()
            raise IllegalServerResponse(rest)
else:
auth = self.authenticators[scheme]
chal = auth.challengeResponse(secret, chal)
self.sendLine(base64.encodestring(chal).strip())
def __cbAuthTLS(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
raise NoSupportedAuthentication(auths, self.authenticators.keys())
def login(self, username, password):
"""Authenticate with the server using a username and password
This command is allowed in the Non-Authenticated state. If the
server supports the STARTTLS capability and our transport supports
TLS, TLS is negotiated before the login command is issued.
A more secure way to log in is to use C{startTLS} or
C{authenticate} or both.
@type username: C{str}
@param username: The username to log in with
@type password: C{str}
@param password: The password to log in with
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if login is successful
and whose errback is invoked otherwise.
"""
d = maybeDeferred(self.getCapabilities)
d.addCallback(self.__cbLoginCaps, username, password)
return d
def serverGreeting(self, caps):
"""Called when the server has sent us a greeting.
@type caps: C{dict}
@param caps: Capabilities the server advertised in its greeting.
"""
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def __cbLoginCaps(self, capabilities, username, password):
# If the server advertises STARTTLS, we might want to try to switch to TLS
tryTLS = 'STARTTLS' in capabilities
# If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallbacks(
self.__cbLoginTLS,
self.__ebLoginTLS,
callbackArgs=(username, password),
)
return d
else:
if nontlsTransport:
log.msg("Server has no TLS support. logging in over cleartext!")
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def _startedTLS(self, result, context):
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def __cbLoginTLS(self, result, username, password):
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def __ebLoginTLS(self, failure):
log.err(failure)
return failure
def namespace(self):
"""Retrieve information about the namespaces available to this account
This command is allowed in the Authenticated and Selected states.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with namespace
information. An example of this information is::
[[['', '/']], [], []]
which indicates a single personal namespace called '' with '/'
as its hierarchical delimiter, and no shared or user namespaces.
"""
cmd = 'NAMESPACE'
resp = ('NAMESPACE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbNamespace)
return d
def __cbNamespace(self, (lines, last)):
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[0] == 'NAMESPACE':
# XXX UGGG parsing hack :(
r = parseNestedParens('(' + parts[1] + ')')[0]
return [e or [] for e in r]
log.err("No NAMESPACE response to NAMESPACE command")
return [[], [], []]
def select(self, mailbox):
"""Select a mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to select
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the select is successful and whose errback is
invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
FLAGS: A list of strings containing the flags settable on
messages in this mailbox.
EXISTS: An integer indicating the number of messages in this
mailbox.
RECENT: An integer indicating the number of \"recent\"
messages in this mailbox.
UNSEEN: An integer indicating the number of messages not
flagged \\Seen in this mailbox.
PERMANENTFLAGS: A list of strings containing the flags that
can be permanently set on messages in this mailbox.
UIDVALIDITY: An integer uniquely identifying this mailbox.
"""
cmd = 'SELECT'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 1)
return d
def examine(self, mailbox):
"""Select a mailbox in read-only mode
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to examine
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the examine is successful and whose errback
is invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
'FLAGS': A list of strings containing the flags settable on
messages in this mailbox.
'EXISTS': An integer indicating the number of messages in this
mailbox.
'RECENT': An integer indicating the number of \"recent\"
messages in this mailbox.
'UNSEEN': An integer indicating the number of messages not
flagged \\Seen in this mailbox.
'PERMANENTFLAGS': A list of strings containing the flags that
can be permanently set on messages in this mailbox.
'UIDVALIDITY': An integer uniquely identifying this mailbox.
"""
cmd = 'EXAMINE'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 0)
return d
def __cbSelect(self, (lines, tagline), rw):
        # In the absence of specification, we are free to assume:
# READ-WRITE access
datum = {'READ-WRITE': rw}
lines.append(tagline)
for parts in lines:
split = parts.split()
if len(split) == 2:
if split[1].upper().strip() == 'EXISTS':
try:
datum['EXISTS'] = int(split[0])
except ValueError:
raise IllegalServerResponse(parts)
elif split[1].upper().strip() == 'RECENT':
try:
datum['RECENT'] = int(split[0])
except ValueError:
raise IllegalServerResponse(parts)
else:
log.err('Unhandled SELECT response (1): ' + parts)
elif split[0].upper().strip() == 'FLAGS':
split = parts.split(None, 1)
datum['FLAGS'] = tuple(parseNestedParens(split[1])[0])
elif split[0].upper().strip() == 'OK':
begin = parts.find('[')
end = parts.find(']')
if begin == -1 or end == -1:
raise IllegalServerResponse(parts)
else:
content = parts[begin+1:end].split(None, 1)
if len(content) >= 1:
key = content[0].upper()
if key == 'READ-ONLY':
datum['READ-WRITE'] = 0
elif key == 'READ-WRITE':
datum['READ-WRITE'] = 1
elif key == 'UIDVALIDITY':
try:
datum['UIDVALIDITY'] = int(content[1])
except ValueError:
raise IllegalServerResponse(parts)
elif key == 'UNSEEN':
try:
datum['UNSEEN'] = int(content[1])
except ValueError:
raise IllegalServerResponse(parts)
elif key == 'UIDNEXT':
datum['UIDNEXT'] = int(content[1])
elif key == 'PERMANENTFLAGS':
datum['PERMANENTFLAGS'] = tuple(parseNestedParens(content[1])[0])
else:
log.err('Unhandled SELECT response (2): ' + parts)
else:
log.err('Unhandled SELECT response (3): ' + parts)
else:
log.err('Unhandled SELECT response (4): ' + parts)
return datum
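    # A successful SELECT/EXAMINE callback receives a dict shaped roughly
    # like {'READ-WRITE': 1, 'EXISTS': 3, 'RECENT': 0, 'UNSEEN': 1,
    # 'FLAGS': ('\\Seen', '\\Deleted'), 'UIDVALIDITY': 12345}, depending on
    # which untagged responses the server actually sent.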
def create(self, name):
"""Create a new mailbox on the server
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to create.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the mailbox creation
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('CREATE', _prepareMailboxName(name)))
def delete(self, name):
"""Delete a mailbox
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to delete.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked if the mailbox is
deleted successfully and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('DELETE', _prepareMailboxName(name)))
def rename(self, oldname, newname):
"""Rename a mailbox
This command is allowed in the Authenticated and Selected states.
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to give the mailbox.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the rename is
successful and whose errback is invoked otherwise.
"""
oldname = _prepareMailboxName(oldname)
newname = _prepareMailboxName(newname)
return self.sendCommand(Command('RENAME', ' '.join((oldname, newname))))
def subscribe(self, name):
"""Add a mailbox to the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to mark as 'active' or 'subscribed'
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the subscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('SUBSCRIBE', _prepareMailboxName(name)))
def unsubscribe(self, name):
"""Remove a mailbox from the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to unsubscribe
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the unsubscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('UNSUBSCRIBE', _prepareMailboxName(name)))
def list(self, reference, wildcard):
"""List a subset of the available mailboxes
This command is allowed in the Authenticated and Selected states.
@type reference: C{str}
@param reference: The context in which to interpret C{wildcard}
@type wildcard: C{str}
@param wildcard: The pattern of mailbox names to match, optionally
including either or both of the '*' and '%' wildcards. '*' will
match zero or more characters and cross hierarchical boundaries.
'%' will also match zero or more characters, but is limited to a
single hierarchical level.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of C{tuple}s,
the first element of which is a C{tuple} of mailbox flags, the second
element of which is the hierarchy delimiter for this mailbox, and the
third of which is the mailbox name; if the command is unsuccessful,
the deferred's errback is invoked instead.
"""
cmd = 'LIST'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LIST',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LIST')
return d
def lsub(self, reference, wildcard):
"""List a subset of the subscribed available mailboxes
This command is allowed in the Authenticated and Selected states.
The parameters and returned object are the same as for the C{list}
method, with one slight difference: Only mailboxes which have been
subscribed can be included in the resulting list.
"""
cmd = 'LSUB'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LSUB',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LSUB')
return d
def __cbList(self, (lines, last), command):
results = []
for L in lines:
parts = parseNestedParens(L)
if len(parts) != 4:
raise IllegalServerResponse, L
if parts[0] == command:
parts[1] = tuple(parts[1])
results.append(tuple(parts[1:]))
return results
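    # list()/lsub() callbacks receive tuples of (flags, delimiter, name),
    # e.g. (('\\Noselect',), '/', 'INBOX/Archive') for a hypothetical
    # mailbox; only lines whose first element matches the issued command
    # name are kept.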
def status(self, mailbox, *names):
"""Retrieve the status of the given mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to query
@type names: C{str}
@param names: The status names to query. These may be any number of:
MESSAGES, RECENT, UIDNEXT, UIDVALIDITY, and UNSEEN.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with the status information
if the command is successful and whose errback is invoked otherwise.
"""
cmd = 'STATUS'
args = "%s (%s)" % (_prepareMailboxName(mailbox), ' '.join(names))
resp = ('STATUS',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbStatus)
return d
def __cbStatus(self, (lines, last)):
status = {}
for line in lines:
parts = parseNestedParens(line)
if parts[0] == 'STATUS':
items = parts[2]
items = [items[i:i+2] for i in range(0, len(items), 2)]
status.update(dict(items))
for k in status.keys():
t = self.STATUS_TRANSFORMATIONS.get(k)
if t:
try:
status[k] = t(status[k])
except Exception, e:
raise IllegalServerResponse('(%s %s): %s' % (k, status[k], str(e)))
return status
def append(self, mailbox, message, flags = (), date = None):
"""Add the given message to the given mailbox.
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The mailbox to which to add this message.
@type message: Any file-like object
@param message: The message to add, in RFC822 format. Newlines
in this file should be \\r\\n-style.
@type flags: Any iterable of C{str}
@param flags: The flags to associated with this message.
@type date: C{str}
@param date: The date to associate with this message. This should
            be of the format DD-Mon-YYYY HH:MM:SS +/-HHMM. For example, in
            Eastern Standard Time, on July 1st 2004 at half past 1 PM,
            \"01-Jul-2004 13:30:00 -0500\".
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
message.seek(0, 2)
L = message.tell()
message.seek(0, 0)
fmt = '%s (%s)%s {%d}'
if date:
date = ' "%s"' % date
else:
date = ''
cmd = fmt % (
_prepareMailboxName(mailbox), ' '.join(flags),
date, L
)
d = self.sendCommand(Command('APPEND', cmd, (), self.__cbContinueAppend, message))
return d
def __cbContinueAppend(self, lines, message):
s = basic.FileSender()
return s.beginFileTransfer(message, self.transport, None
).addCallback(self.__cbFinishAppend)
def __cbFinishAppend(self, foo):
self.sendLine('')
def check(self):
"""Tell the server to perform a checkpoint
This command is allowed in the Selected state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CHECK'))
def close(self):
"""Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will also remove all messages flagged \\Deleted
from the selected mailbox if it is opened in read-write mode,
        otherwise it indicates success but no messages are removed.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when the command
completes successfully or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CLOSE'))
def expunge(self):
"""Return the connection to the Authenticate state.
This command is allowed in the Selected state.
Issuing this command will perform the same actions as issuing the
close command, but will also generate an 'expunge' response for
every message deleted.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
'expunge' responses when this command is successful or whose errback
is invoked otherwise.
"""
cmd = 'EXPUNGE'
resp = ('EXPUNGE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbExpunge)
return d
def __cbExpunge(self, (lines, last)):
ids = []
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[1] == 'EXPUNGE':
try:
ids.append(int(parts[0]))
except ValueError:
raise IllegalServerResponse, line
return ids
def search(self, *queries, **kwarg):
"""Search messages in the currently selected mailbox
This command is allowed in the Selected state.
Any non-zero number of queries are accepted by this method, as
returned by the C{Query}, C{Or}, and C{Not} functions.
One keyword argument is accepted: if uid is passed in with a non-zero
value, the server is asked to return message UIDs instead of message
sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list of all
            the message sequence numbers returned by the search, or whose errback
will be invoked if there is an error.
"""
if kwarg.get('uid'):
cmd = 'UID SEARCH'
else:
cmd = 'SEARCH'
args = ' '.join(queries)
d = self.sendCommand(Command(cmd, args, wantResponse=(cmd,)))
d.addCallback(self.__cbSearch)
return d
def __cbSearch(self, (lines, end)):
ids = []
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[0] == 'SEARCH':
try:
ids.extend(map(int, parts[1].split()))
except ValueError:
raise IllegalServerResponse, line
return ids
def fetchUID(self, messages, uid=0):
"""Retrieve the unique identifier for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message sequence numbers to unique message identifiers, or whose
errback is invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, uid=1)
d.addCallback(self.__cbFetch)
return d
def fetchFlags(self, messages, uid=0):
"""Retrieve the flags for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve flags.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to lists of flags, or whose errback is invoked if
there is an error.
"""
d = self._fetch(str(messages), useUID=uid, flags=1)
d.addCallback(self.__cbFetch)
return d
def fetchInternalDate(self, messages, uid=0):
"""Retrieve the internal date associated with one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve the internal date.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to date strings, or whose errback is invoked
if there is an error. Date strings take the format of
\"day-month-year time timezone\".
"""
d = self._fetch(str(messages), useUID=uid, internaldate=1)
d.addCallback(self.__cbFetch)
return d
def fetchEnvelope(self, messages, uid=0):
"""Retrieve the envelope data for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve envelope data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to envelope data, or whose errback is invoked
if there is an error. Envelope data consists of a sequence of the
date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
and message-id header fields. The date, subject, in-reply-to, and
message-id fields are strings, while the from, sender, reply-to,
to, cc, and bcc fields contain address data. Address data consists
of a sequence of name, source route, mailbox name, and hostname.
Fields which are not present for a particular address may be C{None}.
"""
d = self._fetch(str(messages), useUID=uid, envelope=1)
d.addCallback(self.__cbFetch)
return d
def fetchBodyStructure(self, messages, uid=0):
"""Retrieve the structure of the body of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve body structure
data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body structure data, or whose errback is invoked
if there is an error. Body structure data describes the MIME-IMB
format of a message and consists of a sequence of mime type, mime
subtype, parameters, content id, description, encoding, and size.
The fields following the size field are variable: if the mime
type/subtype is message/rfc822, the contained message's envelope
information, body structure data, and number of lines of text; if
the mime type is text, the number of lines of text. Extension fields
may also be included; if present, they are: the MD5 hash of the body,
body disposition, body language.
"""
d = self._fetch(messages, useUID=uid, bodystructure=1)
d.addCallback(self.__cbFetch)
return d
def fetchSimplifiedBody(self, messages, uid=0):
"""Retrieve the simplified body structure of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body data, or whose errback is invoked
if there is an error. The simplified body structure is the same
as the body structure, except that extension fields will never be
present.
"""
d = self._fetch(messages, useUID=uid, body=1)
d.addCallback(self.__cbFetch)
return d
def fetchMessage(self, messages, uid=0):
"""Retrieve one or more entire messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message objects (as returned by self.messageFile(), file objects by
default), to additional information, or whose errback is invoked if
there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822=1)
d.addCallback(self.__cbFetch)
return d
def fetchHeaders(self, messages, uid=0):
"""Retrieve headers of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dicts of message headers, or whose errback is
invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822header=1)
d.addCallback(self.__cbFetch)
return d
def fetchBody(self, messages, uid=0):
"""Retrieve body text of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to file-like objects containing body text, or whose
errback is invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822text=1)
d.addCallback(self.__cbFetch)
return d
def fetchSize(self, messages, uid=0):
"""Retrieve the size, in octets, of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to sizes, or whose errback is invoked if there is
an error.
"""
d = self._fetch(messages, useUID=uid, rfc822size=1)
d.addCallback(self.__cbFetch)
return d
def fetchFull(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, C{fetchEnvelope}, and C{fetchSimplifiedBody}
functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", "envelope", and "body".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1, body=1
)
d.addCallback(self.__cbFetch)
return d
def fetchAll(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, and C{fetchEnvelope} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", and "envelope".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1
)
d.addCallback(self.__cbFetch)
return d
def fetchFast(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate}, and
C{fetchSize} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys are
"flags", "date", and "size".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1, rfc822size=1
)
d.addCallback(self.__cbFetch)
return d
def __cbFetch(self, (lines, last)):
flags = {}
for line in lines:
parts = line.split(None, 2)
if len(parts) == 3:
if parts[1] == 'FETCH':
try:
id = int(parts[0])
except ValueError:
raise IllegalServerResponse, line
else:
data = parseNestedParens(parts[2])
while len(data) == 1 and isinstance(data, types.ListType):
data = data[0]
while data:
if len(data) < 2:
raise IllegalServerResponse("Not enough arguments", data)
flags.setdefault(id, {})[data[0]] = data[1]
del data[:2]
else:
print '(2)Ignoring ', parts
else:
print '(3)Ignoring ', parts
return flags
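    # __cbFetch flattens each untagged 'N FETCH (key value key value ...)'
    # line into a per-message dict; paired items are consumed two at a time,
    # so an odd-length item list is reported as an illegal server response.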
def fetchSpecific(self, messages, uid=0, headerType=None,
headerNumber=None, headerArgs=None, peek=None,
offset=None, length=None):
"""Retrieve a specific section of one or more messages
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@type headerType: C{str}
@param headerType: If specified, must be one of HEADER,
HEADER.FIELDS, HEADER.FIELDS.NOT, MIME, or TEXT, and will determine
which part of the message is retrieved. For HEADER.FIELDS and
HEADER.FIELDS.NOT, C{headerArgs} must be a sequence of header names.
For MIME, C{headerNumber} must be specified.
@type headerNumber: C{int} or C{int} sequence
@param headerNumber: The nested rfc822 index specifying the
entity to retrieve. For example, C{1} retrieves the first
            entity of the message, and C{(2, 1, 3)} retrieves the 3rd
entity inside the first entity inside the second entity of
the message.
@type headerArgs: A sequence of C{str}
@param headerArgs: If C{headerType} is HEADER.FIELDS, these are the
headers to retrieve. If it is HEADER.FIELDS.NOT, these are the
headers to exclude from retrieval.
@type peek: C{bool}
@param peek: If true, cause the server to not set the \\Seen
flag on this message as a result of this command.
@type offset: C{int}
@param offset: The number of octets at the beginning of the result
to skip.
@type length: C{int}
@param length: The number of octets to retrieve.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a mapping of
message numbers to retrieved data, or whose errback is invoked
if there is an error.
"""
fmt = '%s BODY%s[%s%s%s]%s'
if headerNumber is None:
number = ''
elif isinstance(headerNumber, types.IntType):
number = str(headerNumber)
else:
            number = '.'.join(map(str, headerNumber))
if headerType is None:
header = ''
elif number:
header = '.' + headerType
else:
header = headerType
if header:
if headerArgs is not None:
payload = ' (%s)' % ' '.join(headerArgs)
else:
payload = ' ()'
else:
payload = ''
if offset is None:
extra = ''
else:
extra = '<%d.%d>' % (offset, length)
fetch = uid and 'UID FETCH' or 'FETCH'
cmd = fmt % (messages, peek and '.PEEK' or '', number, header, payload, extra)
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self.__cbFetchSpecific)
return d
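    # A hypothetical call such as
    #   client.fetchSpecific('1:5', headerType='HEADER.FIELDS',
    #                        headerArgs=['Subject', 'Date'], peek=1)
    # issues: FETCH 1:5 BODY.PEEK[HEADER.FIELDS (Subject Date)]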
def __cbFetchSpecific(self, (lines, last)):
info = {}
for line in lines:
parts = line.split(None, 2)
if len(parts) == 3:
if parts[1] == 'FETCH':
try:
id = int(parts[0])
except ValueError:
raise IllegalServerResponse, line
else:
info[id] = parseNestedParens(parts[2])
return info
def _fetch(self, messages, useUID=0, **terms):
fetch = useUID and 'UID FETCH' or 'FETCH'
if 'rfc822text' in terms:
del terms['rfc822text']
terms['rfc822.text'] = True
if 'rfc822size' in terms:
del terms['rfc822size']
terms['rfc822.size'] = True
if 'rfc822header' in terms:
del terms['rfc822header']
terms['rfc822.header'] = True
cmd = '%s (%s)' % (messages, ' '.join([s.upper() for s in terms.keys()]))
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
return d
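    # _fetch maps its keyword flags (flags=1, envelope=1, rfc822size=1, ...)
    # onto upper-cased FETCH data item names, first renaming the rfc822*
    # shortcuts to their dotted forms, e.g. rfc822size -> RFC822.SIZE.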
def setFlags(self, messages, flags, silent=1, uid=0):
"""Set the flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and 'FLAGS.SILENT' or 'FLAGS', flags, uid)
def addFlags(self, messages, flags, silent=1, uid=0):
"""Add to the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and '+FLAGS.SILENT' or '+FLAGS', flags, uid)
def removeFlags(self, messages, flags, silent=1, uid=0):
"""Remove from the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and '-FLAGS.SILENT' or '-FLAGS', flags, uid)
def _store(self, messages, cmd, flags, uid):
store = uid and 'UID STORE' or 'STORE'
args = ' '.join((messages, cmd, '(%s)' % ' '.join(flags)))
d = self.sendCommand(Command(store, args, wantResponse=('FETCH',)))
d.addCallback(self.__cbFetch)
return d
def copy(self, messages, mailbox, uid):
"""Copy the specified messages to the specified mailbox.
This command is allowed in the Selected state.
@type messages: C{str}
@param messages: A message sequence set
@type mailbox: C{str}
@param mailbox: The mailbox to which to copy the messages
@type uid: C{bool}
@param uid: If true, the C{messages} refers to message UIDs, rather
than message sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a true value
when the copy is successful, or whose errback is invoked if there
is an error.
"""
if uid:
cmd = 'UID COPY'
else:
cmd = 'COPY'
args = '%s %s' % (messages, _prepareMailboxName(mailbox))
return self.sendCommand(Command(cmd, args))
#
# IMailboxListener methods
#
def modeChanged(self, writeable):
"""Override me"""
def flagsChanged(self, newFlags):
"""Override me"""
def newMessages(self, exists, recent):
"""Override me"""
class IllegalIdentifierError(IMAP4Exception): pass
def parseIdList(s):
res = MessageSet()
parts = s.split(',')
for p in parts:
if ':' in p:
low, high = p.split(':', 1)
try:
if low == '*':
low = None
else:
low = long(low)
if high == '*':
high = None
else:
high = long(high)
res.extend((low, high))
except ValueError:
raise IllegalIdentifierError(p)
else:
try:
if p == '*':
p = None
else:
p = long(p)
except ValueError:
raise IllegalIdentifierError(p)
else:
res.extend(p)
return res
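# Example sketch (illustrative only) of how parseIdList is typically used;
# the results described follow from the implementation above:
#   parseIdList('1:5,10')  ->  a MessageSet covering messages 1-5 and 10
#   parseIdList('12:*')    ->  a MessageSet from message 12 through the last message
#   parseIdList('foo')     ->  raises IllegalIdentifierError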
class IllegalQueryError(IMAP4Exception): pass
_SIMPLE_BOOL = (
'ALL', 'ANSWERED', 'DELETED', 'DRAFT', 'FLAGGED', 'NEW', 'OLD', 'RECENT',
'SEEN', 'UNANSWERED', 'UNDELETED', 'UNDRAFT', 'UNFLAGGED', 'UNSEEN'
)
_NO_QUOTES = (
'LARGER', 'SMALLER', 'UID'
)
def Query(sorted=0, **kwarg):
"""Create a query string
Among the accepted keywords are::
all : If set to a true value, search all messages in the
current mailbox
answered : If set to a true value, search messages flagged with
\\Answered
bcc : A substring to search the BCC header field for
before : Search messages with an internal date before this
value. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
body : A substring to search the body of the messages for
cc : A substring to search the CC header field for
deleted : If set to a true value, search messages flagged with
\\Deleted
draft : If set to a true value, search messages flagged with
\\Draft
flagged : If set to a true value, search messages flagged with
\\Flagged
from : A substring to search the From header field for
header : A two-tuple of a header name and substring to search
for in that header
keyword : Search for messages with the given keyword set
larger : Search for messages larger than this number of octets
messages : Search only the given message sequence set.
new : If set to a true value, search messages flagged with
\\Recent but not \\Seen
old : If set to a true value, search messages not flagged with
\\Recent
on : Search messages with an internal date which is on this
date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
recent : If set to a true value, search for messages flagged with
\\Recent
seen : If set to a true value, search for messages flagged with
\\Seen
sentbefore : Search for messages with an RFC822 'Date' header before
this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
senton : Search for messages with an RFC822 'Date' header which is
on this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
sentsince : Search for messages with an RFC822 'Date' header which is
after this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
since : Search for messages with an internal date that is after
this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
smaller : Search for messages smaller than this number of octets
subject : A substring to search the 'subject' header for
text : A substring to search the entire message for
to : A substring to search the 'to' header for
uid : Search only the messages in the given message set
unanswered : If set to a true value, search for messages not
flagged with \\Answered
undeleted : If set to a true value, search for messages not
flagged with \\Deleted
undraft : If set to a true value, search for messages not
flagged with \\Draft
unflagged : If set to a true value, search for messages not
flagged with \\Flagged
unkeyword : Search for messages without the given keyword set
unseen : If set to a true value, search for messages not
flagged with \\Seen
@type sorted: C{bool}
@param sorted: If true, the output will be sorted alphabetically.
The standard does not require it, but it makes testing this function
easier. The default is zero, and this should be acceptable for any
application.
@rtype: C{str}
@return: The formatted query string
"""
cmd = []
keys = kwarg.keys()
if sorted:
keys.sort()
for k in keys:
v = kwarg[k]
k = k.upper()
if k in _SIMPLE_BOOL and v:
cmd.append(k)
elif k == 'HEADER':
cmd.extend([k, v[0], '"%s"' % (v[1],)])
elif k not in _NO_QUOTES:
cmd.extend([k, '"%s"' % (v,)])
else:
cmd.extend([k, '%s' % (v,)])
if len(cmd) > 1:
return '(%s)' % ' '.join(cmd)
else:
return ' '.join(cmd)
def Or(*args):
"""The disjunction of two or more queries"""
if len(args) < 2:
raise IllegalQueryError, args
elif len(args) == 2:
return '(OR %s %s)' % args
else:
return '(OR %s %s)' % (args[0], Or(*args[1:]))
def Not(query):
"""The negation of a query"""
return '(NOT %s)' % (query,)
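# Example sketch (illustrative only) of composing SEARCH criteria with the
# helpers above; the exact strings follow from Query, Or and Not as defined:
#   Query(sorted=1, subject='hello', unseen=1)  ->  '(SUBJECT "hello" UNSEEN)'
#   Or(Query(deleted=1), Not(Query(seen=1)))    ->  '(OR DELETED (NOT SEEN))'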
class MismatchedNesting(IMAP4Exception):
pass
class MismatchedQuoting(IMAP4Exception):
pass
def wildcardToRegexp(wildcard, delim=None):
wildcard = wildcard.replace('*', '(?:.*?)')
if delim is None:
wildcard = wildcard.replace('%', '(?:.*?)')
else:
wildcard = wildcard.replace('%', '(?:(?:[^%s])*?)' % re.escape(delim))
return re.compile(wildcard, re.I)
def splitQuoted(s):
"""Split a string into whitespace delimited tokens
Tokens that would otherwise be separated but are surrounded by \"
remain as a single token. Any token that is not quoted and is
equal to \"NIL\" is tokenized as C{None}.
@type s: C{str}
@param s: The string to be split
@rtype: C{list} of C{str}
@return: A list of the resulting tokens
@raise MismatchedQuoting: Raised if an odd number of quotes are present
"""
s = s.strip()
result = []
inQuote = inWord = start = 0
for (i, c) in zip(range(len(s)), s):
if c == '"' and not inQuote:
inQuote = 1
start = i + 1
elif c == '"' and inQuote:
inQuote = 0
result.append(s[start:i])
start = i + 1
elif not inWord and not inQuote and c not in ('"' + string.whitespace):
inWord = 1
start = i
elif inWord and not inQuote and c in string.whitespace:
if s[start:i] == 'NIL':
result.append(None)
else:
result.append(s[start:i])
start = i
inWord = 0
if inQuote:
raise MismatchedQuoting(s)
if inWord:
if s[start:] == 'NIL':
result.append(None)
else:
result.append(s[start:])
return result
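# Example sketch (illustrative only) of splitQuoted's tokenization:
#   splitQuoted('hi "there person" NIL 42')  ->  ['hi', 'there person', None, '42']
#   splitQuoted('oops "unterminated')        ->  raises MismatchedQuoting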
def splitOn(sequence, predicate, transformers):
result = []
mode = predicate(sequence[0])
tmp = [sequence[0]]
for e in sequence[1:]:
p = predicate(e)
if p != mode:
result.extend(transformers[mode](tmp))
tmp = [e]
mode = p
else:
tmp.append(e)
result.extend(transformers[mode](tmp))
return result
def collapseStrings(results):
"""
Turns a list of length-one strings and lists into a list of longer
strings and lists. For example,
['a', 'b', ['c', 'd']] is returned as ['ab', ['cd']]
@type results: C{list} of C{str} and C{list}
@param results: The list to be collapsed
@rtype: C{list} of C{str} and C{list}
@return: A new list which is the collapsed form of C{results}
"""
copy = []
begun = None
listsList = [isinstance(s, types.ListType) for s in results]
pred = lambda e: isinstance(e, types.TupleType)
tran = {
0: lambda e: splitQuoted(''.join(e)),
1: lambda e: [''.join([i[0] for i in e])]
}
for (i, c, isList) in zip(range(len(results)), results, listsList):
if isList:
if begun is not None:
copy.extend(splitOn(results[begun:i], pred, tran))
begun = None
copy.append(collapseStrings(c))
elif begun is None:
begun = i
if begun is not None:
copy.extend(splitOn(results[begun:], pred, tran))
return copy
def parseNestedParens(s, handleLiteral = 1):
"""Parse an s-exp-like string into a more useful data structure.
@type s: C{str}
@param s: The s-exp-like string to parse
@rtype: C{list} of C{str} and C{list}
@return: A list containing the tokens present in the input.
@raise MismatchedNesting: Raised if the number or placement
of opening or closing parentheses is invalid.
"""
s = s.strip()
inQuote = 0
contentStack = [[]]
try:
i = 0
L = len(s)
while i < L:
c = s[i]
if inQuote:
if c == '\\':
contentStack[-1].append(s[i+1])
i += 2
continue
elif c == '"':
inQuote = not inQuote
contentStack[-1].append(c)
i += 1
else:
if c == '"':
contentStack[-1].append(c)
inQuote = not inQuote
i += 1
elif handleLiteral and c == '{':
end = s.find('}', i)
if end == -1:
raise ValueError, "Malformed literal"
literalSize = int(s[i+1:end])
contentStack[-1].append((s[end+3:end+3+literalSize],))
i = end + 3 + literalSize
elif c == '(' or c == '[':
contentStack.append([])
i += 1
elif c == ')' or c == ']':
contentStack[-2].append(contentStack.pop())
i += 1
else:
contentStack[-1].append(c)
i += 1
except IndexError:
raise MismatchedNesting(s)
if len(contentStack) != 1:
raise MismatchedNesting(s)
return collapseStrings(contentStack[0])
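# Example sketch (illustrative only) of parseNestedParens; outputs follow from
# the parser and collapseStrings above:
#   parseNestedParens('(a b (c d))')   ->  [['a', 'b', ['c', 'd']]]
#   parseNestedParens('{3}\r\nabc')    ->  ['abc']   (literal handling)
# Unbalanced parentheses or brackets raise MismatchedNesting.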
def _quote(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
def _literal(s):
return '{%d}\r\n%s' % (len(s), s)
class DontQuoteMe:
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
_ATOM_SPECIALS = '(){ %*"'
def _needsQuote(s):
if s == '':
return 1
for c in s:
if c < '\x20' or c > '\x7f':
return 1
if c in _ATOM_SPECIALS:
return 1
return 0
def _prepareMailboxName(name):
name = name.encode('imap4-utf-7')
if _needsQuote(name):
return _quote(name)
return name
def _needsLiteral(s):
# Change this to "return 1" to wig out stupid clients
return '\n' in s or '\r' in s or len(s) > 1000
def collapseNestedLists(items):
"""Turn a nested list structure into an s-exp-like string.
Strings in C{items} will be sent as literals if they contain CR or LF,
otherwise they will be quoted. References to None in C{items} will be
translated to the atom NIL. Objects with a 'read' attribute will have
it called on them with no arguments and the returned string will be
inserted into the output as a literal. Integers will be converted to
strings and inserted into the output unquoted. Instances of
C{DontQuoteMe} will be converted to strings and inserted into the output
unquoted.
This function used to be much nicer, and only quote things that really
needed to be quoted (and C{DontQuoteMe} did not exist), however, many
broken IMAP4 clients were unable to deal with this level of sophistication,
forcing the current behavior to be adopted for practical reasons.
@type items: Any iterable
@rtype: C{str}
"""
pieces = []
for i in items:
if i is None:
pieces.extend([' ', 'NIL'])
elif isinstance(i, (DontQuoteMe, int, long)):
pieces.extend([' ', str(i)])
elif isinstance(i, types.StringTypes):
if _needsLiteral(i):
pieces.extend([' ', '{', str(len(i)), '}', IMAP4Server.delimiter, i])
else:
pieces.extend([' ', _quote(i)])
elif hasattr(i, 'read'):
d = i.read()
pieces.extend([' ', '{', str(len(d)), '}', IMAP4Server.delimiter, d])
else:
pieces.extend([' ', '(%s)' % (collapseNestedLists(i),)])
return ''.join(pieces[1:])
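# Example sketch (illustrative only) of collapseNestedLists, assuming the
# protocol delimiter is CRLF:
#   collapseNestedLists([None, 1, 'foo', ['bar', 'baz']])
#       ->  'NIL 1 "foo" ("bar" "baz")'
# A string containing CR or LF, e.g. 'a\r\nb', is emitted as the IMAP
# literal '{4}\r\na\r\nb' instead of a quoted string.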
class IClientAuthentication(Interface):
def getName():
"""Return an identifier associated with this authentication scheme.
@rtype: C{str}
"""
def challengeResponse(secret, challenge):
"""Generate a challenge response string"""
class CramMD5ClientAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "CRAM-MD5"
def challengeResponse(self, secret, chal):
response = hmac.HMAC(secret, chal).hexdigest()
return '%s %s' % (self.user, response)
class LOGINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
self.challengeResponse = self.challengeUsername
def getName(self):
return "LOGIN"
def challengeUsername(self, secret, chal):
# Respond to something like "Username:"
self.challengeResponse = self.challengeSecret
return self.user
def challengeSecret(self, secret, chal):
# Respond to something like "Password:"
return secret
class PLAINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "PLAIN"
def challengeResponse(self, secret, chal):
return '%s\0%s\0' % (self.user, secret)
class MailboxException(IMAP4Exception): pass
class MailboxCollision(MailboxException):
def __str__(self):
return 'Mailbox named %s already exists' % self.args
class NoSuchMailbox(MailboxException):
def __str__(self):
return 'No mailbox named %s exists' % self.args
class ReadOnlyMailbox(MailboxException):
def __str__(self):
return 'Mailbox open in read-only state'
class IAccount(Interface):
"""Interface for Account classes
Implementors of this interface should consider implementing
C{INamespacePresenter}.
"""
def addMailbox(name, mbox = None):
"""Add a new mailbox to this account
@type name: C{str}
@param name: The name associated with this mailbox. It may not
contain multiple hierarchical parts.
@type mbox: An object implementing C{IMailbox}
@param mbox: The mailbox to associate with this name. If C{None},
a suitable default is created and used.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added for
some reason. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def create(pathspec):
"""Create a new mailbox from the given hierarchical name.
@type pathspec: C{str}
@param pathspec: The full hierarchical name of a new mailbox to create.
If any of the inferior hierarchical names to this one do not exist,
they are created as well.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added.
This may also be raised asynchronously, if a C{Deferred} is
returned.
"""
def select(name, rw=True):
"""Acquire a mailbox, given its name.
@type name: C{str}
@param name: The mailbox to acquire
@type rw: C{bool}
@param rw: If a true value, request a read-write version of this
mailbox. If a false value, request a read-only version.
@rtype: Any object implementing C{IMailbox} or C{Deferred}
@return: The mailbox object, or a C{Deferred} whose callback will
be invoked with the mailbox object. None may be returned if the
specified mailbox may not be selected for any reason.
"""
def delete(name):
"""Delete the mailbox with the specified name.
@type name: C{str}
@param name: The mailbox to delete.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully deleted, or a
C{Deferred} whose callback will be invoked when the deletion
completes.
@raise MailboxException: Raised if this mailbox cannot be deleted.
This may also be raised asynchronously, if a C{Deferred} is returned.
"""
def rename(oldname, newname):
"""Rename a mailbox
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to associate with the mailbox.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully renamed, or a
C{Deferred} whose callback will be invoked when the rename operation
is completed.
@raise MailboxException: Raised if this mailbox cannot be
renamed. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def isSubscribed(name):
"""Check the subscription status of a mailbox
@type name: C{str}
@param name: The name of the mailbox to check
@rtype: C{Deferred} or C{bool}
@return: A true value if the given mailbox is currently subscribed
to, a false value otherwise. A C{Deferred} may also be returned
whose callback will be invoked with one of these values.
"""
def subscribe(name):
"""Subscribe to a mailbox
@type name: C{str}
@param name: The name of the mailbox to subscribe to
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is subscribed to successfully,
or a Deferred whose callback will be invoked with this value when
the subscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
subscribed to. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def unsubscribe(name):
"""Unsubscribe from a mailbox
@type name: C{str}
@param name: The name of the mailbox to unsubscribe from
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is unsubscribed from successfully,
or a Deferred whose callback will be invoked with this value when
the unsubscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
unsubscribed from. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def listMailboxes(ref, wildcard):
"""List all the mailboxes that meet a certain criteria
@type ref: C{str}
@param ref: The context in which to apply the wildcard
@type wildcard: C{str}
@param wildcard: An expression against which to match mailbox names.
'*' matches any number of characters in a mailbox name, and '%'
matches similarly, but will not match across hierarchical boundaries.
@rtype: C{list} of C{tuple}
@return: A list of C{(mailboxName, mailboxObject)} which meet the
given criteria. C{mailboxObject} should implement either
C{IMailboxInfo} or C{IMailbox}. A Deferred may also be returned.
"""
class INamespacePresenter(Interface):
def getPersonalNamespaces():
"""Report the available personal namespaces.
Typically there should be only one personal namespace. A common
name for it is \"\", and its hierarchical delimiter is usually
\"/\".
@rtype: iterable of two-tuples of strings
@return: The personal namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getSharedNamespaces():
"""Report the available shared namespaces.
Shared namespaces do not belong to any individual user but are
usually available to one or more of them. Examples of shared namespaces
might be \"#news\" for a usenet gateway.
@rtype: iterable of two-tuples of strings
@return: The shared namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getUserNamespaces():
"""Report the available user namespaces.
These are namespaces that contain folders belonging to other users,
access to which has been granted to this account.
@rtype: iterable of two-tuples of strings
@return: The user namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
class MemoryAccount(object):
implements(IAccount, INamespacePresenter)
mailboxes = None
subscriptions = None
top_id = 0
def __init__(self, name):
self.name = name
self.mailboxes = {}
self.subscriptions = []
def allocateID(self):
id = self.top_id
self.top_id += 1
return id
##
## IAccount
##
def addMailbox(self, name, mbox = None):
name = name.upper()
if self.mailboxes.has_key(name):
raise MailboxCollision, name
if mbox is None:
mbox = self._emptyMailbox(name, self.allocateID())
self.mailboxes[name] = mbox
return 1
def create(self, pathspec):
paths = filter(None, pathspec.split('/'))
for accum in range(1, len(paths)):
try:
self.addMailbox('/'.join(paths[:accum]))
except MailboxCollision:
pass
try:
self.addMailbox('/'.join(paths))
except MailboxCollision:
if not pathspec.endswith('/'):
return False
return True
def _emptyMailbox(self, name, id):
raise NotImplementedError
def select(self, name, readwrite=1):
return self.mailboxes.get(name.upper())
def delete(self, name):
name = name.upper()
# See if this mailbox exists at all
mbox = self.mailboxes.get(name)
if not mbox:
raise MailboxException("No such mailbox")
# See if this box is flagged \Noselect
if r'\Noselect' in mbox.getFlags():
# Check for hierarchically inferior mailboxes with this one
# as part of their root.
for others in self.mailboxes.keys():
if others != name and others.startswith(name):
raise MailboxException, "Hierarchically inferior mailboxes exist and \\Noselect is set"
mbox.destroy()
# iff there are no hierarchically inferior names, we will
# delete it from our ken.
if self._inferiorNames(name) > 1:
del self.mailboxes[name]
def rename(self, oldname, newname):
oldname = oldname.upper()
newname = newname.upper()
if not self.mailboxes.has_key(oldname):
raise NoSuchMailbox, oldname
inferiors = self._inferiorNames(oldname)
inferiors = [(o, o.replace(oldname, newname, 1)) for o in inferiors]
for (old, new) in inferiors:
if self.mailboxes.has_key(new):
raise MailboxCollision, new
for (old, new) in inferiors:
self.mailboxes[new] = self.mailboxes[old]
del self.mailboxes[old]
def _inferiorNames(self, name):
inferiors = []
for infname in self.mailboxes.keys():
if infname.startswith(name):
inferiors.append(infname)
return inferiors
def isSubscribed(self, name):
return name.upper() in self.subscriptions
def subscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
self.subscriptions.append(name)
def unsubscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
raise MailboxException, "Not currently subscribed to " + name
self.subscriptions.remove(name)
def listMailboxes(self, ref, wildcard):
ref = self._inferiorNames(ref.upper())
wildcard = wildcardToRegexp(wildcard, '/')
return [(i, self.mailboxes[i]) for i in ref if wildcard.match(i)]
##
## INamespacePresenter
##
def getPersonalNamespaces(self):
return [["", "/"]]
def getSharedNamespaces(self):
return None
def getOtherNamespaces(self):
return None
_statusRequestDict = {
'MESSAGES': 'getMessageCount',
'RECENT': 'getRecentCount',
'UIDNEXT': 'getUIDNext',
'UIDVALIDITY': 'getUIDValidity',
'UNSEEN': 'getUnseenCount'
}
def statusRequestHelper(mbox, names):
r = {}
for n in names:
r[n] = getattr(mbox, _statusRequestDict[n.upper()])()
return r
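# Example sketch (illustrative only): a mailbox's requestStatus can usually be
# written in terms of this helper, where mbox is any IMailbox implementation:
#   statusRequestHelper(mbox, ('MESSAGES', 'UNSEEN'))
#       ->  {'MESSAGES': mbox.getMessageCount(), 'UNSEEN': mbox.getUnseenCount()}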
def parseAddr(addr):
if addr is None:
return [(None, None, None),]
addrs = email.Utils.getaddresses([addr])
return [[fn or None, None] + addr.split('@') for fn, addr in addrs]
def getEnvelope(msg):
headers = msg.getHeaders(True)
date = headers.get('date')
subject = headers.get('subject')
from_ = headers.get('from')
sender = headers.get('sender', from_)
reply_to = headers.get('reply-to', from_)
to = headers.get('to')
cc = headers.get('cc')
bcc = headers.get('bcc')
in_reply_to = headers.get('in-reply-to')
mid = headers.get('message-id')
return (date, subject, parseAddr(from_), parseAddr(sender),
reply_to and parseAddr(reply_to), to and parseAddr(to),
cc and parseAddr(cc), bcc and parseAddr(bcc), in_reply_to, mid)
def getLineCount(msg):
# XXX - Super expensive, CACHE THIS VALUE FOR LATER RE-USE
# XXX - This must be the number of lines in the ENCODED version
lines = 0
for _ in msg.getBodyFile():
lines += 1
return lines
def unquote(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def getBodyStructure(msg, extended=False):
# XXX - This does not properly handle multipart messages
# BODYSTRUCTURE is obscenely complex and criminally under-documented.
attrs = {}
headers = 'content-type', 'content-id', 'content-description', 'content-transfer-encoding'
headers = msg.getHeaders(False, *headers)
mm = headers.get('content-type')
if mm:
mm = ''.join(mm.splitlines())
mimetype = mm.split(';')
if mimetype:
type = mimetype[0].split('/', 1)
if len(type) == 1:
major = type[0]
minor = None
elif len(type) == 2:
major, minor = type
else:
major = minor = None
attrs = dict([x.strip().lower().split('=', 1) for x in mimetype[1:]])
else:
major = minor = None
else:
major = minor = None
size = str(msg.getSize())
unquotedAttrs = [(k, unquote(v)) for (k, v) in attrs.iteritems()]
result = [
major, minor, # Main and Sub MIME types
unquotedAttrs, # content-type parameter list
headers.get('content-id'),
headers.get('content-description'),
headers.get('content-transfer-encoding'),
size, # Number of octets total
]
if major is not None:
if major.lower() == 'text':
result.append(str(getLineCount(msg)))
elif (major.lower(), minor.lower()) == ('message', 'rfc822'):
contained = msg.getSubPart(0)
result.append(getEnvelope(contained))
result.append(getBodyStructure(contained, False))
result.append(str(getLineCount(contained)))
if not extended or major is None:
return result
if major.lower() != 'multipart':
headers = 'content-md5', 'content-disposition', 'content-language'
headers = msg.getHeaders(False, *headers)
disp = headers.get('content-disposition')
# XXX - I dunno if this is really right
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(headers.get('content-md5'))
result.append(disp)
result.append(headers.get('content-language'))
else:
result = [result]
try:
i = 0
while True:
submsg = msg.getSubPart(i)
result.append(getBodyStructure(submsg))
i += 1
except IndexError:
result.append(minor)
result.append(attrs.items())
# XXX - I dunno if this is really right
headers = msg.getHeaders(False, 'content-disposition', 'content-language')
disp = headers.get('content-disposition')
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(disp)
result.append(headers.get('content-language'))
return result
class IMessagePart(Interface):
def getHeaders(negate, *names):
"""Retrieve a group of message headers.
@type names: C{tuple} of C{str}
@param names: The names of the headers to retrieve or omit.
@type negate: C{bool}
@param negate: If True, indicates that the headers listed in C{names}
should be omitted from the return value, rather than included.
@rtype: C{dict}
@return: A mapping of header field names to header field values
"""
def getBodyFile():
"""Retrieve a file object containing only the body of this message.
"""
def getSize():
"""Retrieve the total size, in octets, of this message.
@rtype: C{int}
"""
def isMultipart():
"""Indicate whether this message has subparts.
@rtype: C{bool}
"""
def getSubPart(part):
"""Retrieve a MIME sub-message
@type part: C{int}
@param part: The number of the part to retrieve, indexed from 0.
@raise IndexError: Raised if the specified part does not exist.
@raise TypeError: Raised if this message is not multipart.
@rtype: Any object implementing C{IMessagePart}.
@return: The specified sub-part.
"""
class IMessage(IMessagePart):
def getUID():
"""Retrieve the unique identifier associated with this message.
"""
def getFlags():
"""Retrieve the flags associated with this message.
@rtype: C{iterable}
@return: The flags, represented as strings.
"""
def getInternalDate():
"""Retrieve the date internally associated with this message.
@rtype: C{str}
@return: An RFC822-formatted date string.
"""
class IMessageFile(Interface):
"""Optional message interface for representing messages as files.
If provided by message objects, this interface will be used instead of
the more complex MIME-based interface.
"""
def open():
"""Return an file-like object opened for reading.
Reading from the returned file will return all the bytes
of which this message consists.
"""
class ISearchableMailbox(Interface):
def search(query, uid):
"""Search for messages that meet the given query criteria.
If this interface is not implemented by the mailbox, L{IMailbox.fetch}
and various methods of L{IMessage} will be used instead.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
@type query: C{list}
@param query: The search criteria
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{list} or C{Deferred}
@return: A list of message sequence numbers or message UIDs which
match the search criteria or a C{Deferred} whose callback will be
invoked with such a list.
"""
class IMessageCopier(Interface):
def copy(messageObject):
"""Copy the given message object into this mailbox.
The message object will be one which was previously returned by
L{IMailbox.fetch}.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
If this interface is not implemented by the mailbox, IMailbox.addMessage
will be used instead.
@rtype: C{Deferred} or C{int}
@return: Either the UID of the message or a Deferred which fires
with the UID when the copy finishes.
"""
class IMailboxInfo(Interface):
"""Interface specifying only the methods required for C{listMailboxes}.
Implementations can return objects implementing only these methods from
C{listMailboxes} if doing so allows them to operate more
efficiently.
"""
def getFlags():
"""Return the flags defined in this mailbox
Flags with the \\ prefix are reserved for use as system flags.
@rtype: C{list} of C{str}
@return: A list of the flags that can be set on messages in this mailbox.
"""
def getHierarchicalDelimiter():
"""Get the character which delimits namespaces for in this mailbox.
@rtype: C{str}
"""
class IMailbox(IMailboxInfo):
def getUIDValidity():
"""Return the unique validity identifier for this mailbox.
@rtype: C{int}
"""
def getUIDNext():
"""Return the likely UID for the next message added to this mailbox.
@rtype: C{int}
"""
def getUID(message):
"""Return the UID of a message in the mailbox
@type message: C{int}
@param message: The message sequence number
@rtype: C{int}
@return: The UID of the message.
"""
def getMessageCount():
"""Return the number of messages in this mailbox.
@rtype: C{int}
"""
def getRecentCount():
"""Return the number of messages with the 'Recent' flag.
@rtype: C{int}
"""
def getUnseenCount():
"""Return the number of messages with the 'Unseen' flag.
@rtype: C{int}
"""
def isWriteable():
"""Get the read/write status of the mailbox.
@rtype: C{int}
@return: A true value if write permission is allowed, a false value otherwise.
"""
def destroy():
"""Called before this mailbox is deleted, permanently.
If necessary, all resources held by this mailbox should be cleaned
up here. This function _must_ set the \\Noselect flag on this
mailbox.
"""
def requestStatus(names):
"""Return status information about this mailbox.
For mailboxes which do not intend to do any special processing to
generate the return value, C{statusRequestHelper} can be used
to build the dictionary by calling the other interface methods
which return the data for each name.
@type names: Any iterable
@param names: The status names to return information regarding.
The possible values for each name are: MESSAGES, RECENT, UIDNEXT,
UIDVALIDITY, UNSEEN.
@rtype: C{dict} or C{Deferred}
@return: A dictionary containing status information about the
requested names is returned. If the process of looking this
information up would be costly, a deferred whose callback will
eventually be passed this dictionary is returned instead.
"""
def addListener(listener):
"""Add a mailbox change listener
@type listener: Any object which implements C{IMailboxListener}
@param listener: An object to add to the set of those which will
be notified when the contents of this mailbox change.
"""
def removeListener(listener):
"""Remove a mailbox change listener
@type listener: Any object previously added to and not removed from
this mailbox as a listener.
@param listener: The object to remove from the set of listeners.
@raise ValueError: Raised when the given object is not a listener for
this mailbox.
"""
def addMessage(message, flags = (), date = None):
"""Add the given message to this mailbox.
@type message: A file-like object
@param message: The RFC822 formatted message
@type flags: Any iterable of C{str}
@param flags: The flags to associate with this message
@type date: C{str}
@param date: If specified, the date to associate with this
message.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with the message
id if the message is added successfully and whose errback is
invoked otherwise.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def expunge():
"""Remove all messages flagged \\Deleted.
@rtype: C{list} or C{Deferred}
@return: The list of message sequence numbers which were deleted,
or a C{Deferred} whose callback will be invoked with such a list.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def fetch(messages, uid):
"""Retrieve one or more messages.
@type messages: C{MessageSet}
@param messages: The identifiers of messages to retrieve information
about
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: Any iterable of two-tuples of message sequence numbers and
implementors of C{IMessage}.
"""
def store(messages, flags, mode, uid):
"""Set the flags of one or more messages.
@type messages: A MessageSet object with the list of messages requested
@param messages: The identifiers of the messages to set the flags of.
@type flags: sequence of C{str}
@param flags: The flags to set, unset, or add.
@type mode: -1, 0, or 1
@param mode: If mode is -1, these flags should be removed from the
specified messages. If mode is 1, these flags should be added to
the specified messages. If mode is 0, all existing flags should be
cleared and these flags should be added.
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{dict} or C{Deferred}
@return: A C{dict} mapping message sequence numbers to sequences of C{str}
representing the flags set on the message after this operation has
been performed, or a C{Deferred} whose callback will be invoked with
such a C{dict}.
@raise ReadOnlyMailbox: Raised if this mailbox is not open for
read-write.
"""
class ICloseableMailbox(Interface):
"""A supplementary interface for mailboxes which require cleanup on close.
Implementing this interface is optional. If it is implemented, the protocol
code will call the close method defined whenever a mailbox is closed.
"""
def close():
"""Close this mailbox.
@return: A C{Deferred} which fires when this mailbox
has been closed, or None if the mailbox can be closed
immediately.
"""
def _formatHeaders(headers):
hdrs = [': '.join((k.title(), '\r\n'.join(v.splitlines()))) for (k, v)
in headers.iteritems()]
hdrs = '\r\n'.join(hdrs) + '\r\n'
return hdrs
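# Example sketch (illustrative only) of _formatHeaders; names are title-cased
# and each header line is CRLF terminated:
#   _formatHeaders({'subject': 'hi'})  ->  'Subject: hi\r\n'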
def subparts(m):
i = 0
try:
while True:
yield m.getSubPart(i)
i += 1
except IndexError:
pass
def iterateInReactor(i):
"""Consume an interator at most a single iteration per reactor iteration.
If the iterator produces a Deferred, the next iteration will not occur
until the Deferred fires, otherwise the next iteration will be taken
in the next reactor iteration.
@rtype: C{Deferred}
@return: A deferred which fires (with None) when the iterator is
exhausted or whose errback is called if there is an exception.
"""
from twisted.internet import reactor
d = defer.Deferred()
def go(last):
try:
r = i.next()
except StopIteration:
d.callback(last)
except:
d.errback()
else:
if isinstance(r, defer.Deferred):
r.addCallback(go)
else:
reactor.callLater(0, go, r)
go(None)
return d
class MessageProducer:
CHUNK_SIZE = 2 ** 2 ** 2 ** 2
def __init__(self, msg, buffer = None, scheduler = None):
"""Produce this message.
@param msg: The message I am to produce.
@type msg: L{IMessage}
@param buffer: A buffer to hold the message in. If None, I will
use a L{tempfile.TemporaryFile}.
@type buffer: file-like
"""
self.msg = msg
if buffer is None:
buffer = tempfile.TemporaryFile()
self.buffer = buffer
if scheduler is None:
scheduler = iterateInReactor
self.scheduler = scheduler
self.write = self.buffer.write
def beginProducing(self, consumer):
self.consumer = consumer
return self.scheduler(self._produce())
def _produce(self):
headers = self.msg.getHeaders(True)
boundary = None
if self.msg.isMultipart():
content = headers.get('content-type')
parts = [x.split('=', 1) for x in content.split(';')[1:]]
parts = dict([(k.lower().strip(), v) for (k, v) in parts])
boundary = parts.get('boundary')
if boundary is None:
# Bastards
boundary = '----=_%f_boundary_%f' % (time.time(), random.random())
headers['content-type'] += '; boundary="%s"' % (boundary,)
else:
if boundary.startswith('"') and boundary.endswith('"'):
boundary = boundary[1:-1]
self.write(_formatHeaders(headers))
self.write('\r\n')
if self.msg.isMultipart():
for p in subparts(self.msg):
self.write('\r\n--%s\r\n' % (boundary,))
yield MessageProducer(p, self.buffer, self.scheduler
).beginProducing(None
)
self.write('\r\n--%s--\r\n' % (boundary,))
else:
f = self.msg.getBodyFile()
while True:
b = f.read(self.CHUNK_SIZE)
if b:
self.buffer.write(b)
yield None
else:
break
if self.consumer:
self.buffer.seek(0, 0)
yield FileProducer(self.buffer
).beginProducing(self.consumer
).addCallback(lambda _: self
)
class _FetchParser:
class Envelope:
# Response should be a list of fields from the message:
# date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
# and message-id.
#
# from, sender, reply-to, to, cc, and bcc are themselves lists of
# address information:
# personal name, source route, mailbox name, host name
#
# reply-to and sender must not be None. If not present in a message
# they should be defaulted to the value of the from field.
type = 'envelope'
__str__ = lambda self: 'envelope'
class Flags:
type = 'flags'
__str__ = lambda self: 'flags'
class InternalDate:
type = 'internaldate'
__str__ = lambda self: 'internaldate'
class RFC822Header:
type = 'rfc822header'
__str__ = lambda self: 'rfc822.header'
class RFC822Text:
type = 'rfc822text'
__str__ = lambda self: 'rfc822.text'
class RFC822Size:
type = 'rfc822size'
__str__ = lambda self: 'rfc822.size'
class RFC822:
type = 'rfc822'
__str__ = lambda self: 'rfc822'
class UID:
type = 'uid'
__str__ = lambda self: 'uid'
class Body:
type = 'body'
peek = False
header = None
mime = None
text = None
part = ()
empty = False
partialBegin = None
partialLength = None
def __str__(self):
base = 'BODY'
part = ''
separator = ''
if self.part:
part = '.'.join([str(x + 1) for x in self.part])
separator = '.'
# if self.peek:
# base += '.PEEK'
if self.header:
base += '[%s%s%s]' % (part, separator, self.header,)
elif self.text:
base += '[%s%sTEXT]' % (part, separator)
elif self.mime:
base += '[%s%sMIME]' % (part, separator)
elif self.empty:
base += '[%s]' % (part,)
if self.partialBegin is not None:
base += '<%d.%d>' % (self.partialBegin, self.partialLength)
return base
class BodyStructure:
type = 'bodystructure'
__str__ = lambda self: 'bodystructure'
# These three aren't top-level, they don't need type indicators
class Header:
negate = False
fields = None
part = None
def __str__(self):
base = 'HEADER'
if self.fields:
base += '.FIELDS'
if self.negate:
base += '.NOT'
fields = []
for f in self.fields:
f = f.title()
if _needsQuote(f):
f = _quote(f)
fields.append(f)
base += ' (%s)' % ' '.join(fields)
if self.part:
base = '.'.join([str(x + 1) for x in self.part]) + '.' + base
return base
class Text:
pass
class MIME:
pass
parts = None
_simple_fetch_att = [
('envelope', Envelope),
('flags', Flags),
('internaldate', InternalDate),
('rfc822.header', RFC822Header),
('rfc822.text', RFC822Text),
('rfc822.size', RFC822Size),
('rfc822', RFC822),
('uid', UID),
('bodystructure', BodyStructure),
]
def __init__(self):
self.state = ['initial']
self.result = []
self.remaining = ''
def parseString(self, s):
s = self.remaining + s
try:
while s or self.state:
# print 'Entering state_' + self.state[-1] + ' with', repr(s)
state = self.state.pop()
try:
used = getattr(self, 'state_' + state)(s)
except:
self.state.append(state)
raise
else:
# print state, 'consumed', repr(s[:used])
s = s[used:]
finally:
self.remaining = s
def state_initial(self, s):
# In the initial state, the literals "ALL", "FULL", and "FAST"
# are accepted, as is a ( indicating the beginning of a list of
# fetch_att tokens, as is the beginning of a fetch_att token.
if s == '':
return 0
l = s.lower()
if l.startswith('all'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope()
))
return 3
if l.startswith('full'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope(),
self.Body()
))
return 4
if l.startswith('fast'):
self.result.extend((
self.Flags(), self.InternalDate(), self.RFC822Size(),
))
return 4
if l.startswith('('):
self.state.extend(('close_paren', 'maybe_fetch_att', 'fetch_att'))
return 1
self.state.append('fetch_att')
return 0
def state_close_paren(self, s):
if s.startswith(')'):
return 1
raise Exception("Missing )")
def state_whitespace(self, s):
# Eat up all the leading whitespace
if not s or not s[0].isspace():
raise Exception("Whitespace expected, none found")
i = 0
for i in range(len(s)):
if not s[i].isspace():
break
return i
def state_maybe_fetch_att(self, s):
if not s.startswith(')'):
self.state.extend(('maybe_fetch_att', 'fetch_att', 'whitespace'))
return 0
def state_fetch_att(self, s):
# Allowed fetch_att tokens are "ENVELOPE", "FLAGS", "INTERNALDATE",
# "RFC822", "RFC822.HEADER", "RFC822.SIZE", "RFC822.TEXT", "BODY",
# "BODYSTRUCTURE", "UID",
# "BODY [".PEEK"] [<section>] ["<" <number> "." <nz_number> ">"]
l = s.lower()
for (name, cls) in self._simple_fetch_att:
if l.startswith(name):
self.result.append(cls())
return len(name)
b = self.Body()
if l.startswith('body.peek'):
b.peek = True
used = 9
elif l.startswith('body'):
used = 4
else:
raise Exception("Nothing recognized in fetch_att: %s" % (l,))
self.pending_body = b
self.state.extend(('got_body', 'maybe_partial', 'maybe_section'))
return used
def state_got_body(self, s):
self.result.append(self.pending_body)
del self.pending_body
return 0
def state_maybe_section(self, s):
if not s.startswith("["):
return 0
self.state.extend(('section', 'part_number'))
return 1
_partExpr = re.compile(r'(\d+(?:\.\d+)*)\.?')
def state_part_number(self, s):
m = self._partExpr.match(s)
if m is not None:
self.parts = [int(p) - 1 for p in m.groups()[0].split('.')]
return m.end()
else:
self.parts = []
return 0
def state_section(self, s):
# Grab "HEADER]" or "HEADER.FIELDS (Header list)]" or
# "HEADER.FIELDS.NOT (Header list)]" or "TEXT]" or "MIME]" or
# just "]".
l = s.lower()
used = 0
if l.startswith(']'):
self.pending_body.empty = True
used += 1
elif l.startswith('header]'):
h = self.pending_body.header = self.Header()
h.negate = True
h.fields = ()
used += 7
elif l.startswith('text]'):
self.pending_body.text = self.Text()
used += 5
elif l.startswith('mime]'):
self.pending_body.mime = self.MIME()
used += 5
else:
h = self.Header()
if l.startswith('header.fields.not'):
h.negate = True
used += 17
elif l.startswith('header.fields'):
used += 13
else:
raise Exception("Unhandled section contents: %r" % (l,))
self.pending_body.header = h
self.state.extend(('finish_section', 'header_list', 'whitespace'))
self.pending_body.part = tuple(self.parts)
self.parts = None
return used
def state_finish_section(self, s):
if not s.startswith(']'):
raise Exception("section must end with ]")
return 1
def state_header_list(self, s):
if not s.startswith('('):
raise Exception("Header list must begin with (")
end = s.find(')')
if end == -1:
raise Exception("Header list must end with )")
headers = s[1:end].split()
self.pending_body.header.fields = map(str.upper, headers)
return end + 1
def state_maybe_partial(self, s):
# Grab <number.number> or nothing at all
if not s.startswith('<'):
return 0
end = s.find('>')
if end == -1:
raise Exception("Found < but not >")
partial = s[1:end]
parts = partial.split('.', 1)
if len(parts) != 2:
raise Exception("Partial specification did not include two .-delimited integers")
begin, length = map(int, parts)
self.pending_body.partialBegin = begin
self.pending_body.partialLength = length
return end + 1
class FileProducer:
CHUNK_SIZE = 2 ** 2 ** 2 ** 2
firstWrite = True
def __init__(self, f):
self.f = f
def beginProducing(self, consumer):
self.consumer = consumer
self.produce = consumer.write
d = self._onDone = defer.Deferred()
self.consumer.registerProducer(self, False)
return d
def resumeProducing(self):
b = ''
if self.firstWrite:
b = '{%d}\r\n' % self._size()
self.firstWrite = False
if not self.f:
return
b = b + self.f.read(self.CHUNK_SIZE)
if not b:
self.consumer.unregisterProducer()
self._onDone.callback(self)
self._onDone = self.f = self.consumer = None
else:
self.produce(b)
def pauseProducing(self):
pass
def stopProducing(self):
pass
def _size(self):
b = self.f.tell()
self.f.seek(0, 2)
e = self.f.tell()
self.f.seek(b, 0)
return e - b
def parseTime(s):
# XXX - This may require localization :(
months = [
'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december'
]
expr = {
'day': r"(?P<day>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'mon': r"(?P<mon>\w+)",
'year': r"(?P<year>\d\d\d\d)"
}
m = re.match('%(day)s-%(mon)s-%(year)s' % expr, s)
if not m:
raise ValueError, "Cannot parse time string %r" % (s,)
d = m.groupdict()
try:
d['mon'] = 1 + (months.index(d['mon'].lower()) % 12)
d['year'] = int(d['year'])
d['day'] = int(d['day'])
except ValueError:
raise ValueError, "Cannot parse time string %r" % (s,)
else:
return time.struct_time(
(d['year'], d['mon'], d['day'], 0, 0, 0, -1, -1, -1)
)
import codecs
def modified_base64(s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(s):
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
def encoder(s):
r = []
_in = []
for c in s:
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
return (''.join(r), len(s))
def decoder(s):
r = []
decode = []
for c in s:
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(modified_unbase64(''.join(decode[1:])))
return (''.join(r), len(s))
class StreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return decoder(s)
class StreamWriter(codecs.StreamWriter):
def decode(self, s, errors='strict'):
return encoder(s)
def imap4_utf_7(name):
if name == 'imap4-utf-7':
return (encoder, decoder, StreamReader, StreamWriter)
codecs.register(imap4_utf_7)
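# Example sketch (illustrative only) of the modified UTF-7 codec registered
# above, shown for the encoding direction only:
#   u'Entw\xfcrfe'.encode('imap4-utf-7')   ->  'Entw&APw-rfe'
#   u'Lost & Found'.encode('imap4-utf-7')  ->  'Lost &- Found'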
__all__ = [
# Protocol classes
'IMAP4Server', 'IMAP4Client',
# Interfaces
'IMailboxListener', 'IClientAuthentication', 'IAccount', 'IMailbox',
'INamespacePresenter', 'ICloseableMailbox', 'IMailboxInfo',
'IMessage', 'IMessageCopier', 'IMessageFile', 'ISearchableMailbox',
# Exceptions
'IMAP4Exception', 'IllegalClientResponse', 'IllegalOperation',
'IllegalMailboxEncoding', 'UnhandledResponse', 'NegativeResponse',
'NoSupportedAuthentication', 'IllegalServerResponse',
'IllegalIdentifierError', 'IllegalQueryError', 'MismatchedNesting',
'MismatchedQuoting', 'MailboxException', 'MailboxCollision',
'NoSuchMailbox', 'ReadOnlyMailbox',
# Auth objects
'CramMD5ClientAuthenticator', 'PLAINAuthenticator', 'LOGINAuthenticator',
'PLAINCredentials', 'LOGINCredentials',
# Simple query interface
'Query', 'Not', 'Or',
# Miscellaneous
'MemoryAccount',
'statusRequestHelper',
]
| bsd-3-clause | -8,669,594,478,378,295,000 | 33.044453 | 125 | 0.570752 | false |
BeetleChunks/redsails | rsCrypto/rsCrypto.py | 1 | 2008 | import base64
import ctypes
import sys
from Crypto.Cipher import AES
from Crypto import Random
from pbkdf2 import PBKDF2
class iv:
def __init__(self, bs):
self.bs = bs
self.usedIVs = set()
self.initializeIV()
def initializeIV(self):
self.value = Random.new().read(self.bs)
self.usedIVs.add(self.value)
def setNextIV(self):
self.value = Random.new().read(self.bs)
if self.value in self.usedIVs:
self.setNextIV()
else:
self.usedIVs.add(self.value)
class AESCipher:
def __init__(self, key):
# I'll implement this later -_-
#self.keyDerivation = PBKDF2(key, os.urandom(8)).read(32)
self.keyDerivation = PBKDF2(key, "DEADLIST").read(32)
self.iv = iv(AES.block_size)
self.tryToKillPasswordInMemory(key)
def encrypt(self, payload):
payload = self.addPadding(payload)
cipher = AES.new(self.keyDerivation, AES.MODE_CBC, self.iv.value)
cipherText = base64.b64encode(self.iv.value + cipher.encrypt(payload))
self.iv.setNextIV()
return cipherText
def decrypt(self, payload):
cipherText = base64.b64decode(payload)
iv = cipherText[:16]
cipher = AES.new(self.keyDerivation, AES.MODE_CBC, iv)
paddedText = cipher.decrypt(cipherText[16:])
plainText = self.subPadding(paddedText)
return plainText
# TODO: Use p-rand int for char() for more padding entropy
def addPadding(self, payload):
length = 16 - (len(payload) % 16)
payload += chr(length)*length
return payload
def subPadding(self, payload):
return payload.rstrip(payload[-1])
# Great example of why I like C more than Python...
def tryToKillPasswordInMemory(self, keyToDelete):
tmpStrToGetHeader = "This is a temp string to get header"
header = ctypes.string_at(id(tmpStrToGetHeader), sys.getsizeof(tmpStrToGetHeader)).find(tmpStrToGetHeader)
location = id(keyToDelete) + header
size = sys.getsizeof(keyToDelete) - header
memset = ctypes.cdll.msvcrt.memset
memset(location, 0, size) | gpl-3.0 | 5,536,739,678,931,007,000 | 24.8 | 108 | 0.693227 | false |
riccardofreixo/ansible_ec2_elb_healthcheck | ec2_elb_healthcheck/ec2_elb_healthcheck.py | 1 | 4619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Riccardo Freixo
"""
Simple Ansible module to health check instances in an ELB
"""
DOCUMENTATION = '''
---
module: ec2_elb_healthcheck
version_added: "1.8"
short_description: Get instance Health Check state from ELBs
description:
- Gets instance Health Check states from ELBs.
author: Riccardo Freixo
options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
name:
description:
- The name of the ELB.
required: true
instances:
description:
- A list of instance IDs to get Health Check states from.
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Check health of two instances attached to elb myelb
- ec2_elb_healthcheck:
region: eu-west-1
name: my-elb
instances:
- i-1157af42
- i-b514da21
# Check health of all instances attached to elb myelb
- ec2_elb_healthcheck:
region: eu-west-1
name: my-elb
'''
import sys
try:
import boto
import boto.ec2
import boto.ec2.elb
import boto.ec2.elb.attributes
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
def check_instances_health(connection, elb, ids):
"""
Returns a dict with the state of each instance in 'ids'.
:type connection: :class:`boto.ec2.connection.EC2Connection`
:param connection: a connection to ec2
:type elb: str
:param elb: the name of the ELB to health check.
:type ids: list
:param ids: a list of instance IDs to health check.
:rtype: dict
:return: Returns a dict with the state of each instance in 'ids'.
"""
try:
instances = connection.describe_instance_health(elb)
except boto.exception.EC2ResponseError, error:
module.fail_json(msg=str(error))
healthcheck = {instance.instance_id: instance.state for instance in instances if instance.instance_id in ids}
for instance_not_found in set(ids) - set(healthcheck.keys()):
healthcheck[instance_not_found] = 'NotFound'
instances_in_service = [k for k, v in healthcheck.iteritems() if v == 'InService']
all_in_service = True if len(instances_in_service) == len(ids) else False
return dict(
all_in_service=all_in_service,
instances=healthcheck
)
def check_all_instances_health(connection, elb):
"""
Returns a dict with the state of each instance attached to the ELB 'elb'.
:type connection: :class:`boto.ec2.connection.EC2Connection`
:param connection: a connection to ec2
:type elb: str
:param elb: the name of the ELB to health check.
:rtype: dict
:return: Returns a dict with the state of each instance attached to the ELB 'elb'.
"""
try:
instances = connection.describe_instance_health(elb)
except boto.exception.EC2ResponseError, error:
module.fail_json(msg=str(error))
healthcheck = {instance.instance_id: instance.state for instance in instances}
instances_in_service = [k for k, v in healthcheck.iteritems() if v == 'InService']
all_in_service = True if len(instances_in_service) == len(instances) else False
return dict(
all_in_service=all_in_service,
instances=healthcheck
)
def main():
"""Main function"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
instances=dict(type='list')
),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, error:
module.fail_json(msg=str(error))
name = module.params.get('name')
instances = module.params.get('instances')
if instances is not None:
results = check_instances_health(connection, name, ids=instances)
else:
results = check_all_instances_health(connection, name)
module.exit_json(
changed=False,
**results
)
if __name__ == "__main__":
main()
| gpl-3.0 | 7,849,643,962,396,327,000 | 27.337423 | 119 | 0.658584 | false |
nsdont/gogs_ci_demo | superlists/superlists/settings.py | 1 | 2664 | """
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_@3#)mt6d@mk73wnt-rq47@0d5*fe5kqmshd_-nj*^43d92!rz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit | 6,567,312,469,750,804,000 | 24.615385 | 71 | 0.691066 | false |
pchmieli/h2o-3 | h2o-py/tests/testdir_jira/pyunit_hexdev_296_confusion_matrices.py | 1 | 1271 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def confusion_matrices_check():
local_data = [[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[0, 'b'],
[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b']]
h2o_data = h2o.H2OFrame(python_obj=zip(*local_data))
h2o_data.set_names(['response', 'predictor'])
h2o_data.show()
gbm = h2o.gbm(x=h2o_data[1:], y=h2o_data["response"].asfactor(), ntrees=1, distribution="bernoulli")
gbm.show()
perf = gbm.model_performance()
tps = perf.metric("tps", [perf.find_threshold_by_max_metric("f1")])[0][1]
tns = perf.metric("tns", [perf.find_threshold_by_max_metric("f1")])[0][1]
fps = perf.metric("fps", [perf.find_threshold_by_max_metric("f1")])[0][1]
fns = perf.metric("fns", [perf.find_threshold_by_max_metric("f1")])[0][1]
assert tps + tns + fps + fns == 20, "incorrect confusion matrix computation: tps: {0}, fps: {1}, tns: {2}, fns: " \
"{3}. Should sum to 20.".format(tps, fps, tns, fns)
if __name__ == "__main__":
pyunit_utils.standalone_test(confusion_matrices_check)
else:
confusion_matrices_check()
| apache-2.0 | -4,116,032,665,391,225,000 | 36.382353 | 119 | 0.543666 | false |
apache/airflow | airflow/hooks/http_hook.py | 2 | 1096 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.http.hooks.http`."""
import warnings
from airflow.providers.http.hooks.http import HttpHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.http.hooks.http`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | -6,505,298,985,341,999,000 | 38.142857 | 85 | 0.762774 | false |
Der-Eddy/pyepvp | pyepvp/session.py | 1 | 4903 | import requests
import cfscrape
import xmlrpc.client
import hashlib
import time
import platform
import json
import os
import sys
import logging
from .regexp import *
from .exceptions import *
from .parser import *
from .tapatalk import *
from .user import *
from . import __version__, __title__, __author__
class session:
'''
    Needed for several methods; logs into an elitepvpers user account and provides useful information about that account.
'''
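    # Minimal usage sketch (hypothetical credentials; the context manager logs
    # out automatically on exit):
    #   with session('some_user', 'some_password') as s:
    #       print(s.elite_gold)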
system = platform.system()
userAgent = "{0}:{1}:{2} (by {3})".format(system.lower(), __title__, __version__, __author__)
sess = cfscrape.create_scraper()
sess.headers = {
"User-Agent" : userAgent,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip,deflate,br",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3",
"Content-Type": "application/x-www-form-urlencoded"
}
username = ""
guestSession = False
securityToken = ""
logoutHash = ""
secretWord = None
userID = ""
ranks = ["guest"]
paramsGet = "&langid=1"
refreshTime = 30
_notifications = {'last_update': 0,
'unread_private_messages': 0,
'unread_vistor_messages': 0,
'unapproved_visitor_messages': 0,
'incoming_friend_requests': 0,
'groups_request': 0,
'groups_invitations': 0,
'unread_picture_comments': 0,
'unapproved_picture_comments': 0,
'unapproved_group_messages': 0,
'new_mentions': 0,
'new_post_quotes': 0,
'staff_changes': 0,
'subscribed_threads': 0}
_elite_gold = 0
@property
def elite_gold(self):
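        # Refresh the cached notification data when it is older than
        # refreshTime seconds before returning the stored elite*gold balance.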
if self._notifications['last_update'] + self.refreshTime < time.time():
self.updateNotifications()
return self._elite_gold
@elite_gold.setter
def elite_gold(self, value):
        self._elite_gold = value
def __enter__(self):
return self
def __init__(self, uname='guest', passwd=None, md5bool=False, secretWord=None):
logging.info("Running on" + systemInfo())
if passwd is not None: #Checks if User Session
            if md5bool:
                md5 = passwd
            else:
                md5 = hashlib.md5(passwd.encode("utf-8")).hexdigest()
self.username = uname
self._login(uname, md5)
if secretWord is not None:
self.secretWord = secretWord
elif uname == "guest": #Checks if Guest Session
self.username = "guest"
self.guestSession = True
self.securityToken = "guest"
else:
raise noAuthenticationException()
    def __exit__(self, *args):
self.__del__()
def __del__(self):
try:
self._logout()
except Exception:
pass
def _login(self, uname, md5):
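        # Post the vBulletin login form, then parse the user control panel to
        # pick up the security token, logout hash, user id and forum ranks.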
loginnurl = "https://www.elitepvpers.com/forum/login.php?do=login" + self.paramsGet
params = {
"do": "login",
"vb_login_md5password": md5,
"vb_login_md5password_utf": md5,
"s": "",
"cookieuser": "1",
"vb_login_username": uname,
"security_token": "guest"
}
params = dicttostr(params)
r = self.sess.post(loginnurl, data=params, verify=True)
content = parser(self, "https://www.elitepvpers.com/forum/usercp.php")
self.securityToken = securityTokenParser(content)
self.logoutHash = logoutHashParser(content)
if self.securityToken == "guest":
raise invalidAuthenticationException()
self.userID = userIDParser(content)
usercontent = parser(self, "https://www.elitepvpers.com/forum/member.php?userid=" + self.userID)
self.ranks = rankParser(usercontent)
logging.info("User-Session created: {0}:{1}:{2}".format(self.username, self.userID, self.ranks))
self.updateNotifications()
self.tapatalk = tapatalk(uname, md5)
def logout(self):
'''
        Logs out the user session and destroys this object.
'''
self.__del__()
def _logout(self):
self.sess.get("https://www.elitepvpers.com/forum/login.php?do=logout&logouthash=" + self.logoutHash)
self.tapatalk.logout()
def updateNotifications(self):
'''
Updates the notifications of the session user and returns them.
'''
url = 'https://www.elitepvpers.com/forum/usercp.php'
        getUpdates(self, url)
self._notifications['last_update'] = time.time()
logging.info('Updated notifications - {0}'.format(time.time()))
return self._notifications
| mit | -5,756,216,365,573,822,000 | 33.286713 | 130 | 0.570671 | false |
aio-libs/aiobotocore | docs/conf.py | 1 | 5964 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# aiobotocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 11 17:08:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import pathlib
import re
_docs_path = pathlib.Path(__file__).parent
_version_path = _docs_path / '../aiobotocore/__init__.py'
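# Parse the package __version__ string (e.g. "0.5.2" or "0.5.2a1") into its
# major/minor/patch/tag components, used for the Sphinx version/release below.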
with _version_path.open() as fp:
try:
_version_info = re.search(r"^__version__ = '"
r"(?P<major>\d+)"
r"\.(?P<minor>\d+)"
r"\.(?P<patch>\d+)"
r"(?P<tag>.*)?'$",
fp.read(), re.M).groupdict()
    except AttributeError:
raise RuntimeError('Unable to determine version.')
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'aiobotocore'
copyright = '2017, Nikolay Novik and aio-libs contributors'
author = 'Nikolay Novik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{major}.{minor}'.format(**_version_info)
# The full version, including alpha/beta/rc tags.
release = '{major}.{minor}.{patch}-{tag}'.format(**_version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
aiobotocore_desc = ('Async client for AWS services using botocore and aiohttp')
html_theme_options = {
'description': aiobotocore_desc,
'github_user': 'aio-libs',
'github_repo': 'aiobotocore',
'github_button': True,
'github_type': 'star',
'github_banner': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiobotocoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'aiobotocore.tex', 'aiobotocore Documentation',
'Nikolay Novik', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aiobotocore', 'aiobotocore Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'aiobotocore', 'aiobotocore Documentation',
author, 'aiobotocore', aiobotocore_desc,
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| apache-2.0 | 4,489,589,714,667,007,500 | 30.723404 | 79 | 0.651408 | false |
rschnapka/odoo | addons/purchase/purchase.py | 1 | 70094 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
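        # res[id] accumulates [received_qty, total_qty]; the final value is the
        # percentage of received quantity over total quantity.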
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line)
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
        'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, and the status switches to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status goes into exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
move_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
order_line_ids += [po_line.id for po_line in order.order_line]
move_ids += [po_line.move_dest_id.id for po_line in order.order_line if po_line.move_dest_id]
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('move_id', 'in', move_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list view or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
mod_obj = self.pool.get('ir.model.data')
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
ctx = eval(action['context'])
ctx.update({
'search_default_purchase_id': ids[0]
})
if pick_ids and len(pick_ids) == 1:
form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
view_id = form_view_ids and form_view_ids[0] or False
action.update({
'views': [],
'view_mode': 'form',
'view_id': view_id,
'res_id': pick_ids[0]
})
action.update({
'context': ctx,
})
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
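        # Use the product's expense account, fall back to its category's expense
        # account (or the generic property when there is no product), then map
        # the result through the order's fiscal position.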
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
for purchase in self.browse(cr, uid, ids, context=context):
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line], {'state': 'draft'})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
pay_acc_id = order.partner_id.property_account_payable.id
journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]}, context=context)
# get invoice data and create invoice
inv_data = {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': pay_acc_id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.pricelist_id.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, inv_lines)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]}, context=context)
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state not in ('draft','cancel'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('First cancel all receptions related to this purchase order.'))
for pick in purchase.picking_ids:
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel','draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all receptions related to this purchase order.'))
if inv:
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line],
{'state': 'cancel'})
for id in ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return True
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
            time of noon (12:00), because a time is needed.
            :param str userdate: date string in user time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
price_unit = order_line.price_unit
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates pickings and appropriate stock moves for given order lines, then
confirms the moves, makes them available, and confirms the picking.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
a standard outgoing picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: list of IDs of pickings used/created for the given order lines (usually just one)
"""
if not picking_id:
picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
todo_moves = []
stock_move = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
for order_line in order_lines:
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
if order_line.move_dest_id and order_line.move_dest_id.state != 'done':
order_line.move_dest_id.write({'location_id': order.location_id.id})
todo_moves.append(move)
stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location and the same pricelist
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
#TOFIX: merged order line should be unlink
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
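            # Build a hashable signature from the given fields so that orders
            # (or order lines) with identical values can be grouped and merged.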
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
# Compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
                                  help=' * The \'Draft\' status is set automatically when the purchase order is in the draft status. \
                                       \n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
                                       \n* The \'Done\' status is set automatically when the purchase order is set as done. \
                                       \n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
procurement_ids_to_cancel = []
for line in self.browse(cr, uid, ids, context=context):
if line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
if line.move_dest_id:
procurement_ids_to_cancel.extend(procurement.id for procurement in line.move_dest_id.procurements)
if procurement_ids_to_cancel:
self.pool['procurement.order'].action_cancel(cr, uid, procurement_ids_to_cancel)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty , qty, precision_digits=precision) == 1: # If the supplier quantity is greater than entered from user, set minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
''' return True if the supply method of the mto product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method != 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # take the main supplier of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
        return len(res) and res[0] or 0  # TO CHECK: why does the workflow raise an error if a non-integer value is returned?
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
        :param procurement: the procurement object generating the purchase order
        :param dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
        :param dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
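    # Illustrative sketch only (hypothetical ids and values, not taken from this
    # module): the (0, 0, vals) triple written into ``order_line`` is the ORM
    # command that creates one new purchase order line in place, e.g.
    #   line_vals = {'product_id': 42, 'product_qty': 5.0, 'price_unit': 9.99}
    #   po_vals = {'partner_id': 7, 'location_id': 12, 'pricelist_id': 1}
    #   po_id = self.create_procurement_purchase_order(cr, uid, procurement,
    #                                                  po_vals, line_vals)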
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
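    # Illustrative sketch only (hypothetical dates and delays): with a procurement
    # planned for 2014-05-10, a company purchase lead time of 2 days and a seller
    # delay of 3 days, the methods above give
    #   schedule_date = datetime(2014, 5, 10) - relativedelta(days=2)  # 2014-05-08
    #   order_date    = schedule_date - relativedelta(days=3)          # 2014-05-05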
def _get_warehouse(self, procurement, user_company):
"""
        Return the warehouse containing the procurement stock location (or one of its ancestors).
        If none matches, return the first warehouse of the company.
"""
# TODO refactor the domain once we implement the "parent_of" domain operator
# NOTE This method has been copied in the `purchase_requisition` module to ensure
# retro-compatibility. This code duplication will be deleted in next stable version.
# Do not forget to update both version in case of modification.
company_id = (procurement.company_id or user_company).id
domains = [
[
'&', ('company_id', '=', company_id),
'|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
('lot_stock_id', '=', procurement.location_id.id)
],
[('company_id', '=', company_id)]
]
cr, uid = procurement._cr, procurement._uid
context = procurement._context
Warehouse = self.pool['stock.warehouse']
for domain in domains:
ids = Warehouse.search(cr, uid, domain, context=context)
if ids:
return ids[0]
return False
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
        @return: Newly created purchase orders, indexed by procurement id
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id  # take the main supplier of the procurement's product
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            # pass partner_id in the context so the purchase order line name is computed for the right partner
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': self._get_warehouse(procurement, company),
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
procurement = order_point.procurement_id
if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
return None
return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None):
if mail.model == 'purchase.order':
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
}
_defaults = {
'purchase_ok': 1,
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
wf_service = netsvc.LocalService("workflow")
for order in purchase_order_obj.browse(cr, uid, po_ids, context=context):
# Signal purchase order workflow that an invoice has been validated.
invoiced = []
for po_line in order.order_line:
if any(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines):
invoiced.append(po_line.id)
if invoiced:
self.pool['purchase.order.line'].write(cr, uid, invoiced, {'invoiced': True})
wf_service.trg_write(uid, 'purchase.order', order.id, cr)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 903,126,199,072,234,500 | 51.622372 | 545 | 0.596122 | false |
Thermondo/viewflow-extensions | docs/conf.py | 1 | 2916 | # -*- coding: utf-8 -*-
import datetime
import importlib
import inspect
import sys
import os
import django
year = datetime.datetime.now().strftime("%Y")
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.testapp.settings")
django.setup()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../tests.testapp'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinxcontrib.spelling',
]
def linkcode_resolve(domain, info):
"""Link source code to GitHub."""
project = 'viewflow-extensions'
github_user = 'Thermondo'
head = 'master'
if domain != 'py' or not info['module']:
return None
filename = info['module'].replace('.', '/')
mod = importlib.import_module(info['module'])
basename = os.path.splitext(mod.__file__)[0]
if basename.endswith('__init__'):
filename += '/__init__'
item = mod
lineno = ''
for piece in info['fullname'].split('.'):
item = getattr(item, piece)
try:
lineno = '#L%d' % inspect.getsourcelines(item)[1]
except (TypeError, IOError):
pass
return ("https://github.com/%s/%s/blob/%s/%s.py%s" %
(github_user, project, head, filename, lineno))
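# Illustrative sketch only (hypothetical module name and line number): for
# info = {'module': 'viewflow_extensions.views', 'fullname': 'SomeView.get'},
# linkcode_resolve() would return a URL of the form
# https://github.com/Thermondo/viewflow-extensions/blob/master/viewflow_extensions/views.py#L42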
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'django': ('https://docs.djangoproject.com/en/stable/',
'https://docs.djangoproject.com/en/stable/_objects/'),
'viewflow': ('https://viewflow.readthedocs.io/en/latest/', None),
}
# spell check
spelling_word_list_filename = 'spelling_wordlist.txt'
spelling_show_suggestions = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
master_doc = 'index'
project = 'Viewflow Extensions'
copyright = '%s, Thermondo GmbH' % year
exclude_patterns = ['_build']
pygments_style = 'sphinx'
def skip(app, what, name, obj, skip, options):
if name == "__init__" and obj.__doc__:
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
inheritance_graph_attrs = dict(rankdir='TB')
inheritance_node_attrs = dict(shape='rect', fontsize=14, fillcolor='gray90',
color='gray30', style='filled')
inheritance_edge_attrs = dict(penwidth=0.75)
html_theme = 'sphinx_rtd_theme'
| apache-2.0 | -8,658,033,130,985,556,000 | 26.252336 | 84 | 0.653292 | false |
chfw/pyexcel-ods3 | tests/test_multiple_sheets.py | 2 | 7687 | import os
from collections import OrderedDict
import pyexcel
from base import PyexcelMultipleSheetBase
from nose.tools import raises
class TestOdsNxlsMultipleSheets(PyexcelMultipleSheetBase):
def setUp(self):
self.testfile = "multiple1.ods"
self.testfile2 = "multiple1.xls"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
def tearDown(self):
self._clean_up()
class TestXlsNOdsMultipleSheets(PyexcelMultipleSheetBase):
def setUp(self):
self.testfile = "multiple1.xls"
self.testfile2 = "multiple1.ods"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
def tearDown(self):
self._clean_up()
class TestAddBooks:
def _write_test_file(self, file):
"""
Make a test file as:
1,1,1,1
2,2,2,2
3,3,3,3
"""
self.rows = 3
pyexcel.save_book_as(bookdict=self.content, dest_file_name=file)
def setUp(self):
self.testfile = "multiple1.ods"
self.testfile2 = "multiple1.xls"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
self._write_test_file(self.testfile2)
def test_load_a_single_sheet(self):
b1 = pyexcel.get_book(file_name=self.testfile, sheet_name="Sheet1")
assert len(b1.sheet_names()) == 1
assert b1["Sheet1"].to_array() == self.content["Sheet1"]
def test_load_a_single_sheet2(self):
b1 = pyexcel.load_book(self.testfile, sheet_index=0)
assert len(b1.sheet_names()) == 1
assert b1["Sheet1"].to_array() == self.content["Sheet1"]
@raises(IndexError)
def test_load_a_single_sheet3(self):
pyexcel.get_book(file_name=self.testfile, sheet_index=10000)
@raises(ValueError)
def test_load_a_single_sheet4(self):
pyexcel.get_book(file_name=self.testfile, sheet_name="Not exist")
def test_delete_sheets(self):
b1 = pyexcel.load_book(self.testfile)
assert len(b1.sheet_names()) == 3
del b1["Sheet1"]
assert len(b1.sheet_names()) == 2
try:
del b1["Sheet1"]
assert 1 == 2
except KeyError:
assert 1 == 1
del b1[1]
assert len(b1.sheet_names()) == 1
try:
del b1[1]
assert 1 == 2
except IndexError:
assert 1 == 1
def test_delete_sheets2(self):
"""repetitively delete first sheet"""
b1 = pyexcel.load_book(self.testfile)
del b1[0]
assert len(b1.sheet_names()) == 2
del b1[0]
assert len(b1.sheet_names()) == 1
del b1[0]
assert len(b1.sheet_names()) == 0
def test_add_book1(self):
"""
test this scenario: book3 = book1 + book2
"""
b1 = pyexcel.get_book(file_name=self.testfile)
b2 = pyexcel.get_book(file_name=self.testfile2)
b3 = b1 + b2
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 6
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book1_in_place(self):
"""
test this scenario: book1 += book2
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b1 += b2
content = b1.dict
sheet_names = content.keys()
assert len(sheet_names) == 6
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book2(self):
"""
test this scenario: book3 = book1 + sheet3
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1 + b2["Sheet3"]
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book2_in_place(self):
"""
test this scenario: book3 = book1 + sheet3
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b1 += b2["Sheet3"]
content = b1.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book3(self):
"""
test this scenario: book3 = sheet1 + sheet2
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1["Sheet1"] + b2["Sheet3"]
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 2
assert content["Sheet3"] == self.content["Sheet3"]
assert content["Sheet1"] == self.content["Sheet1"]
def test_add_book4(self):
"""
test this scenario: book3 = sheet1 + book
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1["Sheet1"] + b2
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book_error(self):
"""
test this scenario: book3 = sheet1 + book
"""
b1 = pyexcel.BookReader(self.testfile)
try:
b1 + 12
assert 1 == 2
except TypeError:
assert 1 == 1
try:
b1 += 12
assert 1 == 2
except TypeError:
assert 1 == 1
def tearDown(self):
if os.path.exists(self.testfile):
os.unlink(self.testfile)
if os.path.exists(self.testfile2):
os.unlink(self.testfile2)
class TestMultiSheetReader:
def setUp(self):
self.testfile = "file_with_an_empty_sheet.ods"
def test_reader_with_correct_sheets(self):
r = pyexcel.BookReader(
os.path.join("tests", "fixtures", self.testfile)
)
assert r.number_of_sheets() == 3
def _produce_ordered_dict():
data_dict = OrderedDict()
data_dict.update({"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]})
data_dict.update({"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]})
data_dict.update(
{"Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
)
return data_dict
| bsd-3-clause | 368,446,140,860,946,300 | 30.896266 | 76 | 0.55015 | false |
PrincipiaCollege/RouteCalculator | Test/TestIssue03.py | 1 | 3873 | """ Title: Read in CSV data of the route """
""" Description: Read in and store the CSV data into usable Python structures.
This will include the following info from the CSV:
Step Information
Approx. Kms from start: distance of step
Feature: name of landmark on step
Action/Remark: road info and traveling instructions
Note: Useful info """
import unittest
from Source import Route, CSVReader, Step
class TestCSV(unittest.TestCase):
@classmethod
def setUpClass(cls):
CSVReader.read_CSV("WSC_Route.csv")
# Checks that Step objects were created and added to the list of steps
def test_read_step_obj(self):
third_step = Route.get_step(3)
self.assertIs(type(third_step), Step.step)
self.assertTrue(third_step in Route.steps.values())
# Checks proper iteration through steps
def test_get_next_step(self):
first_step = Route.get_cur_step()
self.assertEqual(first_step.distance, 0.0,
"Didn't get correct cur_step")
Route.advance_step()
second_step = Route.get_cur_step()
self.assertEqual(second_step.distance, 0.2,
"Didn't advance_step correctly")
second_step = Route.get_cur_step()
self.assertEqual(second_step.distance, 0.2,
"Repeat call of get_cur_step should be the same correctly")
# Checks that all the properties were read in correctly
def test_get_first(self):
first_step = Route.get_step(1)
self.assertEqual(first_step.distance, 0.0, "Wrong distance")
self.assertEqual(first_step.feature, "Exit to Mitchell Street",
"Wrong Feature")
self.assertEqual(first_step.action, "Under normal traffic conditions",
"Wrong Action")
self.assertEqual(first_step.note, "", "Wrong Note")
self.assertEqual(first_step.speed, 50, "Wrong Speed")
# Checks that all the properties were read in correctly
# of semi-randomly selected step
def test_get_tenth(self):
tenth_step = Route.get_step(10)
self.assertEqual(tenth_step.distance, 4.8, "Wrong distance")
self.assertEqual(tenth_step.interval, 0.3, "Wrong interval")
self.assertEqual(tenth_step.feature, "Speed 100", "Wrong Feature")
self.assertEqual(tenth_step.action, "Speed limit", "Wrong Action")
self.assertEqual(tenth_step.note, "", "Wrong Note")
self.assertEqual(tenth_step.speed, 80, "Wrong Speed")
# Checks the speed property is properly stored, changed, and held
def test_get_speed(self):
step = Route.get_step(4)
self.assertEqual(step.speed, 60, "Didn't get correct speed")
step = Route.get_step(5)
self.assertEqual(step.speed, 80, "Didn't get new speed")
step = Route.get_step(6)
self.assertEqual(step.speed, 80, "Didn't hold correct speed")
# Checks the calculation of the interval between steps
def test_get_interval(self):
third_step = Route.get_step(3)
fourth_step = Route.get_step(4)
self.assertEqual(fourth_step.interval,
fourth_step.distance - third_step.distance,
"Incorrect Interval")
self.assertEqual(fourth_step.interval, 0.5, "Incorrect Interval")
# Checks the note property
def test_get_note(self):
seventh_step = Route.get_step(7)
self.assertEqual(seventh_step.note, "Straight ahead", "Incorrect Note")
# Checks the action property
def test_get_action(self):
third_step = Route.get_step(3)
self.assertEqual(third_step.action, "Traffic Light", "Incorrect Action")
# Python unittest provided function to be executed
# after all tests in the class are completed
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| mit | 7,383,761,783,303,239,000 | 39.768421 | 80 | 0.646269 | false |
ckrooss/shortpath | shortpath/astar.py | 1 | 1996 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
if __name__ == "__main__":
from node import Node
from nodelist import Nodelist
else:
from .node import Node
from .nodelist import Nodelist
Node.xmax = 30
Node.ymax = 15
def gen_obstacles(xmax: int, ymax: int) -> Nodelist:
obstacles = Nodelist()
def ao(x, y):
o = Node(x, y)
obstacles.append(o)
for _ in range(2):
ao(randint(-xmax, xmax), randint(-ymax, ymax))
for y in range(-ymax, ymax):
ao(0, y + 1)
for y in range(-ymax, ymax + 1):
ao(-xmax, y)
for k, j in zip(range(1, xmax), range(ymax)):
ao(xmax - k, j - (ymax // 3))
return obstacles
def find_path(ziel: Node, openlist: Nodelist, closedlist: Nodelist, obstacles: Nodelist) -> bool:
while openlist:
current_node = openlist.pop_min()
if current_node == ziel:
ziel.pre = current_node
return True
closedlist.append(current_node)
current_node.expand_node(ziel, openlist, closedlist, obstacles)
return False
def main():
start = Node(-Node.xmax, Node.ymax)
ziel = Node(Node.xmax, Node.ymax)
while True:
# List of elements that form the optimal way
optimal_path = Nodelist()
# Openlist: Path elements that have not been fully evaluated and might be good
openlist = Nodelist(start)
# Closedlist: Path elements that have been fully evaluated
closedlist = Nodelist()
# Blocking the path
obstacles = gen_obstacles(Node.xmax, Node.ymax)
if find_path(ziel, openlist, closedlist, obstacles):
Node.select_optimal_path(ziel, optimal_path)
Node.printf(start, ziel, optimal_path, openlist, closedlist, obstacles)
break
else:
Node.printf(start, ziel, optimal_path, openlist, closedlist, obstacles)
raise Exception()
if __name__ == '__main__':
main()
| mit | 4,708,236,576,825,571,000 | 24.589744 | 97 | 0.602204 | false |
SyllogismRXS/misc | gui/widget-test/graveyard/mycustomwidgetplugin.py | 1 | 3624 | #!/usr/bin/env python
"""
helloglwidgetplugin.py
A simple OpenGL custom widget plugin for Qt Designer.
Copyright (C) 2006 David Boddie <david@boddie.org.uk>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt5.QtGui import QIcon
from PyQt5.QtDesigner import QPyDesignerCustomWidgetPlugin
from my_custom_widget import MyCustomWidget
class MyCustomWidgetPlugin(QPyDesignerCustomWidgetPlugin):
"""MyCustomWidgetPlugin(QPyDesignerCustomWidgetPlugin)
Provides a Python custom plugin for Qt Designer by implementing the
QDesignerCustomWidgetPlugin via a PyQt-specific custom plugin class.
"""
# The __init__() method is only used to set up the plugin and define its
# initialized variable.
def __init__(self, parent=None):
super(MyCustomWidgetPlugin, self).__init__(parent)
self.initialized = False
# The initialize() and isInitialized() methods allow the plugin to set up
# any required resources, ensuring that this can only happen once for each
# plugin.
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
# This factory method creates new instances of our custom widget with the
# appropriate parent.
def createWidget(self, parent):
return MyCustomWidget(parent)
# This method returns the name of the custom widget class that is provided
# by this plugin.
def name(self):
return "MyCustomWidget"
# Returns the name of the group in Qt Designer's widget box that this
# widget belongs to.
def group(self):
return "Display Widgets"
# Returns the icon used to represent the custom widget in Qt Designer's
# widget box.
def icon(self):
return QIcon()
# Returns a short description of the custom widget for use in a tool tip.
def toolTip(self):
return ""
# Returns a short description of the custom widget for use in a "What's
# This?" help message for the widget.
def whatsThis(self):
return ""
# Returns True if the custom widget acts as a container for other widgets;
# otherwise returns False. Note that plugins for custom containers also
# need to provide an implementation of the QDesignerContainerExtension
# interface if they need to add custom editing support to Qt Designer.
def isContainer(self):
return False
# Returns an XML description of a custom widget instance that describes
# default values for its properties. Each custom widget created by this
# plugin will be configured using this description.
def domXml(self):
return '<widget class="MyCustomWidget" name="mycustomwidget" />\n'
# Returns the module containing the custom widget class. It may include
# a module path.
def includeFile(self):
return "my_custom_widget"
| mit | 8,384,229,615,302,424,000 | 35.606061 | 78 | 0.721578 | false |
dafrito/trac-mirror | trac/ticket/tests/conversion.py | 1 | 4457 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import unittest
from trac import __version__ as TRAC_VERSION
from trac.test import EnvironmentStub, Mock
from trac.ticket.model import Ticket
from trac.ticket.web_ui import TicketModule
from trac.mimeview.api import Mimeview
from trac.web.href import Href
class TicketConversionTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.env.config.set('trac', 'templates_dir',
os.path.join(os.path.dirname(self.env.path),
'templates'))
self.ticket_module = TicketModule(self.env)
self.mimeview = Mimeview(self.env)
self.req = Mock(base_path='/trac.cgi', path_info='',
href=Href('/trac.cgi'), chrome={'logo': {}},
abs_href=Href('http://example.org/trac.cgi'),
environ={}, perm=[], authname='-', args={}, tz=None,
locale='', session=None, form_token=None)
def tearDown(self):
self.env.reset_db()
def _create_a_ticket(self):
# 1. Creating ticket
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['description'] = 'Bar'
ticket['foo'] = 'This is a custom field'
ticket.insert()
return ticket
def test_conversions(self):
conversions = self.mimeview.get_supported_conversions(
'trac.ticket.Ticket')
expected = sorted([('csv', 'Comma-delimited Text', 'csv',
'trac.ticket.Ticket', 'text/csv', 8,
self.ticket_module),
('tab', 'Tab-delimited Text', 'tsv',
'trac.ticket.Ticket', 'text/tab-separated-values', 8,
self.ticket_module),
('rss', 'RSS Feed', 'xml',
'trac.ticket.Ticket', 'application/rss+xml', 8,
self.ticket_module)],
key=lambda i: i[-1], reverse=True)
self.assertEqual(expected, conversions)
def test_csv_conversion(self):
ticket = self._create_a_ticket()
csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
ticket, 'csv')
self.assertEqual(('\xef\xbb\xbf'
'id,summary,reporter,owner,description,status,'
'keywords,cc\r\n1,Foo,santa,,Bar,,,\r\n',
'text/csv;charset=utf-8', 'csv'), csv)
def test_tab_conversion(self):
ticket = self._create_a_ticket()
csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
ticket, 'tab')
self.assertEqual(('\xef\xbb\xbf'
'id\tsummary\treporter\towner\tdescription\tstatus\t'
'keywords\tcc\r\n1\tFoo\tsanta\t\tBar\t\t\t\r\n',
'text/tab-separated-values;charset=utf-8', 'tsv'),
csv)
def test_rss_conversion(self):
ticket = self._create_a_ticket()
content, mimetype, ext = self.mimeview.convert_content(
self.req, 'trac.ticket.Ticket', ticket, 'rss')
self.assertEqual(("""<?xml version="1.0"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel>
<title>My Project: Ticket #1: Foo</title>
<link>http://example.org/trac.cgi/ticket/1</link>
<description><p>
Bar
</p>
</description>
<language>en-us</language>
<generator>Trac %s</generator>
</channel>
</rss>""" % (TRAC_VERSION),
'application/rss+xml', 'xml'),
(content.replace('\r', ''), mimetype, ext))
def suite():
return unittest.makeSuite(TicketConversionTestCase, 'test')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,433,409,218,591,650,600 | 38.442478 | 80 | 0.554184 | false |
Cqfuj/disco-cake | disco_cake/link/mp3_track_number_album_linker.py | 1 | 1939 | # Disco-cake
# Copyright (C) 2017 Maugere Lucas
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mutagen.id3
from .abstract_album_linker import AbstractAlbumLinker
class Mp3TrackNumberAlbumLinker(AbstractAlbumLinker):
allowed_extensions = ['.mp3']
def __init__(self, cfg):
super().__init__(cfg)
def can_apply_to(self, link):
return link.file.extension in self.allowed_extensions
def find_disc(self, trck_values, album):
if len(album.discs) == 1:
return album.discs[0]
if len(trck_values) <= 1:
return None
nb_tracks = int(trck_values[1])
discs = album.get_discs_with_nb_tracks(nb_tracks)
if len(discs) != 1:
return None
return discs[0]
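    # Illustrative sketch only (hypothetical tag value): an ID3 TRCK frame such as
    # "3/12" is split into trck_values == ['3', '12'], i.e. track number 3 on a
    # disc that declares 12 tracks, which find_disc() matches against the album's
    # discs by track count.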
def find_linked_track(self, link, album):
if not self.can_apply_to(link):
return
try:
metadata = mutagen.id3.ID3(link.file.filepath)
trck_values = metadata['TRCK'].text[0].split('/')
track_number = int(trck_values[0])
disc = self.find_disc(trck_values, album)
if disc and disc.has_track_number(track_number):
link.track = disc.tracks[track_number-1]
return
except (mutagen.id3.ID3NoHeaderError, ValueError) as e:
print(str(e))
return
| gpl-3.0 | 2,301,692,807,720,406,000 | 35.584906 | 71 | 0.645694 | false |
lucadt/memoizeit | memoizeit/python/experiment.py | 1 | 8785 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
import tempfile
import tarfile
import shutil
import shlex
import datetime
import subprocess
import time
import copy
import csv
#
import options
import commons
import config
#
class Experiment(object):
@property
def program(self):
return self._program
@property
def folder(self):
return self.program.options.folder
def __init__(self, program):
self._program = program
def _write_options_to_file(self, directory, options):
ops_path = '%s/%s' % (directory, config.options_file())
with open(ops_path, 'a+') as my_file:
my_file.write('\n'.join(options.as_list))
my_file.write('\n')
def _move_file(self, from_file, to_file):
shutil.move(from_file, to_file)
def _copy_file(self, from_file, to_file):
shutil.copyfile(from_file, to_file)
def _copy_time_profile(self):
os.makedirs('%s/%s' % (self.folder, config.time_dir()))
to_file = '%s/%s/%s' % (self.folder, config.time_dir(), config.time_profile_file())
self._copy_file(self.program.profile, to_file)
def _copy_time_output_to_white_list(self):
from_file = '%s/%s/%s' % (self.folder, config.time_dir(), config.time_output_file())
to_file = '%s/%s' % (self.folder, config.white_list_file())
self._copy_file(from_file, to_file)
def _read_file(self, file):
with open(file) as my_file:
lines = [line.strip() for line in my_file]
            lines = [line for line in lines if len(line) > 0]
return lines
def _write_file(self, file, methods):
with open(file, 'w+') as f:
f.write('\n'.join(methods))
def _count_lines(self, file):
with open(file, 'r') as f:
return sum(1 for _ in f)
def _copy_black_list(self):
from_file = '%s/memoizeit/%s' % (commons.general_path(), config.black_list_file())
to_file = '%s/%s' % (self.folder, config.black_list_file())
self._copy_file(from_file, to_file)
def _filter_with_black_list(self):
self._copy_black_list()
white_file = '%s/%s/%s' % (self.folder, config.time_dir(), config.time_output_file())
black_file = '%s/%s' % (self.folder, config.black_list_file())
white_methods = self._read_file(white_file)
black_methods = self._read_file(black_file)
white_method_package = [line for line in white_methods if line not in black_methods and line.startswith(self.program.options.package)]
white_method_filtered = [line for line in white_method_package if not '<init>' in line]
white_list_file = '%s/%s' % (self.folder, config.white_list_file())
self._write_file(white_list_file, white_method_filtered)
def execute(self):
self.initial_candidates()
self.refine_candidates()
def initial_candidates(self):
#
self._write_options_to_file(self.folder, self.program.options)
self._write_options_to_file(self.folder, self.program.time_options)
#
self._copy_time_profile()
self.program.time_jvisualvm()
#
if commons.filter_using_fields() == True:
self.program.fields()
def refine_candidates(self):
pass
def ranking(self):
pass
class IterativeExperiment(Experiment):
def __init__(self, program):
super(IterativeExperiment, self).__init__(program)
def _save_white_list(self, next_depth):
self._copy_file('%s/%s' % (self.folder, config.white_list_file()), '%s/%d.txt' % (self.folder, next_depth))
def _save_current_depth_directory(self, depth):
self._move_file('%s/%s' % (self.folder, config.tuples_dir()), '%s/%d' % (self.folder, depth))
self._move_file('%s/%d.txt' % (self.folder, depth) , '%s/%d/%s' % (self.folder, depth, config.white_list_file()))
def _create_tuples_file(self):
if commons.profile_exaustive() == True:
from_file = '%s/%s/%s' % (self.folder, config.tuples_dir(), config.tuples_output_file())
to_file = '%s/%s' % (self.folder, config.tuples_final_file())
self._copy_file(from_file, to_file)
def _max_depth_reached(self):
if commons.profile_exaustive() == True:
return True
else:
return not os.path.isfile('%s/%s/%s' % (self.folder, config.tuples_dir(), config.tuples_max_depth_file()))
def _create_filter_depths_trace(self):
depths_csv = '%s/%s/%s' % (self.folder, config.tuples_dir(), config.tuples_depths_file())
methods_file = '%s/%s/%s' % (self.folder, config.tuples_dir(), 'methods.txt')
trace_file = '%s/%s/data/thread_1_main/log_depth.txt' % (self.folder, config.tuples_dir())
running = 'java -jar %s %s --trace %s --methods %s --output %s' % (commons.jython_jar(), commons.depths_script_py(), trace_file, methods_file, depths_csv)
subprocess.call( shlex.split( str(running)) )
def _filter_tuples(self, depth):
white_list = '%s/%s' % (self.folder, config.white_list_file())
tuples_output = '%s/%s/%s' % (self.folder, config.tuples_dir(), config.tuples_output_file())
depths_csv = '%s/%s/%s' % (self.folder, config.tuples_dir(), config.tuples_depths_file())
tuples_final = '%s/%s' % (self.folder, config.tuples_final_file())
hit_rates = '%s/hit_rates_%d.txt' % (self.folder, depth)
min_hit_rate = 0.5
running = 'java -cp %s cpb.RefineCandidateMethods %d %f %s %s %s %s %s' % (commons.scala_libs(), depth, min_hit_rate, tuples_output, depths_csv, tuples_final, hit_rates, white_list)
self._create_filter_depths_trace()
subprocess.call(shlex.split(str(running)))
def _ranking(self, cluster):
#
if cluster == True:
options_cluster = ['0.01', '0.5', 'true']
else:
options_cluster = ['0.01', '0.5', 'false']
#
time_stats_file = '%s/%s/stats.txt' % (self.folder, config.time_dir())
time_total_file = '%s/%s/%s' % (self.folder, config.time_dir(), config.time_total_file())
time_profile_file = '%s/%s/%s' % (self.folder, config.time_dir(), config.time_profile_file())
tuples_file = '%s/%s' % (self.folder, config.tuples_final_file())
#
call_graph_file = '%s/%s/%s/call_graph.bin' % (commons.callgraphs_path(), self.program.path, self.program.prefix)
#
running = 'java -cp %s cpb.RankAndInspect %s %s %s %s %s %s' % (commons.scala_libs(), self.program.prefix, time_stats_file, time_total_file, tuples_file, call_graph_file, time_profile_file)
subprocess.call(shlex.split(str(running)) + options_cluster)
#
def ranking(self):
#
        commons.log('Ranking -- ' + '"' + self.program.prefix + '"' + ' -- *without* clustering')
self._ranking(False)
#
        commons.log('Ranking -- ' + '"' + self.program.prefix + '"' + ' -- *with* clustering')
self._ranking(True)
#
def refine_candidates(self):
#
self._filter_with_black_list()
#
depth = 1
#
use_max_depth = not commons.profile_exaustive()
#
get_next_depth = None
if commons.increment_function() == 'inc1':
get_next_depth = lambda x: x + 1
elif commons.increment_function() == 'pow2':
get_next_depth = lambda x: 2 * x
else:
if commons.profile_exaustive() == False:
                raise Exception('Increment function parameter has wrong value -- ' + commons.increment_function())
get_next_depth = None
while (True):
commons.log('Exploring depth ' + str(depth) + ' -- ' + '"' + self.program.prefix + '"')
self._save_white_list(depth)
tuple_options = options.TuplesOptions(use_max_depth, depth, True, False)
self.program.tuples(tuple_options)
self._write_options_to_file('%s/%s' % (self.folder, config.tuples_dir()), tuple_options)
stop = False
if self._max_depth_reached():
stop = True
#
if commons.profile_exaustive() == False:
self._filter_tuples(depth)
self._save_current_depth_directory(depth)
#
candidates_list_new = '%s/%s' % (self.folder, config.white_list_file())
number_of_candidates = self._count_lines(candidates_list_new)
#
if number_of_candidates == 0:
commons.log('No caching candidates left to explore' + ' -- ' + str(depth) + ' -- ' + '"' + self.program.prefix + '"')
break
#
if stop:
self._create_tuples_file()
                commons.log('Max depth ' + str(depth) + ' reached' + ' -- ' + '"' + self.program.prefix + '"')
break
depth = get_next_depth(depth)
| apache-2.0 | 2,114,208,309,911,349,500 | 39.298165 | 195 | 0.588845 | false |
bpsmith/tia | tia/analysis/model/interface.py | 1 | 3276 | __all__ = ['CostCalculator', 'EodMarketData', 'MarketDataColumns', 'TxnColumns', 'PositionColumns', 'PlColumns',
'TxnPlColumns']
class CostCalculator(object):
"""Define the methods necessary to be able to calculator the premium for a trade."""
def get_premium(self, qty, px, ts=None):
raise NotImplementedError()
def get_mkt_val(self, qty, px, ts=None):
raise NotImplementedError()
class EodMarketData(object):
def get_eod_frame(self):
"""Return an end of day DataFrame with columns ('close', 'mktval', 'dvd')"""
raise NotImplementedError()
class MarketDataColumns(object):
CLOSE = 'close'
MKT_VAL = 'mkt_val'
DVDS = 'dvds'
class TxnColumns(object):
DT = 'date'
TS = 'txn_ts'
PID = 'pid'
TID = 'tid'
QTY = 'txn_qty'
PX = 'txn_px'
FEES = 'txn_fees'
PREMIUM = 'txn_premium'
OPEN_VAL = 'open_val'
POS = 'pos'
INTENT = 'txn_intent'
ACTION = 'txn_action'
DESCRIPTIONS = {
DT: 'Date-only portion of transaction',
TS: 'Timestamp of transaction',
PID: 'position id',
TID: 'trade id',
QTY: 'quantity',
PX: 'price',
FEES: 'fees',
PREMIUM: 'premium',
OPEN_VAL: 'open value of position',
POS: 'position quantity',
INTENT: 'trade intent',
ACTION: 'trade action',
}
class PlColumns(object):
DT = 'date'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
ALL = [DT, DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
LTDS = [DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
class TxnPlColumns(object):
DT = 'date'
PID = TxnColumns.PID
TID = TxnColumns.TID
POS = 'pos'
TXN_QTY = 'txn_qty'
TXN_PX = 'txn_px'
TXN_FEES = 'txn_fees'
TXN_PREMIUM = 'txn_premium'
TXN_INTENT = 'txn_intent'
TXN_ACTION = 'txn_action'
CLOSE_PX = 'close'
OPEN_VAL = 'open_val'
MKT_VAL = 'mkt_val'
TOT_VAL = 'total_val'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
POS: 'end of day position quantity',
CLOSE_PX: 'end of day closing price',
OPEN_VAL: 'open value of the position',
MKT_VAL: 'market value',
TOT_VAL: 'total of trade premiums',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
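# Illustrative sketch only (hypothetical numbers, mechanically applying the
# formulas documented in DESCRIPTIONS above):
#   total_val = 110, open_val = 100, mkt_val = -95, fees = -1, dvds = 2
#   rpl_gross = total_val - open_val     # 10
#   rpl       = rpl_gross + fees + dvds  # 11
#   upl       = mkt_val + open_val       # 5
#   pl        = upl + rpl                # 16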
class PositionColumns(object):
PID = 'pid'
SIDE = 'side'
OPEN_DT = 'open_dt'
CLOSE_DT = 'close_dt'
OPEN_QTY = 'open_qty'
OPEN_PX = 'open_px'
CLOSE_PX = 'close_px'
OPEN_PREMIUM = 'open_premium'
PL = 'pl'
DURATION = 'duration'
NUM_TXNS = 'ntxns'
RET = 'ret'
STATE = 'state' | bsd-3-clause | -7,107,958,425,368,233,000 | 23.825758 | 112 | 0.548535 | false |
vgripon/PyRat | imports/display.py | 1 | 22177 | # Copyright © 2017 Vincent Gripon (vincent.gripon@imt-atlatique.fr) and IMT Atlantique
#
# This file is part of PyRat.
#
# PyRat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyRat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyRat. If not, see <http://www.gnu.org/licenses/>.
from imports.parameters import *
import pygame
import random
import datetime
from pygame import locals
def image_of_maze(maze, tiles, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, scale, width, height, screen, window_height):
global mud_range
for i in range(width):
for j in range(height):
screen.blit(image_tile[tiles[i][j]], (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
if not args.mud_no_display:
for i in range(width):
for j in range(height):
if not((i-1,j) in maze[(i,j)]):
pass
elif maze[(i,j)][(i-1,j)] > 1:
screen.blit(image_mud, (offset_x + scale * i - scale/2, window_height - offset_y - scale * (j+1)))
if not((i,j+1) in maze[(i,j)]):
pass
elif maze[(i,j)][(i,j+1)] > 1:
screen.blit(pygame.transform.rotate(image_mud, 270), (offset_x + scale * i, window_height - offset_y - scale * (j+1) - scale/2))
for i in range(width):
for j in range(height):
if not((i-1,j) in maze[(i,j)]):
screen.blit(image_wall, (offset_x + scale * i - scale / 2, window_height - offset_y - scale * (j+1)))
if not((i,j+1) in maze[(i,j)]):
screen.blit(pygame.transform.rotate(image_wall, 270), (offset_x + scale * i, window_height - offset_y - scale * (j+1) - scale/2))
for i in range(width):
screen.blit(pygame.transform.rotate(image_wall, 270), (offset_x + scale * i, window_height - offset_y - scale/2))
for j in range(height):
screen.blit(image_wall, (offset_x + scale * width -scale/2, window_height - offset_y - scale * (j+1)))
for i in range(width+1):
for j in range(height+1):
horiz = False
vert = False
count = 0
if i == 0 or i == width:
vert = True
if j == 0 or j == height:
horiz = True
# is there a wall left?
if i > 0 and j < height and j > 0:
if (i-1,j) not in maze[(i-1,j-1)]:
horiz = True
count = count + 1
# is there a wall right?
if i < width and j < height and j > 0:
if (i,j-1) not in maze[(i,j)]:
horiz = True
count = count + 1
# is there a wall up?
if i > 0 and i < width and j < height:
if (i,j) not in maze[(i-1,j)]:
vert = True
count = count + 1
# is there a wall down?
if i > 0 and i < width and j > 0:
if (i,j-1) not in maze[(i-1,j-1)]:
vert = True
count = count + 1
if vert and horiz or count == 1:
screen.blit(image_corner, (offset_x + scale * i - scale/2, window_height - offset_y - scale * j - scale/2))
def draw_pieces_of_cheese(pieces_of_cheese, image_cheese, offset_x, offset_y, scale, width, height, screen, window_height):
for (i,j) in pieces_of_cheese:
screen.blit(image_cheese, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
def draw_players(player1_location, player2_location, image_python, image_rat, offset_x, offset_y, scale, width, height, screen, window_height):
i, j = player1_location
screen.blit(image_python, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
i, j = player2_location
screen.blit(image_rat, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
def draw_players_animate(player1_location, player2_location, image_python, image_rat, offset_x, offset_y, scale, width, height, screen, window_height):
i, j = player1_location
screen.blit(image_python, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
i, j = player2_location
screen.blit(image_rat, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
font_sizes = [50, 25, 50, 25, 50, 50, 50]
def draw_text(text, font, color, max_size, index_size, x, y, screen):
global font_sizes
font = pygame.font.Font("resources/fonts/" + font + ".ttf", font_sizes[index_size])
label = font.render(text, 1, color)
while(label.get_rect().width > max_size):
font_sizes[index_size] = font_sizes[index_size] - 1
font = pygame.font.SysFont("monospace", font_sizes[index_size])
label = font.render(text, 1, color)
# pygame.draw.rect(screen, (57,57,64), (x - label.get_rect().width // 2, y, label.get_rect().width,label.get_rect().height))
screen.blit(label, (x - label.get_rect().width // 2,y))
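# Note on draw_text above: font_sizes is module-level state, so once a label is too
# wide for max_size the reduced size is kept in font_sizes[index_size] and reused by
# every later call for the same slot.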
def draw_scores(p1name, score1, image1, p2name, score2, image2, window_width, window_height, screen, player1_is_alive, player2_is_alive, moves1, miss1, moves2, miss2, stuck1, stuck2):
if player1_is_alive:
draw_text("Score: "+str(score1), "Kalam-Bold", (50,50,50), window_width / 6, 0, int(window_width / 12), window_width / 3 + 50, screen)
draw_text(p1name, "Kalam-Bold", (50,50,50), window_width / 6, 5, int(window_width / 12), window_width / 3, screen)
draw_text("Moves: " + str(moves1), "Kalam-Regular", (2,118,137), window_width / 6, 1, int(window_width / 12), window_width / 3 + 150, screen)
draw_text("Miss: " + str(miss1), "Kalam-Regular", (229,35,64), window_width / 6, 1, int(window_width / 12), window_width / 3 + 180, screen)
draw_text("Mud: " + str(stuck1), "Kalam-Regular", (229,35,64), window_width / 6, 1, int(window_width / 12), window_width / 3 + 210, screen)
if player2_is_alive:
draw_text("Score: "+str(score2), "Kalam-Bold", (50,50,50), window_width / 6, 2, int(11 * window_width / 12), window_width / 3 + 50, screen)
draw_text(p2name, "Kalam-Bold", (50,50,50), window_width / 6, 6, int(11 * window_width / 12), window_width / 3, screen)
draw_text("Moves: " + str(moves2), "Kalam-Regular", (2,118,137), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 150, screen)
draw_text("Miss: " + str(miss2), "Kalam-Regular", (229,35,64), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 180, screen)
draw_text("Mud: " + str(stuck2), "Kalam-Regular", (229,35,64), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 210, screen)
def display_exit():
pygame.quit()
def play(q_out, move):
while not q_out.empty():
q_out.get()
q_out.put(move)
def init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height):
scale = int(min((window_height - 50) / height, window_width * 2/3 / width))
offset_x = window_width // 2 - int(width / 2 * scale)
offset_y = max(25, window_height // 2 - int(scale * height / 2))
scale_portrait_w = int(window_width / 6)
scale_portrait_h = int(window_width / 6)
image_background = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/background.jpg"),(window_width, window_height))
image_cheese = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/cheese.png"),(scale, scale))
image_corner = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/corner.png"),(scale, scale))
image_moving_python = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/movingPython.png"),(scale, scale))
image_moving_rat = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/movingRat.png"),(scale, scale))
image_python = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/python.png"),(scale, scale))
image_rat = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/rat.png"),(scale, scale))
image_wall = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/wall.png"),(scale, scale))
image_mud = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/mud.png"),(scale, scale))
image_portrait_python = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/python_left.png"),(scale_portrait_w, scale_portrait_h))
image_portrait_rat = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/rat.png"),(scale_portrait_w, scale_portrait_h))
image_tile = []
for i in range(10):
image_tile.append(pygame.transform.smoothscale(pygame.image.load("resources/gameElements/tile"+str(i+1)+".png"),(scale, scale)))
tiles = []
for i in range(width):
tiles.append([])
for j in range(height):
tiles[i].append(random.randrange(10))
if not(args.save_images):
if not(player1_is_alive):
image_rat = image_rat.convert()
image_rat.set_alpha(0)
image_moving_rat = image_moving_rat.convert()
image_moving_rat.set_alpha(0)
if not(player2_is_alive):
image_python = image_python.convert()
image_python.set_alpha(0)
image_moving_python = image_moving_python.convert()
image_moving_python.set_alpha(0)
return scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile
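# Illustrative sketch only (hypothetical window and maze sizes): with a 1200x800
# window and a 25x15 maze, the layout computed above is
#   scale    = int(min((800 - 50) / 15, 1200 * 2 / 3 / 25)) = min(50, 32)  = 32
#   offset_x = 1200 // 2 - int(25 / 2 * 32)                 = 600 - 400    = 200
#   offset_y = max(25, 800 // 2 - int(32 * 15 / 2))         = max(25, 160) = 160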
def build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive):
global font_sizes
# screen.fill((57,57,64))
font_sizes = [50, 25, 50, 25, 50, 50, 50]
maze_image = screen.copy()
maze_image.blit(image_background, (0,0))
image_of_maze(maze, tiles, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, scale, width, height, maze_image, window_height)
if player1_is_alive:
maze_image.blit(image_portrait_rat, (int(window_width /12 - image_portrait_python.get_rect().width / 2), 100))
if player2_is_alive:
maze_image.blit(image_portrait_python, (int(window_width * 11 / 12 - image_portrait_python.get_rect().width / 2), 100))
return maze_image
def run(maze, width, height, q, q_render_in, q_quit, p1name, p2name, q1_out, q2_out, is_human_rat, is_human_python, q_info, pieces_of_cheese, player1_location, player2_location, player1_is_alive, player2_is_alive, screen, infoObject):
global args
debug("Starting rendering",2)
if args.save_images:
window_width, window_height = args.window_width, args.window_height
else:
window_width, window_height = pygame.display.get_surface().get_size()
turn_time = args.turn_time
scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile = init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height)
debug("Defining constants",2)
d = 10000000
clock = pygame.time.Clock()
new_player1_location = player1_location
new_player2_location = player2_location
time_to_go1 = pygame.time.get_ticks()
time_to_go2 = pygame.time.get_ticks()
score1 = 0
score2 = 0
image1 = image_rat
image2 = image_python
moves1 = 0
moves2 = 0
miss1 = 0
miss2 = 0
stuck1 = 0
stuck2 = 0
debug("Trying to initialize Joystick",2)
pygame.joystick.init()
try:
j0 = pygame.joystick.Joystick(0)
j0.init()
print('Enabled joystick: ' + j0.get_name() + ' with ' + str(j0.get_numaxes()) + ' axes', file=sys.stderr)
j1 = pygame.joystick.Joystick(1)
j1.init()
print('Enabled joystick: ' + j1.get_name() + ' with ' + str(j1.get_numaxes()) + ' axes', file=sys.stderr)
except pygame.error:
()
debug("Building background image",2)
maze_image = build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive)
starting_time = pygame.time.get_ticks()
text_info = ""
debug("Starting main loop",2)
while q_quit.empty() or (args.desactivate_animations and not(q.empty())):
debug("Checking events",2)
if not(args.save_images):
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and (event.key == pygame.K_q or event.key == pygame.K_ESCAPE)):
q_quit.put("")
break
if event.type == pygame.VIDEORESIZE or (event.type == pygame.KEYDOWN and event.key == pygame.K_f):
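                # Handle window resizes and the "f" key: 0x80000000 is the pygame.FULLSCREEN flag bit,
                # so "f" switches to fullscreen only when the window is not already fullscreen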
if event.type == pygame.KEYDOWN and not(screen.get_flags() & 0x80000000):
screen = pygame.display.set_mode((infoObject.current_w, infoObject.current_h), pygame.FULLSCREEN)
window_width, window_height = infoObject.current_w, infoObject.current_h
else:
if event.type == pygame.VIDEORESIZE:
window_width, window_height = event.w, event.h
screen = pygame.display.set_mode((window_width, window_height),pygame.RESIZABLE)
scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile = init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height)
maze_image = build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive)
if event.type == pygame.KEYDOWN and (is_human_rat or is_human_python):
if event.key == pygame.K_LEFT:
play(q1_out, "L")
if event.key == pygame.K_RIGHT:
play(q1_out, "R")
if event.key == pygame.K_UP:
play(q1_out, "U")
if event.key == pygame.K_DOWN:
play(q1_out, "D")
if event.key == pygame.K_KP4:
play(q2_out, "L")
if event.key == pygame.K_KP6:
play(q2_out, "R")
if event.key == pygame.K_KP8:
play(q2_out, "U")
if event.key == pygame.K_KP5:
play(q2_out, "D")
debug("Processing joysticks",2)
try:
x , y = j0.get_axis(3), j0.get_axis(4)
if x < -0.7:
play(q1_out, "L")
if x > 0.7:
play(q1_out, "R")
if y < -0.7:
play(q1_out, "U")
if y > 0.7:
play(q1_out, "D")
        except Exception:
            # No joystick available or axis read failed; ignore and fall back to keyboard
            pass
try:
x , y = j1.get_axis(3), j1.get_axis(4)
if x < -0.7:
play(q2_out, "L")
if x > 0.7:
play(q2_out, "R")
if y < -0.7:
play(q2_out, "U")
if y > 0.7:
play(q2_out, "D")
        except Exception:
            # No second joystick available or axis read failed; ignore and fall back to keyboard
            pass
debug("Looking for updates from core program",2)
if (args.desactivate_animations and not(q.empty())) or not(args.desactivate_animations):
if args.desactivate_animations:
pieces_of_cheese, nnew_player1_location, nnew_player2_location, score1, score2, moves1, moves2, miss1, miss2, stuck1, stuck2 = q.get()
player1_location = nnew_player1_location
player2_location = nnew_player2_location
else:
while not(q.empty()):
pieces_of_cheese, nnew_player1_location, nnew_player2_location, score1, score2, moves1, moves2, miss1, miss2, stuck1, stuck2 = q.get()
if not(args.desactivate_animations):
if nnew_player1_location != new_player1_location:
time_to_go1 = pygame.time.get_ticks() + turn_time * maze[new_player1_location][nnew_player1_location]
player1_location = new_player1_location
if nnew_player2_location != new_player2_location:
player2_location = new_player2_location
time_to_go2 = pygame.time.get_ticks() + turn_time * maze[new_player2_location][nnew_player2_location]
new_player1_location = nnew_player1_location
new_player2_location = nnew_player2_location
debug("Starting draw",2)
screen.fill((57, 57, 64))
screen.blit(maze_image, (0, 0))
draw_pieces_of_cheese(pieces_of_cheese, image_cheese, offset_x, offset_y, scale, width, height, screen, window_height)
if not(args.desactivate_animations):
if time_to_go1 <= pygame.time.get_ticks() or player1_location == new_player1_location:
player1_location = new_player1_location
player1_draw_location = player1_location
else:
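                # prop is the fraction of the move remaining; interpolate the draw position between the old and new cell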
prop = (time_to_go1 - pygame.time.get_ticks()) / (maze[player1_location][new_player1_location] * turn_time)
i, j = player1_location
ii, jj = new_player1_location
player1_draw_location = i * prop + ii * (1 - prop), j * prop + jj * (1 - prop)
if ii > i:
image1 = pygame.transform.rotate(image_moving_rat, 270)
elif ii < i:
image1 = pygame.transform.rotate(image_moving_rat, 90)
elif j < jj:
image1 = pygame.transform.rotate(image_moving_rat, 0)
else:
image1 = pygame.transform.rotate(image_moving_rat, 180)
if time_to_go2 <= pygame.time.get_ticks() or player2_location == new_player2_location:
player2_location = new_player2_location
player2_draw_location = player2_location
else:
prop = (time_to_go2 - pygame.time.get_ticks()) / (maze[player2_location][new_player2_location] * turn_time)
i, j = player2_location
ii, jj = new_player2_location
player2_draw_location = i * prop + ii * (1 - prop), j * prop + jj * (1 - prop)
if ii > i:
image2 = pygame.transform.rotate(image_moving_python, 270)
elif ii < i:
image2 = pygame.transform.rotate(image_moving_python, 90)
elif j < jj:
image2 = pygame.transform.rotate(image_moving_python, 0)
else:
image2 = pygame.transform.rotate(image_moving_python, 180)
draw_players_animate(player1_draw_location, player2_draw_location, image1, image2, offset_x, offset_y, scale, width, height, screen, window_height)
        else:  # if desactivate_animations
draw_players(player1_location, player2_location, image_rat, image_python, offset_x, offset_y, scale, width, height, screen, window_height)
draw_scores(p1name, score1, image_portrait_rat, p2name, score2, image_portrait_python, window_width, window_height, screen, player1_is_alive, player2_is_alive, moves1, miss1, moves2, miss2, stuck1, stuck2)
if not(q_info.empty()):
text_info = q_info.get()
if text_info != "":
draw_text(text_info, "Kalam-Bold", (50,50,50), window_width, 4, window_width // 2, 25, screen)
if (pygame.time.get_ticks() - starting_time < args.preparation_time) and not(args.desactivate_animations):
remaining = args.preparation_time - pygame.time.get_ticks() + starting_time
if remaining > 0:
draw_text("Starting in " + str(remaining // 1000) + "." + (str(remaining % 1000)).zfill(3), "Kalam-Bold", (50,50,50), window_width, 4, window_width // 2, 25, screen)
debug("Drawing on screen",2)
if not(args.save_images):
pygame.display.flip()
if not(args.desactivate_animations):
clock.tick(60)
else:
if not(args.synchronous):
clock.tick(1000/turn_time)
if args.save_images:
pygame.image.save(screen, "output_images/image" + str(d)[1:] + ".png")
d = d + 1
else:
clock.tick(60)
debug("Exiting rendering", 2)
q_render_in.put("quit")
if is_human_python:
q2_out.put("")
if is_human_rat:
q1_out.put("")
| gpl-3.0 | -2,253,685,481,212,059,100 | 55.861538 | 353 | 0.584055 | false |
AmatanHead/collective-blog | user/tests.py | 1 | 10954 | """Tests for user profile"""
from django import test
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.utils.translation import ugettext as __
from .models import Profile
User = get_user_model()
class TestProfileModel(test.TransactionTestCase):
def setUp(self):
User.objects.create(username="test", password="test")
def test_profile_creation(self):
"""Test that profile is created for each user"""
user = User.objects.create(username="test2", password="test2")
self.assertIsInstance(user.profile, Profile)
user.save()
self.assertIsInstance(user.profile, Profile)
def test_profile_assigned(self):
"""Test that profile is assigned for each user"""
user = User.objects.get(username="test")
self.assertIsInstance(user.profile, Profile)
def test_profile_deleted(self):
"""Test that profile is deleted properly"""
user = User.objects.get(username="test")
user.delete()
profile = Profile.objects.all()
self.assertEqual(profile.count(), 0)
def test_user_deleted(self):
"""Test that user is deleted properly"""
user = User.objects.get(username="test")
user.profile.delete()
users = User.objects.all()
self.assertEqual(users.count(), 0)
class TestProfileModelPerms(test.TransactionTestCase):
def setUp(self):
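        """Create users covering the permission combinations exercised by the tests below."""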
User.objects.create(username="test", password="test", email="a.b@example.com")
can_change_profile = Permission.objects.get(codename="change_profile")
can_change_user = Permission.objects.get(codename="change_user")
moderator_with_perms = User.objects.create(username="moderator_with_perms", password="_", is_staff=True, is_active=True)
moderator_with_perms.user_permissions.add(can_change_profile)
moderator_with_perms.save()
moderator_with_perms2 = User.objects.create(username="moderator_with_perms2", password="_", is_staff=True, is_active=True)
moderator_with_perms2.user_permissions.add(can_change_user)
moderator_with_perms2.save()
moderator_without_perms = User.objects.create(username="moderator_without_perms", password="_", is_staff=True, is_active=True)
moderator_without_perms.save()
user_with_perms = User.objects.create(username="user_with_perms", password="_", is_active=True)
user_with_perms.user_permissions.add(can_change_profile)
user_with_perms.save()
user_with_perms2 = User.objects.create(username="user_with_perms2", password="_", is_active=True)
user_with_perms2.user_permissions.add(can_change_user)
user_with_perms2.save()
superuser = User.objects.create(username="superuser", password="_", is_active=True, is_staff=True, is_superuser=True)
superuser.save()
disabled_superuser = User.objects.create(username="disabled_superuser", password="_", is_staff=True, is_active=False, is_superuser=True)
disabled_superuser.save()
disabled_superuser2 = User.objects.create(username="disabled_superuser2", password="_", is_active=True, is_staff=False, is_superuser=True)
disabled_superuser2.save()
ordinary_user = User.objects.create(username="ordinary_user", password="_")
ordinary_user.save()
def test_can_edit_profile(self):
"""Test that only moderators and the user can edit the user's profile"""
moderator_with_perms = User.objects.get(username='moderator_with_perms')
moderator_with_perms2 = User.objects.get(
username='moderator_with_perms2')
moderator_without_perms = User.objects.get(
username='moderator_without_perms')
user_with_perms = User.objects.get(username='user_with_perms')
user_with_perms2 = User.objects.get(username='user_with_perms2')
superuser = User.objects.get(username='superuser')
disabled_superuser = User.objects.get(username='disabled_superuser')
disabled_superuser2 = User.objects.get(username='disabled_superuser2')
ordinary_user = User.objects.get(username='ordinary_user')
user = User.objects.get(username="test")
profile = user.profile
user2 = User.objects.get(username="test")
self.assertTrue(profile.can_be_edited_by(moderator_with_perms))
self.assertTrue(profile.can_be_edited_by(moderator_with_perms2))
self.assertFalse(profile.can_be_edited_by(moderator_without_perms))
self.assertFalse(profile.can_be_edited_by(user_with_perms))
self.assertFalse(profile.can_be_edited_by(user_with_perms2))
self.assertTrue(profile.can_be_edited_by(superuser))
self.assertFalse(profile.can_be_edited_by(disabled_superuser))
self.assertFalse(profile.can_be_edited_by(disabled_superuser2))
self.assertFalse(profile.can_be_edited_by(ordinary_user))
self.assertTrue(profile.can_be_edited_by(user))
self.assertTrue(profile.can_be_edited_by(user2))
def test_can_see_the_email(self):
"""Test that only moderators and the user can seethe user's private email"""
moderator_with_perms = User.objects.get(username='moderator_with_perms')
moderator_with_perms2 = User.objects.get(
username='moderator_with_perms2')
moderator_without_perms = User.objects.get(
username='moderator_without_perms')
user_with_perms = User.objects.get(username='user_with_perms')
user_with_perms2 = User.objects.get(username='user_with_perms2')
superuser = User.objects.get(username='superuser')
disabled_superuser = User.objects.get(username='disabled_superuser')
disabled_superuser2 = User.objects.get(username='disabled_superuser2')
ordinary_user = User.objects.get(username='ordinary_user')
user = User.objects.get(username="test")
profile = user.profile
user2 = User.objects.get(username="test")
self.assertTrue(profile.email_can_be_seen_by(moderator_with_perms))
self.assertTrue(profile.email_can_be_seen_by(moderator_with_perms2))
self.assertFalse(profile.email_can_be_seen_by(moderator_without_perms))
self.assertFalse(profile.email_can_be_seen_by(user_with_perms))
self.assertFalse(profile.email_can_be_seen_by(user_with_perms2))
self.assertTrue(profile.email_can_be_seen_by(superuser))
self.assertFalse(profile.email_can_be_seen_by(disabled_superuser))
self.assertFalse(profile.email_can_be_seen_by(disabled_superuser2))
self.assertFalse(profile.email_can_be_seen_by(ordinary_user))
self.assertTrue(profile.email_can_be_seen_by(user))
self.assertTrue(profile.email_can_be_seen_by(user2))
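        # Once the email is made public, every user should be able to see it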
user.profile.email_is_public = True
user.profile.save()
user = User.objects.get(username="test")
profile = user.profile
user2 = User.objects.get(username="test")
self.assertTrue(profile.email_can_be_seen_by(moderator_with_perms))
self.assertTrue(profile.email_can_be_seen_by(moderator_with_perms2))
self.assertTrue(profile.email_can_be_seen_by(moderator_without_perms))
self.assertTrue(profile.email_can_be_seen_by(user_with_perms))
self.assertTrue(profile.email_can_be_seen_by(user_with_perms2))
self.assertTrue(profile.email_can_be_seen_by(superuser))
self.assertTrue(profile.email_can_be_seen_by(disabled_superuser))
self.assertTrue(profile.email_can_be_seen_by(disabled_superuser2))
self.assertTrue(profile.email_can_be_seen_by(ordinary_user))
self.assertTrue(profile.email_can_be_seen_by(user))
self.assertTrue(profile.email_can_be_seen_by(user2))
def test_visible_email(self):
"""Test that private emails are displayed correctly"""
moderator_with_perms = User.objects.get(username='moderator_with_perms')
moderator_with_perms2 = User.objects.get(
username='moderator_with_perms2')
moderator_without_perms = User.objects.get(
username='moderator_without_perms')
user_with_perms = User.objects.get(username='user_with_perms')
user_with_perms2 = User.objects.get(username='user_with_perms2')
superuser = User.objects.get(username='superuser')
disabled_superuser = User.objects.get(username='disabled_superuser')
disabled_superuser2 = User.objects.get(username='disabled_superuser2')
ordinary_user = User.objects.get(username='ordinary_user')
user = User.objects.get(username="test")
profile = user.profile
user2 = User.objects.get(username="test")
self.assertEqual(profile.email_as_seen_by(moderator_with_perms), "a.b@example.com (%s)" % __('Only you can see the email'))
self.assertEqual(profile.email_as_seen_by(moderator_with_perms2), "a.b@example.com (%s)" % __('Only you can see the email'))
self.assertEqual(profile.email_as_seen_by(moderator_without_perms), '')
self.assertEqual(profile.email_as_seen_by(user_with_perms), '')
self.assertEqual(profile.email_as_seen_by(user_with_perms2), '')
self.assertEqual(profile.email_as_seen_by(superuser), "a.b@example.com (%s)" % __('Only you can see the email'))
self.assertEqual(profile.email_as_seen_by(disabled_superuser), '')
self.assertEqual(profile.email_as_seen_by(disabled_superuser2), '')
self.assertEqual(profile.email_as_seen_by(ordinary_user), '')
self.assertEqual(profile.email_as_seen_by(user), "a.b@example.com (%s)" % __('Only you can see the email'))
self.assertEqual(profile.email_as_seen_by(user2), "a.b@example.com (%s)" % __('Only you can see the email'))
user.profile.email_is_public = True
user.profile.save()
user = User.objects.get(username="test")
profile = user.profile
user2 = User.objects.get(username="test")
self.assertEqual(profile.email_as_seen_by(moderator_with_perms), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(moderator_with_perms2), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(moderator_without_perms), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(user_with_perms), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(user_with_perms2), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(superuser), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(disabled_superuser), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(disabled_superuser2), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(ordinary_user), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(user), "a.b@example.com")
self.assertEqual(profile.email_as_seen_by(user2), "a.b@example.com")
| mit | -8,111,463,802,058,949,000 | 49.712963 | 146 | 0.681851 | false |
Acehaidrey/incubator-airflow | airflow/www/views.py | 1 | 135065 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import itertools
import json
import logging
import math
import socket
import sys
import traceback
from collections import defaultdict
from datetime import datetime, timedelta
from json import JSONDecodeError
from typing import Dict, List, Optional, Tuple
from urllib.parse import unquote, urlparse
import lazy_object_proxy
import nvd3
import sqlalchemy as sqla
import yaml
from flask import (
Markup,
Response,
abort,
current_app,
escape,
flash,
g,
jsonify,
make_response,
redirect,
render_template,
request,
session as flask_session,
url_for,
)
from flask_appbuilder import BaseView, ModelView, expose
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter # noqa
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps, pformat # type: ignore
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter # noqa pylint: disable=no-name-in-module
from sqlalchemy import and_, desc, func, or_, union_all
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
import airflow
from airflow import models, plugins_manager, settings
from airflow.api.common.experimental.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_success,
)
from airflow.configuration import AIRFLOW_CONFIG, conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun, DagRunType
from airflow.models.taskinstance import TaskInstance
from airflow.security import permissions
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS
from airflow.utils import json as utils_json, timezone
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.version import version
from airflow.www import auth, utils as wwwutils
from airflow.www.decorators import action_logging, gzipped
from airflow.www.forms import (
ConnectionForm,
DagRunForm,
DateTimeForm,
DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
)
from airflow.www.widgets import AirflowModelListWidget
PAGE_SIZE = conf.getint('webserver', 'page_size')
FILTER_TAGS_COOKIE = 'tags_filter'
FILTER_STATUS_COOKIE = 'dag_status_filter'
def get_safe_url(url):
"""Given a user-supplied URL, ensure it points to our web server"""
valid_schemes = ['http', 'https', '']
valid_netlocs = [request.host, '']
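    # An empty scheme and netloc correspond to a relative URL, which always points back to this web server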
parsed = urlparse(url)
if parsed.scheme in valid_schemes and parsed.netloc in valid_netlocs:
return url
return url_for('Airflow.index')
def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
"""Get Execution Data, Base Date & Number of runs from a Request"""
date_time = www_request.args.get('execution_date')
if date_time:
date_time = timezone.parse(date_time)
else:
date_time = dag.get_latest_execution_date(session=session) or timezone.utcnow()
base_date = www_request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round to next second.
base_date = (date_time + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = www_request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
drs = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
.order_by(desc(DagRun.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if date_time == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
date_time = dr.execution_date
dr_state = dr.state
return {
'dttm': date_time,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': date_time.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
def task_group_to_dict(task_group):
"""
Create a nested dict representation of this TaskGroup and its children used to construct
the Graph View.
"""
if isinstance(task_group, BaseOperator):
return {
'id': task_group.task_id,
'value': {
'label': task_group.label,
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'rx': 5,
'ry': 5,
},
}
children = [
task_group_to_dict(child) for child in sorted(task_group.children.values(), key=lambda t: t.label)
]
if task_group.upstream_group_ids or task_group.upstream_task_ids:
children.append(
{
'id': task_group.upstream_join_id,
'value': {
'label': '',
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'shape': 'circle',
},
}
)
if task_group.downstream_group_ids or task_group.downstream_task_ids:
# This is the join node used to reduce the number of edges between two TaskGroup.
children.append(
{
'id': task_group.downstream_join_id,
'value': {
'label': '',
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'shape': 'circle',
},
}
)
return {
"id": task_group.group_id,
'value': {
'label': task_group.label,
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color}",
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': task_group.tooltip,
'children': children,
}
def dag_edges(dag):
"""
Create the list of edges needed to construct the Graph View.
A special case is made if a TaskGroup is immediately upstream/downstream of another
TaskGroup or task. Two dummy nodes named upstream_join_id and downstream_join_id are
created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
all edges are directed onto the dummy nodes. This is to cut down the number of edges on
the graph.
For example: A DAG with TaskGroups group1 and group2:
group1: task1, task2, task3
group2: task4, task5, task6
group2 is downstream of group1:
group1 >> group2
Edges to add (This avoids having to create edges between every task in group1 and group2):
task1 >> downstream_join_id
task2 >> downstream_join_id
task3 >> downstream_join_id
downstream_join_id >> upstream_join_id
upstream_join_id >> task4
upstream_join_id >> task5
upstream_join_id >> task6
"""
# Edges to add between TaskGroup
edges_to_add = set()
# Edges to remove between individual tasks that are replaced by edges_to_add.
edges_to_skip = set()
task_group_map = dag.task_group.get_task_group_dict()
def collect_edges(task_group):
"""Update edges_to_add and edges_to_skip according to TaskGroups."""
if isinstance(task_group, BaseOperator):
return
for target_id in task_group.downstream_group_ids:
# For every TaskGroup immediately downstream, add edges between downstream_join_id
# and upstream_join_id. Skip edges between individual tasks of the TaskGroups.
target_group = task_group_map[target_id]
edges_to_add.add((task_group.downstream_join_id, target_group.upstream_join_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
for target in target_group.get_roots():
edges_to_skip.add((child.task_id, target.task_id))
edges_to_skip.add((child.task_id, target_group.upstream_join_id))
for child in target_group.get_roots():
edges_to_add.add((target_group.upstream_join_id, child.task_id))
edges_to_skip.add((task_group.downstream_join_id, child.task_id))
# For every individual task immediately downstream, add edges between downstream_join_id and
# the downstream task. Skip edges between individual tasks of the TaskGroup and the
# downstream task.
for target_id in task_group.downstream_task_ids:
edges_to_add.add((task_group.downstream_join_id, target_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
edges_to_skip.add((child.task_id, target_id))
# For every individual task immediately upstream, add edges between the upstream task
# and upstream_join_id. Skip edges between the upstream task and individual tasks
# of the TaskGroup.
for source_id in task_group.upstream_task_ids:
edges_to_add.add((source_id, task_group.upstream_join_id))
for child in task_group.get_roots():
edges_to_add.add((task_group.upstream_join_id, child.task_id))
edges_to_skip.add((source_id, child.task_id))
for child in task_group.children.values():
collect_edges(child)
collect_edges(dag.task_group)
# Collect all the edges between individual tasks
edges = set()
def get_downstream(task):
for child in task.downstream_list:
edge = (task.task_id, child.task_id)
if edge not in edges:
edges.add(edge)
get_downstream(child)
for root in dag.roots:
get_downstream(root)
return [
{'source_id': source_id, 'target_id': target_id}
for source_id, target_id in sorted(edges.union(edges_to_add) - edges_to_skip)
]
######################################################################################
# Error handlers
######################################################################################
def circles(error): # pylint: disable=unused-argument
"""Show Circles on screen for any error in the Webserver"""
return (
render_template(
'airflow/circles.html',
hostname=socket.getfqdn()
if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True) # noqa
else 'redact',
),
404,
)
def show_traceback(error): # pylint: disable=unused-argument
"""Show Traceback for a given error"""
return (
render_template(
'airflow/traceback.html', # noqa
python_version=sys.version.split(" ")[0],
airflow_version=version,
hostname=socket.getfqdn()
if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True)
else 'redact',
info=traceback.format_exc()
if conf.getboolean('webserver', 'EXPOSE_STACKTRACE', fallback=True)
else 'Error! Please contact server admin.',
),
500,
)
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView): # noqa: D101
"""Base View to set Airflow related properties"""
from airflow import macros
route_base = ''
# Make our macros available to our UI templates too.
extra_args = {
'macros': macros,
}
def render_template(self, *args, **kwargs):
return super().render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),
**kwargs,
)
class Airflow(AirflowBaseView): # noqa: D101 pylint: disable=too-many-public-methods
"""Main Airflow application."""
@expose('/health')
def health(self):
"""
        An endpoint for checking the health status of the Airflow instance,
        including metadatabase and scheduler.
"""
payload = {'metadatabase': {'status': 'unhealthy'}}
latest_scheduler_heartbeat = None
scheduler_status = 'unhealthy'
payload['metadatabase'] = {'status': 'healthy'}
try:
scheduler_job = SchedulerJob.most_recent_job()
if scheduler_job:
latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
if scheduler_job.is_alive():
scheduler_status = 'healthy'
except Exception: # noqa pylint: disable=broad-except
payload['metadatabase']['status'] = 'unhealthy'
payload['scheduler'] = {
'status': scheduler_status,
'latest_scheduler_heartbeat': latest_scheduler_heartbeat,
}
return wwwutils.json_response(payload)
@expose('/home')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
]
) # pylint: disable=too-many-locals,too-many-statements
def index(self):
"""Home view."""
hide_paused_dags_by_default = conf.getboolean('webserver', 'hide_paused_dags_by_default')
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search')
arg_tags_filter = request.args.getlist('tags')
arg_status_filter = request.args.get('status')
if request.args.get('reset_tags') is not None:
flask_session[FILTER_TAGS_COOKIE] = None
# Remove the reset_tags=reset from the URL
return redirect(url_for('Airflow.index'))
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
elif cookie_val:
# If tags exist in cookie, but not URL, add them to the URL
return redirect(url_for('Airflow.index', tags=cookie_val.split(',')))
if arg_status_filter is None:
cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
if cookie_val:
arg_status_filter = cookie_val
else:
arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'
flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
else:
status = arg_status_filter.strip().lower()
flask_session[FILTER_STATUS_COOKIE] = status
arg_status_filter = status
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
start = current_page * dags_per_page
end = start + dags_per_page
# Get all the dag id the user could access
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
with create_session() as session:
# read orm_dags from the db
dags_query = session.query(DagModel).filter(~DagModel.is_subdag, DagModel.is_active)
# pylint: disable=no-member
if arg_search_query:
dags_query = dags_query.filter(
DagModel.dag_id.ilike('%' + arg_search_query + '%')
                    | DagModel.owners.ilike('%' + arg_search_query + '%')  # noqa
)
if arg_tags_filter:
dags_query = dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
if permissions.RESOURCE_DAG not in filter_dag_ids:
dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))
# pylint: enable=no-member
all_dags = dags_query
active_dags = dags_query.filter(~DagModel.is_paused)
paused_dags = dags_query.filter(DagModel.is_paused)
is_paused_count = dict(
all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))
.group_by(DagModel.is_paused)
.all()
)
status_count_active = is_paused_count.get(False, 0)
status_count_paused = is_paused_count.get(True, 0)
all_dags_count = status_count_active + status_count_paused
if arg_status_filter == 'active':
current_dags = active_dags
num_of_all_dags = status_count_active
elif arg_status_filter == 'paused':
current_dags = paused_dags
num_of_all_dags = status_count_paused
else:
current_dags = all_dags
num_of_all_dags = all_dags_count
dags = (
current_dags.order_by(DagModel.dag_id)
.options(joinedload(DagModel.tags))
.offset(start)
.limit(dags_per_page)
.all()
)
dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
import_errors = session.query(errors.ImportError).all()
for import_error in import_errors:
flash("Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=import_error), "dag_import_error")
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
f"Broken plugin: [{filename}] {stacktrace}",
"error",
)
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
state_color_mapping = State.state_color.copy()
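            # JSON object keys cannot be None, so expose the colour for the "no status" state under the string "null"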
state_color_mapping["null"] = state_color_mapping.pop(None)
return self.render_template(
'airflow/dags.html',
dags=dags,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(
current_page,
num_of_pages,
search=escape(arg_search_query) if arg_search_query else None,
status=arg_status_filter if arg_status_filter else None,
),
num_runs=num_runs,
tags=tags,
state_color=state_color_mapping,
status_filter=arg_status_filter,
status_count_all=all_dags_count,
status_count_active=status_count_active,
status_count_paused=status_count_paused,
tags_filter=arg_tags_filter,
)
@expose('/dag_stats', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_stats(self, session=None):
"""Dag statistics."""
dr = models.DagRun
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
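        # The blanket DAG resource permission grants access to every dag, so expand it to all dag_ids in the DB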
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state)).group_by(
dr.dag_id, dr.state
)
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
payload = {}
dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids)) # pylint: disable=no-member
data = {}
for dag_id, state, count in dag_state_stats:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.dag_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({'state': state, 'count': count})
return wwwutils.json_response(payload)
@expose('/task_stats', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def task_stats(self, session=None):
"""Task Statistics"""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if not allowed_dag_ids:
return wwwutils.json_response({})
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
# pylint: disable=comparison-with-callable
running_dag_run_query_result = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING, DagModel.is_active)
)
# pylint: enable=comparison-with-callable
# pylint: disable=no-member
if selected_dag_ids:
running_dag_run_query_result = running_dag_run_query_result.filter(
DagRun.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
running_dag_run_query_result = running_dag_run_query_result.subquery('running_dag_run')
# pylint: disable=no-member
# Select all task_instances from active dag_runs.
running_task_instance_query_result = session.query(
TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
).join(
running_dag_run_query_result,
and_(
running_dag_run_query_result.c.dag_id == TaskInstance.dag_id,
running_dag_run_query_result.c.execution_date == TaskInstance.execution_date,
),
)
if selected_dag_ids:
running_task_instance_query_result = running_task_instance_query_result.filter(
TaskInstance.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):
# pylint: disable=comparison-with-callable
last_dag_run = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING, DagModel.is_active)
.group_by(DagRun.dag_id)
)
# pylint: enable=comparison-with-callable
# pylint: disable=no-member
if selected_dag_ids:
last_dag_run = last_dag_run.filter(DagRun.dag_id.in_(filter_dag_ids))
last_dag_run = last_dag_run.subquery('last_dag_run')
# pylint: enable=no-member
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
last_task_instance_query_result = session.query(
TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
).join(
last_dag_run,
and_(
last_dag_run.c.dag_id == TaskInstance.dag_id,
last_dag_run.c.execution_date == TaskInstance.execution_date,
),
)
# pylint: disable=no-member
if selected_dag_ids:
last_task_instance_query_result = last_task_instance_query_result.filter(
TaskInstance.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
final_task_instance_query_result = union_all(
last_task_instance_query_result, running_task_instance_query_result
).alias('final_ti')
else:
final_task_instance_query_result = running_task_instance_query_result.subquery('final_ti')
qry = session.query(
final_task_instance_query_result.c.dag_id,
final_task_instance_query_result.c.state,
sqla.func.count(),
).group_by(final_task_instance_query_result.c.dag_id, final_task_instance_query_result.c.state)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({'state': state, 'count': count})
return wwwutils.json_response(payload)
@expose('/last_dagruns', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def last_dagruns(self, session=None):
"""Last DAG runs"""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
query = session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date'),
sqla.func.max(DagRun.start_date).label('start_date'),
).group_by(DagRun.dag_id)
# Filter to only ask for accessible and selected dags
query = query.filter(DagRun.dag_id.in_(filter_dag_ids)) # pylint: enable=no-member
resp = {
r.dag_id.replace('.', '__dot__'): {
'dag_id': r.dag_id,
'execution_date': r.execution_date.isoformat(),
'start_date': r.start_date.isoformat(),
}
for r in query
}
return wwwutils.json_response(resp)
@expose('/code')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
]
)
@provide_session
def code(self, session=None):
"""Dag Code."""
all_errors = ""
dag_orm = None
dag_id = None
try:
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
html_code = Markup(
highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True) # pylint: disable=no-member
)
)
except Exception as e: # pylint: disable=broad-except
all_errors += (
"Exception encountered during "
+ f"dag_id retrieval/dag retrieval fallback/code highlighting:\n\n{e}\n"
)
html_code = Markup('<p>Failed to load file.</p><p>Details: {}</p>').format( # noqa
escape(all_errors)
)
return self.render_template(
'airflow/dag_code.html',
html_code=html_code,
dag=dag_orm,
title=dag_id,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'),
wrapped=conf.getboolean('webserver', 'default_wrap'),
)
@expose('/dag_details')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_details(self, session=None):
"""Get Dag details."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
title = "DAG Details"
root = request.args.get('root', '')
states = (
session.query(TaskInstance.state, sqla.func.count(TaskInstance.dag_id))
.filter(TaskInstance.dag_id == dag_id)
.group_by(TaskInstance.state)
.all()
)
active_runs = models.DagRun.find(dag_id=dag_id, state=State.RUNNING, external_trigger=False)
tags = session.query(models.DagTag).filter(models.DagTag.dag_id == dag_id).all()
return self.render_template(
'airflow/dag_details.html',
dag=dag,
title=title,
root=root,
states=states,
State=State,
active_runs=active_runs,
tags=tags,
)
@expose('/rendered-templates')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def rendered_templates(self):
"""Get rendered Dag."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = current_app.dag_bag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.get_rendered_template_fields()
except AirflowException as e: # pylint: disable=broad-except
msg = "Error rendering template: " + escape(e)
if e.__cause__: # pylint: disable=using-constant-test
msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e: # pylint: disable=broad-except
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
renderers = wwwutils.get_attr_renderer()
for template_field in task.template_fields:
content = getattr(task, template_field)
renderer = task.template_fields_renderers.get(template_field, template_field)
if renderer in renderers:
if isinstance(content, (dict, list)):
content = json.dumps(content, sort_keys=True, indent=4)
html_dict[template_field] = renderers[renderer](content)
else:
                html_dict[template_field] = Markup("<pre><code>{}</code></pre>").format(
                    pformat(content)
                )  # noqa
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title,
)
@expose('/rendered-k8s')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def rendered_k8s(self):
"""Get rendered k8s yaml."""
if not settings.IS_K8S_OR_K8SCELERY_EXECUTOR:
abort(404)
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
ti = models.TaskInstance(task=task, execution_date=dttm)
pod_spec = None
try:
pod_spec = ti.get_rendered_k8s_spec()
except AirflowException as e:
msg = "Error rendering Kubernetes POD Spec: " + escape(e)
if e.__cause__: # pylint: disable=using-constant-test
msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e: # pylint: disable=broad-except
flash("Error rendering Kubernetes Pod Spec: " + str(e), "error")
title = "Rendered K8s Pod Spec"
html_dict = {}
renderers = wwwutils.get_attr_renderer()
if pod_spec:
content = yaml.dump(pod_spec)
content = renderers["yaml"](content)
else:
            content = Markup("<pre><code>Error rendering Kubernetes POD Spec</code></pre>")
html_dict['k8s'] = content
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title,
)
@expose('/get_logs_with_metadata')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
"""Retrieve logs including metadata."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
if request.args.get('try_number') is not None:
try_number = int(request.args.get('try_number'))
else:
try_number = None
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
response_format = request.args.get('format', 'json')
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(execution_date)
)
response = jsonify({'error': error_message})
response.status_code = 400
return response
task_log_reader = TaskLogReader()
if not task_log_reader.supports_read:
return jsonify(
message="Task log handler does not support read logs.",
error=True,
metadata={"end_of_log": True},
)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == execution_date,
)
.first()
)
if ti is None:
return jsonify(
message="*** Task instance did not exist in the DB\n",
error=True,
metadata={"end_of_log": True},
)
try:
dag = current_app.dag_bag.get_dag(dag_id)
if dag:
ti.task = dag.get_task(ti.task_id)
if response_format == 'json':
logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata)
message = logs[0] if try_number is not None else logs
return jsonify(message=message, metadata=metadata)
metadata['download_logs'] = True
attachment_filename = task_log_reader.render_log_filename(ti, try_number)
log_stream = task_log_reader.read_log_stream(ti, try_number, metadata)
return Response(
response=log_stream,
mimetype="text/plain",
headers={"Content-Disposition": f"attachment; filename={attachment_filename}"},
)
except AttributeError as e:
error_message = [f"Task log handler does not support read logs.\n{str(e)}\n"]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def log(self, session=None):
"""Retrieve log."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag_model = DagModel.get_dagmodel(dag_id)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm,
)
.first()
)
num_logs = 0
if ti is not None:
num_logs = ti.next_try_number - 1
if ti.state == State.UP_FOR_RESCHEDULE:
# Tasks in reschedule state decremented the try number
num_logs += 1
logs = [''] * num_logs
root = request.args.get('root', '')
return self.render_template(
'airflow/ti_log.html',
logs=logs,
dag=dag_model,
title="Log by attempts",
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
wrapped=conf.getboolean('webserver', 'default_wrap'),
)
@expose('/redirect_to_external_log')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def redirect_to_external_log(self, session=None):
"""Redirects to external log."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
try_number = request.args.get('try_number', 1)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm,
)
.first()
)
if not ti:
flash(f"Task [{dag_id}.{task_id}] does not exist", "error")
return redirect(url_for('Airflow.index'))
task_log_reader = TaskLogReader()
if not task_log_reader.supports_external_link:
flash("Task log handler does not support external links", "error")
return redirect(url_for('Airflow.index'))
handler = task_log_reader.log_handler
url = handler.get_external_log_url(ti, try_number)
return redirect(url)
@expose('/task')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task(self):
"""Retrieve task."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for('Airflow.index'))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TaskInstance(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
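        # dir() also yields bound methods; comparing each attribute's type to type(self.task) (itself a bound method) keeps only plain data attributes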
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa pylint: disable=unidiomatic-typecheck
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
# pylint: disable=unidiomatic-typecheck
if type(attr) != type(self.task) and attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# pylint: enable=unidiomatic-typecheck
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [
(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br>\n- The scheduler is down or under heavy load<br>\n{}\n"
"<br>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br>"
if ti.state == State.NONE
else ""
),
)
]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [
(dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses(dep_context=dep_context)
]
title = "Task Instance Details"
return self.render_template(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag,
title=title,
)
@expose('/xcom')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
]
)
@action_logging
@provide_session
def xcom(self, session=None):
"""Retrieve XCOM."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dm_db = models.DagModel
ti_db = models.TaskInstance
dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()
        ti = session.query(ti_db).filter(ti_db.dag_id == dag_id, ti_db.task_id == task_id).first()
if not ti:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for('Airflow.index'))
xcomlist = (
session.query(XCom)
.filter(XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.execution_date == dttm)
.all()
)
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render_template(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
dag=dag,
title=title,
)
@expose('/run', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def run(self):
"""Runs Task Instance."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get('ignore_all_deps') == "true"
ignore_task_deps = request.form.get('ignore_task_deps') == "true"
ignore_ti_state = request.form.get('ignore_ti_state') == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor # noqa
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor # noqa
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join([f"{dep.dep_name}: {dep.reason}" for dep in failed_deps])
flash(
"Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error",
)
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
executor.heartbeat()
flash(f"Sent {ti} to the message queue, it should start any moment now.")
return redirect(origin)
@expose('/delete', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
]
)
@action_logging
def delete(self):
"""Deletes DAG."""
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagFileExists, DagNotFound
dag_id = request.values.get('dag_id')
origin = get_safe_url(request.values.get('origin'))
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash(f"DAG with id {dag_id} not found. Cannot delete", 'error')
return redirect(request.referrer)
except DagFileExists:
flash(f"Dag id {dag_id} is still in DagBag. Remove the DAG file first.", 'error')
return redirect(request.referrer)
flash(f"Deleting DAG with id {dag_id}. May take a couple minutes to fully disappear.")
# Upon success return to origin.
return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
@provide_session
def trigger(self, session=None):
"""Triggers DAG Run."""
dag_id = request.values.get('dag_id')
origin = get_safe_url(request.values.get('origin'))
request_conf = request.values.get('conf')
if request.method == 'GET':
# Populate conf textarea with conf requests parameter, or dag.params
default_conf = ''
if request_conf:
default_conf = request_conf
else:
try:
dag = current_app.dag_bag.get_dag(dag_id)
default_conf = json.dumps(dag.params, indent=4)
except TypeError:
flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
return self.render_template(
'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=default_conf
)
dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
if not dag_orm:
flash(f"Cannot find dag {dag_id}")
return redirect(origin)
execution_date = timezone.utcnow()
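# Guard against creating a second manual run with the same execution date.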
dr = DagRun.find(dag_id=dag_id, execution_date=execution_date, run_type=DagRunType.MANUAL)
if dr:
flash(f"This run_id {dr.run_id} already exists") # noqa
return redirect(origin)
run_conf = {}
if request_conf:
try:
run_conf = json.loads(request_conf)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration", "error")
return self.render_template(
'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=request_conf
)
dag = current_app.dag_bag.get_dag(dag_id)
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
dag_hash=current_app.dag_bag.dags_hash.get(dag_id),
)
flash(f"Triggered {dag_id}, it should start any moment now.")
return redirect(origin)
def _clear_dag_tis(
self, dag, start_date, end_date, origin, recursive=False, confirmed=False, only_failed=False
):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
)
flash(f"{count} task instances have been cleared")
return redirect(origin)
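# Not confirmed yet: do a dry run first so the user can review the task instances before clearing.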
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
)
except AirflowException as ex:
flash(str(ex), 'error')
return redirect(origin)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to clear:",
details=details,
)
return response
@expose('/clear', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def clear(self):
"""Clears the Dag."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
dag = current_app.dag_bag.get_dag(dag_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('upstream') == "true"
downstream = request.form.get('downstream') == "true"
future = request.form.get('future') == "true"
past = request.form.get('past') == "true"
recursive = request.form.get('recursive') == "true"
only_failed = request.form.get('only_failed') == "true"
dag = dag.sub_dag(
task_ids_or_regex=fr"^{task_id}$",
include_downstream=downstream,
include_upstream=upstream,
)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(
dag,
start_date,
end_date,
origin,
recursive=recursive,
confirmed=confirmed,
only_failed=only_failed,
)
@expose('/dagrun_clear', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def dagrun_clear(self):
"""Clears the DagRun"""
dag_id = request.form.get('dag_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
dag = current_app.dag_bag.get_dag(dag_id)
execution_date = timezone.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin, recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def blocked(self, session=None):
"""Mark Dag Blocked."""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
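# If the accessible-DAG set contains the global RESOURCE_DAG entry, expand it to every DAG id in the DB.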
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response([])
# pylint: disable=comparison-with-callable
dags = (
session.query(DagRun.dag_id, sqla.func.count(DagRun.id))
.filter(DagRun.state == State.RUNNING)
.filter(DagRun.dag_id.in_(filter_dag_ids))
.group_by(DagRun.dag_id)
)
# pylint: enable=comparison-with-callable
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = current_app.dag_bag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append(
{
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
}
)
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'Cannot find DAG: {dag_id}', 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash(f"Marked failed on {len(new_dag_state)} task instances")
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as failed",
details=details,
)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'Cannot find DAG: {dag_id}', 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date, commit=confirmed)
if confirmed:
flash(f"Marked success on {len(new_dag_state)} task instances")
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as success",
details=details,
)
return response
@expose('/dagrun_failed', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_failed(self):
"""Mark DagRun failed."""
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = get_safe_url(request.form.get('origin'))
return self._mark_dagrun_state_as_failed(dag_id, execution_date, confirmed, origin)
@expose('/dagrun_success', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_success(self):
"""Mark DagRun success"""
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = get_safe_url(request.form.get('origin'))
return self._mark_dagrun_state_as_success(dag_id, execution_date, confirmed, origin)
def _mark_task_instance_state( # pylint: disable=too-many-arguments
self,
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
state,
):
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
latest_execution_date = dag.get_latest_execution_date()
if not latest_execution_date:
flash(f"Cannot make {state}, seem that dag {dag_id} has never run", "error")
return redirect(origin)
execution_date = timezone.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(
tasks=[task],
execution_date=execution_date,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=True,
)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(
tasks=[task],
execution_date=execution_date,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=False,
)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render_template(
"airflow/confirm.html",
message=f"Here's the list of task instances you are about to mark as {state}:",
details=details,
)
return response
@expose('/failed', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def failed(self):
"""Mark task as failed."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('failed_upstream') == "true"
downstream = request.form.get('failed_downstream') == "true"
future = request.form.get('failed_future') == "true"
past = request.form.get('failed_past') == "true"
return self._mark_task_instance_state(
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
State.FAILED,
)
@expose('/success', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def success(self):
"""Mark task as success."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('success_upstream') == "true"
downstream = request.form.get('success_downstream') == "true"
future = request.form.get('success_future') == "true"
past = request.form.get('success_past') == "true"
return self._mark_task_instance_state(
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
State.SUCCESS,
)
@expose('/tree')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped # pylint: disable=too-many-locals
@action_logging # pylint: disable=too-many-locals
def tree(self):
"""Get Dag as tree."""
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
if num_runs:
num_runs = int(num_runs)
else:
num_runs = conf.getint('webserver', 'default_dag_run_display_number')
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
with create_session() as session:
dag_runs = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
.order_by(DagRun.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(dag_runs.keys())
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
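# Index task instances by (task_id, execution_date) for quick lookup while building the tree.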
task_instances: Dict[Tuple[str, datetime], models.TaskInstance] = {}
for ti in tis:
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = set()
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = 0
node_limit = 5000 / max(1, len(dag.leaves))
def encode_ti(task_instance: Optional[models.TaskInstance]) -> Optional[List]:
if not task_instance:
return None
# NOTE: order of entry is important here because client JS relies on it for
# tree node reconstruction. Remember to change JS code in tree.html
# whenever order is altered.
task_instance_data = [
task_instance.state,
task_instance.try_number,
None, # start_ts
None, # duration
]
if task_instance.start_date:
# round to seconds to reduce payload size
task_instance_data[2] = int(task_instance.start_date.timestamp())
if task_instance.duration is not None:
task_instance_data[3] = int(task_instance.duration)
return task_instance_data
def recurse_nodes(task, visited):
nonlocal node_count
node_count += 1
visited.add(task)
task_id = task.task_id
node = {
'name': task.task_id,
'instances': [encode_ti(task_instances.get((task_id, d))) for d in dates],
'num_dep': len(task.downstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'ui_color': task.ui_color,
}
if task.downstream_list:
children = [
recurse_nodes(t, visited)
for t in task.downstream_list
if node_count < node_limit or t not in visited
]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
if task.task_id not in expanded:
children_key = 'children'
expanded.add(task.task_id)
else:
children_key = "_children"
node[children_key] = children
if task.depends_on_past:
node['depends_on_past'] = task.depends_on_past
if task.start_date:
# round to seconds to reduce payload size
node['start_ts'] = int(task.start_date.timestamp())
if task.end_date:
# round to seconds to reduce payload size
node['end_ts'] = int(task.end_date.timestamp())
if task.extra_links:
node['extra_links'] = task.extra_links
return node
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates],
}
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
# avoid spaces to reduce payload size
data = htmlsafe_json_dumps(data, separators=(',', ':'))
return self.render_template(
'airflow/tree.html',
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
root=root,
form=form,
dag=dag,
doc_md=doc_md,
data=data,
blur=blur,
num_runs=num_runs,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
)
@expose('/graph')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
"""Get DAG as Graph."""
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = task_group_to_dict(dag.task_group)
edges = dag_edges(dag)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
"""Graph Form class."""
arrange = SelectField(
"Layout",
choices=(
('LR', "Left > Right"),
('RL', "Right > Left"),
('TB', "Top > Bottom"),
('BT', "Bottom > Top"),
),
)
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
return self.render_template(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
dag_run_state=dt_nr_dr_data['dr_state'],
)
@expose('/duration')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging # pylint: disable=too-many-locals
@provide_session # pylint: disable=too-many-locals
def duration(self, session=None):
"""Get Dag as duration graph."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
try:
dag = current_app.dag_bag.get_dag(dag_id)
except airflow.exceptions.SerializedDagNotFound:
dag = None
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for('Airflow.index'))
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y_points = defaultdict(list)
x_points = defaultdict(list)
cumulative_y = defaultdict(list)
task_instances = dag.get_task_instances(start_date=min_date, end_date=base_date)
ti_fails = (
session.query(TaskFail)
.filter(
TaskFail.dag_id == dag.dag_id,
TaskFail.execution_date >= min_date,
TaskFail.execution_date <= base_date,
TaskFail.task_id.in_([t.task_id for t in dag.tasks]),
)
.all()
)
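# Sum TaskFail durations per (dag_id, task_id, execution_date) so the cumulative chart also counts time spent in failed attempts.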
fails_totals = defaultdict(int)
for failed_task_instance in ti_fails:
dict_key = (
failed_task_instance.dag_id,
failed_task_instance.task_id,
failed_task_instance.execution_date,
)
if failed_task_instance.duration:
fails_totals[dict_key] += failed_task_instance.duration
for task_instance in task_instances:
if task_instance.duration:
date_time = wwwutils.epoch(task_instance.execution_date)
x_points[task_instance.task_id].append(date_time)
y_points[task_instance.task_id].append(float(task_instance.duration))
fails_dict_key = (task_instance.dag_id, task_instance.task_id, task_instance.execution_date)
fails_total = fails_totals[fails_dict_key]
cumulative_y[task_instance.task_id].append(float(task_instance.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cumulative_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({y_unit})')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({cum_y_unit})')
cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
cum_chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(cumulative_y[task_id], cum_y_unit),
)
dates = sorted({ti.execution_date for ti in task_instances})
max_date = max([ti.execution_date for ti in task_instances]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (
cum_chart.htmlcontent[:s_index]
+ "$( document ).trigger('chartload')"
+ cum_chart.htmlcontent[s_index:]
)
return self.render_template(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=Markup(chart.htmlcontent),
cum_chart=Markup(cum_chart.htmlcontent),
)
@expose('/tries')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def tries(self, session=None):
"""Shows all tries."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height, width="1200"
)
for task in dag.tasks:
y_points = []
x_points = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x_points.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y_points.append(ti.prev_attempted_tries)
if x_points:
chart.add_serie(name=task.task_id, x=x_points, y=y_points)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
tries = sorted({ti.try_number for ti in tis})
max_date = max([ti.execution_date for ti in tis]) if tries else None
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Tries')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=Markup(chart.htmlcontent),
tab_title='Tries',
)
@expose('/landing_times')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def landing_times(self, session=None):
"""Shows landing times."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(name="lineChart", x_is_date=True, height=chart_height, width="1200")
y_points = {}
x_points = {}
for task in dag.tasks:
task_id = task.task_id
y_points[task_id] = []
x_points[task_id] = []
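# Landing time: seconds from the run's schedule boundary (the following schedule, when one exists) to the task's end time.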
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x_points[task_id].append(dttm)
y_points[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Landing Time ({y_unit})')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
dates = sorted({ti.execution_date for ti in tis})
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=Markup(chart.htmlcontent),
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
tab_title='Landing times',
)
@expose('/paused', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def paused(self):
"""Toggle paused."""
dag_id = request.args.get('dag_id')
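# The query arg carries the DAG's current paused state, so 'false' means the toggle is switching the DAG to paused.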
is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(is_paused=is_paused)
return "OK"
@expose('/refresh', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
@provide_session
def refresh(self, session=None):
"""Refresh DAG."""
dag_id = request.values.get('dag_id')
orm_dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dag = current_app.dag_bag.get_dag(dag_id)
# sync dag permission
current_app.appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash(f"DAG [{dag_id}] is now fresh as a daisy")
return redirect(request.referrer)
@expose('/refresh_all', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def refresh_all(self):
"""Refresh everything"""
current_app.dag_bag.collect_dags_from_db()
# sync permissions for all dags
for dag_id, dag in current_app.dag_bag.dags.items():
current_app.appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash("All DAGs are now up to date")
return redirect(url_for('Airflow.index'))
@expose('/gantt')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def gantt(self, session=None):
"""Show GANTT chart."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [ti for ti in dag.get_task_instances(dttm, dttm) if ti.start_date and ti.state]
tis = sorted(tis, key=lambda ti: ti.start_date)
ti_fails = list(
itertools.chain(
*[
(
session.query(TaskFail)
.filter(
TaskFail.dag_id == ti.dag_id,
TaskFail.task_id == ti.task_id,
TaskFail.execution_date == ti.execution_date,
)
.all()
)
for ti in tis
]
)
)
# determine bars to show in the gantt chart
gantt_bar_items = []
tasks = []
for ti in tis:
end_date = ti.end_date or timezone.utcnow()
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries
gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))
task_dict = alchemy_to_dict(ti)
task_dict['extraLinks'] = dag.get_task(ti.task_id).extra_links
tasks.append(task_dict)
tf_count = 0
try_count = 1
prev_task_id = ""
for failed_task_instance in ti_fails:
end_date = failed_task_instance.end_date or timezone.utcnow()
start_date = failed_task_instance.start_date or end_date
if tf_count != 0 and failed_task_instance.task_id == prev_task_id:
try_count += 1
else:
try_count = 1
prev_task_id = failed_task_instance.task_id
gantt_bar_items.append(
(failed_task_instance.task_id, start_date, end_date, State.FAILED, try_count)
)
tf_count += 1
task = dag.get_task(failed_task_instance.task_id)
task_dict = alchemy_to_dict(failed_task_instance)
task_dict['state'] = State.FAILED
task_dict['operator'] = task.task_type
task_dict['try_number'] = try_count
task_dict['extraLinks'] = task.extra_links
tasks.append(task_dict)
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render_template(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/extra_links')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def extra_links(self):
"""
A RESTful endpoint that returns an external link URL for a given operator.
It asks the task's operator for the URL registered under the requested
external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{
'url': None,
'error': f"can't find dag {dag} or task_id {task_id}",
}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify({'url': None, 'error': f'No URL found for {link_name}'})
response.status_code = 404
return response
@expose('/object/task_instances')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task_instances(self):
"""Shows task instances."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
return json.dumps(task_instances, cls=utils_json.AirflowJsonEncoder)
class ConfigurationView(AirflowBaseView):
"""View to show Airflow Configurations"""
default_view = 'conf'
class_permission_name = permissions.RESOURCE_CONFIG
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
@expose('/configuration')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
]
)
def conf(self):
"""Shows configuration."""
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(AIRFLOW_CONFIG) as file:
config = file.read()
table = [
(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()
]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons."
)
table = None
if raw:
return Response(response=config, status=200, mimetype="text/plain")
else:
code_html = Markup(
highlight(
config,
lexers.IniLexer(), # Lexer call pylint: disable=no-member
HtmlFormatter(noclasses=True),
)
)
return self.render_template(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html,
title=title,
subtitle=subtitle,
table=table,
)
class RedocView(AirflowBaseView):
"""Redoc Open API documentation"""
default_view = 'redoc'
@expose('/redoc')
def redoc(self):
"""Redoc API documentation."""
openapi_spec_url = url_for("/api/v1./api/v1_openapi_yaml")
return self.render_template('airflow/redoc.html', openapi_spec_url=openapi_spec_url)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
"""Filter using DagIDs"""
def apply(self, query, func): # noqa pylint: disable=redefined-outer-name,unused-argument
if current_app.appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView): # noqa: D101
"""Airflow Mode View."""
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
"""View to show SlaMiss table"""
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_SLA_MISS
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
"""View to show records from XCom table"""
route_base = '/xcom'
list_title = 'List XComs'
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
class_permission_name = permissions.RESOURCE_XCOM
method_permission_name = {
'list': 'read',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete action."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pre_add(self, item):
"""Pre add hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
def pre_update(self, item):
"""Pre update hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
class ConnectionModelView(AirflowModelView):
"""View to show records from Connections table"""
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(Connection) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_CONNECTION
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
extra_fields = [
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
'extra__google_cloud_platform__num_retries',
'extra__grpc__auth_type',
'extra__grpc__credential_pem_file',
'extra__grpc__scopes',
'extra__yandexcloud__service_account_json',
'extra__yandexcloud__service_account_json_path',
'extra__yandexcloud__oauth',
'extra__yandexcloud__public_ssh_key',
'extra__yandexcloud__folder_id',
'extra__kubernetes__in_cluster',
'extra__kubernetes__kube_config',
'extra__kubernetes__namespace',
]
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted']
add_columns = edit_columns = [
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
"""Process form data."""
formdata = form.data
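# For these connection types, pack the provider-specific extra__ form fields into the JSON-encoded extra column.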
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc', 'yandexcloud', 'kubernetes']:
extra = {key: formdata[key] for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
"""Prefill the form."""
try:
extra = form.data.get('extra')
if extra is None:
extra_dictionary = {}
else:
extra_dictionary = json.loads(extra)
except JSONDecodeError:
extra_dictionary = {}
if not isinstance(extra_dictionary, dict):
logging.warning('extra field for %s is not a dictionary', form.data.get('conn_id', '<unknown>'))
return
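# Copy values from the stored extra JSON back into the matching provider-specific form fields.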
for field in self.extra_fields:
value = extra_dictionary.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PluginView(AirflowBaseView):
"""View to show Airflow Plugins"""
default_view = 'list'
class_permission_name = permissions.RESOURCE_PLUGIN
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
plugins_attributes_to_dump = [
"hooks",
"executors",
"macros",
"admin_views",
"flask_blueprints",
"menu_links",
"appbuilder_views",
"appbuilder_menu_items",
"global_operator_extra_links",
"operator_extra_links",
"source",
]
@expose('/plugin')
def list(self):
"""List loaded plugins."""
plugins_manager.ensure_plugins_loaded()
plugins_manager.integrate_executor_plugins()
plugins_manager.initialize_extra_operators_links_plugins()
plugins_manager.initialize_web_ui_plugins()
plugins = []
for plugin_no, plugin in enumerate(plugins_manager.plugins, 1):
plugin_data = {
'plugin_no': plugin_no,
'plugin_name': plugin.name,
'attrs': {},
}
for attr_name in self.plugins_attributes_to_dump:
attr_value = getattr(plugin, attr_name)
plugin_data['attrs'][attr_name] = attr_value
plugins.append(plugin_data)
title = "Airflow Plugins"
return self.render_template(
'airflow/plugin.html',
plugins=plugins,
title=title,
)
class PoolModelView(AirflowModelView):
"""View to show records from Pool table"""
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_POOL
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
def action_muldelete(self, items):
"""Multiple delete."""
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash("default_pool cannot be deleted", 'error')
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(self):
"""Pool link rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
if pool_id is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id) # noqa
else:
return Markup('<span class="label label-danger">Invalid</span>')
def frunning_slots(self):
"""Running slots rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
running_slots = self.get('running_slots') # noqa pylint: disable=no-member
if pool_id is not None and running_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
return Markup("<a href='{url}'>{running_slots}</a>").format( # noqa
url=url, running_slots=running_slots
)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(self):
"""Queued slots rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
queued_slots = self.get('queued_slots') # noqa pylint: disable=no-member
if pool_id is not None and queued_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
return Markup("<a href='{url}'>{queued_slots}</a>").format( # noqa
url=url, queued_slots=queued_slots
)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {'pool': pool_link, 'running_slots': frunning_slots, 'queued_slots': fqueued_slots}
validators_columns = {'pool': [validators.DataRequired()], 'slots': [validators.NumberRange(min=-1)]}
class VariableModelView(AirflowModelView):
"""View to show records from Variable table"""
route_base = '/variable'
list_template = 'airflow/variable_list.html'
edit_template = 'airflow/variable_edit.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_VARIABLE
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
'action_varexport': 'read',
'varimport': 'create',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(self):
"""Formats hidden fields"""
key = self.get('key') # noqa pylint: disable=no-member
val = self.get('val') # noqa pylint: disable=no-member
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {'key': [validators.DataRequired()]}
def prefill_form(self, form, request_id): # pylint: disable=unused-argument
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
"""Export variables."""
var_dict = {}
decoder = json.JSONDecoder()
for var in items:
try:
val = decoder.decode(var.val)
except Exception: # noqa pylint: disable=broad-except
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
@expose('/varimport', methods=["POST"])
@action_logging
def varimport(self):
"""Import variables"""
try:
out = request.files['file'].read()
if isinstance(out, bytes):
variable_dict = json.loads(out.decode('utf-8'))
else:
variable_dict = json.loads(out)
except Exception: # noqa pylint: disable=broad-except
self.update_redirect()
flash("Missing file or syntax error.", 'error')
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
for k, v in variable_dict.items():
try:
models.Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e: # pylint: disable=broad-except
logging.info('Variable import failed: %s', repr(e))
fail_count += 1
else:
suc_count += 1
flash(f"{suc_count} variable(s) successfully updated.")
if fail_count:
flash(f"{fail_count} variable(s) failed to be updated.", 'error')
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
"""View to show records from Job table"""
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(BaseJob) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_JOB
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
'id',
'dag_id',
'state',
'job_type',
'start_date',
'end_date',
'latest_heartbeat',
'executor_class',
'hostname',
'unixname',
]
search_columns = [
'id',
'dag_id',
'state',
'job_type',
'start_date',
'end_date',
'latest_heartbeat',
'executor_class',
'hostname',
'unixname',
]
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
"""View to show records from DagRun table"""
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_DAG_RUN
method_permission_name = {
'add': 'create',
'list': 'read',
'action_muldelete': 'delete',
'action_set_running': 'edit',
'action_set_failed': 'edit',
'action_set_success': 'edit',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'run_type', 'external_trigger', 'conf']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'run_type', 'external_trigger', 'conf']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
'conf': wwwutils.json_f('conf'),
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?", single=False)
@provide_session
def action_muldelete(self, items, session=None): # noqa # pylint: disable=unused-argument
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
"""Set state to running."""
try:
count = 0
dirty_ids = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
session.commit()
flash(f"{count} dag runs were set to running")
except Exception as ex: # pylint: disable=broad-except
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action(
'set_failed',
"Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False,
)
@provide_session
def action_set_failed(self, drs, session=None):
"""Set state to failed."""
try:
count = 0
dirty_ids = []
altered_tis = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += set_dag_run_state_to_failed(
current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(count=count, altered_ti_count=altered_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action(
'set_success',
"Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False,
)
@provide_session
def action_set_success(self, drs, session=None):
"""Set state to success."""
try:
count = 0
dirty_ids = []
altered_tis = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += set_dag_run_state_to_success(
current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(count=count, altered_ti_count=altered_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('clear', "Clear the state", "All task instances would be cleared, are you sure?", single=False)
@provide_session
def action_clear(self, drs, session=None):
"""Clears the state."""
try:
count = 0
cleared_ti_count = 0
dag_to_tis = {}
for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
count += 1
dag = current_app.dag_bag.get_dag(dr.dag_id)
tis_to_clear = dag_to_tis.setdefault(dag, [])
tis_to_clear += dr.get_task_instances()
for dag, tis in dag_to_tis.items():
cleared_ti_count += len(tis)
models.clear_task_instances(tis, session, dag=dag)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were cleared".format(count=count, altered_ti_count=cleared_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to clear state', 'error')
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
"""View to show records from Log table"""
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(Log) # noqa # type:ignore
class_permission_name = permissions.RESOURCE_AUDIT_LOG
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskRescheduleModelView(AirflowModelView):
"""View to show records from Task Reschedule table"""
route_base = '/taskreschedule'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskReschedule) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_TASK_RESCHEDULE
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
'id',
'dag_id',
'task_id',
'execution_date',
'try_number',
'start_date',
'end_date',
'duration',
'reschedule_date',
]
search_columns = ['dag_id', 'task_id', 'execution_date', 'start_date', 'end_date', 'reschedule_date']
base_order = ('id', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def duration_f(self):
"""Duration calculation."""
end_date = self.get('end_date') # noqa pylint: disable=no-member
duration = self.get('duration') # noqa pylint: disable=no-member
if end_date and duration:
return timedelta(seconds=duration)
return None
formatters_columns = {
'dag_id': wwwutils.dag_link,
'task_id': wwwutils.task_instance_link,
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'execution_date': wwwutils.datetime_f('execution_date'),
'reschedule_date': wwwutils.datetime_f('reschedule_date'),
'duration': duration_f,
}
class TaskInstanceModelView(AirflowModelView):
"""View to show records from TaskInstance table"""
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_TASK_INSTANCE
method_permission_name = {
'list': 'read',
'action_clear': 'edit',
'action_set_running': 'edit',
'action_set_failed': 'edit',
'action_set_success': 'edit',
'action_set_retry': 'edit',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
page_size = PAGE_SIZE
list_columns = [
'state',
'dag_id',
'task_id',
'execution_date',
'operator',
'start_date',
'end_date',
'duration',
'job_id',
'hostname',
'unixname',
'priority_weight',
'queue',
'queued_dttm',
'try_number',
'pool',
'log_url',
]
order_columns = [item for item in list_columns if item not in ['try_number', 'log_url']]
search_columns = [
'state',
'dag_id',
'task_id',
'execution_date',
'hostname',
'queue',
'pool',
'operator',
'start_date',
'end_date',
]
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(self):
"""Formats log URL."""
log_url = self.get('log_url') # noqa pylint: disable=no-member
return Markup( # noqa
'<a href="{log_url}"><span class="material-icons" aria-hidden="true">reorder</span></a>'
).format(log_url=log_url)
def duration_f(self):
"""Formats duration."""
end_date = self.get('end_date') # noqa pylint: disable=no-member
duration = self.get('duration') # noqa pylint: disable=no-member
if end_date and duration:
return timedelta(seconds=duration)
return None
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action(
'clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'
),
single=False,
)
    def action_clear(self, task_instances, session=None):
        """Clears the state of the selected task instances."""
try:
dag_to_tis = {}
for ti in task_instances:
dag = current_app.dag_bag.get_dag(ti.dag_id)
task_instances_to_clean = dag_to_tis.setdefault(dag, [])
task_instances_to_clean.append(ti)
for dag, task_instances_list in dag_to_tis.items():
models.clear_task_instances(task_instances_list, session, dag=dag)
session.commit()
flash("{} task instances have been cleared".format(len(task_instances)))
self.update_redirect()
return redirect(self.get_redirect())
except Exception: # noqa pylint: disable=broad-except
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
"""Set task instance state."""
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(
count=count, target_state=target_state
)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_running(self, tis):
"""Set state to 'running'"""
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_failed(self, tis):
"""Set state to 'failed'"""
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_success(self, tis):
"""Set state to 'success'"""
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_retry(self, tis):
"""Set state to 'up_for_retry'"""
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
class DagModelView(AirflowModelView):
"""View to show records from DAG table"""
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(DagModel) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_DAG
method_permission_name = {
'list': 'read',
'show': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
list_columns = [
'dag_id',
'is_paused',
'last_scheduler_run',
'last_expired',
'scheduler_lock',
'fileloc',
'owners',
]
formatters_columns = {'dag_id': wwwutils.dag_link}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""Default filters for model"""
return (
super() # noqa pylint: disable=no-member
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""Default filters for model"""
return (
super() # noqa pylint: disable=no-member
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
]
)
@provide_session
@expose('/autocomplete')
def autocomplete(self, session=None):
"""Autocomplete."""
query = unquote(request.args.get('query', ''))
if not query:
wwwutils.json_response([])
# Provide suggestions of dag_ids and owners
dag_ids_query = session.query(DagModel.dag_id.label('item')).filter( # pylint: disable=no-member
~DagModel.is_subdag, DagModel.is_active, DagModel.dag_id.ilike('%' + query + '%')
) # noqa pylint: disable=no-member
owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
~DagModel.is_subdag, DagModel.is_active, DagModel.owners.ilike('%' + query + '%')
) # noqa pylint: disable=no-member
# Hide DAGs if not showing status: "all"
status = flask_session.get(FILTER_STATUS_COOKIE)
if status == 'active':
dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
owners_query = owners_query.filter(~DagModel.is_paused)
elif status == 'paused':
dag_ids_query = dag_ids_query.filter(DagModel.is_paused)
owners_query = owners_query.filter(DagModel.is_paused)
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
# pylint: disable=no-member
if permissions.RESOURCE_DAG not in filter_dag_ids:
dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
# pylint: enable=no-member
payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
return wwwutils.json_response(payload)
| apache-2.0 | 6,772,566,299,823,838,000 | 35.249329 | 110 | 0.57077 | false |
neuronalmotion/plasmoid-amixer | src/contents/code/main.py | 1 | 3785 | # -*- coding: utf-8 -*-
# -----------------------#
# License: GPL #
# Author: NeuronalMotion #
# -----------------------#
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QGraphicsLinearLayout
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
from PyKDE4 import kdecore
import subprocess
import re
import time
import os
class NMAmixer(plasmascript.Applet):
cardName = "DGX" #from cat /proc/asound/cards
mixerControlName = "Analog Output"
frontAmixerValue = "Stereo Headphones FP"
rearAmixerValue = "Stereo Headphones"
frontLabel = "HP"
rearLabel = "Rear"
frontPicture = "/images/headphones.png"
rearPicture = "/images/speaker.png"
cardIndex = 0
buttonSwitchOutput = None
rootPath = None
def __init__(self,parent,args=None):
plasmascript.Applet.__init__(self,parent)
def init(self):
self.rootPath = kdecore.KGlobal.dirs().locate("data", "plasma/plasmoids/nm-plasmoid-amixer/contents/")
self.setHasConfigurationInterface(False)
self.setAspectRatioMode(Plasma.Square)
self.searchCardIndex()
self.layout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
self.buttonSwitchOutput = Plasma.PushButton(self.applet)
#self.buttonSwitchOutput.setText(self.getCurrentOutputLabel())
self.buttonSwitchOutput.setImage(self.getCurrentOutputPicture())
self.buttonSwitchOutput.clicked.connect(self.onClickButtonSwitchOutput)
self.layout.addItem(self.buttonSwitchOutput)
self.applet.setLayout(self.layout)
self.resize(75,150)
def searchCardIndex(self):
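        # illustrative /proc/asound/cards line this parsing assumes (example
        # only, not from the original source):
        #  " 1 [DGX            ]: CMI8786 - Xonar DGX"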
proc = subprocess.Popen(["cat /proc/asound/cards | grep %s" % self.cardName], shell = True, stdout = subprocess.PIPE)
for line in proc.stdout:
m = re.search("(\\d).*\\[", line)
self.cardIndex = m.group(1)
print "card index is %s" % self.cardIndex
def getCurrentOutputName(self):
output = ""
cli = "amixer -c %s sget \"%s\"" % (self.cardIndex, self.mixerControlName)
print cli
proc = subprocess.Popen([cli], shell = True, stdout = subprocess.PIPE)
for line in proc.stdout:
if "Item0" in line:
m = re.search("'(.*)'", line)
output = m.group(1)
print output
return output
def getCurrentOutputLabel(self):
label = "?"
outputName = self.getCurrentOutputName()
if outputName == self.frontAmixerValue:
label = self.frontLabel
elif outputName == self.rearAmixerValue:
label = self.rearLabel
return label
def getCurrentOutputPicture(self):
picture = ""
outputName = self.getCurrentOutputName()
if outputName == self.frontAmixerValue:
picture = self.frontPicture
elif outputName == self.rearAmixerValue:
picture = self.rearPicture
return self.rootPath + picture
def onClickButtonSwitchOutput(self):
outputName = self.getCurrentOutputName()
outputNameTarget = None
if outputName == self.frontAmixerValue:
outputNameTarget = self.rearAmixerValue
elif outputName == self.rearAmixerValue:
outputNameTarget = self.frontAmixerValue
cli = "amixer -c %s sset \"Analog Output\" \"%s\"" % (self.cardIndex, outputNameTarget)
print cli
subprocess.Popen([cli], shell = True, stdout = subprocess.PIPE)
# Avoid IOError: [Errno 4] Interrupted system call
time.sleep(1)
#self.buttonSwitchOutput.setText(self.getCurrentOutputLabel())
self.buttonSwitchOutput.setImage(self.getCurrentOutputPicture())
def CreateApplet(parent):
return NMAmixer(parent)
| gpl-2.0 | -2,672,511,067,184,466,000 | 30.541667 | 125 | 0.643329 | false |
bl4de/irc-client | irc_client.py | 1 | 3291 | #!/usr/bin/env python
# by bl4de | github.com/bl4de | twitter.com/_bl4de | hackerone.com/bl4de
import socket
import sys
import threading
def usage():
print "IRC simple Python client | by bl4de | github.com/bl4de | twitter.com/_bl4de | hackerone.com/bl4de\n"
print "$ ./irc_client.py USERNAME CHANNEL\n"
print "where: USERNAME - your username, CHANNEL - channel you'd like to join (eg. channelname or #channelname)"
def channel(channel):
if channel.startswith("#") == False:
return "#" + channel
return channel
def quit():
client.send_cmd("QUIT", "Good bye!")
print "Quitting ..."
exit(0)
class IRCSimpleClient:
def __init__(self, username, channel, server="irc.freenode.net", port=6667):
self.username = username
self.server = server
self.port = port
self.channel = channel
def connect(self):
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.server, self.port))
def get_response(self):
return self.conn.recv(512)
def send_cmd(self, cmd, message):
command = "{} {}\r\n".format(cmd, message)
self.conn.send(command)
def send_message_to_channel(self, message):
command = "PRIVMSG {}".format(self.channel)
message = ":" + message
self.send_cmd(command, message)
def join_channel(self):
cmd = "JOIN"
channel = self.channel
self.send_cmd(cmd, channel)
def print_response(self):
resp = self.get_response()
if resp:
msg = resp.strip().split(":")
print "\n< {}> {}".format(msg[1].split("!")[0], msg[2].strip())
if __name__ == "__main__":
if len(sys.argv) != 3:
usage()
exit(0)
else:
username = sys.argv[1]
channel = channel(sys.argv[2])
cmd = ""
joined = False
client = IRCSimpleClient(username, channel)
client.connect()
while(joined == False):
resp = client.get_response()
print resp.strip()
if "No Ident response" in resp:
client.send_cmd("NICK", username)
client.send_cmd(
"USER", "{} * * :{}".format(username, username))
# we're accepted, now let's join the channel!
if "376" in resp:
client.join_channel()
# username already in use? try to use username with _
if "433" in resp:
username = "_" + username
client.send_cmd("NICK", username)
client.send_cmd(
"USER", "{} * * :{}".format(username, username))
# if PING send PONG with name of the server
if "PING" in resp:
client.send_cmd("PONG", ":" + resp.split(":")[1])
# we've joined
if "366" in resp:
joined = True
t = threading.Thread(target=client.print_response)
t.start()
try:
while(cmd != "/quit"):
cmd = raw_input("< {}> ".format(username)).strip()
if cmd == "/quit":
quit()
if cmd and len(cmd) > 0:
client.send_message_to_channel(cmd)
except KeyboardInterrupt:
quit()
t = threading.Thread(target=client.print_response)
t.start()
| mit | 8,789,912,966,740,696,000 | 28.657658 | 115 | 0.553327 | false |
CroceRossaItaliana/jorvik | api/v1/views.py | 1 | 4044 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from oauth2_provider.ext.rest_framework import TokenHasScope
from api.settings import SCOPE_ANAGRAFICA_LETTURA_BASE, SCOPE_ANAGRAFICA_LETTURA_COMPLETA, SCOPE_APPARTENENZE_LETTURA
from api.v1 import serializzatori
from anagrafica.permessi.applicazioni import PERMESSI_NOMI_DICT
# /me/anagrafica/base/
class MiaAnagraficaBase(APIView):
"""
    A view that returns information about the identified person.
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE]
def get(self, request, format=None):
dati = serializzatori.persona_anagrafica_base(request.user.persona)
return Response(dati)
# /me/anagrafica/completa/
class MiaAnagraficaCompleta(APIView):
"""
    A view that returns the complete personal record (anagrafica) of the
    identified person (basic record plus additional data).
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE,
SCOPE_ANAGRAFICA_LETTURA_COMPLETA]
def get(self, request, format=None):
dati = serializzatori.persona_anagrafica_completa(request.user.persona)
return Response(dati)
# /me/appartenenze/attuali/
class MieAppartenenzeAttuali(APIView):
"""
    A view that returns information about current memberships (appartenenze).
"""
required_scopes = [SCOPE_APPARTENENZE_LETTURA]
def get(self, request, format=None):
me = request.user.persona
appartenenze = me.appartenenze_attuali()
appartenenze = [serializzatori.appartenenza(i) for i in appartenenze]
dati = {"appartenenze": appartenenze}
return Response(dati)
class MiaAppartenenzaComplaeta(APIView):
"""
    user ID, - Persona
    first name, - Persona
    last name, - Persona
    contact e-mail address - Persona
    respective home unit (sede di appartenenza), - Persona
    committee (comitato) ID,
    committee name,
    committee extension R/P/L/T,
    delegation (delega)
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE,
SCOPE_ANAGRAFICA_LETTURA_COMPLETA,
SCOPE_APPARTENENZE_LETTURA]
def get(self, request, format=None):
me = request.user.persona
# Persona
dati = {
'id_persona': me.pk,
'nome': me.nome,
'cognome': me.cognome,
'data_di_nascita': me.data_nascita,
'codice_fiscale': me.codice_fiscale,
}
if me.email is not None:
dati['email'] = me.email
        # Committee (comitato)
deleghe = me.deleghe_attuali()
l_deleghe = []
for delega in deleghe:
d_delega = {
'id': delega.id,
'tipo': PERMESSI_NOMI_DICT[delega.tipo],
'appartenenza': delega.oggetto.nome,
}
l_deleghe.append(d_delega)
dati['deleghe'] = l_deleghe
        # memberships (appartenenze)
appartenenze = me.appartenenze_attuali()
l_appartenenza = []
for appartenenza in appartenenze:
comitato = appartenenza.sede
l_appartenenza.append({
'id': comitato.id,
'nome': comitato.nome,
'tipo': {
'id': appartenenza.membro,
'descrizione': appartenenza.get_membro_display()
},
'comitato': {
'id': comitato.estensione,
'descrizione': comitato.get_estensione_display()
},
})
dati['appartenenze'] = l_appartenenza
return Response(dati)
#serializzatori._campo(comitato.estensione, comitato.get_estensione_display())
| gpl-3.0 | 1,223,268,520,127,786,500 | 29.171642 | 117 | 0.606728 | false |
vertelab/odoo-smart | smart_salary_simulator_se/models/hr_payroll.py | 1 | 1318 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2004-2015 Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import logging
_logger = logging.getLogger(__name__)
class hr_salary_rule(models.Model):
_inherit = 'hr.salary.rule'
name = fields.Char(string="Name",required=True, translate=True, readonly=False)
| agpl-3.0 | 6,327,244,166,136,757,000 | 42.933333 | 83 | 0.637329 | false |
susca/funge | minibefu93.py | 1 | 1657 | #!/usr/bin/python3
# minibefu93.py -- a minimal Befunge93 interpreter written in Python
# usage: minibefu93.py <prog.fu>
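# illustrative example (not from the original source): the classic Befunge-93
# hello-world below should run under this interpreter; save it as hello.fu
# and invoke ./minibefu93.py hello.fu
#   64+"!dlroW ,olleH">:#,_@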
import sys,random
o=sys.stdout
q=0,0
d=1,0
m=80,25
def k(x,y):return x+m[0]*y+y
def g(s,p=None):
x,y=q if p is None else p
return s[k(x,y)]
def p(s,p,v):
o=k(*p)
return s[:o]+chr(v)+s[o+1:]
def a():return (q[0]+d[0])%m[0],(q[1]+d[1])%m[1]
class S(list):
def p(s,*a):return super().pop(*a) if s else 0
def a(s,v):s.append(v)
def __getitem__(s,key):return super().__getitem__(key) if s else 0
with open(sys.argv[1]) as f:r=f.read()
l=r.split('\n')
[l.append('') for _ in range(len(l),m[1])]
r='\n'.join(f'{s:<{m[0]}}' for s in l)
s=S()
f=False
while True:
c=g(r)
if c=='"':f=not f
elif f:s.a(ord(c))
elif c in '1234567890':s.a(int(c))
elif c=='>':d=(1,0)
elif c=='<':d=(-1,0)
elif c=='^':d=(0,-1)
elif c=='v':d=(0,1)
elif c=='?':d=random.choice(((0,1),(1,0),(-1,0),(0,-1)))
elif c=='#':q=a()
elif c=='+':s.a(s.p()+s.p())
elif c=='-':s.a(s.p(-2)-s.p())
elif c=='*':s.a(s.p()*s.p())
elif c=='/':s.a(int(s.p(-2) // s.p()))
elif c=='%':s.a(s.p(-2) % s.p())
elif c=='!':s.a(int(not bool(s.p())))
elif c=='`':s.a(int(s.p(-2)>s.p()))
elif c=='_':d=(-1,0) if s.p() else (1,0)
elif c=='|':d=(0,-1) if s.p() else (0,1)
elif c==':':s.a(s[-1])
elif c=='\\':i,j=s.p(),s.p();s.a(i);s.a(j)
elif c=='$':s.p()
elif c=='.':o.write(str(s.p()));o.flush()
elif c==',':o.write(chr(s.p()));o.flush()
elif c=='&':s.a(int(input()))
elif c=='~':s.a(ord(input()[0]))
elif c=='g':y,x=s.p(),s.p();s.a(ord(g(r,(x,y))))
elif c=='p':y,x,v=s.p(),s.p(),s.p();r=p(r,(x,y),v)
elif c=='@':break
q=a()
| mit | 7,724,251,767,620,839,000 | 28.070175 | 68 | 0.493663 | false |
DanielJDufour/person-extractor | person_extractor/__init__.py | 1 | 2950 | from nltk.chunk import _MULTICLASS_NE_CHUNKER
from nltk.data import load
from nltk.tag.perceptron import PerceptronTagger
from nltk import ne_chunk, word_tokenize
from os import listdir
from os.path import dirname, realpath
from re import findall, finditer, MULTILINE, UNICODE
from re import compile as re_compile
flags = MULTILINE|UNICODE
directory_of_this_file = dirname(realpath(__file__))
# load patterns
directory_of_patterns = directory_of_this_file + "/prep/patterns"
language_pattern = {}
for filename in listdir(directory_of_patterns):
language = filename.split(".")[0]
with open(directory_of_patterns + "/" + language + ".txt") as f:
pattern_as_string = (f.read().decode("utf-8").strip())
#print "p"
#print pattern_as_string
pattern = re_compile(pattern_as_string, flags=flags)
language_pattern[language] = pattern
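
# note (assumption, not from the original source): each prep/patterns/<language>.txt
# is expected to contain a single regex with a named group called "person",
# e.g. r"(?P<person>[A-Z][a-z]+ [A-Z][a-z]+)", since the extractors below call
# mo.group("person") on every match.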
global tagger
tagger = None
global chunker
chunker = None
def loadTaggerIfNecessary():
global tagger
if tagger is None:
tagger = PerceptronTagger()
def loadChunkerIfNecessary():
global chunker
if chunker is None:
chunker = load(_MULTICLASS_NE_CHUNKER)
def flatten(lst):
result = []
for element in lst:
if hasattr(element, '__iter__'):
result.extend(flatten(element))
else:
result.append(element)
return result
def extract_people_quickly(text, language=None):
if isinstance(text, str):
text = text.decode("utf-8")
people = set()
if language:
        for mo in finditer(language_pattern[language], text):
people.add(mo.group("person"))
else:
for pattern in language_pattern.values():
print "pattern is"
print pattern
for mo in finditer(pattern, text):
people.add(mo.group("person"))
return list(people)
def extract_person_quickly(text, language=None):
return (extract_people_quickly(text, language=language) or [None])[0]
def extract_people_slowly(text, language=None):
global tagger
loadTaggerIfNecessary()
global chunker
loadChunkerIfNecessary()
if isinstance(text, str):
text = text.decode("utf-8")
people = []
for tree in chunker.parse(tagger.tag(word_tokenize(text))).subtrees():
if tree.label() == "PERSON":
people.append(" ".join([leaf[0] for leaf in tree.leaves()]))
people = findall("(?:[A-Z][a-z]+ )?(?:" + "|".join(people) + ")(?: [A-Z][a-z]+)?", text)
return people
def extract_person_slowly(text):
return extract_people(text)[0]
def extract_people(text, language=None, speed="slowly"):
if speed == "slowly":
return extract_people_slowly(text, language)
else:
return extract_people_quickly(text, language)
def extract_person(text, language=None, speed="slowly"):
return (extract_people(text, language, speed) or [None]) [0]
epq=extract_people_quickly
eps=extract_people_slowly
| apache-2.0 | 3,573,599,924,659,248,000 | 27.095238 | 92 | 0.654237 | false |
gschizas/praw | praw/models/reddit/redditor.py | 1 | 13544 | """Provide the Redditor class."""
from json import dumps
from typing import Any, Dict, Generator, List, Optional, TypeVar, Union
from ...const import API_PATH
from ...util.cache import cachedproperty
from ..listing.mixins import RedditorListingMixin
from ..util import stream_generator
from .base import RedditBase
from .mixins import FullnameMixin, MessageableMixin
_Redditor = TypeVar("_Redditor")
_RedditorStream = TypeVar("_RedditorStream")
Comment = TypeVar("Comment")
Multireddit = TypeVar("Multireddit")
Reddit = TypeVar("Reddit")
Submission = TypeVar("Submission")
Subreddit = TypeVar("Subreddit")
Trophy = TypeVar("Trophy")
class Redditor(
MessageableMixin, RedditorListingMixin, FullnameMixin, RedditBase
):
"""A class representing the users of reddit.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
comprehensive in any way.
.. note:: Shadowbanned accounts are treated the same as non-existent
accounts, meaning that they will not have any attributes.
.. note:: Suspended/banned accounts will only return the ``name`` and
``is_suspended`` attributes.
==================================== ======================================
Attribute Description
==================================== ======================================
``comment_karma`` The comment karma for the Redditor.
``comments`` Provide an instance of
:class:`.SubListing` for comment
access.
``created_utc`` Time the account was created,
represented in `Unix Time`_.
``has_verified_email`` Whether or not the Redditor has
verified their email.
``icon_img`` The url of the Redditors' avatar.
``id`` The ID of the Redditor.
``is_employee`` Whether or not the Redditor is a
Reddit employee.
``is_friend`` Whether or not the Redditor is friends
with the authenticated user.
``is_mod`` Whether or not the Redditor mods any
subreddits.
``is_gold`` Whether or not the Redditor has active
Reddit Premium status.
``is_suspended`` Whether or not the Redditor is
currently suspended.
``link_karma`` The link karma for the Redditor.
``name`` The Redditor's username.
``subreddit`` If the Redditor has created a
user-subreddit, provides a dictionary
of additional attributes. See below.
``subreddit['banner_img']`` The URL of the user-subreddit banner.
``subreddit['name']`` The fullname of the user-subreddit.
``subreddit['over_18']`` Whether or not the user-subreddit is
NSFW.
``subreddit['public_description']`` The public description of the user-
subreddit.
``subreddit['subscribers']`` The number of users subscribed to the
user-subreddit.
``subreddit['title']`` The title of the user-subreddit.
==================================== ======================================
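
    For example, a couple of the attributes above can be read like this
    (a minimal sketch, mirroring the method examples elsewhere in this class):

    .. code-block:: python

        redditor = reddit.redditor("spez")
        print(redditor.link_karma, redditor.comment_karma)
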
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
STR_FIELD = "name"
@classmethod
def from_data(cls, reddit, data):
"""Return an instance of Redditor, or None from ``data``."""
if data == "[deleted]":
return None
return cls(reddit, data)
@cachedproperty
def stream(self) -> _RedditorStream:
"""Provide an instance of :class:`.RedditorStream`.
Streams can be used to indefinitely retrieve new comments made by a
redditor, like:
.. code-block:: python
for comment in reddit.redditor('spez').stream.comments():
print(comment)
Additionally, new submissions can be retrieved via the stream. In the
following example all submissions are fetched via the redditor
``spez``:
.. code-block:: python
for submission in reddit.redditor('spez').stream.submissions():
print(submission)
"""
return RedditorStream(self)
@property
def _kind(self):
"""Return the class's kind."""
return self._reddit.config.kinds["redditor"]
@property
def _path(self):
return API_PATH["user"].format(user=self)
def __init__(
self,
reddit: Reddit,
name: Optional[str] = None,
fullname: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Redditor instance.
:param reddit: An instance of :class:`~.Reddit`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Exactly one of ``name``, ``fullname`` or ``_data`` must be provided.
"""
if (name, fullname, _data).count(None) != 2:
raise TypeError(
"Exactly one of `name`, `fullname`, or `_data` must be "
"provided."
)
if _data:
assert (
isinstance(_data, dict) and "name" in _data
), "Please file a bug with PRAW"
super().__init__(reddit, _data=_data)
self._listing_use_sort = True
if name:
self.name = name
elif fullname:
self._fullname = fullname
def _fetch_username(self, fullname):
return self._reddit.get(
API_PATH["user_by_fullname"], params={"ids": fullname}
)[fullname]["name"]
def _fetch_info(self):
if hasattr(self, "_fullname"):
self.name = self._fetch_username(self._fullname)
return ("user_about", {"user": self.name}, None)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
other = type(self)(self._reddit, _data=data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _friend(self, method, data):
url = API_PATH["friend_v1"].format(user=self)
self._reddit.request(method, url, data=dumps(data))
def block(self):
"""Block the Redditor.
For example, to block Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").block()
"""
self._reddit.post(
API_PATH["block_user"], params={"account_id": self.fullname}
)
def friend(self, note: str = None):
"""Friend the Redditor.
:param note: A note to save along with the relationship. Requires
Reddit Premium (default: None).
Calling this method subsequent times will update the note.
For example, to friend Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").friend()
To add a note to the friendship (requires Reddit Premium):
.. code-block:: python
reddit.redditor("spez").friend(note="My favorite admin")
"""
self._friend("PUT", data={"note": note} if note else {})
def friend_info(self) -> _Redditor:
"""Return a Redditor instance with specific friend-related attributes.
:returns: A :class:`.Redditor` instance with fields ``date``, ``id``,
and possibly ``note`` if the authenticated user has Reddit Premium.
For example, to get the friendship information of Redditor ``spez``:
.. code-block:: python
            info = reddit.redditor("spez").friend_info()
friend_data = info.date
"""
return self._reddit.get(API_PATH["friend_v1"].format(user=self))
def gild(self, months: int = 1):
"""Gild the Redditor.
:param months: Specifies the number of months to gild up to 36
(default: 1).
For example, to gild Redditor ``spez`` for 1 month:
.. code-block:: python
reddit.redditor("spez").gild(months=1)
"""
if months < 1 or months > 36:
raise TypeError("months must be between 1 and 36")
self._reddit.post(
API_PATH["gild_user"].format(username=self),
data={"months": months},
)
def moderated(self) -> List[Subreddit]:
"""Return a list of the redditor's moderated subreddits.
:returns: A ``list`` of :class:`~praw.models.Subreddit` objects.
Return ``[]`` if the redditor has no moderated subreddits.
.. note:: The redditor's own user profile subreddit will not be
returned, but other user profile subreddits they moderate
will be returned.
Usage:
.. code-block:: python
for subreddit in reddit.redditor('spez').moderated():
print(subreddit.display_name)
print(subreddit.title)
"""
modded_data = self._reddit.get(API_PATH["moderated"].format(user=self))
if "data" not in modded_data:
return []
else:
subreddits = [
self._reddit.subreddit(x["sr"]) for x in modded_data["data"]
]
return subreddits
def multireddits(self) -> List[Multireddit]:
"""Return a list of the redditor's public multireddits.
For example, to to get Redditor ``spez``'s multireddits:
.. code-block:: python
multireddits = reddit.redditor("spez").multireddits()
"""
return self._reddit.get(API_PATH["multireddit_user"].format(user=self))
def trophies(self) -> List[Trophy]:
"""Return a list of the redditor's trophies.
:returns: A ``list`` of :class:`~praw.models.Trophy` objects.
Return ``[]`` if the redditor has no trophy.
Raise ``prawcore.exceptions.BadRequest`` if the redditor doesn't exist.
Usage:
.. code-block:: python
for trophy in reddit.redditor('spez').trophies():
print(trophy.name)
print(trophy.description)
"""
return list(self._reddit.get(API_PATH["trophies"].format(user=self)))
def unblock(self):
"""Unblock the Redditor.
For example, to unblock Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").unblock()
"""
data = {
"container": self._reddit.user.me().fullname,
"name": str(self),
"type": "enemy",
}
url = API_PATH["unfriend"].format(subreddit="all")
self._reddit.post(url, data=data)
def unfriend(self):
"""Unfriend the Redditor.
For example, to unfriend Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").unfriend()
"""
self._friend(method="DELETE", data={"id": str(self)})
class RedditorStream:
"""Provides submission and comment streams."""
def __init__(self, redditor: Redditor):
"""Create a RedditorStream instance.
:param redditor: The redditor associated with the streams.
"""
self.redditor = redditor
def comments(
self, **stream_options: Union[str, int, Dict[str, str]]
) -> Generator[Comment, None, None]:
"""Yield new comments as they become available.
Comments are yielded oldest first. Up to 100 historical comments will
initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
For example, to retrieve all new comments made by redditor ``spez``,
try:
.. code-block:: python
for comment in reddit.redditor('spez').stream.comments():
print(comment)
"""
return stream_generator(self.redditor.comments.new, **stream_options)
def submissions(
self, **stream_options: Union[str, int, Dict[str, str]]
) -> Generator[Submission, None, None]:
"""Yield new submissions as they become available.
Submissions are yielded oldest first. Up to 100 historical submissions
will initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
For example to retrieve all new submissions made by redditor
``spez``, try:
.. code-block:: python
for submission in reddit.redditor('spez').stream.submissions():
print(submission)
"""
return stream_generator(
self.redditor.submissions.new, **stream_options
)
| bsd-2-clause | -3,467,038,217,675,936,000 | 32.691542 | 79 | 0.551831 | false |
glennhickey/hal | assemblyHub/wigTrack.py | 1 | 7699 | #!/usr/bin/env python3
#Copyright (C) 2013 by Ngan Nguyen
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
"""Creating wiggle (annotation) tracks and lifted-over wiggle tracks for the hubs
"""
import os, re, time
from sonLib.bioio import system
from toil.job import Job
from optparse import OptionGroup
from hal.assemblyHub.assemblyHubCommon import *
class LiftoverWigFiles( Job ):
def __init__(self, indir, halfile, genome2seq2len, bigwigdir, noLiftover, outdir):
Job.__init__(self)
self.indir = indir
self.halfile = halfile
self.genome2seq2len = genome2seq2len
self.bigwigdir = bigwigdir
self.noLiftover = noLiftover
self.outdir = outdir
def run(self, fileStore):
        #wigdir has the hierarchy: indir/genome/chr1.wig, chr2.wig...
        #for each genome in wigdir, lift over the wig records of that genome to the coordinates of all other genomes
        #liftover wig file of each genome with available wigs to all genomes
genomes = list(self.genome2seq2len.keys())
tempwigs = []
for genome in os.listdir(self.indir):
if genome not in genomes:
continue
genomeindir = os.path.join(self.indir, genome)
assert os.path.isdir(genomeindir)
#Create wig directory for current genome
genomeoutdir = os.path.join(self.bigwigdir, genome)
system("mkdir -p %s" %genomeoutdir)
#get all the wig files (".wig" ext)
wigfiles = getFilesByExt(genomeindir, "wig")
#Concatenate all the input wig files and convert it into bigwig to outdir/genome/genome.bw
tempwig = "%s-temp.wig" % os.path.join(genomeoutdir, genome)
system( "cat %s/*wig > %s" %(genomeindir, tempwig) )
if os.stat(tempwig).st_size > 0:#make sure the file is not empty
outbigwig = os.path.join(genomeoutdir, "%s.bw" %genome)
chrsizefile = os.path.join(self.outdir, genome, "chrom.sizes")
system("wigToBigWig %s %s %s" %(tempwig, chrsizefile, outbigwig))
#Liftover to all other genomes:
if not self.noLiftover:
for othergenome in genomes:
if othergenome != genome:
self.addChild( LiftoverWig(genomeoutdir, tempwig, genome, othergenome, self.halfile, self.outdir) )
tempwigs.append( tempwig )
self.addFollowOn( CleanupFiles(tempwigs) )
class LiftoverWig( Job ):
def __init__(self, genomeoutdir, wig, genome, othergenome, halfile, outdir):
Job.__init__(self)
self.genomeoutdir = genomeoutdir
self.wig = wig
self.genome = genome
self.othergenome = othergenome
self.halfile = halfile
self.outdir = outdir
def run(self, fileStore):
liftovertempwig = "%s.wig" % os.path.join(self.genomeoutdir, self.othergenome)
system("halWiggleLiftover %s %s %s %s %s" %(self.halfile, self.genome, self.wig, self.othergenome, liftovertempwig))
outbigwig = os.path.join(self.genomeoutdir, "%s.bw" %self.othergenome)
chrsizefile = os.path.join(self.outdir, self.othergenome, "chrom.sizes")
if os.stat(liftovertempwig).st_size > 0:#make sure the file is not empty
system("wigToBigWig %s %s %s" %(liftovertempwig, chrsizefile, outbigwig))
#Cleanup:
system("rm %s" % liftovertempwig)
#def writeTrackDb_bigwigs(f, bigwigdir, genomes, subgenomes, currgenome, properName):
def writeTrackDb_bigwigs(f, bigwigdir, genomes, currgenome, properName):
annotation = os.path.basename(bigwigdir)
genome2priority = {}
for i, genome in enumerate(genomes):
if genome == currgenome:
genome2priority[genome] = 1
else:
genome2priority[genome] = i + 2
for genome in os.listdir(bigwigdir):
bwfile = os.path.join(bigwigdir, genome, "%s.bw" %currgenome)
if not os.path.exists(bwfile):
continue
#start writing track
genomeProperName = genome
if genome in properName:
genomeProperName = properName[genome]
priority = 1
if genome in genome2priority:
priority = genome2priority[genome]
f.write("\t\ttrack %s%s\n" % (annotation, genome))
if genome == currgenome:
f.write("\t\tlongLabel %s %s\n" % (genomeProperName, annotation))
else:
f.write("\t\tlongLabel %s Lifted-over %s\n" % (genomeProperName, annotation))
f.write("\t\tpriority %d\n" %priority)
f.write("\t\tshortLabel %s%s\n" % (genomeProperName, annotation))
f.write("\t\tbigDataUrl ../liftoverwig/%s\n" % os.path.join( annotation, genome, "%s.bw" % currgenome ) )
f.write("\t\ttype bigWig\n")
f.write("\t\tgroup annotation%s\n" %annotation)
f.write("\t\titemRgb On\n")
#if genome == currgenome or genome in subgenomes:
if genome == currgenome:
f.write("\t\tvisibility dense\n")
f.write("\t\tparent hubCentral%s\n"%annotation)
else:
f.write("\t\tvisibility hide\n")
f.write("\t\tparent hubCentral%s off\n"%annotation)
f.write("\t\twindowingFunction Mean\n")
f.write("\t\tautoScale On\n")
f.write("\t\tmaxHeightPixels 128:36:16\n")
f.write("\t\tgraphTypeDefault Bar\n")
f.write("\t\tgridDefault OFF\n")
f.write("\t\tcolor 0,0,0\n")
f.write("\t\taltColor 128,128,128\n")
f.write("\t\tviewLimits 30:70\n")
f.write("\t\tsubGroups view=%s orgs=%s\n" %(annotation, genome))
f.write("\n")
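
# For reference (illustrative, reconstructed from the writes above), each track
# stanza emitted by writeTrackDb_bigwigs looks roughly like:
#   track <annotation><genome>
#   longLabel <genomeProperName> <annotation>
#   shortLabel <genomeProperName><annotation>
#   bigDataUrl ../liftoverwig/<annotation>/<genome>/<currgenome>.bw
#   type bigWig
#   group annotation<annotation>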
def addWigOptions(parser):
group = parser.add_argument_group("WIGGLE-FORMATTED ANNOTATIONS", "All annotations in wiggle or bigWig formats.")
    group.add_argument('--wigDirs', dest='wigdirs', help='comma separated list of directories containing wig files of the input genomes. Each directory represents a type of annotation. The annotations of each genome will then be lifted over to all other genomes in the MSA. Example: "genes,genomicIsland,tRNA". Format of each directory: wigDir/ then genome1/ then chr1.wig, chr2.wig... ' )
group.add_argument('--finalBigwigDirs', dest='bwdirs', help='comma separated list of directories containing final big wig files to be displayed. No liftover will be done for these files. Each directory represents a type of annotation. Example: "readCoverage,". Format of each directory: bwDir/ then queryGenome/ then targetGenome1.bw, targetGenome2.bw ... (so annotation of queryGenome has been mapped to targetGenomes and will be display on the targetGenome browsers). ' )
group.add_argument('--nowigLiftover', dest='noWigLiftover', action='store_true', default=False, help='If specified, will not lift over the wig annotations. ')
group = parser.add_argument_group(group)
def checkWigOptions(parser, options):
options.bigwigdirs = []
if options.wigdirs:
dirs = [d.rstrip('/') for d in options.wigdirs.split(',')]
options.wigdirs = dirs
for d in dirs:
if not os.path.exists(d) or not os.path.isdir(d):
parser.error("Wig directory %s does not exist or is not a directory.\n" %d)
if options.bwdirs:
dirs = [d.rstrip('/') for d in options.bwdirs.split(',')]
options.bwdirs = dirs
for d in dirs:
if not os.path.exists(d) or not os.path.isdir(d):
parser.error("Bigwig directory %s does not exist or is not a directory.\n" %d)
| mit | -1,007,999,811,680,412,400 | 47.727848 | 477 | 0.636446 | false |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter07/ts_timeSeriesFunctions.py | 1 | 1760 | import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# change the font size
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
matplotlib.rc('font', size=14)
# time series tools
import statsmodels.api as sm
# folder with data
data_folder = '../../Data/Chapter07/'
# colors
colors = ['#FF6600', '#000000', '#29407C', '#660000']
# read the data
riverFlows = pd.read_csv(data_folder + 'combined_flow.csv',
index_col=0, parse_dates=[0])
# autocorrelation function
acf = {} # to store the results
f = {}
for col in riverFlows.columns:
acf[col] = sm.tsa.stattools.acf(riverFlows[col])
# partial autocorrelation function
pacf = {}
for col in riverFlows.columns:
pacf[col] = sm.tsa.stattools.pacf(riverFlows[col])
# periodogram (spectral density)
sd = {}
for col in riverFlows.columns:
sd[col] = sm.tsa.stattools.periodogram(riverFlows[col])
# plot the data
fig, ax = plt.subplots(2, 3) # 2 rows and 3 columns
# set the size of the figure explicitly
fig.set_size_inches(12, 7)
# plot the charts for American
ax[0, 0].plot(acf['american_flow'], colors[0])
ax[0, 1].plot(pacf['american_flow'],colors[1])
ax[0, 2].plot(sd['american_flow'], colors[2])
ax[0, 2].yaxis.tick_right() # shows the numbers on the right
# plot the charts for Columbia
ax[1, 0].plot(acf['columbia_flow'], colors[0])
ax[1, 1].plot(pacf['columbia_flow'],colors[1])
ax[1, 2].plot(sd['columbia_flow'], colors[2])
ax[1, 2].yaxis.tick_right()
# set titles for columns
ax[0, 0].set_title('ACF')
ax[0, 1].set_title('PACF')
ax[0, 2].set_title('Spectral density')
# set titles for rows
ax[0, 0].set_ylabel('American')
ax[1, 0].set_ylabel('Columbia')
# save the chart
plt.savefig(data_folder + 'charts/acf_pacf_sd.png', dpi=300)
| gpl-2.0 | 3,228,647,342,116,916,700 | 24.142857 | 60 | 0.688636 | false |
Ictp/indico | indico/MaKaC/plugins/Collaboration/Vidyo/api/api.py | 1 | 4270 | # -*- coding: utf-8 -*-
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from MaKaC.common.logger import Logger
from MaKaC.plugins.Collaboration.Vidyo.api.client import AdminClient, UserClient
from suds import WebFault
from MaKaC.plugins.Collaboration.Vidyo.common import VidyoConnectionException
from urllib2 import URLError
AUTOMUTE_API_PROFILE = "NoAudioAndVideo"
class ApiBase(object):
""" Provides the _handleServiceCallException method
"""
@classmethod
def _handleServiceCallException(cls, e):
Logger.get("Vidyo").exception("Service call exception")
cause = e.args[0]
if type(cause) is tuple and cause[0] == 401:
raise VidyoConnectionException(e)
elif type(e) == URLError:
raise VidyoConnectionException(e)
else:
raise
@classmethod
def _api_operation(cls, service, *params, **kwargs):
try:
vidyoClient = cls.getVidyoClient()
except Exception, e:
raise VidyoConnectionException(e)
try:
return getattr(vidyoClient.service, service)(*params, **kwargs)
except WebFault, e:
raise
except Exception, e:
cls._handleServiceCallException(e)
class AdminApi(ApiBase):
""" This class performs low-level operations by getting the corresponding
client and calling a SOAP service.
We write info statements to the log with the details of what we are doing.
Each class method performs a single service call to Vidyo.
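
    Illustrative call pattern (sketch, not from the original module); the
    classmethods are used directly, e.g.::

        room = AdminApi.getRoom(room_id)
        AdminApi.setAutomute(room_id, True)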
"""
@classmethod
def getVidyoClient(cls):
return AdminClient.getInstance()
@classmethod
def addRoom(cls, newRoom):
return cls._api_operation('addRoom', newRoom)
@classmethod
def updateRoom(cls, roomId, updatedRoom):
return cls._api_operation('updateRoom', roomId, updatedRoom)
@classmethod
def getRooms(cls, searchFilter):
return cls._api_operation('getRooms', searchFilter)
@classmethod
def getRoom(cls, roomId):
return cls._api_operation('getRoom', roomId)
@classmethod
def deleteRoom(cls, roomId):
return cls._api_operation('deleteRoom', roomId)
@classmethod
def setAutomute(cls, roomId, enabled):
if enabled:
return cls._api_operation('setRoomProfile', roomId, AUTOMUTE_API_PROFILE)
else:
return cls._api_operation('removeRoomProfile', roomId)
@classmethod
def getAutomute(cls, roomId):
answer = cls._api_operation('getRoomProfile', roomId)
if answer is None or answer == "":
return False
return answer.roomProfileName == AUTOMUTE_API_PROFILE
@classmethod
def setModeratorPIN(cls, roomId, moderatorPIN):
if moderatorPIN:
return cls._api_operation('createModeratorPIN', roomId, moderatorPIN)
else:
return cls._api_operation('removeModeratorPIN', roomId)
@classmethod
def connectRoom(cls, roomId, legacyMember):
return cls._api_operation('inviteToConference', roomId, entityID=legacyMember)
class UserApi(ApiBase):
""" This class performs low-level operations by getting the corresponding
client and calling a SOAP service.
We write info statements to the log with the details of what we are doing.
"""
@classmethod
def getVidyoClient(cls):
return UserClient.getInstance()
@classmethod
def search(cls, searchFilter):
return cls._api_operation('search', searchFilter)
| gpl-3.0 | 1,933,884,575,633,309,700 | 32.359375 | 86 | 0.679625 | false |
jon-jacky/PyModel | samples/Marquee/fsmpy/PeriodFiveFSM1.py | 1 | 1330 |
# pma.py --maxTransitions 100 --output PeriodFiveFSM1 LoadFirst Marquee DisplayFive
# 6 states, 6 transitions, 6 accepting states, 0 unsafe states, 0 finished and 0 deadend states
# actions here are just labels, but must be symbols with __name__ attribute
def Load(): pass
def Shift(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'LoadFirst': 0, 'Marquee': {'display': '* * * * * * * * * * * * * '}},
1 : {'Marquee': {'display': 'Bye Bye Bye Bye Bye '}, 'LoadFirst': 1},
2 : {'Marquee': {'display': 'ye Bye Bye Bye Bye B'}, 'LoadFirst': 1},
3 : {'Marquee': {'display': 'e Bye Bye Bye Bye By'}, 'LoadFirst': 1},
4 : {'Marquee': {'display': ' Bye Bye Bye Bye Bye'}, 'LoadFirst': 1},
5 : {'Marquee': {'display': ' Bye Bye Bye Bye Bye '}, 'LoadFirst': 1},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3, 4, 5]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Load, ('Bye Bye Bye Bye Bye ',), None), 1),
(1, (Shift, (), None), 2),
(2, (Shift, (), None), 3),
(3, (Shift, (), None), 4),
(4, (Shift, (), None), 5),
(5, (Shift, (), None), 1),
)
| bsd-3-clause | 97,296,459,689,789,100 | 32.25 | 95 | 0.581955 | false |
isb-cgc/ISB-CGC-Webapp | bq_data_access/v2/seqpeek/seqpeek_view.py | 1 | 7709 | #
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import map
from builtins import str
from builtins import object
from copy import deepcopy
import logging
from bq_data_access.v2.seqpeek.seqpeek_interpro import InterProDataProvider
logger = logging.getLogger('main_logger')
SAMPLE_ID_FIELD_NAME = 'sample_id'
TRACK_ID_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'uniprot_aapos'
PROTEIN_ID_FIELD = 'ensg_id'
PROTEIN_DOMAIN_DB = 'PFAM'
SEQPEEK_VIEW_DEBUG_MODE = False
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
def get_number_of_mutated_positions(track):
sample_locations = set()
for mutation in track['mutations']:
sample_locations.add(mutation[COORDINATE_FIELD_NAME])
return len(sample_locations)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
return result
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks):
all = []
for track in tracks:
all.extend(track["mutations"])
return {
'mutations': all,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
}
def get_track_label_and_cohort_information(track_id_value, cohort_info_map):
cohort_info = cohort_info_map[track_id_value]
label = cohort_info['name']
cohort_size = cohort_info['size']
return label, cohort_size
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
return cohort_map[track[TRACK_ID_FIELD]]
def get_protein_domains(uniprot_id):
protein = InterProDataProvider().get_data(uniprot_id)
return protein
class MAFData(object):
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
self.data = data
@classmethod
def from_dict(cls, param):
return cls(param['cohort_set'], param['items'])
def build_track_data(track_id_list, all_tumor_mutations):
tracks = []
for track_id in track_id_list:
tracks.append({
TRACK_ID_FIELD: track_id,
'mutations': [m for m in all_tumor_mutations if int(track_id) in set(m['cohort'])]
})
return tracks
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
return uniprot_id
def get_genes_tumors_lists_debug():
return {
'symbol_list': ['EGFR', 'TP53', 'PTEN'],
'disease_codes': ['ACC', 'BRCA', 'GBM']
}
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
return context
def get_genes_tumors_lists():
if SEQPEEK_VIEW_DEBUG_MODE:
return get_genes_tumors_lists_debug()
else:
return get_genes_tumors_lists_remote()
def get_track_id_list(param):
return list(map(str, param))
def format_removed_row_statistics_to_list(stats_dict):
result = []
for key, value in list(stats_dict.items()):
result.append({
'name': key,
'num': value
})
return result
class SeqPeekViewDataBuilder(object):
def build_view_data(self, hugo_symbol, filtered_maf_vector, seqpeek_cohort_info, cohort_id_list, removed_row_statistics, tables_used):
context = get_genes_tumors_lists()
cohort_info_map = {str(item['id']): item for item in seqpeek_cohort_info}
track_id_list = get_track_id_list(cohort_id_list)
# Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
# it will be sanity-checked in the SeqPeekMAFDataAccess instance.
uniprot_id = find_uniprot_id(filtered_maf_vector)
logging.info("UniProt ID: " + str(uniprot_id))
protein_data = get_protein_domains(uniprot_id)
track_data = build_track_data(track_id_list, filtered_maf_vector)
plot_data = {
'gene_label': hugo_symbol,
'tracks': track_data,
'protein': protein_data
}
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
label, cohort_size = get_track_label_and_cohort_information(track[TRACK_ID_FIELD], cohort_info_map)
track['label'] = label
# Display the "combined" track only if more than one cohort is visualized
if len(cohort_id_list) >= 2:
plot_data['tracks'].append(build_summary_track(plot_data['tracks']))
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics_by_track_type(track, cohort_info_map)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TRACK_ID_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
tumor_list = ','.join(track_id_list)
context.update({
'plot_data': plot_data,
'hugo_symbol': hugo_symbol,
'tumor_list': tumor_list,
'cohort_id_list': track_id_list,
'removed_row_statistics': format_removed_row_statistics_to_list(removed_row_statistics),
'bq_tables': list(set(tables_used))
})
return context
| apache-2.0 | -1,271,494,110,282,073,900 | 27.764925 | 138 | 0.634064 | false |
marcosbontempo/inatelos | poky-daisy/bitbake/lib/bb/server/process.py | 1 | 7728 | #
# BitBake Process based server.
#
# Copyright (C) 2010 Bob Foerster <robert@erafx.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module implements a multiprocessing.Process based server for bitbake.
"""
import bb
import bb.event
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import time
import select
from Queue import Empty
from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
logger = logging.getLogger('BitBake')
class ServerCommunicator():
def __init__(self, connection, event_handle):
self.connection = connection
self.event_handle = event_handle
def runCommand(self, command):
# @todo try/except
self.connection.send(command)
while True:
# don't let the user ctrl-c while we're waiting for a response
try:
if self.connection.poll(20):
return self.connection.recv()
else:
bb.fatal("Timeout while attempting to communicate with bitbake server")
except KeyboardInterrupt:
pass
def getEventHandle(self):
return self.event_handle.value
class EventAdapter():
"""
Adapter to wrap our event queue since the caller (bb.event) expects to
call a send() method, but our actual queue only has put()
"""
def __init__(self, queue):
self.queue = queue
def send(self, event):
try:
self.queue.put(event)
except Exception as err:
print("EventAdapter puked: %s" % str(err))
class ProcessServer(Process, BaseImplServer):
profile_filename = "profile.log"
profile_processed_filename = "profile.log.processed"
def __init__(self, command_channel, event_queue, featurelist):
BaseImplServer.__init__(self)
Process.__init__(self)
self.command_channel = command_channel
self.event_queue = event_queue
self.event = EventAdapter(event_queue)
self.featurelist = featurelist
self.quit = False
self.quitin, self.quitout = Pipe()
self.event_handle = multiprocessing.Value("i")
def run(self):
for event in bb.event.ui_queue:
self.event_queue.put(event)
self.event_handle.value = bb.event.register_UIHhandler(self)
bb.cooker.server_main(self.cooker, self.main)
def main(self):
# Ignore SIGINT within the server, as all SIGINT handling is done by
# the UI and communicated to us
self.quitin.close()
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not self.quit:
try:
if self.command_channel.poll():
command = self.command_channel.recv()
self.runCommand(command)
if self.quitout.poll():
self.quitout.recv()
self.quit = True
self.idle_commands(.1, [self.event_queue._reader, self.command_channel, self.quitout])
except Exception:
logger.exception('Running command %s', command)
self.event_queue.close()
bb.event.unregister_UIHhandler(self.event_handle.value)
self.command_channel.close()
self.cooker.shutdown(True)
def idle_commands(self, delay, fds = []):
nextsleep = delay
for function, data in self._idlefuns.items():
try:
retval = function(self, data, False)
if retval is False:
del self._idlefuns[function]
nextsleep = None
elif retval is True:
nextsleep = None
elif nextsleep is None:
continue
else:
fds = fds + retval
except SystemExit:
raise
except Exception:
logger.exception('Running idle function')
if nextsleep is not None:
select.select(fds,[],[],nextsleep)
def runCommand(self, command):
"""
Run a cooker command on the server
"""
self.command_channel.send(self.cooker.command.runCommand(command))
def stop(self):
self.quitin.send("quit")
self.quitin.close()
class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
def __init__(self, serverImpl, ui_channel, event_queue):
self.procserver = serverImpl
self.ui_channel = ui_channel
self.event_queue = event_queue
self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle)
self.events = self.event_queue
def sigterm_terminate(self):
bb.error("UI received SIGTERM")
self.terminate()
def terminate(self):
def flushevents():
while True:
try:
event = self.event_queue.get(block=False)
except (Empty, IOError):
break
if isinstance(event, logging.LogRecord):
logger.handle(event)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.procserver.stop()
while self.procserver.is_alive():
flushevents()
self.procserver.join(0.1)
self.ui_channel.close()
self.event_queue.close()
self.event_queue.setexit()
# Wrap Queue to provide API which isn't server implementation specific
class ProcessEventQueue(multiprocessing.queues.Queue):
def __init__(self, maxsize):
multiprocessing.queues.Queue.__init__(self, maxsize)
self.exit = False
def setexit(self):
self.exit = True
def waitEvent(self, timeout):
if self.exit:
raise KeyboardInterrupt()
try:
return self.get(True, timeout)
except Empty:
return None
def getEvent(self):
try:
return self.get(False)
except Empty:
return None
class BitBakeServer(BitBakeBaseServer):
def initServer(self):
# establish communication channels. We use bidirectional pipes for
# ui <--> server command/response pairs
# and a queue for server -> ui event notifications
#
self.ui_channel, self.server_channel = Pipe()
self.event_queue = ProcessEventQueue(0)
self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
def detach(self):
self.serverImpl.start()
return
def establishConnection(self, featureset):
self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)
_, error = self.connection.connection.runCommand(["setFeatures", featureset])
if error:
logger.error("Unable to set the cooker to the correct featureset: %s" % error)
raise BaseException(error)
signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
return self.connection
| mit | 239,827,762,069,532,380 | 31.745763 | 108 | 0.618659 | false |
demelin/learning_reinforcement_learning | recurrent_deep_q_network/training_session.py | 1 | 8324 | import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import LSTMCell
from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture
import gym
import universe
tf.reset_default_graph()
env = gym.make('Pong-v0')
# Network constants
FILTER_DIMS = [[8, 8], [4, 4], [3, 3], [6, 6]]
FILTER_NUMS = [32, 64, 64, 512]
STRIDES = [[4, 4], [2, 2], [1, 1], [1, 1]]
HIDDEN_SIZE = 512
ACTION_NUM = 6 # According to documentation
LEARNING_RATE = 1e-4
BUFFER_SIZE = 1000
# Session constants
BATCH_SIZE = 4
TRACE_LENGTH = 8
UPDATE_FREQ = 5
TAU = 0.99 # Discount factor on target Q-values
START_RAND = 1.0
END_RAND = 0.1
ANN_STEPS = 10000
NUM_EPISODES = 10000
PRE_TRAIN_STEPS = 10000
LOAD_MODEL = False
PATH = os.curdir + '/rdqn/model'
MAX_EPISODE_LENGTH = 50
SUMMARY_LENGTH = 100
SAVING_FREQ = 10000
# Defines cells to be used in the actor and the target network
actor_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
target_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
# Initialize networks and buffer
actor_qn = MentorAgent(HIDDEN_SIZE, actor_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'actor', ACTION_NUM, LEARNING_RATE)
target_qn = \
MentorAgent(HIDDEN_SIZE, target_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'target', ACTION_NUM, LEARNING_RATE)
session_buffer = ExperienceBuffer(BUFFER_SIZE)
# Define target_qn update OPs to be used in the session (tf.trainable_variables() operates on the graph)
tvars = tf.trainable_variables()
actor_tvars, target_tvars = tvars[:len(tvars)//2], tvars[len(tvars)//2:]
target_ops = update_target_graph(actor_tvars, target_tvars, TAU)
saver = tf.train.Saver(max_to_keep=5)
# Scheduling e-greedy exploration
epsilon = START_RAND
drop_per_step = (START_RAND - END_RAND) / ANN_STEPS
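# With the values above, epsilon stays at START_RAND for the first PRE_TRAIN_STEPS
# random steps and then falls linearly by (1.0 - 0.1) / 10000 = 9e-5 per environment
# step, reaching END_RAND after ANN_STEPS further steps.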
# Initialize tracking variables
steps_per_episode = list()
total_rewards = list()
total_steps = 0
# Make path for model saving
if not os.path.exists(PATH):
os.makedirs(PATH)
# Start the session
with tf.Session() as sess:
if LOAD_MODEL:
print('Loading model ... ')
checkpoint = tf.train.get_checkpoint_state(PATH)
saver.restore(sess, checkpoint.model_checkpoint_path)
sess.run(tf.global_variables_initializer())
# Set target network equal to the agent network
perform_update(target_ops, sess)
# Manage summaries
merged = tf.summary.merge_all()
training_writer = tf.summary.FileWriter('./train', sess.graph)
# Enter training loop
for i in range(NUM_EPISODES):
# Keep track of episodes and steps completed
print('Episode %d | Total steps taken: %d' % (i, total_steps))
episode_buffer = list()
# Get new observations
env_state = env.reset()
proc_env_state = process_capture(env_state)
done = False
running_reward = 0
step = 0
# Reset RNN hidden state
rnn_state = (np.zeros([1, HIDDEN_SIZE]), np.zeros([1, HIDDEN_SIZE]))
# Enter the Q-Network loop (play until a single game is completed, alternatively uncomment for max_ep_len)
# while step < MAX_EPISODE_LENGTH:
while True:
# step += 1
feed_dict = {actor_qn.scalar_input: proc_env_state, actor_qn.trace_length: 1,
actor_qn.state_in: rnn_state, actor_qn.batch_size: 1}
# Choose action following the e-greedy strategy
if np.random.rand(1) < epsilon or total_steps < PRE_TRAIN_STEPS:
# Take a random action
rnn_state_1 = sess.run(actor_qn.final_state, feed_dict=feed_dict)
action = np.random.randint(0, 3)
else:
# Obtain action from model
action, rnn_state_1 = sess.run([actor_qn.prediction, actor_qn.final_state], feed_dict=feed_dict)
action = action[0]
# Take a step in the environment
env_state_1, reward, done, _ = env.step(action)
proc_env_state_1 = process_capture(env_state_1)
total_steps += 1
# Add interaction to the episode buffer
episode_buffer.append(np.reshape([proc_env_state, action, reward, proc_env_state_1, done], [1, 5]))
            # Begin annealing epsilon towards exploitation once the pure-exploration phase is over
if total_steps > PRE_TRAIN_STEPS:
if epsilon > END_RAND:
epsilon -= drop_per_step
# Update target network
if total_steps % (UPDATE_FREQ * 1000) == 0:
perform_update(target_ops, sess)
# Update agent network
if total_steps % UPDATE_FREQ == 0:
# Reset the RNN hidden state
rnn_state_train = (np.zeros([BATCH_SIZE, HIDDEN_SIZE]), np.zeros([BATCH_SIZE, HIDDEN_SIZE]))
# Get random batch of experiences from the experience buffer
train_batch = session_buffer.sample_experience(BATCH_SIZE, TRACE_LENGTH)
# Perform the Double-DQN update to the target Q-values
# Agent network
q_1 = sess.run(actor_qn.prediction,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Target network
q_2 = sess.run(target_qn.q_out,
feed_dict={target_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
target_qn.trace_length: TRACE_LENGTH,
target_qn.state_in: rnn_state_train,
target_qn.batch_size: BATCH_SIZE})
# Exclude final steps in each episode
end_multiplier = np.abs(train_batch[:, 4] - 1)
# Select q-values from target network based on actions predicted by the agent network
double_q = q_2[range(BATCH_SIZE * TRACE_LENGTH), q_1]
                    # See target-Q double-DQN update equation
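                    # Spelled out: target_q = r + TAU * Q_target(s', argmax_a Q_actor(s', a)),
                    # with end_multiplier zeroing the bootstrap term for terminal
                    # transitions (train_batch[:, 4] holds the 'done' flags).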
target_q = train_batch[:, 2] + (TAU * double_q * end_multiplier)
# Update agent network with the so obtained target_q values
_ = sess.run(actor_qn.update_model,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 0]) / 255.0),
actor_qn.target_q_holder: target_q,
actor_qn.action_holder: train_batch[:, 1],
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Update environment interaction variables
running_reward += reward
proc_env_state = proc_env_state_1
env_state = env_state_1
rnn_state = rnn_state_1
# Terminate episode once done
if done:
break
# Add episode to the experience buffer
buffer_array = np.array(episode_buffer)
# episode_buffer = zip(buffer_array)
session_buffer.add_experience(buffer_array, TRACE_LENGTH, buffer_array.shape[0])
# Update tracking lists
steps_per_episode.append(step)
total_rewards.append(running_reward)
# Save model periodically
if i % SAVING_FREQ == 0 and i != 0:
saver.save(sess, PATH + '/model-' + str(i) + '.cptk')
print('Model saved after %d steps!' % i)
# Report on the training performance of the actor network
if i % SUMMARY_LENGTH == 0 and i != 0:
print('Episode: %d | Steps taken: %d | Average episodic reward: %.4f | epsilon value: %.4f'
% (i, total_steps, np.mean(total_rewards[-SUMMARY_LENGTH:]), epsilon))
# Save final model
saver.save(sess, PATH + '/model-final' + '.cptk')
| mit | -2,580,441,314,753,550,300 | 43.042328 | 118 | 0.575925 | false |
qsantos/crpyt | digests/md2.py | 1 | 3065 | # crpyt: toy cryptographic python library
# Copyright (C) 2014 Quentin SANTOS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# END LICENCE
from digest import Digest
# Reference: RFC 1319
# Note: "Set C[j] to S[c xor L]." should be "Set C[j] to C[j] xor S[c xor L]."
class MD2(Digest):
def __init__(self):
super(MD2,self).__init__(16, 1)
self.C = [0] * 16
self.X = [0] * 16
def pad(self,l):
rem = self.blocksize - (l%self.blocksize)
return [rem]*rem
def final(self):
self.round(self.C)
return self.X
def round(self, block):
X = self.X + block + [xi ^ bi for (xi,bi) in zip(self.X,block)]
        t = 0
for i in range(18):
for j in range(48):
X[j] ^= self.S[t]
t = X[j]
t = (t+i) & 0xff
self.X = X[:16]
def block(self, block):
        L = self.C[15]
for i in range(16):
self.C[i] ^= self.S[block[i] ^ L]
L = self.C[i]
self.round(block)
S = [
0x29, 0x2e, 0x43, 0xc9, 0xa2, 0xd8, 0x7c, 0x01, 0x3d, 0x36, 0x54, 0xa1, 0xec, 0xf0, 0x06, 0x13,
0x62, 0xa7, 0x05, 0xf3, 0xc0, 0xc7, 0x73, 0x8c, 0x98, 0x93, 0x2b, 0xd9, 0xbc, 0x4c, 0x82, 0xca,
0x1e, 0x9b, 0x57, 0x3c, 0xfd, 0xd4, 0xe0, 0x16, 0x67, 0x42, 0x6f, 0x18, 0x8a, 0x17, 0xe5, 0x12,
0xbe, 0x4e, 0xc4, 0xd6, 0xda, 0x9e, 0xde, 0x49, 0xa0, 0xfb, 0xf5, 0x8e, 0xbb, 0x2f, 0xee, 0x7a,
0xa9, 0x68, 0x79, 0x91, 0x15, 0xb2, 0x07, 0x3f, 0x94, 0xc2, 0x10, 0x89, 0x0b, 0x22, 0x5f, 0x21,
0x80, 0x7f, 0x5d, 0x9a, 0x5a, 0x90, 0x32, 0x27, 0x35, 0x3e, 0xcc, 0xe7, 0xbf, 0xf7, 0x97, 0x03,
0xff, 0x19, 0x30, 0xb3, 0x48, 0xa5, 0xb5, 0xd1, 0xd7, 0x5e, 0x92, 0x2a, 0xac, 0x56, 0xaa, 0xc6,
0x4f, 0xb8, 0x38, 0xd2, 0x96, 0xa4, 0x7d, 0xb6, 0x76, 0xfc, 0x6b, 0xe2, 0x9c, 0x74, 0x04, 0xf1,
0x45, 0x9d, 0x70, 0x59, 0x64, 0x71, 0x87, 0x20, 0x86, 0x5b, 0xcf, 0x65, 0xe6, 0x2d, 0xa8, 0x02,
0x1b, 0x60, 0x25, 0xad, 0xae, 0xb0, 0xb9, 0xf6, 0x1c, 0x46, 0x61, 0x69, 0x34, 0x40, 0x7e, 0x0f,
0x55, 0x47, 0xa3, 0x23, 0xdd, 0x51, 0xaf, 0x3a, 0xc3, 0x5c, 0xf9, 0xce, 0xba, 0xc5, 0xea, 0x26,
0x2c, 0x53, 0x0d, 0x6e, 0x85, 0x28, 0x84, 0x09, 0xd3, 0xdf, 0xcd, 0xf4, 0x41, 0x81, 0x4d, 0x52,
0x6a, 0xdc, 0x37, 0xc8, 0x6c, 0xc1, 0xab, 0xfa, 0x24, 0xe1, 0x7b, 0x08, 0x0c, 0xbd, 0xb1, 0x4a,
0x78, 0x88, 0x95, 0x8b, 0xe3, 0x63, 0xe8, 0x6d, 0xe9, 0xcb, 0xd5, 0xfe, 0x3b, 0x00, 0x1d, 0x39,
0xf2, 0xef, 0xb7, 0x0e, 0x66, 0x58, 0xd0, 0xe4, 0xa6, 0x77, 0x72, 0xf8, 0xeb, 0x75, 0x4b, 0x0a,
0x31, 0x44, 0x50, 0xb4, 0x8f, 0xed, 0x1f, 0x1a, 0xdb, 0x99, 0x8d, 0x33, 0x9f, 0x11, 0x83, 0x14,
]
| gpl-3.0 | 9,166,256,507,868,525,000 | 42.169014 | 98 | 0.651876 | false |
Applied-GeoSolutions/gips | gips/test/sys/expected/landsat_export.py | 1 | 15384 | from collections import OrderedDict
expectations = OrderedDict([
# t_project[landsat-cloudmask] recording:
('cloudmask',
[('0/2017213_LC8_cloudmask.tif',
'raster',
'gdalinfo-stats',
['Driver: GTiff/GeoTIFF',
'Size is 474, 657',
'Coordinate System is:',
'PROJCS["WGS 84 / UTM zone 16N",',
' GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.25722356,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.01745329,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]],',
' PROJECTION["Transverse_Mercator"],',
' PARAMETER["latitude_of_origin",0],',
' PARAMETER["central_meridian",-87],',
' PARAMETER["scale_factor",0.9996],',
' PARAMETER["false_easting",500000],',
' PARAMETER["false_northing",0],',
' UNIT["metre",1,',
' AUTHORITY["EPSG","9001"]],',
' AXIS["Easting",EAST],',
' AXIS["Northing",NORTH],',
' AUTHORITY["EPSG","32616"]]',
'Origin = (1777798.52615334,4934269.96318020)',
'Pixel Size = (100.00000000,-100.00000000)',
'Metadata:',
' AREA_OR_POINT=Point',
' GIPS_C1_DILATED_PIXELS=20',
' GIPS_LANDSAT_CLOUDMASK_CLEAR_OR_NODATA_VALUE=0',
' GIPS_LANDSAT_CLOUDMASK_CLOUD_VALUE=1',
' GIPS_Landsat_Version=1.0.1',
' GIPS_Source_Assets=LC08_L1TP_012030_20170801_20170811_01_T1.tar.gz',
' GIPS_Version=0.0.0-dev',
'Image Structure Metadata:',
' INTERLEAVE=BAND',
'Corner Coordinates:',
'Upper Left ( 1777798.526, 4934269.963) ( 71d12\'51.63"W, 43d27\'26.11"N)',
'Lower Left ( 1777798.526, 4868569.963) ( 71d21\'50.79"W, 42d53\'16.01"N)',
'Upper Right ( 1825198.526, 4934269.963) ( 70d39\' 6.29"W, 43d22\'33.82"N)',
'Lower Right ( 1825198.526, 4868569.963) ( 70d48\'22.61"W, 42d48\'29.43"N)',
'Center ( 1801498.526, 4901419.963) ( 71d 0\'33.78"W, 43d 7\'57.74"N)',
'Band 1 Block=474x17 Type=Byte, ColorInterp=Gray',
' Minimum=1.000, Maximum=1.000, Mean=1.000, StdDev=0.000',
' NoData Value=0',
' Metadata:',
' STATISTICS_MAXIMUM=1',
' STATISTICS_MEAN=1',
' STATISTICS_MINIMUM=1',
' STATISTICS_STDDEV=0',
' STATISTICS_VALID_PERCENT=30.31'])]),
# t_project[landsat-ndvi-toa] recording:
('ndvi-toa',
[('0/2017213_LC8_ndvi-toa.tif',
'raster',
'gdalinfo-stats',
['Driver: GTiff/GeoTIFF',
'Size is 474, 657',
'Coordinate System is:',
'PROJCS["WGS 84 / UTM zone 16N",',
' GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.25722356,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.01745329,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]],',
' PROJECTION["Transverse_Mercator"],',
' PARAMETER["latitude_of_origin",0],',
' PARAMETER["central_meridian",-87],',
' PARAMETER["scale_factor",0.9996],',
' PARAMETER["false_easting",500000],',
' PARAMETER["false_northing",0],',
' UNIT["metre",1,',
' AUTHORITY["EPSG","9001"]],',
' AXIS["Easting",EAST],',
' AXIS["Northing",NORTH],',
' AUTHORITY["EPSG","32616"]]',
'Origin = (1777798.52615334,4934269.96318020)',
'Pixel Size = (100.00000000,-100.00000000)',
'Metadata:',
' AREA_OR_POINT=Point',
' GIPS_Landsat_Version=1.0.1',
' GIPS_Source_Assets=LC08_L1TP_012030_20170801_20170811_01_T1.tar.gz',
' GIPS_Version=0.0.0-dev',
'Image Structure Metadata:',
' INTERLEAVE=BAND',
'Corner Coordinates:',
'Upper Left ( 1777798.526, 4934269.963) ( 71d12\'51.63"W, 43d27\'26.11"N)',
'Lower Left ( 1777798.526, 4868569.963) ( 71d21\'50.79"W, 42d53\'16.01"N)',
'Upper Right ( 1825198.526, 4934269.963) ( 70d39\' 6.29"W, 43d22\'33.82"N)',
'Lower Right ( 1825198.526, 4868569.963) ( 70d48\'22.61"W, 42d48\'29.43"N)',
'Center ( 1801498.526, 4901419.963) ( 71d 0\'33.78"W, 43d 7\'57.74"N)',
'Band 1 Block=474x8 Type=Int16, ColorInterp=Gray',
' Minimum=-4917.000, Maximum=8575.000, Mean=6404.952, StdDev=2230.297',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=8575',
' STATISTICS_MEAN=6404.95164481',
' STATISTICS_MINIMUM=-4917',
' STATISTICS_STDDEV=2230.29720297',
' STATISTICS_VALID_PERCENT=70.92'])]),
# t_project[landsat-rad-toa] recording:
('rad-toa',
[('0/2017213_LC8_rad-toa.tif',
'raster',
'gdalinfo-stats',
['Driver: GTiff/GeoTIFF',
'Size is 474, 657',
'Coordinate System is:',
'PROJCS["WGS 84 / UTM zone 16N",',
' GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.25722356,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.01745329,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]],',
' PROJECTION["Transverse_Mercator"],',
' PARAMETER["latitude_of_origin",0],',
' PARAMETER["central_meridian",-87],',
' PARAMETER["scale_factor",0.9996],',
' PARAMETER["false_easting",500000],',
' PARAMETER["false_northing",0],',
' UNIT["metre",1,',
' AUTHORITY["EPSG","9001"]],',
' AXIS["Easting",EAST],',
' AXIS["Northing",NORTH],',
' AUTHORITY["EPSG","32616"]]',
'Origin = (1777798.52615334,4934269.96318020)',
'Pixel Size = (100.00000000,-100.00000000)',
'Metadata:',
' AREA_OR_POINT=Point',
' GIPS_Landsat_Version=1.0.1',
' GIPS_Source_Assets=LC08_L1TP_012030_20170801_20170811_01_T1.tar.gz',
' GIPS_Version=0.0.0-dev',
'Image Structure Metadata:',
' INTERLEAVE=PIXEL',
'Corner Coordinates:',
'Upper Left ( 1777798.526, 4934269.963) ( 71d12\'51.63"W, 43d27\'26.11"N)',
'Lower Left ( 1777798.526, 4868569.963) ( 71d21\'50.79"W, 42d53\'16.01"N)',
'Upper Right ( 1825198.526, 4934269.963) ( 70d39\' 6.29"W, 43d22\'33.82"N)',
'Lower Right ( 1825198.526, 4868569.963) ( 70d48\'22.61"W, 42d48\'29.43"N)',
'Center ( 1801498.526, 4901419.963) ( 71d 0\'33.78"W, 43d 7\'57.74"N)',
'Band 1 Block=474x1 Type=Int16, ColorInterp=Gray',
' Minimum=562.000, Maximum=4147.000, Mean=725.405, StdDev=287.605',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=4147',
' STATISTICS_MEAN=725.40467748',
' STATISTICS_MINIMUM=562',
' STATISTICS_STDDEV=287.60460578',
' STATISTICS_VALID_PERCENT=70.92',
'Band 2 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=438.000, Maximum=4464.000, Mean=619.673, StdDev=322.619',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=4464',
' STATISTICS_MEAN=619.67331386',
' STATISTICS_MINIMUM=438',
' STATISTICS_STDDEV=322.61922452',
' STATISTICS_VALID_PERCENT=70.92',
'Band 3 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=256.000, Maximum=4239.000, Mean=488.291, StdDev=310.536',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=4239',
' STATISTICS_MEAN=488.29148045',
' STATISTICS_MINIMUM=256',
' STATISTICS_STDDEV=310.53600490',
' STATISTICS_VALID_PERCENT=70.92',
'Band 4 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=126.000, Maximum=3896.000, Mean=310.746, StdDev=302.410',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=3896',
' STATISTICS_MEAN=310.74627906',
' STATISTICS_MINIMUM=126',
' STATISTICS_STDDEV=302.41045601',
' STATISTICS_VALID_PERCENT=70.92',
'Band 5 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=47.000, Maximum=2759.000, Mean=896.030, StdDev=289.645',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=2759',
' STATISTICS_MEAN=896.02961805',
' STATISTICS_MINIMUM=47',
' STATISTICS_STDDEV=289.64489382',
' STATISTICS_VALID_PERCENT=70.92',
'Band 6 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=-2.000, Maximum=495.000, Mean=105.961, StdDev=53.245',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=495',
' STATISTICS_MEAN=105.96076886',
' STATISTICS_MINIMUM=-2',
' STATISTICS_STDDEV=53.24468526',
' STATISTICS_VALID_PERCENT=70.92',
'Band 7 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=0.000, Maximum=135.000, Mean=17.222, StdDev=15.024',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=135',
' STATISTICS_MEAN=17.22196563',
' STATISTICS_MINIMUM=0',
' STATISTICS_STDDEV=15.02411311',
' STATISTICS_VALID_PERCENT=70.92',
'Band 8 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=0.000, Maximum=15.000, Mean=1.736, StdDev=0.682',
' NoData Value=-32768',
' Offset: 0, Scale:0.1',
' Metadata:',
' STATISTICS_MAXIMUM=15',
' STATISTICS_MEAN=1.73628110',
' STATISTICS_MINIMUM=0',
' STATISTICS_STDDEV=0.68213754',
' STATISTICS_VALID_PERCENT=70.92'])]),
# t_project[landsat-ref-toa] recording:
('ref-toa',
[('0/2017213_LC8_ref-toa.tif',
'raster',
'gdalinfo-stats',
['Driver: GTiff/GeoTIFF',
'Size is 474, 657',
'Coordinate System is:',
'PROJCS["WGS 84 / UTM zone 16N",',
' GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.25722356,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.01745329,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]],',
' PROJECTION["Transverse_Mercator"],',
' PARAMETER["latitude_of_origin",0],',
' PARAMETER["central_meridian",-87],',
' PARAMETER["scale_factor",0.9996],',
' PARAMETER["false_easting",500000],',
' PARAMETER["false_northing",0],',
' UNIT["metre",1,',
' AUTHORITY["EPSG","9001"]],',
' AXIS["Easting",EAST],',
' AXIS["Northing",NORTH],',
' AUTHORITY["EPSG","32616"]]',
'Origin = (1777798.52615334,4934269.96318020)',
'Pixel Size = (100.00000000,-100.00000000)',
'Metadata:',
' AREA_OR_POINT=Point',
' GIPS_Landsat_Version=1.0.1',
' GIPS_Source_Assets=LC08_L1TP_012030_20170801_20170811_01_T1.tar.gz',
' GIPS_Version=0.0.0-dev',
'Image Structure Metadata:',
' INTERLEAVE=PIXEL',
'Corner Coordinates:',
'Upper Left ( 1777798.526, 4934269.963) ( 71d12\'51.63"W, 43d27\'26.11"N)',
'Lower Left ( 1777798.526, 4868569.963) ( 71d21\'50.79"W, 42d53\'16.01"N)',
'Upper Right ( 1825198.526, 4934269.963) ( 70d39\' 6.29"W, 43d22\'33.82"N)',
'Lower Right ( 1825198.526, 4868569.963) ( 70d48\'22.61"W, 42d48\'29.43"N)',
'Center ( 1801498.526, 4901419.963) ( 71d 0\'33.78"W, 43d 7\'57.74"N)',
'Band 1 Block=474x1 Type=Int16, ColorInterp=Gray',
' Minimum=803.000, Maximum=5924.000, Mean=1036.177, StdDev=410.819',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=5924',
' STATISTICS_MEAN=1036.17671217',
' STATISTICS_MINIMUM=803',
' STATISTICS_STDDEV=410.81863373',
' STATISTICS_VALID_PERCENT=70.92',
'Band 2 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=813.000, Maximum=8283.000, Mean=1149.791, StdDev=598.620',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=8283',
' STATISTICS_MEAN=1149.79089859',
' STATISTICS_MINIMUM=813',
' STATISTICS_STDDEV=598.62029772',
' STATISTICS_VALID_PERCENT=70.92',
'Band 3 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=529.000, Maximum=8772.000, Mean=1010.495, StdDev=642.640',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=8772',
' STATISTICS_MEAN=1010.49513459',
' STATISTICS_MINIMUM=529',
' STATISTICS_STDDEV=642.64002913',
' STATISTICS_VALID_PERCENT=70.92',
'Band 4 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=229.000, Maximum=7074.000, Mean=564.253, StdDev=549.118',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=7074',
' STATISTICS_MEAN=564.25311870',
' STATISTICS_MINIMUM=229',
' STATISTICS_STDDEV=549.11800470',
' STATISTICS_VALID_PERCENT=70.92',
'Band 5 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=139.000, Maximum=8168.000, Mean=2652.734, StdDev=857.505',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=8168',
' STATISTICS_MEAN=2652.73350539',
' STATISTICS_MINIMUM=139',
' STATISTICS_STDDEV=857.50489366',
' STATISTICS_VALID_PERCENT=70.92',
'Band 6 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=-34.000, Maximum=7558.000, Mean=1617.085, StdDev=812.587',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=7558',
' STATISTICS_MEAN=1617.08474722',
' STATISTICS_MINIMUM=-34',
' STATISTICS_STDDEV=812.58672081',
' STATISTICS_VALID_PERCENT=70.92',
'Band 7 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=1.000, Maximum=5623.000, Mean=716.356, StdDev=624.757',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=5623',
' STATISTICS_MEAN=716.35581063',
' STATISTICS_MINIMUM=1',
' STATISTICS_STDDEV=624.75673218',
' STATISTICS_VALID_PERCENT=70.92',
'Band 8 Block=474x1 Type=Int16, ColorInterp=Undefined',
' Minimum=3.000, Maximum=158.000, Mean=17.271, StdDev=5.809',
' NoData Value=-32768',
' Offset: 0, Scale:0.0001',
' Metadata:',
' STATISTICS_MAXIMUM=158',
' STATISTICS_MEAN=17.27096832',
' STATISTICS_MINIMUM=3',
' STATISTICS_STDDEV=5.80865187',
' STATISTICS_VALID_PERCENT=70.92'])]),
])
| gpl-3.0 | -7,747,350,162,968,871,000 | 40.691057 | 81 | 0.564808 | false |
rsampaio/cobbler | cobbler/remote.py | 1 | 100114 | """
Code for Cobbler's XMLRPC API
Copyright 2007-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import sys, socket, time, os, errno, re, random, stat, string
import base64
import SimpleXMLRPCServer
from SocketServer import ThreadingMixIn
import xmlrpclib
import base64
import fcntl
import traceback
import glob
try:
import subprocess
except:
import sub_process as subprocess
from threading import Thread
import api as cobbler_api
import utils
from cexceptions import *
import item_distro
import item_profile
import item_system
import item_repo
import item_image
import item_mgmtclass
import item_package
import item_file
import clogger
import pxegen
import utils
#from utils import * # BAD!
from utils import _
import configgen
# FIXME: make configurable?
TOKEN_TIMEOUT = 60*60 # 60 minutes
EVENT_TIMEOUT = 7*24*60*60 # 1 week
CACHE_TIMEOUT = 10*60 # 10 minutes
# task codes
EVENT_RUNNING = "running"
EVENT_COMPLETE = "complete"
EVENT_FAILED = "failed"
# normal events
EVENT_INFO = "notification"
# for backwards compatibility with 1.6 and prev XMLRPC
# do not remove!
REMAP_COMPAT = {
"ksmeta" : "ks_meta",
"kopts" : "kernel_options",
"kopts_post" : "kernel_options_post",
"netboot-enabled" : "netboot_enabled"
}
class CobblerThread(Thread):
def __init__(self,event_id,remote,logatron,options):
Thread.__init__(self)
self.event_id = event_id
self.remote = remote
self.logger = logatron
if options is None:
options = {}
self.options = options
def on_done(self):
pass
def run(self):
time.sleep(1)
try:
rc = self._run(self)
self.remote._set_task_state(self,self.event_id,EVENT_COMPLETE)
self.on_done()
return rc
except:
utils.log_exc(self.logger)
self.remote._set_task_state(self,self.event_id,EVENT_FAILED)
return False
# *********************************************************************
# *********************************************************************
class CobblerXMLRPCInterface:
"""
This is the interface used for all XMLRPC methods, for instance,
as used by koan or CobblerWeb.
Most read-write operations require a token returned from "login".
Read operations do not.
"""
def __init__(self,api):
"""
Constructor. Requires a Cobbler API handle.
"""
self.api = api
self.logger = self.api.logger
self.token_cache = {}
self.object_cache = {}
self.timestamp = self.api.last_modified_time()
self.events = {}
self.shared_secret = utils.get_shared_secret()
random.seed(time.time())
self.translator = utils.Translator(keep=string.printable)
self.pxegen = pxegen.PXEGen(api._config,self.logger)
def check(self, token):
"""
        Returns a list of all the messages/warnings describing things the
        admin may want to correct about the configuration of
the cobbler server. This has nothing to do with "check_access"
which is an auth/authz function in the XMLRPC API.
"""
self.check_access(token, "check")
return self.api.check(logger=self.logger)
def background_buildiso(self, options, token):
"""
Generates an ISO in /var/www/cobbler/pub that can be used to install
profiles without using PXE.
"""
def runner(self):
return self.remote.api.build_iso(
self.options.get("iso","/var/www/cobbler/pub/generated.iso"),
self.options.get("profiles",None),
self.options.get("systems",None),
self.options.get("buildisodir",None),
self.options.get("distro",None),
self.options.get("standalone",False),
self.options.get("source",None),
self.options.get("exclude_dns",False),
self.logger
)
def on_done(self):
if self.options.get("iso","") == "/var/www/cobbler/pub/generated.iso":
msg = "ISO now available for <A HREF=\"/cobbler/pub/generated.iso\">download</A>"
self.remote._new_event(msg)
return self.__start_task(runner, token, "buildiso", "Build Iso", options, on_done)
def background_aclsetup(self, options, token):
def runner(self):
return self.remote.api.acl_config(
self.options.get("adduser",None),
self.options.get("addgroup",None),
self.options.get("removeuser",None),
self.options.get("removegroup",None),
self.logger
)
return self.__start_task(runner, token, "aclsetup", "(CLI) ACL Configuration", options)
def background_dlcontent(self, options, token):
"""
Download bootloaders and other support files.
"""
def runner(self):
return self.remote.api.dlcontent(self.options.get("force",False), self.logger)
return self.__start_task(runner, token, "get_loaders", "Download Bootloader Content", options)
def background_sync(self, options, token):
def runner(self):
return self.remote.api.sync(self.options.get("verbose",False),logger=self.logger)
return self.__start_task(runner, token, "sync", "Sync", options)
def background_hardlink(self, options, token):
def runner(self):
return self.remote.api.hardlink(logger=self.logger)
return self.__start_task(runner, token, "hardlink", "Hardlink", options)
def background_validateks(self, options, token):
def runner(self):
return self.remote.api.validateks(logger=self.logger)
return self.__start_task(runner, token, "validateks", "Kickstart Validation", options)
def background_replicate(self, options, token):
def runner(self):
# FIXME: defaults from settings here should come from views, fix in views.py
return self.remote.api.replicate(
self.options.get("master", None),
self.options.get("distro_patterns", ""),
self.options.get("profile_patterns", ""),
self.options.get("system_patterns", ""),
self.options.get("repo_patterns", ""),
self.options.get("image_patterns", ""),
self.options.get("mgmtclass_patterns", ""),
self.options.get("package_patterns", ""),
self.options.get("file_patterns", ""),
self.options.get("prune", False),
self.options.get("omit_data", False),
self.options.get("sync_all", False),
self.logger
)
return self.__start_task(runner, token, "replicate", "Replicate", options)
def background_import(self, options, token):
def runner(self):
return self.remote.api.import_tree(
self.options.get("path", None),
self.options.get("name", None),
self.options.get("available_as", None),
self.options.get("kickstart_file", None),
self.options.get("rsync_flags",None),
self.options.get("arch",None),
self.options.get("breed", None),
self.options.get("os_version", None),
self.logger
)
return self.__start_task(runner, token, "import", "Media import", options)
def background_reposync(self, options, token):
def runner(self):
# NOTE: WebUI passes in repos here, CLI passes only:
repos = options.get("repos", [])
only = options.get("only", None)
if only is not None:
repos = [ only ]
nofail = options.get("nofail", len(repos) > 0)
if len(repos) > 0:
for name in repos:
self.remote.api.reposync(tries=self.options.get("tries",
3), name=name, nofail=nofail, logger=self.logger)
else:
self.remote.api.reposync(tries=self.options.get("tries",3),
name=None, nofail=nofail, logger=self.logger)
return True
return self.__start_task(runner, token, "reposync", "Reposync", options)
def background_power_system(self, options, token):
def runner(self):
for x in self.options.get("systems",[]):
object_id = self.remote.get_system_handle(x,token)
self.remote.power_system(object_id,self.options.get("power",""),token,logger=self.logger)
return True
self.check_access(token, "power")
return self.__start_task(runner, token, "power", "Power management (%s)" % options.get("power",""), options)
def get_events(self, for_user=""):
"""
Returns a hash(key=event id) = [ statetime, name, state, [read_by_who] ]
If for_user is set to a string, it will only return events the user
has not seen yet. If left unset, it will return /all/ events.
"""
# return only the events the user has not seen
self.events_filtered = {}
for (k,x) in self.events.iteritems():
if for_user in x[3]:
pass
else:
self.events_filtered[k] = x
# mark as read so user will not get events again
if for_user is not None and for_user != "":
for (k,x) in self.events.iteritems():
if for_user in x[3]:
pass
else:
self.events[k][3].append(for_user)
return self.events_filtered
def get_event_log(self,event_id):
"""
Returns the contents of a task log.
Events that are not task-based do not have logs.
"""
event_id = str(event_id).replace("..","").replace("/","")
path = "/var/log/cobbler/tasks/%s.log" % event_id
self._log("getting log for %s" % event_id)
if os.path.exists(path):
fh = open(path, "r")
data = str(fh.read())
data = self.translator(data)
fh.close()
return data
else:
return "?"
def __generate_event_id(self,optype):
t = time.time()
(year, month, day, hour, minute, second, weekday, julian, dst) = time.localtime()
return "%04d-%02d-%02d_%02d%02d%02d_%s" % (year,month,day,hour,minute,second,optype)
def _new_event(self, name):
event_id = self.__generate_event_id("event")
event_id = str(event_id)
self.events[event_id] = [ float(time.time()), str(name), EVENT_INFO, [] ]
def __start_task(self, thr_obj_fn, token, role_name, name, args, on_done=None):
"""
Starts a new background task.
token -- token from login() call, all tasks require tokens
role_name -- used to check token against authn/authz layers
thr_obj_fn -- function handle to run in a background thread
name -- display name to show in logs/events
args -- usually this is a single hash, containing options
on_done -- an optional second function handle to run after success (and only success)
Returns a task id.
"""
self.check_access(token, role_name)
event_id = self.__generate_event_id(role_name) # use short form for logfile suffix
event_id = str(event_id)
self.events[event_id] = [ float(time.time()), str(name), EVENT_RUNNING, [] ]
self._log("start_task(%s); event_id(%s)"%(name,event_id))
logatron = clogger.Logger("/var/log/cobbler/tasks/%s.log" % event_id)
thr_obj = CobblerThread(event_id,self,logatron,args)
on_done_type = type(thr_obj.on_done)
thr_obj._run = thr_obj_fn
if on_done is not None:
thr_obj.on_done = on_done_type(on_done, thr_obj, CobblerThread)
thr_obj.start()
return event_id
def _set_task_state(self,thread_obj,event_id,new_state):
event_id = str(event_id)
if self.events.has_key(event_id):
self.events[event_id][2] = new_state
self.events[event_id][3] = [] # clear the list of who has read it
if thread_obj is not None:
if new_state == EVENT_COMPLETE:
thread_obj.logger.info("### TASK COMPLETE ###")
if new_state == EVENT_FAILED:
thread_obj.logger.error("### TASK FAILED ###")
def get_task_status(self, event_id):
event_id = str(event_id)
if self.events.has_key(event_id):
return self.events[event_id]
else:
raise CX("no event with that id")
def __sorter(self,a,b):
"""
Helper function to sort two datastructure representations of
cobbler objects by name.
"""
return cmp(a["name"],b["name"])
def last_modified_time(self, token=None):
"""
Return the time of the last modification to any object.
Used to verify from a calling application that no cobbler
objects have changed since last check.
"""
return self.api.last_modified_time()
def update(self, token=None):
"""
Deprecated method. Now does nothing.
"""
return True
def ping(self):
"""
Deprecated method. Now does nothing.
"""
return True
def get_user_from_token(self,token):
"""
Given a token returned from login, return the username
that logged in with it.
"""
if not self.token_cache.has_key(token):
raise CX("invalid token: %s" % token)
else:
return self.token_cache[token][1]
def _log(self,msg,user=None,token=None,name=None,object_id=None,attribute=None,debug=False,error=False):
"""
Helper function to write data to the log file from the XMLRPC remote implementation.
Takes various optional parameters that should be supplied when known.
"""
# add the user editing the object, if supplied
m_user = "?"
if user is not None:
m_user = user
if token is not None:
try:
m_user = self.get_user_from_token(token)
except:
# invalid or expired token?
m_user = "???"
msg = "REMOTE %s; user(%s)" % (msg, m_user)
if name is not None:
msg = "%s; name(%s)" % (msg, name)
if object_id is not None:
msg = "%s; object_id(%s)" % (msg, object_id)
# add any attributes being modified, if any
if attribute:
msg = "%s; attribute(%s)" % (msg, attribute)
# log to the correct logger
if error:
logger = self.logger.error
elif debug:
logger = self.logger.debug
else:
logger = self.logger.info
logger(msg)
def __sort(self,data,sort_field=None):
"""
Helper function used by the various find/search functions to return
object representations in order.
"""
sort_fields=["name"]
sort_rev=False
if sort_field is not None:
if sort_field.startswith("!"):
sort_field=sort_field[1:]
sort_rev=True
sort_fields.insert(0,sort_field)
sortdata=[(x.sort_key(sort_fields),x) for x in data]
if sort_rev:
sortdata.sort(lambda a,b:cmp(b,a))
else:
sortdata.sort()
return [x for (key, x) in sortdata]
def __paginate(self,data,page=None,items_per_page=None,token=None):
"""
Helper function to support returning parts of a selection, for
example, for use in a web app where only a part of the results
are to be presented on each screen.
"""
default_page = 1
default_items_per_page = 25
try:
page = int(page)
if page < 1:
page = default_page
except:
page = default_page
try:
items_per_page = int(items_per_page)
if items_per_page <= 0:
items_per_page = default_items_per_page
except:
items_per_page = default_items_per_page
num_items = len(data)
num_pages = ((num_items-1)/items_per_page)+1
if num_pages==0:
num_pages=1
if page>num_pages:
page=num_pages
start_item = (items_per_page * (page-1))
end_item = start_item + items_per_page
if start_item > num_items:
start_item = num_items - 1
if end_item > num_items:
end_item = num_items
data = data[start_item:end_item]
if page > 1:
prev_page = page - 1
else:
prev_page = None
if page < num_pages:
next_page = page + 1
else:
next_page = None
return (data,{
'page' : page,
'prev_page' : prev_page,
'next_page' : next_page,
'pages' : range(1,num_pages+1),
'num_pages' : num_pages,
'num_items' : num_items,
'start_item' : start_item,
'end_item' : end_item,
'items_per_page' : items_per_page,
'items_per_page_list' : [10,20,50,100,200,500],
})
def __get_object(self, object_id):
"""
Helper function. Given an object id, return the actual object.
"""
if object_id.startswith("___NEW___"):
return self.object_cache[object_id][1]
(otype, oname) = object_id.split("::",1)
return self.api.get_item(otype,oname)
def get_item(self, what, name, flatten=False):
"""
Returns a hash describing a given object.
what -- "distro", "profile", "system", "image", "repo", etc
name -- the object name to retrieve
flatten -- reduce hashes to string representations (True/False)
"""
self._log("get_item(%s,%s)"%(what,name))
item=self.api.get_item(what,name)
if item is not None:
item=item.to_datastruct()
if flatten:
item = utils.flatten(item)
return self.xmlrpc_hacks(item)
def get_distro(self,name,flatten=False,token=None,**rest):
return self.get_item("distro",name,flatten=flatten)
def get_profile(self,name,flatten=False,token=None,**rest):
return self.get_item("profile",name,flatten=flatten)
def get_system(self,name,flatten=False,token=None,**rest):
return self.get_item("system",name,flatten=flatten)
def get_repo(self,name,flatten=False,token=None,**rest):
return self.get_item("repo",name,flatten=flatten)
def get_image(self,name,flatten=False,token=None,**rest):
return self.get_item("image",name,flatten=flatten)
    def get_mgmtclass(self,name,flatten=False,token=None,**rest):
        return self.get_item("mgmtclass",name,flatten=flatten)
    def get_package(self,name,flatten=False,token=None,**rest):
        return self.get_item("package",name,flatten=flatten)
    def get_file(self,name,flatten=False,token=None,**rest):
        return self.get_item("file",name,flatten=flatten)
def get_items(self, what):
"""
Returns a list of hashes.
what is the name of a cobbler object type, as described for get_item.
        Individual list elements are the same as for get_item.
"""
# FIXME: is the xmlrpc_hacks method still required ?
item = [x.to_datastruct() for x in self.api.get_items(what)]
return self.xmlrpc_hacks(item)
def get_item_names(self, what):
"""
Returns a list of object names (keys) for the given object type.
This is just like get_items, but transmits less data.
"""
return [x.name for x in self.api.get_items(what)]
def get_distros(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("distro")
def get_profiles(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("profile")
def get_systems(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("system")
def get_repos(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("repo")
def get_images(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("image")
def get_mgmtclasses(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("mgmtclass")
def get_packages(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("package")
def get_files(self,page=None,results_per_page=None,token=None,**rest):
return self.get_items("file")
def find_items(self, what, criteria=None,sort_field=None,expand=True):
"""
Returns a list of hashes.
Works like get_items but also accepts criteria as a hash to search on.
Example: { "name" : "*.example.org" }
Wildcards work as described by 'pydoc fnmatch'.
"""
self._log("find_items(%s); criteria(%s); sort(%s)" % (what,criteria,sort_field))
items = self.api.find_items(what,criteria=criteria)
items = self.__sort(items,sort_field)
if not expand:
items = [x.name for x in items]
else:
items = [x.to_datastruct() for x in items]
return self.xmlrpc_hacks(items)
def find_distro(self,criteria={},expand=False,token=None,**rest):
return self.find_items("distro",criteria,expand=expand)
def find_profile(self,criteria={},expand=False,token=None,**rest):
return self.find_items("profile",criteria,expand=expand)
def find_system(self,criteria={},expand=False,token=None,**rest):
return self.find_items("system",criteria,expand=expand)
def find_repo(self,criteria={},expand=False,token=None,**rest):
return self.find_items("repo",criteria,expand=expand)
def find_image(self,criteria={},expand=False,token=None,**rest):
return self.find_items("image",criteria,expand=expand)
def find_mgmtclass(self,criteria={},expand=False,token=None,**rest):
return self.find_items("mgmtclass",criteria,expand=expand)
def find_package(self,criteria={},expand=False,token=None,**rest):
return self.find_items("package",criteria,expand=expand)
def find_file(self,criteria={},expand=False,token=None,**rest):
return self.find_items("file",criteria,expand=expand)
def find_items_paged(self, what, criteria=None, sort_field=None, page=None, items_per_page=None, token=None):
"""
Returns a list of hashes as with find_items but additionally supports
returning just a portion of the total list, for instance in supporting
a web app that wants to show a limited amount of items per page.
"""
# FIXME: make token required for all logging calls
self._log("find_items_paged(%s); criteria(%s); sort(%s)" % (what,criteria,sort_field), token=token)
items = self.api.find_items(what,criteria=criteria)
items = self.__sort(items,sort_field)
(items,pageinfo) = self.__paginate(items,page,items_per_page)
items = [x.to_datastruct() for x in items]
return self.xmlrpc_hacks({
'items' : items,
'pageinfo' : pageinfo
})
def has_item(self,what,name,token=None):
"""
Returns True if a given collection has an item with a given name,
otherwise returns False.
"""
self._log("has_item(%s)"%what,token=token,name=name)
found = self.api.get_item(what,name)
if found is None:
return False
else:
return True
def get_item_handle(self,what,name,token=None):
"""
Given the name of an object (or other search parameters), return a
reference (object id) that can be used with modify_* functions or save_* functions
to manipulate that object.
"""
found = self.api.get_item(what,name)
if found is None:
raise CX("internal error, unknown %s name %s" % (what,name))
return "%s::%s" % (what,found.name)
def get_distro_handle(self,name,token):
return self.get_item_handle("distro",name,token)
def get_profile_handle(self,name,token):
return self.get_item_handle("profile",name,token)
def get_system_handle(self,name,token):
return self.get_item_handle("system",name,token)
def get_repo_handle(self,name,token):
return self.get_item_handle("repo",name,token)
def get_image_handle(self,name,token):
return self.get_item_handle("image",name,token)
def get_mgmtclass_handle(self,name,token):
return self.get_item_handle("mgmtclass",name,token)
def get_package_handle(self,name,token):
return self.get_item_handle("package",name,token)
def get_file_handle(self,name,token):
return self.get_item_handle("file",name,token)
def remove_item(self,what,name,token,recursive=True):
"""
Deletes an item from a collection.
        Note that this requires the name of the object, not an item handle.
"""
self._log("remove_item (%s, recursive=%s)" % (what,recursive),name=name,token=token)
self.check_access(token, "remove_item", name)
return self.api.remove_item(what,name,delete=True,with_triggers=True,recursive=recursive)
def remove_distro(self,name,token,recursive=1):
return self.remove_item("distro",name,token,recursive)
def remove_profile(self,name,token,recursive=1):
return self.remove_item("profile",name,token,recursive)
def remove_system(self,name,token,recursive=1):
return self.remove_item("system",name,token,recursive)
def remove_repo(self,name,token,recursive=1):
return self.remove_item("repo",name,token,recursive)
def remove_image(self,name,token,recursive=1):
return self.remove_item("image",name,token,recursive)
def remove_mgmtclass(self,name,token,recursive=1):
return self.remove_item("mgmtclass",name,token,recursive)
def remove_package(self,name,token,recursive=1):
return self.remove_item("package",name,token,recursive)
def remove_file(self,name,token,recursive=1):
return self.remove_item("file",name,token,recursive)
def copy_item(self,what,object_id,newname,token=None):
"""
Creates a new object that matches an existing object, as specified by an id.
"""
self._log("copy_item(%s)" % what,object_id=object_id,token=token)
self.check_access(token,"copy_%s" % what)
obj = self.__get_object(object_id)
return self.api.copy_item(what,obj,newname)
def copy_distro(self,object_id,newname,token=None):
return self.copy_item("distro",object_id,newname,token)
def copy_profile(self,object_id,newname,token=None):
return self.copy_item("profile",object_id,newname,token)
def copy_system(self,object_id,newname,token=None):
return self.copy_item("system",object_id,newname,token)
def copy_repo(self,object_id,newname,token=None):
return self.copy_item("repo",object_id,newname,token)
def copy_image(self,object_id,newname,token=None):
return self.copy_item("image",object_id,newname,token)
def copy_mgmtclass(self,object_id,newname,token=None):
return self.copy_item("mgmtclass",object_id,newname,token)
def copy_package(self,object_id,newname,token=None):
return self.copy_item("package",object_id,newname,token)
def copy_file(self,object_id,newname,token=None):
return self.copy_item("file",object_id,newname,token)
def rename_item(self,what,object_id,newname,token=None):
"""
Renames an object specified by object_id to a new name.
"""
self._log("rename_item(%s)" % what,object_id=object_id,token=token)
obj = self.__get_object(object_id)
return self.api.rename_item(what,obj,newname)
def rename_distro(self,object_id,newname,token=None):
return self.rename_item("distro",object_id,newname,token)
def rename_profile(self,object_id,newname,token=None):
return self.rename_item("profile",object_id,newname,token)
def rename_system(self,object_id,newname,token=None):
return self.rename_item("system",object_id,newname,token)
def rename_repo(self,object_id,newname,token=None):
return self.rename_item("repo",object_id,newname,token)
def rename_image(self,object_id,newname,token=None):
return self.rename_item("image",object_id,newname,token)
def rename_mgmtclass(self,object_id,newname,token=None):
return self.rename_item("mgmtclass",object_id,newname,token)
def rename_package(self,object_id,newname,token=None):
return self.rename_item("package",object_id,newname,token)
def rename_file(self,object_id,newname,token=None):
return self.rename_item("file",object_id,newname,token)
def new_item(self,what,token,is_subobject=False):
"""
Creates a new (unconfigured) object, returning an object
handle that can be used with modify_* methods and then finally
save_* methods. The handle only exists in memory until saved.
"what" specifies the type of object:
        distro, profile, system, repo, image, mgmtclass, package, or file
"""
self._log("new_item(%s)"%what,token=token)
self.check_access(token,"new_%s"%what)
if what == "distro":
d = item_distro.Distro(self.api._config,is_subobject=is_subobject)
elif what == "profile":
d = item_profile.Profile(self.api._config,is_subobject=is_subobject)
elif what == "system":
d = item_system.System(self.api._config,is_subobject=is_subobject)
elif what == "repo":
d = item_repo.Repo(self.api._config,is_subobject=is_subobject)
elif what == "image":
d = item_image.Image(self.api._config,is_subobject=is_subobject)
elif what == "mgmtclass":
d = item_mgmtclass.Mgmtclass(self.api._config,is_subobject=is_subobject)
elif what == "package":
d = item_package.Package(self.api._config,is_subobject=is_subobject)
elif what == "file":
d = item_file.File(self.api._config,is_subobject=is_subobject)
else:
raise CX("internal error, collection name is %s" % what)
key = "___NEW___%s::%s" % (what,self.__get_random(25))
self.object_cache[key] = (time.time(), d)
return key
def new_distro(self,token):
return self.new_item("distro",token)
def new_profile(self,token):
return self.new_item("profile",token)
def new_subprofile(self,token):
return self.new_item("profile",token,is_subobject=True)
def new_system(self,token):
return self.new_item("system",token)
def new_repo(self,token):
return self.new_item("repo",token)
def new_image(self,token):
return self.new_item("image",token)
def new_mgmtclass(self,token):
return self.new_item("mgmtclass",token)
def new_package(self,token):
return self.new_item("package",token)
def new_file(self,token):
return self.new_item("file",token)
def modify_item(self,what,object_id,attribute,arg,token):
"""
        Adjusts the value of a given attribute on the object identified by
        object_id; 'what' names the object type.  Allows modification of certain
        attributes on newly created or existing object handles.
"""
self._log("modify_item(%s)" % what,object_id=object_id,attribute=attribute,token=token)
obj = self.__get_object(object_id)
self.check_access(token, "modify_%s"%what, obj, attribute)
# support 1.6 field name exceptions for backwards compat
attribute = REMAP_COMPAT.get(attribute,attribute)
method = obj.remote_methods().get(attribute, None)
if method == None:
# it's ok, the CLI will send over lots of junk we can't process
# (like newname or in-place) so just go with it.
return False
# raise CX("object has no method: %s" % attribute)
return method(arg)
def modify_distro(self,object_id,attribute,arg,token):
return self.modify_item("distro",object_id,attribute,arg,token)
def modify_profile(self,object_id,attribute,arg,token):
return self.modify_item("profile",object_id,attribute,arg,token)
def modify_system(self,object_id,attribute,arg,token):
return self.modify_item("system",object_id,attribute,arg,token)
def modify_image(self,object_id,attribute,arg,token):
return self.modify_item("image",object_id,attribute,arg,token)
def modify_repo(self,object_id,attribute,arg,token):
return self.modify_item("repo",object_id,attribute,arg,token)
def modify_mgmtclass(self,object_id,attribute,arg,token):
return self.modify_item("mgmtclass",object_id,attribute,arg,token)
def modify_package(self,object_id,attribute,arg,token):
return self.modify_item("package",object_id,attribute,arg,token)
def modify_file(self,object_id,attribute,arg,token):
return self.modify_item("file",object_id,attribute,arg,token)
def __is_interface_field(self,f):
k = "*%s" % f
for x in item_system.FIELDS:
if k == x[0]:
return True
return False
def xapi_object_edit(self,object_type,object_name,edit_type,attributes,token):
"""
Extended API: New style object manipulations, 2.0 and later
        Preferred over using new_, modify_, save_ directly.
        Though we must preserve the old calls for backwards compatibility, this
        method causes much less XMLRPC traffic.
edit_type - One of 'add', 'rename', 'copy', 'remove'
Ex: xapi_object_edit("distro","el5","add",{"kernel":"/tmp/foo","initrd":"/tmp/foo"},token)
"""
self.check_access(token,"xedit_%s" % object_type, token)
if edit_type == "add" and not attributes.has_key("clobber"):
handle = 0
try:
handle = self.get_item_handle(object_type, object_name)
except:
utils.log_exc(self.logger)
pass
if handle != 0:
raise CX("it seems unwise to overwrite this object, try 'edit'")
if edit_type == "add":
is_subobject = object_type == "profile" and "parent" in attributes
handle = self.new_item(object_type, token, is_subobject=is_subobject)
else:
handle = self.get_item_handle(object_type, object_name)
if edit_type == "rename":
self.rename_item(object_type, handle, attributes["newname"], token)
handle = self.get_item_handle(object_type, attributes["newname"], token)
if edit_type == "copy":
self.copy_item(object_type, handle, attributes["newname"], token)
handle = self.get_item_handle(object_type, attributes["newname"], token)
if edit_type in [ "copy", "rename" ]:
del attributes["name"]
del attributes["newname"]
if edit_type != "remove":
# FIXME: this doesn't know about interfaces yet!
# if object type is system and fields add to hash and then
# modify when done, rather than now.
imods = {}
# FIXME: needs to know about how to delete interfaces too!
for (k,v) in attributes.iteritems():
if not object_type == "system" or not self.__is_interface_field(k):
# in place modifications allow for adding a key/value pair while keeping other k/v
# pairs intact.
if k in [ "ks_meta", "kernel_options", "kernel_options_post", "template_files", "fetchable_files"] and attributes.has_key("in_place") and attributes["in_place"]:
details = self.get_item(object_type,object_name)
v2 = details[k]
(ok, input) = utils.input_string_or_hash(v)
for (a,b) in input.iteritems():
v2[a] = b
v = v2
self.modify_item(object_type,handle,k,v,token)
else:
modkey = "%s-%s" % (k, attributes.get("interface","eth0"))
imods[modkey] = v
if object_type == "system" and not attributes.has_key("delete_interface"):
self.modify_system(handle, 'modify_interface', imods, token)
elif object_type == "system":
self.modify_system(handle, 'delete_interface', attributes.get("interface", "eth0"), token)
else:
self.remove_item(object_type, object_name, token, recursive=True)
return True
# FIXME: use the bypass flag or not?
return self.save_item(object_type, handle, token)
def save_item(self,what,object_id,token,editmode="bypass"):
"""
Saves a newly created or modified object to disk.
Calling save is required for any changes to persist.
"""
self._log("save_item(%s)" % what,object_id=object_id,token=token)
obj = self.__get_object(object_id)
self.check_access(token,"save_%s"%what,obj)
if editmode == "new":
rc = self.api.add_item(what,obj,check_for_duplicate_names=True)
else:
rc = self.api.add_item(what,obj)
return rc
def save_distro(self,object_id,token,editmode="bypass"):
return self.save_item("distro",object_id,token,editmode=editmode)
def save_profile(self,object_id,token,editmode="bypass"):
return self.save_item("profile",object_id,token,editmode=editmode)
def save_system(self,object_id,token,editmode="bypass"):
return self.save_item("system",object_id,token,editmode=editmode)
def save_image(self,object_id,token,editmode="bypass"):
return self.save_item("image",object_id,token,editmode=editmode)
def save_repo(self,object_id,token,editmode="bypass"):
return self.save_item("repo",object_id,token,editmode=editmode)
def save_mgmtclass(self,object_id,token,editmode="bypass"):
return self.save_item("mgmtclass",object_id,token,editmode=editmode)
def save_package(self,object_id,token,editmode="bypass"):
return self.save_item("package",object_id,token,editmode=editmode)
def save_file(self,object_id,token,editmode="bypass"):
return self.save_item("file",object_id,token,editmode=editmode)
def get_kickstart_templates(self,token=None,**rest):
"""
Returns all of the kickstarts that are in use by the system.
"""
self._log("get_kickstart_templates",token=token)
#self.check_access(token, "get_kickstart_templates")
return utils.get_kickstart_templates(self.api)
def get_snippets(self,token=None,**rest):
"""
Returns all the kickstart snippets.
"""
self._log("get_snippets",token=token)
# FIXME: settings.snippetsdir should be used here
return self.__get_sub_snippets("/var/lib/cobbler/snippets")
def __get_sub_snippets(self, path):
results = []
files = glob.glob(os.path.join(path,"*"))
for f in files:
if os.path.isdir(f) and not os.path.islink(f):
results += self.__get_sub_snippets(f)
elif not os.path.islink(f):
results.append(f)
results.sort()
return results
def is_kickstart_in_use(self,ks,token=None,**rest):
self._log("is_kickstart_in_use",token=token)
for x in self.api.profiles():
if x.kickstart is not None and x.kickstart == ks:
return True
for x in self.api.systems():
if x.kickstart is not None and x.kickstart == ks:
return True
return False
def generate_kickstart(self,profile=None,system=None,REMOTE_ADDR=None,REMOTE_MAC=None,**rest):
self._log("generate_kickstart")
return self.api.generate_kickstart(profile,system)
def get_blended_data(self,profile=None,system=None):
if profile is not None and profile != "":
obj = self.api.find_profile(profile)
if obj is None:
raise CX("profile not found: %s" % profile)
elif system is not None and system != "":
obj = self.api.find_system(system)
if obj is None:
raise CX("system not found: %s" % system)
else:
raise CX("internal error, no system or profile specified")
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
def get_settings(self,token=None,**rest):
"""
Return the contents of /etc/cobbler/settings, which is a hash.
"""
self._log("get_settings",token=token)
results = self.api.settings().to_datastruct()
self._log("my settings are: %s" % results, debug=True)
return self.xmlrpc_hacks(results)
def get_repo_config_for_profile(self,profile_name,**rest):
"""
Return the yum configuration a given profile should use to obtain
        all of its cobbler-associated repos.
"""
obj = self.api.find_profile(profile_name)
if obj is None:
return "# object not found: %s" % profile_name
return self.api.get_repo_config_for_profile(obj)
def get_repo_config_for_system(self,system_name,**rest):
"""
        Return the yum configuration a given system should use to obtain
        all of its cobbler-associated repos.
"""
obj = self.api.find_system(system_name)
if obj is None:
return "# object not found: %s" % system_name
return self.api.get_repo_config_for_system(obj)
def get_template_file_for_profile(self,profile_name,path,**rest):
"""
Return the templated file requested for this profile
"""
obj = self.api.find_profile(profile_name)
if obj is None:
return "# object not found: %s" % profile_name
return self.api.get_template_file_for_profile(obj,path)
def get_template_file_for_system(self,system_name,path,**rest):
"""
Return the templated file requested for this system
"""
obj = self.api.find_system(system_name)
if obj is None:
return "# object not found: %s" % system_name
return self.api.get_template_file_for_system(obj,path)
def register_new_system(self,info,token=None,**rest):
"""
If register_new_installs is enabled in settings, this allows
/usr/bin/cobbler-register (part of the koan package) to add
new system records remotely if they don't already exist.
There is a cobbler_register snippet that helps with doing
this automatically for new installs but it can also be used
for existing installs. See "AutoRegistration" on the Wiki.
"""
enabled = self.api.settings().register_new_installs
if not str(enabled) in [ "1", "y", "yes", "true" ]:
raise CX("registration is disabled in cobbler settings")
# validate input
name = info.get("name","")
profile = info.get("profile","")
hostname = info.get("hostname","")
interfaces = info.get("interfaces",{})
ilen = len(interfaces.keys())
if name == "":
raise CX("no system name submitted")
if profile == "":
raise CX("profile not submitted")
if ilen == 0:
raise CX("no interfaces submitted")
if ilen >= 64:
raise CX("too many interfaces submitted")
# validate things first
name = info.get("name","")
inames = interfaces.keys()
if self.api.find_system(name=name):
raise CX("system name conflicts")
if hostname != "" and self.api.find_system(hostname=hostname):
raise CX("hostname conflicts")
for iname in inames:
mac = info["interfaces"][iname].get("mac_address","")
ip = info["interfaces"][iname].get("ip_address","")
if ip.find("/") != -1:
raise CX("no CIDR ips are allowed")
if mac == "":
raise CX("missing MAC address for interface %s" % iname)
if mac != "":
system = self.api.find_system(mac_address=mac)
if system is not None:
raise CX("mac conflict: %s" % mac)
if ip != "":
system = self.api.find_system(ip_address=ip)
if system is not None:
raise CX("ip conflict: %s"% ip)
# looks like we can go ahead and create a system now
obj = self.api.new_system()
obj.set_profile(profile)
obj.set_name(name)
if hostname != "":
obj.set_hostname(hostname)
obj.set_netboot_enabled(False)
for iname in inames:
if info["interfaces"][iname].get("bridge","") == 1:
# don't add bridges
continue
#if info["interfaces"][iname].get("module","") == "":
# # don't attempt to add wireless interfaces
# continue
mac = info["interfaces"][iname].get("mac_address","")
ip = info["interfaces"][iname].get("ip_address","")
netmask = info["interfaces"][iname].get("netmask","")
if mac == "?":
# see koan/utils.py for explanation of network info discovery
                continue
obj.set_mac_address(mac, iname)
if hostname != "":
obj.set_dns_name(hostname, iname)
if ip != "" and ip != "?":
obj.set_ip_address(ip, iname)
if netmask != "" and netmask != "?":
obj.set_subnet(netmask, iname)
self.api.add_system(obj)
return 0
def disable_netboot(self,name,token=None,**rest):
"""
This is a feature used by the pxe_just_once support, see manpage.
        Sets the system named "name" to no longer PXE boot. Disabled by default as
this requires public API access and is technically a read-write operation.
"""
self._log("disable_netboot",token=token,name=name)
# used by nopxe.cgi
if not self.api.settings().pxe_just_once:
# feature disabled!
return False
systems = self.api.systems()
obj = systems.find(name=name)
        if obj is None:
# system not found!
return False
obj.set_netboot_enabled(0)
# disabling triggers and sync to make this extremely fast.
systems.add(obj,save=True,with_triggers=False,with_sync=False,quick_pxe_update=True)
return True
def upload_log_data(self, sys_name, file, size, offset, data, token=None,**rest):
"""
This is a logger function used by the "anamon" logging system to
        upload all sorts of auxiliary data from Anaconda.
As it's a bit of a potential log-flooder, it's off by default
and needs to be enabled in /etc/cobbler/settings.
"""
self._log("upload_log_data (file: '%s', size: %s, offset: %s)" % (file, size, offset), token=token, name=sys_name)
# Check if enabled in self.api.settings()
if not self.api.settings().anamon_enabled:
# feature disabled!
return False
# Find matching system record
systems = self.api.systems()
obj = systems.find(name=sys_name)
        if obj is None:
# system not found!
self._log("upload_log_data - system '%s' not found" % sys_name, token=token, name=sys_name)
return False
return self.__upload_file(sys_name, file, size, offset, data)
def __upload_file(self, sys_name, file, size, offset, data):
'''
system: the name of the system
name: the name of the file
size: size of contents (bytes)
data: base64 encoded file contents
offset: the offset of the chunk
files can be uploaded in chunks, if so the size describes
the chunk rather than the whole file. the offset indicates where
the chunk belongs
the special offset -1 is used to indicate the final chunk'''
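        # Illustrative chunked upload sequence (assumed, not from the original source):
        #   __upload_file("system0", "install.log", 4096, 0,    chunk1_b64)
        #   __upload_file("system0", "install.log", 4096, 4096, chunk2_b64)
        #   __upload_file("system0", "install.log", 8192, -1,   "")  # final call: truncate to 8192 bytes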
contents = base64.decodestring(data)
del data
if offset != -1:
if size is not None:
if size != len(contents):
return False
#XXX - have an incoming dir and move after upload complete
# SECURITY - ensure path remains under uploadpath
tt = string.maketrans("/","+")
fn = string.translate(file, tt)
if fn.startswith('..'):
raise CX("invalid filename used: %s" % fn)
# FIXME ... get the base dir from cobbler settings()
udir = "/var/log/cobbler/anamon/%s" % sys_name
if not os.path.isdir(udir):
os.mkdir(udir, 0755)
fn = "%s/%s" % (udir, fn)
try:
st = os.lstat(fn)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
else:
if not stat.S_ISREG(st.st_mode):
raise CX("destination not a file: %s" % fn)
fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0644)
# log_error("fd=%r" %fd)
try:
if offset == 0 or (offset == -1 and size == len(contents)):
#truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
try:
os.ftruncate(fd, 0)
# log_error("truncating fd %r to 0" %fd)
finally:
fcntl.lockf(fd, fcntl.LOCK_UN)
if offset == -1:
os.lseek(fd,0,2)
else:
os.lseek(fd,offset,0)
#write contents
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2)
try:
os.write(fd, contents)
# log_error("wrote contents")
finally:
fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2)
if offset == -1:
if size is not None:
#truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
try:
os.ftruncate(fd, size)
# log_error("truncating fd %r to size %r" % (fd,size))
finally:
fcntl.lockf(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
return True
def run_install_triggers(self,mode,objtype,name,ip,token=None,**rest):
"""
This is a feature used to run the pre/post install triggers.
See CobblerTriggers on Wiki for details
"""
self._log("run_install_triggers",token=token)
if mode != "pre" and mode != "post" and mode != "firstboot":
return False
if objtype != "system" and objtype !="profile":
return False
        # the trigger scripts are called with the object type, name, and ip as arguments 1, 2, and 3
# we do not do API lookups here because they are rather expensive at install
# time if reinstalling all of a cluster all at once.
# we can do that at "cobbler check" time.
utils.run_triggers(self.api, None, "/var/lib/cobbler/triggers/install/%s/*" % mode, additional=[objtype,name,ip],logger=self.logger)
return True
def version(self,token=None,**rest):
"""
Return the cobbler version for compatibility testing with remote applications.
See api.py for documentation.
"""
self._log("version",token=token)
return self.api.version()
def extended_version(self,token=None,**rest):
"""
Returns the full dictionary of version information. See api.py for documentation.
"""
self._log("version",token=token)
return self.api.version(extended=True)
def get_distros_since(self,mtime):
"""
Return all of the distro objects that have been modified
after mtime.
"""
data = self.api.get_distros_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_profiles_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_profiles_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_systems_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_systems_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_repos_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_repos_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_images_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_images_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_mgmtclasses_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_mgmtclasses_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_packages_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_packages_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_files_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_files_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_repos_compatible_with_profile(self,profile=None,token=None,**rest):
"""
Get repos that can be used with a given profile name
"""
self._log("get_repos_compatible_with_profile",token=token)
profile = self.api.find_profile(profile)
if profile is None:
return -1
results = []
distro = profile.get_conceptual_parent()
repos = self.get_repos()
for r in repos:
# there be dragons!
# accept all repos that are src/noarch
# but otherwise filter what repos are compatible
# with the profile based on the arch of the distro.
if r["arch"] is None or r["arch"] in [ "", "noarch", "src" ]:
results.append(r)
else:
# some backwards compatibility fuzz
# repo.arch is mostly a text field
# distro.arch is i386/x86_64/ia64/s390x/etc
if r["arch"] in [ "i386", "x86", "i686" ]:
if distro.arch in [ "i386", "x86" ]:
results.append(r)
elif r["arch"] in [ "x86_64" ]:
if distro.arch in [ "x86_64" ]:
results.append(r)
elif r["arch"].startswith("s390"):
if distro.arch in [ "s390x" ]:
results.append(r)
else:
if distro.arch == r["arch"]:
results.append(r)
return results
# this is used by the puppet external nodes feature
def find_system_by_dns_name(self,dns_name):
# FIXME: implement using api.py's find API
# and expose generic finds for other methods
# WARNING: this function is /not/ expected to stay in cobbler long term
systems = self.get_systems()
for x in systems:
for y in x["interfaces"]:
if x["interfaces"][y]["dns_name"] == dns_name:
name = x["name"]
return self.get_system_for_koan(name)
return {}
def get_distro_as_rendered(self,name,token=None,**rest):
"""
Return the distribution as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_distro_for_koan(name,token)
def get_distro_for_koan(self,name,token=None,**rest):
"""
Same as get_distro_as_rendered.
"""
self._log("get_distro_as_rendered",name=name,token=token)
obj = self.api.find_distro(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_profile_as_rendered(self,name,token=None,**rest):
"""
Return the profile as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
return self.get_profile_for_koan(name,token)
def get_profile_for_koan(self,name,token=None,**rest):
"""
Same as get_profile_as_rendered
"""
self._log("get_profile_as_rendered", name=name, token=token)
obj = self.api.find_profile(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_system_as_rendered(self,name,token=None,**rest):
"""
Return the system as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
return self.get_system_for_koan(name)
def get_system_for_koan(self,name,token=None,**rest):
"""
Same as get_system_as_rendered.
"""
self._log("get_system_as_rendered",name=name,token=token)
obj = self.api.find_system(name=name)
if obj is not None:
hash = utils.blender(self.api,True,obj)
# Generate a pxelinux.cfg?
image_based = False
profile = obj.get_conceptual_parent()
distro = profile.get_conceptual_parent()
            if distro is None and profile.COLLECTION_TYPE == "profile":
                image_based = True
                arch = profile.arch
            else:
                arch = distro.arch
if obj.is_management_supported():
if not image_based:
hash["pxelinux.cfg"] = self.pxegen.write_pxe_file(
None, obj, profile, distro, arch)
else:
hash["pxelinux.cfg"] = self.pxegen.write_pxe_file(
None, obj,None,None,arch,image=profile)
return self.xmlrpc_hacks(hash)
return self.xmlrpc_hacks({})
def get_repo_as_rendered(self,name,token=None,**rest):
"""
Return the repo as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_repo_for_koan(name,token)
def get_repo_for_koan(self,name,token=None,**rest):
"""
Same as get_repo_as_rendered.
"""
self._log("get_repo_as_rendered",name=name,token=token)
obj = self.api.find_repo(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_image_as_rendered(self,name,token=None,**rest):
"""
Return the image as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_image_for_koan(name,token)
def get_image_for_koan(self,name,token=None,**rest):
"""
Same as get_image_as_rendered.
"""
self._log("get_image_as_rendered",name=name,token=token)
obj = self.api.find_image(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_mgmtclass_as_rendered(self,name,token=None,**rest):
"""
Return the mgmtclass as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_mgmtclass_for_koan(name,token)
def get_mgmtclass_for_koan(self,name,token=None,**rest):
"""
Same as get_mgmtclass_as_rendered.
"""
self._log("get_mgmtclass_as_rendered",name=name,token=token)
obj = self.api.find_mgmtclass(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_package_as_rendered(self,name,token=None,**rest):
"""
Return the package as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_package_for_koan(name,token)
def get_package_for_koan(self,name,token=None,**rest):
"""
Same as get_package_as_rendered.
"""
self._log("get_package_as_rendered",name=name,token=token)
obj = self.api.find_package(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_file_as_rendered(self,name,token=None,**rest):
"""
Return the file as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_file_for_koan(name,token)
def get_file_for_koan(self,name,token=None,**rest):
"""
Same as get_file_as_rendered.
"""
self._log("get_file_as_rendered",name=name,token=token)
obj = self.api.find_file(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_random_mac(self,virt_type="xenpv",token=None,**rest):
"""
Wrapper for utils.get_random_mac
Used in the webui
"""
self._log("get_random_mac",token=None)
return utils.get_random_mac(self.api,virt_type)
def xmlrpc_hacks(self,data):
"""
Convert None in XMLRPC to just '~' to make extra sure a client
that can't allow_none can deal with this. ALSO: a weird hack ensuring
        that dicts with integer keys (or other types) are transmitted
        with string keys.
"""
return utils.strip_none(data)
def get_status(self,mode="normal",token=None,**rest):
"""
Returns the same information as `cobbler status`
While a read-only operation, this requires a token because it's potentially a fair amount of I/O
"""
self.check_access(token,"sync")
return self.api.status(mode=mode)
######
# READ WRITE METHODS REQUIRE A TOKEN, use login()
# TO OBTAIN ONE
######
def __get_random(self,length):
urandom = open("/dev/urandom")
b64 = base64.encodestring(urandom.read(length))
urandom.close()
b64 = b64.replace("\n","")
return b64
def __make_token(self,user):
"""
Returns a new random token.
"""
b64 = self.__get_random(25)
self.token_cache[b64] = (time.time(), user)
return b64
def __invalidate_expired_tokens(self):
"""
Deletes any login tokens that might have expired.
Also removes expired events
"""
timenow = time.time()
for token in self.token_cache.keys():
(tokentime, user) = self.token_cache[token]
if (timenow > tokentime + TOKEN_TIMEOUT):
self._log("expiring token",token=token,debug=True)
del self.token_cache[token]
# and also expired objects
for oid in self.object_cache.keys():
(tokentime, entry) = self.object_cache[oid]
if (timenow > tokentime + CACHE_TIMEOUT):
del self.object_cache[oid]
for tid in self.events.keys():
(eventtime, name, status, who) = self.events[tid]
if (timenow > eventtime + EVENT_TIMEOUT):
del self.events[tid]
# logfile cleanup should be dealt w/ by logrotate
def __validate_user(self,input_user,input_password):
"""
Returns whether this user/pass combo should be given
access to the cobbler read-write API.
For the system user, this answer is always "yes", but
it is only valid for the socket interface.
FIXME: currently looks for users in /etc/cobbler/auth.conf
Would be very nice to allow for PAM and/or just Kerberos.
"""
return self.api.authenticate(input_user,input_password)
def __validate_token(self,token):
"""
Checks to see if an API method can be called when
the given token is passed in. Updates the timestamp
of the token automatically to prevent the need to
repeatedly call login(). Any method that needs
access control should call this before doing anything
else.
"""
self.__invalidate_expired_tokens()
if self.token_cache.has_key(token):
user = self.get_user_from_token(token)
if user == "<system>":
# system token is only valid over Unix socket
return False
self.token_cache[token] = (time.time(), user) # update to prevent timeout
return True
else:
self._log("invalid token",token=token)
raise CX("invalid token: %s" % token)
def __name_to_object(self,resource,name):
if resource.find("distro") != -1:
return self.api.find_distro(name)
if resource.find("profile") != -1:
return self.api.find_profile(name)
if resource.find("system") != -1:
return self.api.find_system(name)
if resource.find("repo") != -1:
return self.api.find_repo(name)
if resource.find("mgmtclass") != -1:
return self.api.find_mgmtclass(name)
if resource.find("package") != -1:
return self.api.find_package(name)
if resource.find("file") != -1:
return self.api.find_file(name)
return None
def check_access_no_fail(self,token,resource,arg1=None,arg2=None):
"""
This is called by the WUI to decide whether an element
        is editable or not. It differs from check_access in that
it is supposed to /not/ log the access checks (TBA) and does
not raise exceptions.
"""
need_remap = False
for x in [ "distro", "profile", "system", "repo", "image", "mgmtclass", "package", "file" ]:
if arg1 is not None and resource.find(x) != -1:
need_remap = True
break
if need_remap:
# we're called with an object name, but need an object
arg1 = self.__name_to_object(resource,arg1)
try:
self.check_access(token,resource,arg1,arg2)
return True
except:
utils.log_exc(self.logger)
return False
def check_access(self,token,resource,arg1=None,arg2=None):
validated = self.__validate_token(token)
user = self.get_user_from_token(token)
if user == "<DIRECT>":
self._log("CLI Authorized", debug=True)
return True
rc = self.api.authorize(user,resource,arg1,arg2)
self._log("%s authorization result: %s" % (user,rc),debug=True)
if not rc:
raise CX("authorization failure for user %s" % user)
return rc
def login(self,login_user,login_password):
"""
Takes a username and password, validates it, and if successful
returns a random login token which must be used on subsequent
method calls. The token will time out after a set interval if not
        used. Re-logging in is permitted.
"""
# if shared secret access is requested, don't bother hitting the auth
# plugin
if login_user == "":
if login_password == self.shared_secret:
return self.__make_token("<DIRECT>")
else:
utils.die(self.logger, "login failed")
# this should not log to disk OR make events as we're going to
# call it like crazy in CobblerWeb. Just failed attempts.
if self.__validate_user(login_user,login_password):
token = self.__make_token(login_user)
return token
else:
utils.die(self.logger, "login failed (%s)" % login_user)
def logout(self,token):
"""
Retires a token ahead of the timeout.
"""
self._log("logout", token=token)
if self.token_cache.has_key(token):
del self.token_cache[token]
return True
return False
def token_check(self,token):
"""
This is a demo function that does not return anything useful.
"""
self.__validate_token(token)
return True
def sync(self,token):
"""
Run sync code, which should complete before XMLRPC timeout. We can't
do reposync this way. Would be nice to send output over AJAX/other
later.
"""
# FIXME: performance
self._log("sync",token=token)
self.check_access(token,"sync")
return self.api.sync()
def read_or_write_kickstart_template(self,kickstart_file,is_read,new_data,token):
"""
Allows the web app to be used as a kickstart file editor. For security
reasons we will only allow kickstart files to be edited if they reside in
/var/lib/cobbler/kickstarts/ or /etc/cobbler. This limits the damage
doable by Evil who has a cobbler password but not a system password.
        Also, if it lives in /etc/cobbler, the file must be a kickstart file.
"""
if is_read:
what = "read_kickstart_template"
else:
what = "write_kickstart_template"
self._log(what,name=kickstart_file,token=token)
self.check_access(token,what,kickstart_file,is_read)
if kickstart_file.find("..") != -1 or not kickstart_file.startswith("/"):
utils.die(self.logger,"tainted file location")
if not kickstart_file.startswith("/etc/cobbler/") and not kickstart_file.startswith("/var/lib/cobbler/kickstarts"):
utils.die(self.logger, "unable to view or edit kickstart in this location")
if kickstart_file.startswith("/etc/cobbler/"):
if not kickstart_file.endswith(".ks") and not kickstart_file.endswith(".cfg"):
# take care to not allow config files to be altered.
utils.die(self.logger, "this does not seem to be a kickstart file")
if not is_read and not os.path.exists(kickstart_file):
utils.die(self.logger, "new files must go in /var/lib/cobbler/kickstarts")
if is_read:
fileh = open(kickstart_file,"r")
data = fileh.read()
fileh.close()
return data
else:
if new_data == -1:
# delete requested
if not self.is_kickstart_in_use(kickstart_file,token):
os.remove(kickstart_file)
else:
utils.die(self.logger, "attempt to delete in-use file")
else:
fileh = open(kickstart_file,"w+")
fileh.write(new_data)
fileh.close()
return True
def read_or_write_snippet(self,snippet_file,is_read,new_data,token):
"""
Allows the WebUI to be used as a snippet file editor. For security
reasons we will only allow snippet files to be edited if they reside in
/var/lib/cobbler/snippets.
"""
# FIXME: duplicate code with kickstart view/edit
# FIXME: need to move to API level functions
if is_read:
what = "read_snippet"
else:
what = "write_snippet"
self._log(what,name=snippet_file,token=token)
self.check_access(token,what,snippet_file,is_read)
if snippet_file.find("..") != -1 or not snippet_file.startswith("/"):
utils.die(self.logger, "tainted file location")
if not snippet_file.startswith("/var/lib/cobbler/snippets"):
utils.die(self.logger, "unable to view or edit snippet in this location")
if is_read:
fileh = open(snippet_file,"r")
data = fileh.read()
fileh.close()
return data
else:
if new_data == -1:
# FIXME: no way to check if something is using it
os.remove(snippet_file)
else:
fileh = open(snippet_file,"w+")
fileh.write(new_data)
fileh.close()
return True
def power_system(self,object_id,power=None,token=None,logger=None):
"""
Internal implementation used by background_power, do not call
directly if possible.
Allows poweron/poweroff/powerstatus/reboot of a system specified by object_id.
"""
obj = self.__get_object(object_id)
self.check_access(token, "power_system", obj)
if power=="on":
rc=self.api.power_on(obj, user=None, password=None, logger=logger)
elif power=="off":
rc=self.api.power_off(obj, user=None, password=None, logger=logger)
elif power=="status":
rc=self.api.power_status(obj, user=None, password=None, logger=logger)
elif power=="reboot":
rc=self.api.reboot(obj, user=None, password=None, logger=logger)
else:
utils.die(self.logger, "invalid power mode '%s', expected on/off/status/reboot" % power)
return rc
def get_config_data(self,hostname):
"""
Generate configuration data for the system specified by hostname.
"""
self._log("get_config_data for %s" % hostname)
obj = configgen.ConfigGen(hostname)
return obj.gen_config_data_for_koan()
def clear_system_logs(self, object_id, token=None, logger=None):
"""
clears console logs of a system
"""
obj = self.__get_object(object_id)
self.check_access(token, "clear_system_logs", obj)
rc=self.api.clear_logs(obj, logger=logger)
return rc
# *********************************************************************************
# *********************************************************************************
class CobblerXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer):
def __init__(self, args):
self.allow_reuse_address = True
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self,args)
# *********************************************************************************
# *********************************************************************************
class ProxiedXMLRPCInterface:
def __init__(self,api,proxy_class):
self.proxied = proxy_class(api)
self.logger = self.proxied.api.logger
def _dispatch(self, method, params, **rest):
if not hasattr(self.proxied, method):
raise CX("unknown remote method")
method_handle = getattr(self.proxied, method)
# FIXME: see if this works without extra boilerplate
try:
return method_handle(*params)
except Exception, e:
utils.log_exc(self.logger)
raise e
# *********************************************************************
# *********************************************************************
def _test_setup_modules(authn="authn_testing",authz="authz_allowall",pxe_once=1):
# rewrite modules.conf so we know we can use the testing module
# for xmlrpc rw testing (Makefile will put the user value back)
import yaml
import Cheetah.Template as Template
MODULES_TEMPLATE = "installer_templates/modules.conf.template"
DEFAULTS = "installer_templates/defaults"
fh = open(DEFAULTS)
data = yaml.load(fh.read())
fh.close()
data["authn_module"] = authn
data["authz_module"] = authz
data["pxe_once"] = pxe_once
t = Template.Template(file=MODULES_TEMPLATE, searchList=[data])
open("/etc/cobbler/modules.conf","w+").write(t.respond())
def _test_setup_settings(pxe_once=1):
    # rewrite the settings file so we know we can use the testing values
    # for xmlrpc rw testing (Makefile will put the user value back)
import yaml
import Cheetah.Template as Template
MODULES_TEMPLATE = "installer_templates/settings.template"
DEFAULTS = "installer_templates/defaults"
fh = open(DEFAULTS)
data = yaml.load(fh.read())
fh.close()
data["pxe_once"] = pxe_once
t = Template.Template(file=MODULES_TEMPLATE, searchList=[data])
open("/etc/cobbler/settings","w+").write(t.respond())
def _test_bootstrap_restart():
rc1 = subprocess.call(["/sbin/service","cobblerd","restart"],shell=False,close_fds=True)
assert rc1 == 0
rc2 = subprocess.call(["/sbin/service","httpd","restart"],shell=False,close_fds=True)
assert rc2 == 0
time.sleep(5)
_test_remove_objects()
def _test_remove_objects():
api = cobbler_api.BootAPI() # local handle
# from ro tests
d0 = api.find_distro("distro0")
i0 = api.find_image("image0")
    r0 = api.find_repo("repo0")
# from rw tests
d1 = api.find_distro("distro1")
i1 = api.find_image("image1")
    r1 = api.find_repo("repo1")
if d0 is not None: api.remove_distro(d0, recursive = True)
if i0 is not None: api.remove_image(i0)
if r0 is not None: api.remove_repo(r0)
if d1 is not None: api.remove_distro(d1, recursive = True)
if i1 is not None: api.remove_image(i1)
if r1 is not None: api.remove_repo(r1)
def test_xmlrpc_ro():
_test_bootstrap_restart()
server = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
time.sleep(2)
    # fetch the basic object collections to verify the read-only calls work
distros = server.get_distros()
profiles = server.get_profiles()
systems = server.get_systems()
repos = server.get_repos()
    images = server.get_images()
settings = server.get_settings()
assert type(distros) == type([])
assert type(profiles) == type([])
assert type(systems) == type([])
assert type(repos) == type([])
assert type(images) == type([])
assert type(settings) == type({})
# now populate with something more useful
# using the non-remote API
api = cobbler_api.BootAPI() # local handle
before_distros = len(api.distros())
before_profiles = len(api.profiles())
before_systems = len(api.systems())
before_repos = len(api.repos())
before_images = len(api.images())
fake = open("/tmp/cobbler.fake","w+")
fake.write("")
fake.close()
distro = api.new_distro()
distro.set_name("distro0")
distro.set_kernel("/tmp/cobbler.fake")
distro.set_initrd("/tmp/cobbler.fake")
api.add_distro(distro)
repo = api.new_repo()
repo.set_name("repo0")
if not os.path.exists("/tmp/empty"):
os.mkdir("/tmp/empty",770)
repo.set_mirror("/tmp/empty")
files = glob.glob("rpm-build/*.rpm")
if len(files) == 0:
raise Exception("Tests must be run from the cobbler checkout directory.")
subprocess.call("cp rpm-build/*.rpm /tmp/empty",shell=True,close_fds=True)
api.add_repo(repo)
profile = api.new_profile()
profile.set_name("profile0")
profile.set_distro("distro0")
profile.set_kickstart("/var/lib/cobbler/kickstarts/sample.ks")
profile.set_repos(["repo0"])
api.add_profile(profile)
system = api.new_system()
system.set_name("system0")
system.set_hostname("hostname0")
system.set_gateway("192.168.1.1")
system.set_profile("profile0")
system.set_dns_name("hostname0","eth0")
api.add_system(system)
image = api.new_image()
image.set_name("image0")
image.set_file("/tmp/cobbler.fake")
api.add_image(image)
# reposync is required in order to create the repo config files
api.reposync(name="repo0")
# FIXME: the following tests do not yet look to see that all elements
# retrieved match what they were created with, but we presume this
# all works. It is not a high priority item to test but do not assume
# this is a complete test of access functions.
def comb(haystack, needle):
for x in haystack:
if x["name"] == needle:
return True
return False
distros = server.get_distros()
assert len(distros) == before_distros + 1
assert comb(distros, "distro0")
profiles = server.get_profiles()
print "BEFORE: %s" % before_profiles
print "CURRENT: %s" % len(profiles)
for p in profiles:
print " PROFILES: %s" % p["name"]
for p in api.profiles():
print " API : %s" % p.name
assert len(profiles) == before_profiles + 1
assert comb(profiles, "profile0")
systems = server.get_systems()
# assert len(systems) == before_systems + 1
assert comb(systems, "system0")
repos = server.get_repos()
# FIXME: disable temporarily
# assert len(repos) == before_repos + 1
assert comb(repos, "repo0")
images = server.get_images()
# assert len(images) == before_images + 1
assert comb(images, "image0")
# now test specific gets
distro = server.get_distro("distro0")
assert distro["name"] == "distro0"
assert type(distro["kernel_options"] == type({}))
profile = server.get_profile("profile0")
assert profile["name"] == "profile0"
assert type(profile["kernel_options"] == type({}))
system = server.get_system("system0")
assert system["name"] == "system0"
assert type(system["kernel_options"] == type({}))
repo = server.get_repo("repo0")
assert repo["name"] == "repo0"
image = server.get_image("image0")
assert image["name"] == "image0"
# now test the calls koan uses
# the difference is that koan's object types are flattened somewhat
# and also that they are passed through utils.blender() so they represent
# not the object but the evaluation of the object tree at that object.
server.update() # should be unneeded
distro = server.get_distro_for_koan("distro0")
assert distro["name"] == "distro0"
assert type(distro["kernel_options"] == type(""))
profile = server.get_profile_for_koan("profile0")
assert profile["name"] == "profile0"
assert type(profile["kernel_options"] == type(""))
system = server.get_system_for_koan("system0")
assert system["name"] == "system0"
assert type(system["kernel_options"] == type(""))
repo = server.get_repo_for_koan("repo0")
assert repo["name"] == "repo0"
image = server.get_image_for_koan("image0")
assert image["name"] == "image0"
# now test some of the additional webui calls
# compatible profiles, etc
assert server.ping() == True
assert server.get_size("distros") == 1
assert server.get_size("profiles") == 1
assert server.get_size("systems") == 1
assert server.get_size("repos") == 1
assert server.get_size("images") == 1
templates = server.get_kickstart_templates("???")
assert "/var/lib/cobbler/kickstarts/sample.ks" in templates
assert server.is_kickstart_in_use("/var/lib/cobbler/kickstarts/sample.ks","???") == True
assert server.is_kickstart_in_use("/var/lib/cobbler/kickstarts/legacy.ks","???") == False
generated = server.generate_kickstart("profile0")
assert type(generated) == type("")
assert generated.find("ERROR") == -1
assert generated.find("url") != -1
assert generated.find("network") != -1
yumcfg = server.get_repo_config_for_profile("profile0")
assert type(yumcfg) == type("")
assert yumcfg.find("ERROR") == -1
assert yumcfg.find("http://") != -1
yumcfg = server.get_repo_config_for_system("system0")
assert type(yumcfg) == type("")
assert yumcfg.find("ERROR") == -1
assert yumcfg.find("http://") != -1
server.register_mac("CC:EE:FF:GG:AA:AA","profile0")
systems = server.get_systems()
found = False
for s in systems:
if s["name"] == "CC:EE:FF:GG:AA:AA":
for iname in s["interfaces"]:
if s["interfaces"]["iname"].get("mac_address") == "CC:EE:FF:GG:AA:AA":
found = True
break
if found:
break
# FIXME: mac registration test code needs a correct settings file in order to
# be enabled.
# assert found == True
# FIXME: the following tests don't work if pxe_just_once is disabled in settings so we need
# to account for this by turning it on...
# basically we need to rewrite the settings file
# system = server.get_system("system0")
# assert system["netboot_enabled"] == "True"
# rc = server.disable_netboot("system0")
# assert rc == True
# ne = server.get_system("system0")["netboot_enabled"]
# assert ne == False
# FIXME: tests for new built-in configuration management feature
# require that --template-files attributes be set. These do not
# retrieve the kickstarts but rather config files (see Wiki topics).
# This is probably better tested at the URL level with urlgrabber, one layer
# up, in a different set of tests..
# FIXME: tests for rendered kickstart retrieval, same as above
assert server.run_install_triggers("pre","profile","profile0","127.0.0.1")
assert server.run_install_triggers("post","profile","profile0","127.0.0.1")
assert server.run_install_triggers("pre","system","system0","127.0.0.1")
assert server.run_install_triggers("post","system","system0","127.0.0.1")
ver = server.version()
assert (str(ver)[0] == "?" or str(ver).find(".") != -1)
# do removals via the API since the read-only API can't do them
    # and the read-write tests are separate
_test_remove_objects()
# this last bit mainly tests the tests, to ensure we've left nothing behind
# not XMLRPC. Tests polluting the user config is not desirable even though
# we do save/restore it.
# assert (len(api.distros()) == before_distros)
# assert (len(api.profiles()) == before_profiles)
# assert (len(api.systems()) == before_systems)
# assert (len(api.images()) == before_images)
# assert (len(api.repos()) == before_repos)
def test_xmlrpc_rw():
# ideally we need tests for the various auth modes, not just one
# and the ownership module, though this will provide decent coverage.
_test_setup_modules(authn="authn_testing",authz="authz_allowall")
_test_bootstrap_restart()
server = xmlrpclib.Server("http://127.0.0.1/cobbler_api") # remote
api = cobbler_api.BootAPI() # local instance, /DO/ ping cobblerd
# note if authn_testing is not engaged this will not work
# test getting token, will raise remote exception on fail
token = server.login("testing","testing")
# create distro
did = server.new_distro(token)
server.modify_distro(did, "name", "distro1", token)
server.modify_distro(did, "kernel", "/tmp/cobbler.fake", token)
server.modify_distro(did, "initrd", "/tmp/cobbler.fake", token)
server.modify_distro(did, "kopts", { "dog" : "fido", "cat" : "fluffy" }, token) # hash or string
server.modify_distro(did, "ksmeta", "good=sg1 evil=gould", token) # hash or string
server.modify_distro(did, "breed", "redhat", token)
server.modify_distro(did, "os-version", "rhel5", token)
server.modify_distro(did, "owners", "sam dave", token) # array or string
server.modify_distro(did, "mgmt-classes", "blip", token) # list or string
server.modify_distro(did, "template-files", "/tmp/cobbler.fake=/tmp/a /etc/fstab=/tmp/b",token) # hash or string
server.modify_distro(did, "comment", "...", token)
server.modify_distro(did, "redhat_management_key", "ALPHA", token)
server.modify_distro(did, "redhat_management_server", "rhn.example.com", token)
server.save_distro(did, token)
# use the non-XMLRPC API to check that it's added seeing we tested XMLRPC RW APIs above
# this makes extra sure it's been committed to disk.
api.deserialize()
assert api.find_distro("distro1") != None
pid = server.new_profile(token)
server.modify_profile(pid, "name", "profile1", token)
server.modify_profile(pid, "distro", "distro1", token)
server.modify_profile(pid, "enable-menu", True, token)
server.modify_profile(pid, "kickstart", "/var/lib/cobbler/kickstarts/sample.ks", token)
server.modify_profile(pid, "kopts", { "level" : "11" }, token)
server.modify_profile(pid, "kopts_post", "noapic", token)
server.modify_profile(pid, "virt_auto_boot", 0, token)
server.modify_profile(pid, "virt_file_size", 20, token)
server.modify_profile(pid, "virt_ram", 2048, token)
server.modify_profile(pid, "repos", [], token)
server.modify_profile(pid, "template-files", {}, token)
server.modify_profile(pid, "virt_path", "VolGroup00", token)
server.modify_profile(pid, "virt_bridge", "virbr1", token)
server.modify_profile(pid, "virt_cpus", 2, token)
server.modify_profile(pid, "owners", [ "sam", "dave" ], token)
server.modify_profile(pid, "mgmt_classes", "one two three", token)
server.modify_profile(pid, "comment", "...", token)
server.modify_profile(pid, "name_servers", ["one","two"], token)
server.modify_profile(pid, "name_servers_search", ["one","two"], token)
server.modify_profile(pid, "redhat_management_key", "BETA", token)
server.modify_distro(did, "redhat_management_server", "sat.example.com", token)
server.save_profile(pid, token)
api.deserialize()
assert api.find_profile("profile1") != None
sid = server.new_system(token)
server.modify_system(sid, 'name', 'system1', token)
server.modify_system(sid, 'hostname', 'system1', token)
server.modify_system(sid, 'gateway', '127.0.0.1', token)
server.modify_system(sid, 'profile', 'profile1', token)
server.modify_system(sid, 'kopts', { "dog" : "fido" }, token)
server.modify_system(sid, 'kopts_post', { "cat" : "fluffy" }, token)
server.modify_system(sid, 'kickstart', '/var/lib/cobbler/kickstarts/sample.ks', token)
server.modify_system(sid, 'netboot_enabled', True, token)
server.modify_system(sid, 'virt_path', "/opt/images", token)
server.modify_system(sid, 'virt_type', 'qemu', token)
server.modify_system(sid, 'name_servers', 'one two three four', token)
server.modify_system(sid, 'name_servers_search', 'one two three four', token)
server.modify_system(sid, 'modify_interface', {
"macaddress-eth0" : "AA:BB:CC:EE:EE:EE",
"ipaddress-eth0" : "192.168.10.50",
"gateway-eth0" : "192.168.10.1",
"virtbridge-eth0" : "virbr0",
"dnsname-eth0" : "foo.example.com",
"static-eth0" : False,
"dhcptag-eth0" : "section2",
"staticroutes-eth0" : "a:b:c d:e:f"
}, token)
server.modify_system(sid, 'modify_interface', {
"static-eth1" : False,
"staticroutes-eth1" : [ "g:h:i", "j:k:l" ]
}, token)
server.modify_system(sid, "mgmt_classes", [ "one", "two", "three"], token)
server.modify_system(sid, "template_files", {}, token)
server.modify_system(sid, "fetchable_files", {}, token)
server.modify_system(sid, "comment", "...", token)
server.modify_system(sid, "power_address", "power.example.org", token)
server.modify_system(sid, "power_type", "ipmitool", token)
server.modify_system(sid, "power_user", "Admin", token)
server.modify_system(sid, "power_pass", "magic", token)
server.modify_system(sid, "power_id", "7", token)
server.modify_system(sid, "redhat_management_key", "GAMMA", token)
server.modify_distro(did, "redhat_management_server", "spacewalk.example.com", token)
server.save_system(sid,token)
api.deserialize()
assert api.find_system("system1") != None
# FIXME: add some checks on object contents
iid = server.new_image(token)
server.modify_image(iid, "name", "image1", token)
server.modify_image(iid, "image_type", "iso", token)
server.modify_image(iid, "breed", "redhat", token)
server.modify_image(iid, "os_version", "rhel5", token)
server.modify_image(iid, "arch", "x86_64", token)
server.modify_image(iid, "file", "nfs://server/path/to/x.iso", token)
server.modify_image(iid, "owners", [ "alex", "michael" ], token)
server.modify_image(iid, "virt_auto_boot", 0, token)
server.modify_image(iid, "virt_cpus", 1, token)
server.modify_image(iid, "virt_file_size", 5, token)
server.modify_image(iid, "virt_bridge", "virbr0", token)
server.modify_image(iid, "virt_path", "VolGroup01", token)
server.modify_image(iid, "virt_ram", 1024, token)
server.modify_image(iid, "virt_type", "xenpv", token)
server.modify_image(iid, "comment", "...", token)
server.save_image(iid, token)
api.deserialize()
assert api.find_image("image1") != None
# FIXME: add some checks on object contents
# FIXME: repo adds
rid = server.new_repo(token)
server.modify_repo(rid, "name", "repo1", token)
server.modify_repo(rid, "arch", "x86_64", token)
server.modify_repo(rid, "mirror", "http://example.org/foo/x86_64", token)
server.modify_repo(rid, "keep_updated", True, token)
server.modify_repo(rid, "priority", "50", token)
server.modify_repo(rid, "rpm_list", [], token)
server.modify_repo(rid, "createrepo_flags", "--verbose", token)
server.modify_repo(rid, "yumopts", {}, token)
server.modify_repo(rid, "owners", [ "slash", "axl" ], token)
server.modify_repo(rid, "mirror_locally", True, token)
server.modify_repo(rid, "environment", {}, token)
server.modify_repo(rid, "comment", "...", token)
server.save_repo(rid, token)
api.deserialize()
assert api.find_repo("repo1") != None
# FIXME: add some checks on object contents
# test handle lookup
did = server.get_distro_handle("distro1", token)
assert did != None
rid = server.get_repo_handle("repo1", token)
assert rid != None
iid = server.get_image_handle("image1", token)
assert iid != None
# test renames
rc = server.rename_distro(did, "distro2", token)
assert rc == True
# object has changed due to parent rename, get a new handle
pid = server.get_profile_handle("profile1", token)
assert pid != None
rc = server.rename_profile(pid, "profile2", token)
assert rc == True
# object has changed due to parent rename, get a new handle
sid = server.get_system_handle("system1", token)
assert sid != None
rc = server.rename_system(sid, "system2", token)
assert rc == True
rc = server.rename_repo(rid, "repo2", token)
assert rc == True
rc = server.rename_image(iid, "image2", token)
assert rc == True
    # FIXME: make the following code unnecessary
api.clear()
api.deserialize()
assert api.find_distro("distro2") != None
assert api.find_profile("profile2") != None
assert api.find_repo("repo2") != None
assert api.find_image("image2") != None
assert api.find_system("system2") != None
# BOOKMARK: currently here in terms of test testing.
for d in api.distros():
print "FOUND DISTRO: %s" % d.name
assert api.find_distro("distro1") == None
assert api.find_profile("profile1") == None
assert api.find_repo("repo1") == None
assert api.find_image("image1") == None
assert api.find_system("system1") == None
did = server.get_distro_handle("distro2", token)
assert did != None
pid = server.get_profile_handle("profile2", token)
assert pid != None
rid = server.get_repo_handle("repo2", token)
assert rid != None
sid = server.get_system_handle("system2", token)
assert sid != None
iid = server.get_image_handle("image2", token)
assert iid != None
# test copies
server.copy_distro(did, "distro1", token)
server.copy_profile(pid, "profile1", token)
server.copy_repo(rid, "repo1", token)
server.copy_image(iid, "image1", token)
server.copy_system(sid, "system1", token)
api.deserialize()
assert api.find_distro("distro2") != None
assert api.find_profile("profile2") != None
assert api.find_repo("repo2") != None
assert api.find_image("image2") != None
assert api.find_system("system2") != None
assert api.find_distro("distro1") != None
assert api.find_profile("profile1") != None
assert api.find_repo("repo1") != None
assert api.find_image("image1") != None
assert api.find_system("system1") != None
assert server.last_modified_time() > 0
print server.get_distros_since(2)
assert len(server.get_distros_since(2)) > 0
assert len(server.get_profiles_since(2)) > 0
assert len(server.get_systems_since(2)) > 0
assert len(server.get_images_since(2)) > 0
assert len(server.get_repos_since(2)) > 0
assert len(server.get_distros_since(2)) > 0
now = time.time()
the_future = time.time() + 99999
assert len(server.get_distros_since(the_future)) == 0
# it would be cleaner to do this from the distro down
# and the server.update calls would then be unneeded.
server.remove_system("system1", token)
server.update()
server.remove_profile("profile1", token)
server.update()
server.remove_distro("distro1", token)
server.remove_repo("repo1", token)
server.remove_image("image1", token)
server.remove_system("system2", token)
# again, calls are needed because we're deleting in the wrong
# order. A fix is probably warranted for this.
server.update()
server.remove_profile("profile2", token)
server.update()
server.remove_distro("distro2", token)
server.remove_repo("repo2", token)
server.remove_image("image2", token)
# have to update the API as it has changed
api.update()
d1 = api.find_distro("distro1")
assert d1 is None
assert api.find_profile("profile1") is None
assert api.find_repo("repo1") is None
assert api.find_image("image1") is None
assert api.find_system("system1") is None
for x in api.distros():
print "DISTRO REMAINING: %s" % x.name
assert api.find_distro("distro2") is None
assert api.find_profile("profile2") is None
assert api.find_repo("repo2") is None
assert api.find_image("image2") is None
assert api.find_system("system2") is None
# FIXME: should not need cleanup as we've done it above
_test_remove_objects()
| gpl-2.0 | 8,993,004,671,744,371,000 | 38.306635 | 181 | 0.599856 | false |
matslindh/codingchallenges | knowit2019/16.py | 1 | 1520 |
def fjordify(f):
lines = [line.strip() for line in open(f).readlines()]
width = len(lines[0])
fjord = {
'map': [],
'boat': None,
}
for y, line in enumerate(lines):
row = [' '] * width
for x in range(0, len(line)):
if line[x] == '#':
row[x] = '#'
elif line[x] == 'B':
row[x] = 'B'
fjord['boat'] = (x, y)
fjord['map'].append(row)
return fjord
def navigate(fjord):
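    # Trace the boat's path one column at a time: it moves diagonally,
    # bouncing between the fjord walls ('#').  A bounce happens when a wall
    # sits three rows ahead in the current direction of travel; "changed"
    # counts those direction changes, so changed + 1 is the number of
    # straight segments drawn before the boat leaves the map.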
x, y = fjord['boat']
d = 'ne'
changed = 0
while True:
x += 1
if x == len(fjord['map'][0]):
break
if d == 'ne':
y -= 1
elif d == 'se':
y += 1
fjord['map'][y][x] = '/' if d == 'ne' else '\\'
if (d == 'ne' and fjord['map'][y-3][x] == '#') or \
(d == 'se' and fjord['map'][y+3][x] == '#'):
changed += 1
if d == 'ne':
y -= 1
d = 'se'
else:
d = 'ne'
y += 1
return changed + 1
def print_map(fjord):
print("\n")
for row in fjord['map']:
print(''.join(row))
def test_fjordify():
fjord = fjordify('input/fjord.test.txt')
assert len(fjord['map']) == 11
assert len(fjord['map'][0]) == 20
assert fjord['boat'] == (1, 8)
result = navigate(fjord)
assert 5 == result
if __name__ == '__main__':
fjord = fjordify('input/fjord.txt')
print(navigate(fjord))
| mit | -2,061,431,919,864,742,100 | 19 | 59 | 0.404605 | false |
capitalone/cloud-custodian | tests/test_cwa.py | 1 | 1155 | # Copyright 2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class AlarmTest(BaseTest):
def test_delete(self):
alarm_name = "c7n-test-alarm-delete"
factory = self.replay_flight_data("test_alarm_delete")
client = factory().client("cloudwatch")
client.put_metric_alarm(
AlarmName=alarm_name,
MetricName="CPUUtilization",
Namespace="AWS/EC2",
Statistic="Average",
Period=3600,
EvaluationPeriods=5,
Threshold=10,
ComparisonOperator="GreaterThanThreshold",
)
p = self.load_policy(
{
"name": "delete-alarm",
"resource": "alarm",
"filters": [{"AlarmName": alarm_name}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"], []
)
| apache-2.0 | -5,600,428,226,361,943,000 | 29.394737 | 79 | 0.550649 | false |
streed/PyEndicia | endicia/builders/ChangePassPhraseXmlBuilder.py | 1 | 1454 | from EndiciaXmlBuilder import EndiciaXmlBuilder
from EndiciaXmlBuilder import ValueToLongError
from lxml.builder import E
class ChangePassPhraseXmlBuilder( EndiciaXmlBuilder ):
xml = {}
def __init__( self ):
EndiciaXmlBuilder.__init__( self )
def setPartnerID( self, __id ):
if len( __id ) <= 50:
self.xml["RequesterID"] = __id
else:
raise ValueToLongError( "PartnerID", str( __id ) )
def setRequestID( self, __id ):
if len( __id ) <= 50:
self.xml["RequestID"] = __id
else:
raise ValueToLongError( "RequestID", str( __id ) )
def setAccountID( self, __id ):
if len( __id ) <= 6:
self.xml["AccountID"] = __id
else:
raise ValueToLongError( "AccountID", str( __id ) )
def setPassPhrase( self, passPhrase ):
if len( passPhrase ) <= 64:
self.xml["PassPhrase"] = passPhrase
else:
raise ValueToLongError( "PassPhrase", str( passPhrase ) )
def setNewPassPhrase( self, newPassPhrase ):
if len( newPassPhrase ) <= 64:
self.xml["NewPassPhrase"] = newPassPhrase
else:
raise ValueToLongError( "NewPassPhrase", str( newPassPhrase ) )
def to_xml( self ):
self.xmlString = (
E.ChangePassPhraseRequest(
E.RequesterID( self.xml["RequesterID"] ),
E.RequestID( self.xml["RequestID"] ),
E.CertifiedIntermediary(
E.AccountID( self.xml["AccountID"] ),
E.PassPhrase( self.xml["PassPhrase"] )
),
E.NewPassPhrase( self.xml["NewPassPhrase"] )
)
)
return self.xmlString
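# Illustrative usage (assumed, not part of the original module):
#   builder = ChangePassPhraseXmlBuilder()
#   builder.setPartnerID("lxxx")
#   builder.setRequestID("12345")
#   builder.setAccountID("123456")
#   builder.setPassPhrase("old pass phrase")
#   builder.setNewPassPhrase("new pass phrase")
#   xml = builder.to_xml()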
| gpl-3.0 | -5,050,795,239,346,306,000 | 25.436364 | 66 | 0.658184 | false |
rafaelolg/visiondataset | visiondataset/datasets/util.py | 1 | 1040 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
def base_name(filename):
"""
return the string filename without extensions nor directory path
>>> base_name('asdf.tar.gz')
'asdf'
>>> base_name('/root/ver_strange.dir/asdf.tar.gz')
'asdf'
    >>> base_name(r'c:\Windows With Space\asdf.tar.gz')
'asdf'
"""
s = re.split(r'[\\|/]', filename)[-1]
s = re.split(r'\.', s)[0]
return s
def extension_name(filename):
"""
return the extension of the file
>>> extension_name('asdf.tar.gz')
'tar.gz'
>>> extension_name('/root/ver_strange.dir/asdf.tar.gz')
'tar.gz'
>>> extension_name(r'c:\Windows With Spaces\asdf.tar.gz')
'tar.gz'
"""
s = re.split(r'[\\|/]', filename)[-1]
m = re.search(r'((\.\w\w?\w?)+)',s)
if m:
s = m.group(0)[1:]
else:
s = ''
return s
def listfy(e):
"""
Make sure e is inside a list. If e is a list returns e.
"""
if isinstance(e, list):
return e
else:
return [e]
| gpl-3.0 | 8,852,463,943,848,875,000 | 20.666667 | 68 | 0.528846 | false |
stvoutsin/pyrothorn | pyrothorn/pyroquery/atpy/votable.py | 1 | 9010 | import os
from distutils import version
import numpy as np
import warnings
from exceptions import TableException
import atpy
from helpers import smart_dtype
from decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
vo_minimum_version = version.LooseVersion('0.3')
try:
from vo.table import parse
from vo.tree import VOTableFile, Resource, Table, Field
vo_installed = True
except:
vo_installed = False
def _check_vo_installed():
if not vo_installed:
raise Exception("Cannot read/write VO table files - vo " + \
vo_minimum_version.vstring + " or later required")
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "boolean"
type_dict[np.uint8] = "unsignedByte"
type_dict[np.int16] = "short"
type_dict[np.int32] = "int"
type_dict[np.int64] = "long"
type_dict[np.float32] = "float"
type_dict[np.float64] = "double"
type_dict[np.str] = "char"
type_dict[np.string_] = "char"
type_dict[str] = "char"
def _list_tables(filename, pedantic=False):
votable = parse(filename, pedantic=pedantic)
tables = {}
for i, table in enumerate(votable.iter_tables()):
tables[i] = table.name
return tables
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, pedantic=False, tid=-1, verbose=True):
'''
Read a table from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the table from
Optional Keyword Arguments:
*tid*: [ integer ]
The ID of the table to read from the VO file (this is
only required if there are more than one table in the VO file)
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
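    Example (illustrative sketch; assumes the vo package is installed and
    that a local file 'catalog.vot' exists):
        t = atpy.Table()
        read(t, 'catalog.vot', tid=0)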
'''
_check_vo_installed()
self.reset()
# If no table is requested, check that there is only one table
if tid==-1:
tables = _list_tables(filename, pedantic=pedantic)
if len(tables) == 1:
tid = 0
elif len(tables) == 0:
raise Exception("There are no tables present in this file")
else:
raise TableException(tables, 'tid')
votable = parse(filename, pedantic=pedantic)
for id, table in enumerate(votable.iter_tables()):
if id==tid:
break
if table.ID:
self.table_name = str(table.ID)
elif table.name:
self.table_name = str(table.name)
for field in table.fields:
if type(field.name) == str:
colname = field.name
else:
if type(field._ID) == str:
colname = field._ID
else:
raise Exception("Error reading in the VO table: no name or ID for field")
data = table.array[colname]
if len(data) > 0 and data.ndim == 1 and not np.all([np.isscalar(x) for x in data]):
warnings.warn("VO Variable length vector column detected (%s) - converting to string" % colname)
data = np.array([str(x) for x in data])
if self._masked:
self.add_column(colname, data, \
unit=field.unit, mask=table.mask[colname])
else:
self.add_column(colname, data, \
unit=field.unit)
def _to_table(self, VOTable):
'''
Return the current table as a VOT object
'''
table = Table(VOTable)
# Define some fields
n_rows = len(self)
fields = []
for i, name in enumerate(self.names):
data = self.data[name]
unit = self.columns[name].unit
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
if data.ndim > 1:
arraysize = str(data.shape[1])
else:
arraysize = None
if column_type == np.string_:
arraysize = "1024"
if column_type in type_dict:
datatype = type_dict[column_type]
elif column_type == np.int8:
warnings.warn("int8 unsupported - converting to int16")
datatype = type_dict[np.int16]
elif column_type == np.uint16:
warnings.warn("uint16 unsupported - converting to int32")
datatype = type_dict[np.int32]
elif column_type == np.uint32:
warnings.warn("uint32 unsupported - converting to int64")
datatype = type_dict[np.int64]
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.float32:
precision = 'F9'
elif column_type == np.float64:
precision = 'F17'
else:
precision = None
fields.append(Field(VOTable, ID="col" + str(i), name=name, \
datatype=datatype, unit=unit, arraysize=arraysize, \
precision=precision))
table.fields.extend(fields)
table.create_arrays(n_rows)
# Character columns are stored as object columns in the VOTable
# instance. Leaving the type as string should work, but causes
# a segmentation fault on MacOS X with Python 2.6 64-bit so
# we force the conversion to object type columns.
for name in self.names:
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
# Add data to the table
# At the moment, null values in VO table are dealt with via a
# 'mask' record array
if column_type == np.string_:
table.array[name] = self.data[name]
if self._masked:
table.mask[name] = self.data[name].mask.astype(np.object_)
else:
table.mask[name] = (self.data[name] == \
self.columns[name].null).astype(np.object_)
else:
table.array[name] = self.data[name]
if self._masked:
table.mask[name] = self.data[name].mask
else:
table.mask[name] = self.data[name] == \
self.columns[name].null
table.name = self.table_name
return table
def write(self, filename, votype='ascii', overwrite=False):
'''
Write the table to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the table to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the table as ASCII or binary
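    Example (illustrative sketch; assumes `t` is an existing atpy.Table):
        write(t, 'catalog_out.vot', votype='binary')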
'''
_check_vo_installed()
#if os.path.exists(filename):
# if overwrite:
# os.remove(filename)
# else:
# raise Exception("File exists: %s" % filename)
VOTable = VOTableFile()
resource = Resource()
VOTable.resources.append(resource)
resource.tables.append(_to_table(self, VOTable))
    if votype == 'binary':
VOTable.get_first_table().format = 'binary'
VOTable.set_all_tables_format('binary')
VOTable.to_xml(filename)
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the tables from
Optional Keyword Arguments:
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
'''
_check_vo_installed()
self.reset()
for tid in _list_tables(filename, pedantic=pedantic):
t = atpy.Table()
read(t, filename, tid=tid, verbose=verbose, pedantic=pedantic)
self.append(t)
def write_set(self, filename, votype='ascii', overwrite=False):
'''
Write all tables to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the tables to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the tables as ASCII or binary tables
'''
_check_vo_installed()
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
VOTable = VOTableFile()
resource = Resource()
VOTable.resources.append(resource)
for table_key in self.tables:
resource.tables.append(_to_table(self.tables[table_key], VOTable))
    if votype == 'binary':
VOTable.get_first_table().format = 'binary'
VOTable.set_all_tables_format('binary')
VOTable.to_xml(filename)
| gpl-3.0 | -2,821,985,157,562,026,000 | 26.723077 | 108 | 0.595117 | false |
hecate-xw/Miscellaneous | TsinghuaCSLT/audioEmbedded/Mission/develop/MCLT.py | 1 | 4754 | #!usr/bin/env python
#coding=utf-8
import numpy as np
import math
import disposeWav
pi = math.pi
def MCLT(x):
'''
    Compute the MCLT directly from its definition; slow, but useful as a reference.
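    The loop below evaluates the analysis equation

        X(k) = sqrt(2/M) * sum_{n=0}^{2M-1} x(n) * h(n)
               * [cos((2n+1+M)(2k+1)*pi/(4M)) + 1j*sin((2n+1+M)(2k+1)*pi/(4M))]

    for k = 0, ..., M-1, with the window h(n) = -sin((2n+1)*pi/(4M)) mirrored
    so that h(2M-1-n) = h(n).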
'''
M = len(x)/2
#M = 8192
h = (2*M)*[0]
for n in range(M):
h[n] = -math.sin((2.0*n+1.0)*math.pi/(4.0*M))
h[2*M-n-1] = h[n]
X = []
for k in range(M):
X.append(0)
for n in range(2*M):
pc = math.sqrt(2.0/M) * h[n] * math.cos( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
ps = math.sqrt(2.0/M) * h[n] * math.sin( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
p = pc + 1j*ps
X[k] += x[n]*p
return X
def IMCLT(X):
    '''
    Compute the inverse MCLT directly from its definition; slow, but useful as a reference.
    '''
    M = len(X)
    #M = 8192
    h = (2*M)*[0]
    for n in range(M):
        h[n] = -math.sin((2.0*n+1.0)*math.pi/(4.0*M))
        h[2*M-n-1] = h[n]
    y = []
    Bc = 1.0/2
    Bs = 1.0-Bc  # any weights with Bc + Bs = 1 work
    for n in range(2*M):
        y.append(0)
        for k in range(M):
            pc = math.sqrt(2.0/M) * h[n] * math.cos( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
            ps = math.sqrt(2.0/M) * h[n] * math.sin( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
            #p = pc + 1j*ps
            y[n] += (Bc*X[k].real*pc + Bs*X[k].imag*ps)
    return y
def W(M,r): #Local function: complex exponential
e = math.e
w = e ** (-1j*2.0*pi*r/M)
return w
def FastMCLT(audio):
'''
    Fast MCLT using the FFT-based algorithm of Malvar's report MSR-TR-2005-02
    (http://research.microsoft.com/pubs/70128/tr-2005-02.pdf). Prefer this
    over the direct implementation above.
'''
# determine subbands, M
L = len(audio)
M = L/2
# normalized FFT of input
U = []
for f in np.fft.fft(audio):
U.append(math.sqrt(1/(2.0*M)) * f)
# compute modulation function
c = []
for i in range(M+1):
c.append( W(8.0,2*i+1.0) * W(4.0*M,i) )
# modulate U into V
V = []
for i in range(M+1):
V.append( c[i] * U[i])
X = []
# compute MCLT coefficients
for each in range(M):
X.append( 1j * V[each] + V[each+1] )
return X
def FastIMCLT(X):
'''
    Fast inverse MCLT using the FFT-based algorithm of Malvar's report
    MSR-TR-2005-02 (http://research.microsoft.com/pubs/70128/tr-2005-02.pdf).
    Prefer this over the direct implementation above.
'''
# determine subbands, M
M = len(X)
# compute modulation function
c = []
for i in range(M-1):
k = i+1
c.append( W(8,2*k+1) * W(4*M,k) )
# allocate vector Y
Y = (2*M)*[0]
# map X into Y
for j in range(M-1):
i = j+1
Y[i] = 1.0/4 * c[j].conjugate() * (X[j] - 1j * X[j+1])
# determine first and last Y values
Y[0] = math.sqrt(1.0/8) * (X[0].real + X[0].imag)
Y[M] = -math.sqrt(1.0/8) * (X[M-1].real + X[M-1].imag)
# complete vector Y via conjugate symmetry property for the
# FFT of a real vector (not needed if the inverse FFT
# routine is a "real FFT", which should take only as input
# only M+1 coefficients)
for i in range(M-1):
Y[i+M+1] = Y[M-i-1].conjugate()
# inverse normalized FFT to compute the output vector
# output of ifft should have zero imaginary part; but
# by calling real(.) we remove the small rounding noise
# that's present in the imaginary part
yt = []
for i in Y:
yt.append( math.sqrt(2*M) * i )
y = (np.fft.ifft(yt)).real
return y
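# Note: a single IMCLT/FastIMCLT call only reconstructs one 2M-sample block.
# The time-domain signal is recovered by overlapping consecutive blocks by 50%
# (M samples) and adding them, which is what test() below demonstrates for
# both the direct and the fast implementations.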
def test():
    # Round-trip demo: analyze overlapping blocks, invert them and rebuild the
    # signal by 50% overlap-add, for both the direct and the fast transforms.
    nchannels, sampwidth, framerate, nframes, wave_data, time = disposeWav.read_wave_data("../wavFile/test1.wav")
    x = range(4000,8000,2)
    BL = 32
    B = len(x)*2/BL - 1
    for i in range(B-1):
        if i == 0:
            print x[:BL]
            continue
        X_prev = MCLT(x[(i-1)*BL/2:(i+1)*BL/2])
        X_curr = MCLT(x[(i)*BL/2:(i+2)*BL/2])
        X_next = MCLT(x[(i+1)*BL/2:(i+3)*BL/2])
        X = X_curr
        y = IMCLT(X)  # transform the complex coefficients (after any data embedding) back to the real domain
        y_prev = IMCLT(X_prev)[BL/2:]
        y_next = IMCLT(X_next)[:BL/2]
        y = np.array(y_prev + y_next) + np.array(y)
        print y[BL/2:]
        print x[-BL/2:]
    print "\n\n\n"
    for i in range(B-1):
        if i == 0:
            print x[:BL]
            continue
        X_prev = FastMCLT(x[(i-1)*BL/2:(i+1)*BL/2])
        X_curr = FastMCLT(x[(i)*BL/2:(i+2)*BL/2])
        X_next = FastMCLT(x[(i+1)*BL/2:(i+3)*BL/2])
        X = X_curr
        y = FastIMCLT(X)  # transform the complex coefficients (after any data embedding) back to the real domain
        y_prev = FastIMCLT(X_prev).tolist()[BL/2:]
        y_next = FastIMCLT(X_next).tolist()[:BL/2]
        y = np.array(y_prev + y_next) + y
        print y[BL/2:]
        print x[-BL/2:]
if __name__ == "__main__":
test()
| mit | 5,250,337,636,828,632,000 | 26.174419 | 113 | 0.482028 | false |
socolofs/tamoc | bin/dbm/gas_bubbles.py | 1 | 3178 | """
Gas fluid particles
===================
Use the ``TAMOC`` ``DBM`` to specify a natural gas bubble that can dissolve
and calculate all of its properties in deepwater conditions.
In particular, this script demonstrates the methods:
* `dbm.FluidParticle.mass_frac`
* `dbm.FluidParticle.density`
* `dbm.FluidParticle.masses_by_diameter`
* `dbm.FluidParticle.diameter`
* `dbm.FluidParticle.particle_shape`
* `dbm.FluidParticle.slip_velocity`
* `dbm.FluidParticle.surface_area`
* `dbm.FluidParticle.mass_transfer`
* `dbm.FluidParticle.heat_transfer`
* `dbm.FluidParticle.solubility`
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import dbm
from tamoc import seawater
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Define the composition of a natural gas
composition = ['methane', 'ethane', 'propane']
mol_frac = np.array([0.90, 0.07, 0.03])
# Specify that we are interested in properties for the gas phase
fl_type = 0
# Create a DBM FluidParticle object for this natural gas assuming zeros
# for all the binary interaction coefficients
delta = np.zeros((3,3))
ng = dbm.FluidParticle(composition, fl_type, delta)
# Specify some generic deepwater ocean conditions
P = 150.0 * 1.0e5
Ta = 273.15 + 4.0
Sa = 34.5
# Echo the ambient conditions to the screen
print('\nAmbient conditions: \n')
print(' P = %g (Pa)' % P)
print(' T = %g (K)' % Ta)
print(' S = %g (psu)' % Sa)
print(' rho_sw = %g (kg/m^3)' % (seawater.density(Ta, Sa, P)))
# Get the general properties of the gas
mf = ng.mass_frac(mol_frac)
T = 273.15 + 60.
print('\nBasic properties of gas: \n')
print(' T = %g (K)' % T)
print(' mol_frac = [' + ', '.join('%g' % mol_frac[i] for i in
range(ng.nc)) + '] (--)')
print(' mass_frac = [' + ', '.join('%g' % mf[i] for i in
range(ng.nc)) + '] (--)')
print(' rho_p = %g (kg/m^3) at %g (K) and %g (Pa)' %
(ng.density(mf, T, P), T, P))
# Get the masses in a 1.0 cm effective diameter bubble
de = 0.01
m = ng.masses_by_diameter(de, T, P, mol_frac)
# Echo the properties of the bubble to the screen
print('\nBasic bubbles properties: \n')
print(' de = %g (m)' % (ng.diameter(m, T, P)))
shape, de, rho_p, rho, mu_p, mu, sigma = ng.particle_shape(m, T, P, Sa,
Ta)
print(' shape = %g (1: Sphere, 2: Ellipsoid, 3: Spherical Cap)'
% shape)
print(' us = %g (m/s)' % (ng.slip_velocity(m, T, P, Sa, Ta)))
print(' A = %g (m^2)' % (ng.surface_area(m, T, P, Sa, Ta)))
beta = ng.mass_transfer(m, T, P, Sa, Ta)
print(' beta = [' + ', '.join('%g' % beta[i] for i in
range(ng.nc)) + '] (m/s)')
print(' beta_T = %g (m/s)' % (ng.heat_transfer(m, T, P, Sa, Ta)))
Cs = ng.solubility(m, T, P, Sa)
print(' Cs = [' + ', '.join('%g' % Cs[i] for i in
range(ng.nc)) + '] (kg/m^3)')
| mit | 8,101,110,010,766,100,000 | 33.923077 | 76 | 0.568911 | false |