# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
from tacker.tests import base
from tacker.tests import post_mortem_debug
class TestTesttoolsExceptionHandler(base.BaseTestCase):
def test_exception_handler(self):
try:
self.assertTrue(False)
except Exception:
exc_info = sys.exc_info()
with mock.patch('traceback.print_exception') as mock_print_exception:
with mock.patch('pdb.post_mortem') as mock_post_mortem:
with mock.patch.object(post_mortem_debug,
'get_ignored_traceback',
return_value=mock.Mock()):
post_mortem_debug.exception_handler(exc_info)
# traceback will become post_mortem_debug.FilteredTraceback
filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY)
mock_print_exception.assert_called_once_with(*filtered_exc_info)
mock_post_mortem.assert_called_once_with(mock.ANY)
class TestFilteredTraceback(base.BaseTestCase):
def test_filter_traceback(self):
tb1 = mock.Mock()
tb2 = mock.Mock()
tb1.tb_next = tb2
tb2.tb_next = None
ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2)
for attr in ['lasti', 'lineno', 'frame']:
attr_name = 'tb_%s' % attr
self.assertEqual(getattr(tb1, attr_name, None),
getattr(ftb1, attr_name, None))
self.assertIsNone(ftb1.tb_next)
class TestGetIgnoredTraceback(base.BaseTestCase):
def _test_get_ignored_traceback(self, ignored_bit_array, expected):
root_tb = mock.Mock()
tb = root_tb
tracebacks = [tb]
for x in range(len(ignored_bit_array) - 1):
tb.tb_next = mock.Mock()
tb = tb.tb_next
tracebacks.append(tb)
tb.tb_next = None
tb = root_tb
for ignored in ignored_bit_array:
if ignored:
tb.tb_frame.f_globals = ['__unittest']
else:
tb.tb_frame.f_globals = []
tb = tb.tb_next
actual = post_mortem_debug.get_ignored_traceback(root_tb)
if expected is not None:
expected = tracebacks[expected]
self.assertEqual(expected, actual)
def test_no_ignored_tracebacks(self):
self._test_get_ignored_traceback([0, 0, 0], None)
def test_single_member_trailing_chain(self):
self._test_get_ignored_traceback([0, 0, 1], 2)
def test_two_member_trailing_chain(self):
self._test_get_ignored_traceback([0, 1, 1], 1)
def test_first_traceback_ignored(self):
self._test_get_ignored_traceback([1, 0, 0], None)
def test_middle_traceback_ignored(self):
self._test_get_ignored_traceback([0, 1, 0], None)
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Class to monitor a MongoDB server on a background thread."""
import weakref
from bson.codec_options import DEFAULT_CODEC_OPTIONS
from pymongo import common, helpers, message, periodic_executor
from pymongo.server_type import SERVER_TYPE
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
from pymongo.read_preferences import MovingAverage
from pymongo.server_description import ServerDescription
class Monitor(object):
def __init__(
self,
server_description,
topology,
pool,
topology_settings):
"""Class to monitor a MongoDB server on a background thread.
Pass an initial ServerDescription, a Topology, a Pool, and
TopologySettings.
The Topology is weakly referenced. The Pool must be exclusive to this
Monitor.
"""
self._server_description = server_description
self._pool = pool
self._settings = topology_settings
self._avg_round_trip_time = MovingAverage()
self._listeners = self._settings._pool_options.event_listeners
pub = self._listeners is not None
self._publish = pub and self._listeners.enabled_for_server_heartbeat
# We strongly reference the executor and it weakly references us via
# this closure. When the monitor is freed, stop the executor soon.
def target():
monitor = self_ref()
if monitor is None:
return False # Stop the executor.
Monitor._run(monitor)
return True
executor = periodic_executor.PeriodicExecutor(
interval=self._settings.heartbeat_frequency,
min_interval=common.MIN_HEARTBEAT_INTERVAL,
target=target,
name="pymongo_server_monitor_thread")
self._executor = executor
# Avoid cycles. When self or topology is freed, stop executor soon.
self_ref = weakref.ref(self, executor.close)
self._topology = weakref.proxy(topology, executor.close)
def open(self):
"""Start monitoring, or restart after a fork.
Multiple calls have no effect.
"""
self._executor.open()
def close(self):
"""Close and stop monitoring.
open() restarts the monitor after closing.
"""
self._executor.close()
# Increment the pool_id and maybe close the socket. If the executor
# thread has the socket checked out, it will be closed when checked in.
self._pool.reset()
def join(self, timeout=None):
self._executor.join(timeout)
def request_check(self):
"""If the monitor is sleeping, wake and check the server soon."""
self._executor.wake()
def _run(self):
try:
self._server_description = self._check_with_retry()
self._topology.on_change(self._server_description)
except ReferenceError:
# Topology was garbage-collected.
self.close()
def _check_with_retry(self):
"""Call ismaster once or twice. Reset server's pool on error.
Returns a ServerDescription.
"""
# According to the spec, if an ismaster call fails we reset the
# server's pool. If a server was once connected, change its type
# to Unknown only after retrying once.
address = self._server_description.address
retry = self._server_description.server_type != SERVER_TYPE.Unknown
start = _time()
try:
return self._check_once()
except ReferenceError:
raise
except Exception as error:
error_time = _time() - start
self._topology.reset_pool(address)
default = ServerDescription(address, error=error)
if not retry:
if self._publish:
self._listeners.publish_server_heartbeat_failed(
address, error_time, error)
self._avg_round_trip_time.reset()
# Server type defaults to Unknown.
return default
# Try a second and final time. If it fails return original error.
start = _time()
try:
return self._check_once()
except ReferenceError:
raise
except Exception as error:
error_time = _time() - start
if self._publish:
self._listeners.publish_server_heartbeat_failed(
address, error_time, error)
self._avg_round_trip_time.reset()
return default
def _check_once(self):
"""A single attempt to call ismaster.
Returns a ServerDescription, or raises an exception.
"""
address = self._server_description.address
if self._publish:
self._listeners.publish_server_heartbeat_started(address)
with self._pool.get_socket({}) as sock_info:
response, round_trip_time = self._check_with_socket(sock_info)
self._avg_round_trip_time.add_sample(round_trip_time)
sd = ServerDescription(
address=address,
ismaster=response,
round_trip_time=self._avg_round_trip_time.get())
if self._publish:
self._listeners.publish_server_heartbeat_succeeded(
address, round_trip_time, response)
return sd
def _check_with_socket(self, sock_info):
"""Return (IsMaster, round_trip_time).
Can raise ConnectionFailure or OperationFailure.
"""
start = _time()
request_id, msg, max_doc_size = message.query(
0, 'admin.$cmd', 0, -1, {'ismaster': 1},
None, DEFAULT_CODEC_OPTIONS)
# TODO: use sock_info.command()
sock_info.send_message(msg, max_doc_size)
raw_response = sock_info.receive_message(1, request_id)
result = helpers._unpack_response(raw_response)
return IsMaster(result['data'][0]), _time() - start
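# Hedged standalone sketch, not part of pymongo: the weakref/closure pattern
# used in Monitor.__init__ above. The owner strongly references its worker, the
# worker only holds a weak reference back through the target closure, so
# collecting the owner stops the loop. _Worker/_Owner and the 0.01s interval
# are illustrative assumptions, not pymongo names.
import threading
import weakref


class _Worker(object):
    def __init__(self, target, interval=0.01):
        self._target = target
        self._interval = interval
        self._stopped = threading.Event()
        self._thread = threading.Thread(target=self._run)
        self._thread.daemon = True

    def open(self):
        self._thread.start()

    def close(self, dummy=None):
        # Extra argument so this can double as a weakref callback.
        self._stopped.set()

    def _run(self):
        # Loop until close() is called or the target asks us to stop.
        while not self._stopped.wait(self._interval):
            if not self._target():
                break


class _Owner(object):
    def __init__(self):
        def target():
            owner = self_ref()
            if owner is None:
                return False  # Owner was garbage-collected: stop the worker.
            owner.tick()
            return True
        self._worker = _Worker(target)
        # Weak back-reference; freeing the owner triggers _Worker.close.
        self_ref = weakref.ref(self, self._worker.close)
        self._worker.open()

    def tick(self):
        pass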
'''
Copyright (C) 2017 NIKO RUMMUKAINEN
niko.rummukainen@gmail.com
Created by Niko Rummukainen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
bl_info = {
"name": "Auto Smooth Extras",
"description": "Enhances mesh property normals Auto Smooth function with possibility mark Sharp edges, Bewel weights & Edge greasing by auto smooth angle",
"author": "Niko Rummukainen",
"version": (0, 4, 1),
"blender": (2, 78, 0),
"location": "View3D",
"warning": "This is an unstable version",
"wiki_url": "",
"category": "Mesh" }
import bpy
import traceback
from . import addon_updater_ops
#updater imports and setup
from .addon_updater import Updater as updater # for example
updater.user = "nikorummukainen"
updater.repo = "blender-addon-updater"
updater.current_version = bl_info["version"]
class ASEExtras(bpy.types.AddonPreferences):
bl_idname = __package__
# addon updater preferences
auto_check_update = bpy.props.BoolProperty(
name = "Auto-check for Update",
description = "If enabled, auto-check for updates using an interval",
default = False,
)
updater_intrval_months = bpy.props.IntProperty(
name='Months',
description = "Number of months between checking for updates",
default=0,
min=0
)
updater_intrval_days = bpy.props.IntProperty(
name='Days',
description = "Number of days between checking for updates",
default=7,
min=0,
)
updater_intrval_hours = bpy.props.IntProperty(
name='Hours',
description = "Number of hours between checking for updates",
default=0,
min=0,
max=23
)
updater_intrval_minutes = bpy.props.IntProperty(
name='Minutes',
description = "Number of minutes between checking for updates",
default=0,
min=0,
max=59
)
def draw(self, context):
layout = self.layout
# updater draw function
addon_updater_ops.update_settings_ui(self,context)
# load and reload submodules
##################################
import importlib
from . import developer_utils
importlib.reload(developer_utils)
modules = developer_utils.setup_addon_modules(__path__, __name__, "bpy" in locals())
def register():
# addon updater code and configurations
# in case of broken version, try to register the updater first
# so that users can revert back to a working version
addon_updater_ops.register(bl_info)
try: bpy.utils.register_module(__name__)
except: traceback.print_exc()
print("Registered {} with {} modules".format(bl_info["name"], len(modules)))
def unregister():
# addon updater unregister
addon_updater_ops.unregister()
try: bpy.utils.unregister_module(__name__)
except: traceback.print_exc()
print("Unregistered {}".format(bl_info["name"])) | 0.025814 |
# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import fixtures
from neutronclient.common import exceptions
from neutron.tests import base
def _safe_method(f):
@functools.wraps(f)
def delete(*args, **kwargs):
try:
return f(*args, **kwargs)
except exceptions.NotFound:
pass
return delete
class ClientFixture(fixtures.Fixture):
"""Manage and cleanup neutron resources."""
def __init__(self, client):
super(ClientFixture, self).__init__()
self.client = client
def _create_resource(self, resource_type, spec):
create = getattr(self.client, 'create_%s' % resource_type)
delete = getattr(self.client, 'delete_%s' % resource_type)
body = {resource_type: spec}
resp = create(body=body)
data = resp[resource_type]
self.addCleanup(_safe_method(delete), data['id'])
return data
def create_router(self, tenant_id, name=None, ha=False):
resource_type = 'router'
name = name or base.get_rand_name(prefix=resource_type)
spec = {'tenant_id': tenant_id, 'name': name, 'ha': ha}
return self._create_resource(resource_type, spec)
def create_network(self, tenant_id, name=None):
resource_type = 'network'
name = name or base.get_rand_name(prefix=resource_type)
spec = {'tenant_id': tenant_id, 'name': name}
return self._create_resource(resource_type, spec)
def create_subnet(self, tenant_id, network_id,
cidr, gateway_ip=None, ip_version=4,
name=None, enable_dhcp=True):
resource_type = 'subnet'
name = name or base.get_rand_name(prefix=resource_type)
spec = {'tenant_id': tenant_id, 'network_id': network_id, 'name': name,
'cidr': cidr, 'ip_version': ip_version,
'enable_dhcp': enable_dhcp}
if gateway_ip:
spec['gateway_ip'] = gateway_ip
return self._create_resource(resource_type, spec)
def create_port(self, tenant_id, network_id, hostname, qos_policy_id=None):
spec = {
'network_id': network_id,
'tenant_id': tenant_id,
'binding:host_id': hostname,
}
if qos_policy_id:
spec['qos_policy_id'] = qos_policy_id
return self._create_resource('port', spec)
def add_router_interface(self, router_id, subnet_id):
body = {'subnet_id': subnet_id}
self.client.add_interface_router(router=router_id, body=body)
self.addCleanup(_safe_method(self.client.remove_interface_router),
router=router_id, body=body)
def create_qos_policy(self, tenant_id, name, description, shared):
policy = self.client.create_qos_policy(
body={'policy': {'name': name,
'description': description,
'shared': shared,
'tenant_id': tenant_id}})
def detach_and_delete_policy():
qos_policy_id = policy['policy']['id']
ports_with_policy = self.client.list_ports(
qos_policy_id=qos_policy_id)['ports']
for port in ports_with_policy:
self.client.update_port(
port['id'],
body={'port': {'qos_policy_id': None}})
self.client.delete_qos_policy(qos_policy_id)
# NOTE: We'll need to add support for detaching from network once
# create_network() supports qos_policy_id.
self.addCleanup(_safe_method(detach_and_delete_policy))
return policy['policy']
def create_bandwidth_limit_rule(self, tenant_id, qos_policy_id, limit=None,
burst=None):
rule = {'tenant_id': tenant_id}
if limit:
rule['max_kbps'] = limit
if burst:
rule['max_burst_kbps'] = burst
rule = self.client.create_bandwidth_limit_rule(
policy=qos_policy_id,
body={'bandwidth_limit_rule': rule})
self.addCleanup(_safe_method(self.client.delete_bandwidth_limit_rule),
rule['bandwidth_limit_rule']['id'],
qos_policy_id)
return rule['bandwidth_limit_rule']
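# Hedged usage sketch, not part of the module above: exercising ClientFixture
# with a stub client so the create/cleanup flow is visible without a running
# Neutron. StubClient and the 'net-1' id are assumptions for illustration only.
class StubClient(object):
    def __init__(self):
        self.deleted = []

    def create_network(self, body):
        return {'network': dict(body['network'], id='net-1')}

    def delete_network(self, network_id):
        self.deleted.append(network_id)


def demo_client_fixture():
    client = StubClient()
    with ClientFixture(client) as neutron:
        net = neutron.create_network(tenant_id='tenant-1', name='demo-net')
        assert net['id'] == 'net-1'
    # Leaving the with-block runs the cleanups registered via addCleanup,
    # which delete the created resources (delete_network here).
    assert client.deleted == ['net-1']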
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
author: sanja7s
---------------
plot the distribution
"""
import os
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
from matplotlib import colors
from pylab import MaxNLocator
import pylab as pl
from mpl_toolkits.axes_grid import inset_locator
matplotlib.style.use('ggplot')
IN_DIR = "../../data/timelines"
os.chdir(IN_DIR)
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
grid = {'color' : 'gray',
'alpha' : 0.5,
'linestyle' : '-.'}
lines = {'color' : 'gray'}
#xticks = {'color' : 'gray'}
matplotlib.rc('font', **font)
matplotlib.rc('grid', **grid)
matplotlib.rc('lines', **lines)
#matplotlib.rc('ticks', **ticks)
def read_in_num_jobs_data(node):
f_in = 'node_' + node +'_plug.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, jobs_list, n6 = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
jobs = jobs_list.split(',')
if jobs_list == "":
distr[t] = 0
else:
distr[t] = len(jobs)
return distr
def read_in_rb_data(node):
f_in = 'node_' + node +'_rb.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, r, n, b, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
r = int(r)
b = int(b)
distr[t] = (r, b)
return distr
def plot_plug_timeline_v2(node):
print 'Plotting plug values'
d = read_in_plug_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
ts = pd.Series(values, index = X)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ts.plot(color = 'darkblue')
for tl in ax.get_yticklabels():
tl.set_color('darkblue')
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value', color='darkblue')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
ymin = 240
ymax = 280
if min(values) < 160:
ymin = min(values) - 10
if max(values) > 250:
ymax = max(values) + 10
plt.ylim(ymin, ymax)
#plt.savefig(cwd + '/multiple_v2/plug_only/plug_timeline_node_' + node + '_v2.png')
return fig, ax, plt
def plot_plug_and_num_jobs_timeline(node):
print 'Plotting num of jobs values'
d = read_in_num_jobs_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = d.values()
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax1, plt = plot_plug_timeline_v2(node)
ax2 = ax1.twinx()
ax2.scatter(X, values,
marker='s', color='red', s=7)
ax2.set_ylabel('# of jobs', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
cwd = os.getcwd()
print cwd
plt.savefig(cwd + '/lowest_norm_stdev/SandyBridge/num_jobs_and_plug_timeline_node_' + node + '_v2.png')
def plot_plug_and_rb_timeline(node):
print 'Plotting r b values'
d = read_in_rb_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > 0 else 0 for v in d.values()]
values2 = [v[1] if v[1] > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='tomato', s=3, label = 'r')
ax2.scatter(X, values2,
marker='s', color='sage', s=3, label = 'b')
ax2.set_ylabel('r and b values', color='sage')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('sage')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('rb_plug_timeline_node_' + node + '.png')
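# Standalone sketch (synthetic data, not the cluster CSVs read above) of the
# twin-axis pattern used in plot_plug_and_num_jobs_timeline: a time series on
# the left axis and an integer-valued scatter on a twinned right axis whose
# ticks are forced to whole numbers. The demo index and values are assumptions.
def demo_twin_axis_timeline():
    idx = pd.date_range('2015-01-01', periods=48, freq='H')
    plug = pd.Series(250 + (idx.hour % 12), index=idx)
    jobs = pd.Series(idx.hour % 4, index=idx)
    fig, ax1 = plt.subplots()
    plug.plot(ax=ax1, color='darkblue')
    ax1.set_ylabel('plug value', color='darkblue')
    ax2 = ax1.twinx()
    ax2.scatter(idx, jobs, marker='s', color='red', s=7)
    ax2.set_ylabel('# of jobs', color='red')
    ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
    fig.autofmt_xdate()
    return fig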
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Column Family."""
import datetime
from google.protobuf import duration_pb2
from gcloud._helpers import _total_seconds
from gcloud.bigtable._generated import (
table_pb2 as table_v2_pb2)
from gcloud.bigtable._generated import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
def _timedelta_to_duration_pb(timedelta_val):
"""Convert a Python timedelta object to a duration protobuf.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a duration of nanoseconds.
:type timedelta_val: :class:`datetime.timedelta`
:param timedelta_val: A timedelta object.
:rtype: :class:`google.protobuf.duration_pb2.Duration`
:returns: A duration object equivalent to the time delta.
"""
seconds_decimal = _total_seconds(timedelta_val)
# Truncate the parts other than the integer.
seconds = int(seconds_decimal)
if seconds_decimal < 0:
signed_micros = timedelta_val.microseconds - 10**6
else:
signed_micros = timedelta_val.microseconds
# Convert nanoseconds to microseconds.
nanos = 1000 * signed_micros
return duration_pb2.Duration(seconds=seconds, nanos=nanos)
def _duration_pb_to_timedelta(duration_pb):
"""Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a duration of nanoseconds.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
"""
return datetime.timedelta(
seconds=duration_pb.seconds,
microseconds=(duration_pb.nanos / 1000.0),
)
class GarbageCollectionRule(object):
"""Garbage collection rule for column families within a table.
Cells in the column family (within a table) fitting the rule will be
deleted during garbage collection.
.. note::
This class is a do-nothing base class for all GC rules.
.. note::
A string ``gc_expression`` can also be used with API requests, but
that value would be superceded by a ``gc_rule``. As a result, we
don't support that feature and instead support via native classes.
"""
def __ne__(self, other):
return not self.__eq__(other)
class MaxVersionsGCRule(GarbageCollectionRule):
"""Garbage collection limiting the number of versions of a cell.
:type max_num_versions: int
:param max_num_versions: The maximum number of versions
"""
def __init__(self, max_num_versions):
self.max_num_versions = max_num_versions
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.max_num_versions == self.max_num_versions
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions)
class MaxAgeGCRule(GarbageCollectionRule):
"""Garbage collection limiting the age of a cell.
:type max_age: :class:`datetime.timedelta`
:param max_age: The maximum age allowed for a cell in the table.
"""
def __init__(self, max_age):
self.max_age = max_age
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.max_age == self.max_age
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
max_age = _timedelta_to_duration_pb(self.max_age)
return table_v2_pb2.GcRule(max_age=max_age)
class GCRuleUnion(GarbageCollectionRule):
"""Union of garbage collection rules.
:type rules: list
:param rules: List of :class:`GarbageCollectionRule`.
"""
def __init__(self, rules):
self.rules = rules
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.rules == self.rules
def to_pb(self):
"""Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
union = table_v2_pb2.GcRule.Union(
rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(union=union)
class GCRuleIntersection(GarbageCollectionRule):
"""Intersection of garbage collection rules.
:type rules: list
:param rules: List of :class:`GarbageCollectionRule`.
"""
def __init__(self, rules):
self.rules = rules
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.rules == self.rules
def to_pb(self):
"""Converts the intersection into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
intersection = table_v2_pb2.GcRule.Intersection(
rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(intersection=intersection)
class ColumnFamily(object):
"""Representation of a Google Cloud Bigtable Column Family.
We can use a :class:`ColumnFamily` to:
* :meth:`create` itself
* :meth:`update` itself
* :meth:`delete` itself
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the column family.
:type gc_rule: :class:`GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
"""
def __init__(self, column_family_id, table, gc_rule=None):
self.column_family_id = column_family_id
self._table = table
self.gc_rule = gc_rule
@property
def name(self):
"""Column family name used in requests.
.. note::
This property will not change if ``column_family_id`` does not, but
the return value is not cached.
The table name is of the form
``"projects/../zones/../clusters/../tables/../columnFamilies/.."``
:rtype: str
:returns: The column family name.
"""
return self._table.name + '/columnFamilies/' + self.column_family_id
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.column_family_id == self.column_family_id and
other._table == self._table and
other.gc_rule == self.gc_rule)
def __ne__(self, other):
return not self.__eq__(other)
def to_pb(self):
"""Converts the column family to a protobuf.
:rtype: :class:`.table_v2_pb2.ColumnFamily`
:returns: The converted current object.
"""
if self.gc_rule is None:
return table_v2_pb2.ColumnFamily()
else:
return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb())
def create(self):
"""Create this column family."""
column_family = self.to_pb()
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
create=column_family,
)
client = self._table._instance._client
# We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def update(self):
"""Update this column family.
.. note::
Only the GC rule can be updated. By changing the column family ID,
you will simply be referring to a different column family.
"""
column_family = self.to_pb()
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
update=column_family)
client = self._table._instance._client
# We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def delete(self):
"""Delete this column family."""
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
drop=True)
client = self._table._instance._client
# We expect a `google.protobuf.empty_pb2.Empty`
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def _gc_rule_from_pb(gc_rule_pb):
"""Convert a protobuf GC rule to a native object.
:type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
:param gc_rule_pb: The GC rule to convert.
:rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
:returns: An instance of one of the native rules defined
in :module:`column_family` or :data:`None` if no values were
set on the protobuf passed in.
:raises: :class:`ValueError <exceptions.ValueError>` if the rule name
is unexpected.
"""
rule_name = gc_rule_pb.WhichOneof('rule')
if rule_name is None:
return None
if rule_name == 'max_num_versions':
return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
elif rule_name == 'max_age':
max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age)
return MaxAgeGCRule(max_age)
elif rule_name == 'union':
return GCRuleUnion([_gc_rule_from_pb(rule)
for rule in gc_rule_pb.union.rules])
elif rule_name == 'intersection':
rules = [_gc_rule_from_pb(rule)
for rule in gc_rule_pb.intersection.rules]
return GCRuleIntersection(rules)
else:
raise ValueError('Unexpected rule name', rule_name)
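# Hedged usage sketch, not part of the module above: composing the GC rule
# classes defined here. The example keeps at most three versions of a cell and
# additionally drops anything older than seven days; the helper and variable
# names are illustrative assumptions.
def _example_gc_rule():
    keep_three = MaxVersionsGCRule(3)
    keep_week = MaxAgeGCRule(datetime.timedelta(days=7))
    # With a union, a cell is eligible for collection when *either* rule
    # matches it; GCRuleIntersection would require both to match.
    return GCRuleUnion(rules=[keep_three, keep_week])
# _example_gc_rule().to_pb() yields the table_v2_pb2.GcRule protobuf that a
# ColumnFamily(..., gc_rule=_example_gc_rule()) would send on create().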
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from oslo.config import cfg
import webob.dec
import webob.exc
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
auth_opts = [
cfg.BoolOpt('api_rate_limit',
default=False,
help='whether to use per-user rate limiting for the api.'),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth: noauth or keystone.'),
cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
class InjectContext(wsgi.Middleware):
"""Add a 'nova.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.environ['nova.context'] = self.context
return self.application
class NovaKeystoneContext(wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
roles = self._get_roles(req)
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
user_name = req.headers.get('X_USER_NAME')
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
ctx = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog)
req.environ['nova.context'] = ctx
return self.application
def _get_roles(self, req):
"""Get the list of roles."""
if 'X_ROLES' in req.headers:
roles = req.headers.get('X_ROLES', '')
else:
# Fallback to deprecated role header:
roles = req.headers.get('X_ROLE', '')
if roles:
LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
"header"))
return [r.strip() for r in roles.split(',')]
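# Illustrative sketch, not nova code: the filter-composition step from
# pipeline_factory above reduced to plain callables. Filters wrap the app from
# the inside out, so the leftmost entry in a paste pipeline sees the request
# first. The auth_filter/limit_filter/api_app names are assumptions.
def _compose_pipeline(filter_factories, app):
    for factory in reversed(filter_factories):
        app = factory(app)
    return app
# _compose_pipeline([auth_filter, limit_filter], api_app) returns
# auth_filter(limit_filter(api_app)), mirroring "auth limit api" in paste.ini.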
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import frappe, json
from six.moves import range
import frappe.permissions
import MySQLdb
from frappe.model.db_query import DatabaseQuery
from frappe import _
from six import text_type, string_types, StringIO
@frappe.whitelist()
def get():
args = get_form_params()
data = compress(execute(**args), args = args)
return data
def execute(doctype, *args, **kwargs):
return DatabaseQuery(doctype).execute(*args, **kwargs)
def get_form_params():
"""Stringify GET request parameters."""
data = frappe._dict(frappe.local.form_dict)
del data["cmd"]
if "csrf_token" in data:
del data["csrf_token"]
if isinstance(data.get("filters"), string_types):
data["filters"] = json.loads(data["filters"])
if isinstance(data.get("fields"), string_types):
data["fields"] = json.loads(data["fields"])
if isinstance(data.get("docstatus"), string_types):
data["docstatus"] = json.loads(data["docstatus"])
if isinstance(data.get("save_user_settings"), string_types):
data["save_user_settings"] = json.loads(data["save_user_settings"])
else:
data["save_user_settings"] = True
doctype = data["doctype"]
fields = data["fields"]
for field in fields:
key = field.split(" as ")[0]
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = data.doctype
fieldname = key.strip("`")
df = frappe.get_meta(parenttype).get_field(fieldname)
report_hide = df.report_hide if df else None
# remove the field from the query if the report hide flag is set
if report_hide:
fields.remove(field)
# queries must always be server side
data.query = None
return data
def compress(data, args = {}):
"""separate keys and values"""
from frappe.desk.query_report import add_total_row
if not data: return data
values = []
keys = data[0].keys()
for row in data:
new_row = []
for key in keys:
new_row.append(row[key])
values.append(new_row)
if args.get("add_total_row"):
meta = frappe.get_meta(args.doctype)
values = add_total_row(values, keys, meta)
return {
"keys": keys,
"values": values
}
@frappe.whitelist()
def save_report():
"""save report"""
data = frappe.local.form_dict
if frappe.db.exists('Report', data['name']):
d = frappe.get_doc('Report', data['name'])
else:
d = frappe.new_doc('Report')
d.report_name = data['name']
d.ref_doctype = data['doctype']
d.report_type = "Report Builder"
d.json = data['json']
frappe.get_doc(d).save()
frappe.msgprint(_("{0} is saved").format(d.name))
return d.name
@frappe.whitelist()
def export_query():
"""export from report builder"""
form_params = get_form_params()
form_params["limit_page_length"] = None
form_params["as_list"] = True
doctype = form_params.doctype
add_totals_row = None
file_format_type = form_params["file_format_type"]
del form_params["doctype"]
del form_params["file_format_type"]
if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
add_totals_row = 1
del form_params["add_totals_row"]
frappe.permissions.can_export(doctype, raise_exception=True)
if 'selected_items' in form_params:
si = json.loads(frappe.form_dict.get('selected_items'))
form_params["filters"] = {"name": ("in", si)}
del form_params["selected_items"]
db_query = DatabaseQuery(doctype)
ret = db_query.execute(**form_params)
if add_totals_row:
ret = append_totals_row(ret)
data = [['Sr'] + get_labels(db_query.fields, doctype)]
for i, row in enumerate(ret):
data.append([i+1] + list(row))
if file_format_type == "CSV":
# convert to csv
import csv
from frappe.utils.xlsxutils import handle_html
f = StringIO()
writer = csv.writer(f)
for r in data:
# encode only unicode type strings and not int, floats etc.
writer.writerow([handle_html(frappe.as_unicode(v)).encode('utf-8') \
if isinstance(v, string_types) else v for v in r])
f.seek(0)
frappe.response['result'] = text_type(f.read(), 'utf-8')
frappe.response['type'] = 'csv'
frappe.response['doctype'] = doctype
elif file_format_type == "Excel":
from frappe.utils.xlsxutils import make_xlsx
xlsx_file = make_xlsx(data, doctype)
frappe.response['filename'] = doctype + '.xlsx'
frappe.response['filecontent'] = xlsx_file.getvalue()
frappe.response['type'] = 'binary'
def append_totals_row(data):
if not data:
return data
data = list(data)
totals = []
totals.extend([""]*len(data[0]))
for row in data:
for i in range(len(row)):
if isinstance(row[i], (float, int)):
totals[i] = (totals[i] or 0) + row[i]
data.append(totals)
return data
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
key = key.split(" as ")[0]
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
fieldname = key.strip("`")
df = frappe.get_meta(parenttype).get_field(fieldname)
label = df.label if df else fieldname.title()
if label in labels:
label = doctype + ": " + label
labels.append(label)
return labels
@frappe.whitelist()
def delete_items():
"""delete selected items"""
import json
il = json.loads(frappe.form_dict.get('items'))
doctype = frappe.form_dict.get('doctype')
for i, d in enumerate(il):
try:
frappe.delete_doc(doctype, d)
if len(il) >= 5:
frappe.publish_realtime("progress",
dict(progress=[i+1, len(il)], title=_('Deleting {0}').format(doctype)),
user=frappe.session.user)
except Exception:
pass
@frappe.whitelist()
def get_sidebar_stats(stats, doctype, filters=[]):
cat_tags = frappe.db.sql("""select tag.parent as category, tag.tag_name as tag
from `tabTag Doc Category` as docCat
INNER JOIN tabTag as tag on tag.parent = docCat.parent
where docCat.tagdoc=%s
ORDER BY tag.parent asc,tag.idx""",doctype,as_dict=1)
return {"defined_cat":cat_tags, "stats":get_stats(stats, doctype, filters)}
@frappe.whitelist()
def get_stats(stats, doctype, filters=[]):
"""get tag info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
try:
columns = frappe.db.get_table_columns(doctype)
except MySQLdb.OperationalError:
# raised when _user_tags column is added on the fly
columns = []
for tag in tags:
if not tag in columns: continue
try:
tagcount = frappe.get_list(doctype, fields=[tag, "count(*)"],
#filters=["ifnull(`%s`,'')!=''" % tag], group_by=tag, as_list=True)
filters = filters + ["ifnull(`%s`,'')!=''" % tag], group_by = tag, as_list = True)
if tag=='_user_tags':
stats[tag] = scrub_user_tags(tagcount)
stats[tag].append([_("No Tags"), frappe.get_list(doctype,
fields=[tag, "count(*)"],
filters=filters +["({0} = ',' or {0} = '' or {0} is null)".format(tag)], as_list=True)[0][1]])
else:
stats[tag] = tagcount
except frappe.SQLError:
# does not work for child tables
pass
except MySQLdb.OperationalError:
# raised when _user_tags column is added on the fly
pass
return stats
@frappe.whitelist()
def get_filter_dashboard_data(stats, doctype, filters=[]):
"""get tags info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
columns = frappe.db.get_table_columns(doctype)
for tag in tags:
if not tag["name"] in columns: continue
tagcount = []
if tag["type"] not in ['Date', 'Datetime']:
tagcount = frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters = filters + ["ifnull(`%s`,'')!=''" % tag["name"]],
group_by = tag["name"],
as_list = True)
if tag["type"] not in ['Check','Select','Date','Datetime','Int',
'Float','Currency','Percent'] and tag['name'] not in ['docstatus']:
stats[tag["name"]] = list(tagcount)
if stats[tag["name"]]:
data =["No Data", frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters=filters + ["({0} = '' or {0} is null)".format(tag["name"])],
as_list=True)[0][1]]
if data and data[1]!=0:
stats[tag["name"]].append(data)
else:
stats[tag["name"]] = tagcount
return stats
def scrub_user_tags(tagcount):
"""rebuild tag list for tags"""
rdict = {}
tagdict = dict(tagcount)
for t in tagdict:
if not t:
continue
alltags = t.split(',')
for tag in alltags:
if tag:
if not tag in rdict:
rdict[tag] = 0
rdict[tag] += tagdict[t]
rlist = []
for tag in rdict:
rlist.append([tag, rdict[tag]])
return rlist
# used in building query in queries.py
def get_match_cond(doctype):
cond = DatabaseQuery(doctype).build_match_conditions()
return ((' and ' + cond) if cond else "").replace("%", "%%")
def build_match_conditions(doctype, as_condition=True):
match_conditions = DatabaseQuery(doctype).build_match_conditions(as_condition=as_condition)
if as_condition:
return match_conditions.replace("%", "%%")
else:
return match_conditions
def get_filters_cond(doctype, filters, conditions, ignore_permissions=None, with_match_conditions=False):
if isinstance(filters, string_types):
filters = json.loads(filters)
if filters:
flt = filters
if isinstance(filters, dict):
filters = filters.items()
flt = []
for f in filters:
if isinstance(f[1], string_types) and f[1][0] == '!':
flt.append([doctype, f[0], '!=', f[1][1:]])
elif isinstance(f[1], list) and \
f[1][0] in (">", "<", ">=", "<=", "like", "not like", "in", "not in", "between"):
flt.append([doctype, f[0], f[1][0], f[1][1]])
else:
flt.append([doctype, f[0], '=', f[1]])
query = DatabaseQuery(doctype)
query.filters = flt
query.conditions = conditions
if with_match_conditions:
query.build_match_conditions()
query.build_filter_conditions(flt, conditions, ignore_permissions)
cond = ' and ' + ' and '.join(query.conditions)
else:
cond = ''
return cond
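# Hedged standalone illustration (not frappe API) of the filter normalisation
# done at the top of get_filters_cond above: a {field: value} dict becomes a
# list of [doctype, field, operator, value] entries, with '!'-prefixed strings
# and [operator, operand] lists handled specially. The helper name is assumed.
def normalise_filters(doctype, filters):
    flt = []
    for field, value in filters.items():
        if isinstance(value, string_types) and value[:1] == '!':
            flt.append([doctype, field, '!=', value[1:]])
        elif isinstance(value, (list, tuple)) and value and value[0] in (
                ">", "<", ">=", "<=", "like", "not like",
                "in", "not in", "between"):
            flt.append([doctype, field, value[0], value[1]])
        else:
            flt.append([doctype, field, '=', value])
    return flt
# normalise_filters('Task', {'status': 'Open', 'priority': ['in', ['High']]})
# -> [['Task', 'status', '=', 'Open'], ['Task', 'priority', 'in', ['High']]]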
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from neutron.agent.linux import async_process
from neutron.agent.ovsdb import api as ovsdb
from neutron.agent.ovsdb.native import helpers
from neutron.common import utils
LOG = logging.getLogger(__name__)
OVSDB_ACTION_INITIAL = 'initial'
OVSDB_ACTION_INSERT = 'insert'
OVSDB_ACTION_DELETE = 'delete'
OVSDB_ACTION_NEW = 'new'
class OvsdbMonitor(async_process.AsyncProcess):
"""Manages an invocation of 'ovsdb-client monitor'."""
def __init__(self, table_name, columns=None, format=None,
respawn_interval=None, ovsdb_connection=None):
if ovsdb_connection:
# if ovsdb connection is configured (e.g. tcp:ip:port), use it,
# and there is no need to run as root
helpers.enable_connection_uri(ovsdb_connection)
cmd = ['ovsdb-client', 'monitor', ovsdb_connection, table_name]
run_as_root = False
else:
cmd = ['ovsdb-client', 'monitor', table_name]
run_as_root = True
if columns:
cmd.append(','.join(columns))
if format:
cmd.append('--format=%s' % format)
super(OvsdbMonitor, self).__init__(cmd, run_as_root=run_as_root,
respawn_interval=respawn_interval,
log_output=True,
die_on_error=True)
class SimpleInterfaceMonitor(OvsdbMonitor):
"""Monitors the Interface table of the local host's ovsdb for changes.
The has_updates property indicates whether changes to the ovsdb
Interface table have been detected since the monitor started or
since the previous access.
"""
def __init__(self, respawn_interval=None, ovsdb_connection=None):
super(SimpleInterfaceMonitor, self).__init__(
'Interface',
columns=['name', 'ofport', 'external_ids'],
format='json',
respawn_interval=respawn_interval,
ovsdb_connection=ovsdb_connection
)
self.new_events = {'added': [], 'removed': []}
@property
def has_updates(self):
"""Indicate whether the ovsdb Interface table has been updated.
If the monitor process is not active an error will be logged since
it won't be able to communicate any update. This situation should be
temporary if respawn_interval is set.
"""
if not self.is_active():
LOG.error("Interface monitor is not active")
else:
self.process_events()
return bool(self.new_events['added'] or self.new_events['removed'])
def get_events(self):
self.process_events()
events = self.new_events
self.new_events = {'added': [], 'removed': []}
return events
def process_events(self):
devices_added = []
devices_removed = []
dev_to_ofport = {}
for row in self.iter_stdout():
json = jsonutils.loads(row).get('data')
for ovs_id, action, name, ofport, external_ids in json:
if external_ids:
external_ids = ovsdb.val_to_py(external_ids)
if ofport:
ofport = ovsdb.val_to_py(ofport)
device = {'name': name,
'ofport': ofport,
'external_ids': external_ids}
if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT):
devices_added.append(device)
elif action == OVSDB_ACTION_DELETE:
devices_removed.append(device)
elif action == OVSDB_ACTION_NEW:
dev_to_ofport[name] = ofport
self.new_events['added'].extend(devices_added)
self.new_events['removed'].extend(devices_removed)
# update any events with ofports received from 'new' action
for event in self.new_events['added']:
event['ofport'] = dev_to_ofport.get(event['name'], event['ofport'])
def start(self, block=False, timeout=5):
super(SimpleInterfaceMonitor, self).start()
if block:
utils.wait_until_true(self.is_active)
#!/usr/bin/python
'''
Created on 8 Apr 2012
@author: sam
'''
import sys, json, os, loggy
class config(object):
#TODO: add support for command line args
def __init__(self, soundblizzard):
#load defaults first then config file then command line args
self.config = { 'configfile' : os.path.expanduser('~/.config/soundblizzard/soundblizzard.conf'),
'libraryfolders' : [os.path.expanduser('~/Music')], #TODO: support multiple folders
'playlistfolder' : '~/.config/playlists',
'databasefile' : os.path.expanduser('~/.config/soundblizzard/soundblizzard.db'),
'mpdhost' : 'localhost',
'mpdport' : 6600,
'dbupdatetime' : loggy.currenttime()
}
if (not(os.path.isdir(os.path.dirname(self.config['configfile'])))):
try: os.makedirs(os.path.dirname(self.config['configfile']))
except OSError: loggy.warn('could not create config dir')
# if (not (os.path.isfile('~/.config/soundblizzard/soundblizzard.conf')))
fd = None
try:
fd = open(self.config['configfile'], 'r')#tries to open config file
except:
loggy.warn('Could not open config file '+ self.config['configfile'])
try:
self.config.update(json.load(fd)) #adds config file to dictionary and overrides with config file
except:
loggy.warn('Could not read config file '+ self.config['configfile'])
#Handle command line arguments
#Splits command line args into dict, if key starts with -- then takes this as an argument and prints these
# if key is help prints defaults
#TODO: improve command line argument recognition
a = sys.argv[1:]
if len(a) % 2:
a.append('')
b = {a[i]: a[i+1] for i in range(0, len(a), 2)}
c ={}
for key in b:
if key.startswith('--help'):
loggy.warn ('Soundblizzard media player\nTo amend config settings please use --key \'value\' on the command line. \n Current values:\n'+ json.dumps(self.config, sort_keys=True, indent=2))
loggy.die('help delivered')
elif key.startswith('--'):
c[key[2:]] = b[key]
self.config.update(c)
loggy.log('Configuration:' +str(self.config))
def save_config(self):
#import json
configfd = open(self.config['configfile'], 'w') or loggy.warn('Could not open config file for writing')
json.dump(self.config, configfd, sort_keys=True, indent=2) #or loggy.warn ('Could not write config file')
configfd.close()
loggy.log('Saved config to ' + self.config['configfile'] + ' Contents' + json.dumps(self.config, sort_keys=True, indent=2))
# try:
#
# json.dump(self.config, configfd, sort_keys=True, indent=2)
# configfd.close()
# except:
# import loggy
# loggy.warn('Could not save config file to '+self.config['configfile'])
# def __del__(self, soundblizzard=None):
#self.save_config()
if __name__ == "__main__":
temp = 'foo'
conf = config(temp)
conf.save_config()
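# Standalone illustration (assumed argv, not soundblizzard code) of the pairing
# trick used in config.__init__ above to turn "--key value" pairs from the
# command line into a dict, padding with '' when the last option has no value.
def pair_args(argv):
    a = list(argv)
    if len(a) % 2:
        a.append('')
    return {a[i]: a[i + 1] for i in range(0, len(a), 2)}
# pair_args(['--mpdhost', 'localhost', '--mpdport', '6601'])
# -> {'--mpdhost': 'localhost', '--mpdport': '6601'}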
from functools import reduce
from botlang.ast.ast_visitor import ASTVisitor
from botlang.evaluation.values import *
class ExecutionStack(list):
def print_trace(self):
from botlang.macros.default_macros import DefaultMacros
return reduce(
lambda a, n: a + n + '\n',
[
self.frame_message(frame) for frame in self
if frame.s_expr.source_reference.source_id !=
DefaultMacros.DEFAULT_MACROS_SOURCE_ID
],
''
)
@classmethod
def frame_message(cls, frame):
return '\tModule "{0}", line {1}, in {2}:\n\t\t{3}'.format(
frame.s_expr.source_reference.source_id,
frame.s_expr.source_reference.start_line,
frame.print_node_type(),
frame.s_expr.code.split('\n')[0]
)
class Evaluator(ASTVisitor):
"""
AST visitor for evaluation
"""
def __init__(self, module_resolver=None):
if module_resolver is None:
raise Exception('Module resolver required')
self.module_resolver = module_resolver
self.execution_stack = ExecutionStack()
def visit_val(self, val_node, env):
"""
Value expression evaluation
"""
return val_node.value
def visit_list(self, literal_list, env):
return [
element.accept(self, env) for element in literal_list.elements
]
def visit_if(self, if_node, env):
"""
'If' construct evaluation
"""
self.execution_stack.append(if_node)
if if_node.cond.accept(self, env):
self.execution_stack.pop()
return if_node.if_true.accept(self, env)
else:
self.execution_stack.pop()
return if_node.if_false.accept(self, env)
def visit_cond(self, cond_node, env):
"""
'Cond' conditional evaluation
"""
self.execution_stack.append(cond_node)
value = None
for clause in cond_node.cond_clauses:
value = clause.accept(self, env)
if value is not None:
break
self.execution_stack.pop()
return value
def visit_cond_predicate_clause(self, predicate_node, env):
"""
'Cond' predicate clause evaluation
"""
self.execution_stack.append(predicate_node)
value = None
if predicate_node.predicate.accept(self, env):
value = predicate_node.then_body.accept(self, env)
self.execution_stack.pop()
return value
def visit_cond_else_clause(self, else_node, env):
"""
'Cond' else clause evaluation
"""
self.execution_stack.append(else_node)
value = else_node.then_body.accept(self, env)
self.execution_stack.pop()
return value
def visit_and(self, and_node, env):
"""
Logical 'and' evaluation
"""
self.execution_stack.append(and_node)
left_branch = and_node.cond1.accept(self, env)
result = left_branch and and_node.cond2.accept(self, env)
self.execution_stack.pop()
return result
def visit_or(self, or_node, env):
"""
Logical 'or' evaluation
"""
self.execution_stack.append(or_node)
left_branch = or_node.cond1.accept(self, env)
result = left_branch or or_node.cond2.accept(self, env)
self.execution_stack.pop()
return result
def visit_id(self, id_node, env):
"""
Identifier (variable name) resolution
"""
self.execution_stack.append(id_node)
identifier = env.lookup(id_node.identifier)
self.execution_stack.pop()
return identifier
def visit_fun(self, fun_node, env):
"""
Function expression evaluation.
Returns closure
"""
self.execution_stack.append(fun_node)
closure = Closure(fun_node, env, self)
self.execution_stack.pop()
return closure
def visit_bot_node(self, bot_node, env):
"""
Bot node expression evaluation.
Returns bot-node closure
"""
self.execution_stack.append(bot_node)
bot_node = BotNodeValue(bot_node, env, self)
self.execution_stack.pop()
return bot_node
def visit_bot_result(self, bot_result_node, env):
"""
Bot result evaluation. Returns a BotResultValue which can be used
to resume execution in the future.
"""
self.execution_stack.append(bot_result_node)
data = bot_result_node.data.accept(self, env)
message = bot_result_node.message.accept(self, env)
next_node = bot_result_node.next_node.accept(self, env)
bot_result_value = BotResultValue(
data,
message,
next_node
)
self.execution_stack.pop()
return bot_result_value
def visit_app(self, app_node, env):
"""
Function application evaluation.
"""
self.execution_stack.append(app_node)
fun_val = app_node.fun_expr.accept(self, env)
if not isinstance(fun_val, FunVal):
raise Exception(
'Invalid function application: {0} is not a function'.format(
fun_val
)
)
arg_vals = [arg.accept(self, env) for arg in app_node.arg_exprs]
if fun_val.is_reflective():
result = fun_val.apply(env, *arg_vals)
else:
result = fun_val.apply(*arg_vals)
self.execution_stack.pop()
return result
def visit_body(self, body_node, env):
"""
Evaluation of a sequence of expressions
"""
self.execution_stack.append(body_node)
for expr in body_node.expressions[0:-1]:
expr.accept(self, env)
result = body_node.expressions[-1].accept(self, env)
self.execution_stack.pop()
return result
def visit_definition(self, def_node, env):
"""
Definition evaluation.
Mutates the environment with this definition.
Evaluates the definition body with the same environment
that is mutated, which allows recursion.
Doesn't return a value.
"""
self.execution_stack.append(def_node)
env.update(
{def_node.name: def_node.expr.accept(self, env)}
)
self.execution_stack.pop()
def visit_local(self, local_node, env):
"""
Local definition evaluation
"""
self.execution_stack.append(local_node)
new_env = env.new_environment()
for definition in local_node.definitions:
definition.accept(self, new_env)
result = local_node.body.accept(self, new_env)
self.execution_stack.pop()
return result
def visit_module_definition(self, module_node, env):
"""
Module definition
"""
self.execution_stack.append(module_node)
from botlang.modules.module import BotlangModule
module = BotlangModule(
module_node.name.accept(self, env),
module_node.body
)
self.module_resolver.add_module(module)
self.execution_stack.pop()
return module
def visit_module_function_export(self, provide_node, env):
"""
Module function's export
"""
raise NotInModuleContextException()
def visit_module_import(self, require_node, env):
"""
Import a module into scope
"""
self.execution_stack.append(require_node)
module_name = require_node.module_name.accept(self, env)
bindings = self.module_resolver.get_bindings(self, module_name)
env.update(bindings)
self.execution_stack.pop()
return Nil
class NotInModuleContextException(Exception):
def __init__(self):
super(NotInModuleContextException, self).__init__(
'The "provide" keyword must appear in a top-level module context'
)
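# Minimal standalone sketch (illustrative node names, not botlang's real AST)
# of the accept/visit double dispatch that Evaluator relies on: each node calls
# back into the visitor method named after its own type, so adding a new
# traversal only requires a new visitor, not changes to the node classes.
class _Num(object):
    def __init__(self, value):
        self.value = value

    def accept(self, visitor, env):
        return visitor.visit_num(self, env)


class _Add(object):
    def __init__(self, left, right):
        self.left = left
        self.right = right

    def accept(self, visitor, env):
        return visitor.visit_add(self, env)


class _TinyEvaluator(object):
    def visit_num(self, node, env):
        return node.value

    def visit_add(self, node, env):
        return node.left.accept(self, env) + node.right.accept(self, env)
# _Add(_Num(1), _Add(_Num(2), _Num(3))).accept(_TinyEvaluator(), env={}) == 6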
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from oslo_log import log as logging
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.rpc import server
LOG = logging.getLogger(__name__)
@registry.has_registry_receivers
class ServerSideRpcBackend(object):
"""The Neutron Server RPC backend."""
def __init__(self):
"""Initialize an RPC backend for the Neutron Server."""
self._skeleton = server.TrunkSkeleton()
self._stub = server.TrunkStub()
LOG.debug("RPC backend initialized for trunk plugin")
# Set up listeners to trunk events: they dispatch RPC messages
# to agents as needed. These are designed to work with any
# agent-based driver that may integrate with the trunk service
# plugin, e.g. linux bridge or ovs.
@registry.receives(trunk_consts.TRUNK,
[events.AFTER_CREATE, events.AFTER_DELETE])
@registry.receives(trunk_consts.SUBPORTS,
[events.AFTER_CREATE, events.AFTER_DELETE])
def process_event(self, resource, event, trunk_plugin, payload):
"""Emit RPC notifications to registered subscribers."""
context = payload.context
LOG.debug("RPC notification needed for trunk %s", payload.trunk_id)
if resource == trunk_consts.SUBPORTS:
payload = payload.subports
method = {
events.AFTER_CREATE: self._stub.subports_added,
events.AFTER_DELETE: self._stub.subports_deleted,
}
elif resource == trunk_consts.TRUNK:
# On AFTER_DELETE event, current_trunk is None
payload = payload.current_trunk or payload.original_trunk
method = {
events.AFTER_CREATE: self._stub.trunk_created,
events.AFTER_DELETE: self._stub.trunk_deleted,
}
LOG.debug("Emitting event %s for resource %s", event, resource)
method[event](context, payload)
| 0 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# TV Ultra 7K parser for SeriesFLV.com
# Version 0.1 (02.11.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
import os
import sys
import re
import urllib
import urllib2
import requests

import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin

import plugintools
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
thumbnail = 'http://www.oranline.com/wp-content/uploads/2015/01/logoneworange-300x92.png'
fanart = 'http://wallpoper.com/images/00/33/95/82/film-roll_00339582.jpg'
referer = 'http://www.oranline.com/'
def oranline0(params):
plugintools.log('[%s %s] Oranline regex %s' % (addonName, addonVersion, repr(params)))
    # Fetch the page source
url = params.get("url")
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(url, headers=headers)
data = r.content
bloque_thumb = plugintools.find_single_match(data, '<!--Begin Image-->(.*?)<!--End Image-->')
thumb_peli = plugintools.find_single_match(bloque_thumb, '<img src="([^"]+)')
if thumb_peli == "":
thumb_peli = thumbnail
datamovie = {}
datamovie["Plot"]=params.get("plot") # Sinopsis
bloque = plugintools.find_single_match(data, '<div id="veronline">(.*?)</form>')
bloque_peli = plugintools.find_multiple_matches(bloque, '<p>(.*?)</img></a></span></p>')
i = 1
plugintools.add_item(action="", title='[COLOR orange][B]Oranline.com / [/B][/COLOR][COLOR white] Resultados de: [/COLOR][COLOR lightyellow][I]'+params.get("title")+'[/I][/COLOR]', thumbnail=thumb_peli, fanart=fanart, folder=False, isPlayable=False)
plugintools.add_item(action="", title='[COLOR white][I](Núm.) [/I][/COLOR][COLOR white][I]Formato[/I][/COLOR][COLOR lightblue] Calidad (sobre 5)[/COLOR][COLOR lightgreen][I][Idioma][/I][/COLOR]', thumbnail=thumb_peli, fanart=fanart, folder=False, isPlayable=False)
for entry in bloque_peli:
#plugintools.log("entry= "+entry)
lang_audio = plugintools.find_single_match(entry, 'src="([^"]+)')
if lang_audio.endswith("1.png") == True:
lang_audio = '[COLOR lightgreen][I][ESP][/I][/COLOR]'
elif lang_audio.endswith("2.png") == True:
lang_audio = '[COLOR lightgreen][I][LAT][/I][/COLOR]'
elif lang_audio.endswith("3.png") == True:
lang_audio = '[COLOR lightgreen][I][VOS][/I][/COLOR]'
elif lang_audio.endswith("4.png") == True:
lang_audio = '[COLOR lightgreen][I][ENG][/I][/COLOR]'
formatq = plugintools.find_single_match(entry, 'calidad(.+?).png')
#plugintools.log("formatq= "+formatq)
id_link = plugintools.find_single_match(entry, 'reportarpelicula([^>]+)')
id_link = id_link.replace('"', "").replace("'", "").replace(",", "").replace(")", "")
url = oranline2(id_link, params.get("url"))
#plugintools.log("url= "+url)
title_peli = plugintools.find_single_match(entry, 'title="([^"]+)')
title_peli = title_peli.replace("Calidad de Video:", "").replace("Calidad de video", "").replace("Calidad de Audio", "").replace("\t\r\n", "").replace("reportar enlace", "N/D").strip()
info_video = title_peli.split(":")[0].strip()
title_peli = '[COLOR white][I]'+info_video+' [/I][/COLOR][COLOR lightblue]'+formatq+'/5[/COLOR]'
if url.find("allmyvideos") >= 0:
server_url = "[COLOR lightyellow][I][allmyvideos][/I][/COLOR]"
plugintools.add_item(action="allmyvideos", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("vidspot") >= 0:
server_url = "[COLOR lightyellow][I][vidspot][/I][/COLOR]"
plugintools.add_item(action="vidspot", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("played.to") >= 0:
server_url = "[COLOR lightyellow][I][played.to][/I][/COLOR]"
plugintools.add_item(action="playedto", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("nowvideo") >= 0:
server_url = "[COLOR lightyellow][I][nowvideo][/I][/COLOR]"
plugintools.add_item(action="nowvideo", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("streamin.to") >= 0:
server_url = "[COLOR lightyellow][I][streamin.to][/I][/COLOR]"
plugintools.add_item(action="streaminto", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("vk") >= 0:
server_url = "[COLOR lightyellow][I][vk][/I][/COLOR]"
plugintools.add_item(action="vk", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("tumi") >= 0:
server_url = "[COLOR lightyellow][I][tumi][/I][/COLOR]"
plugintools.add_item(action="tumi", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("powvideo") >= 0:
server_url = "[COLOR lightyellow][I][powvideo][/I][/COLOR]"
plugintools.add_item(action="powvideo", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("streamcloud") >= 0:
server_url = "[COLOR lightyellow][I][streamcloud][/I][/COLOR]"
plugintools.add_item(action="streamcloud", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("veehd") >= 0:
server_url = "[COLOR lightyellow][I][veehd][/I][/COLOR]"
plugintools.add_item(action="veehd", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("novamov") >= 0:
server_url = "[COLOR lightyellow][I][novamov][/I][/COLOR]"
plugintools.add_item(action="novamov", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("moevideos") >= 0:
server_url = "[COLOR lightyellow][I][moevideos][/I][/COLOR]"
plugintools.add_item(action="moevideos", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("movshare") >= 0:
server_url = "[COLOR lightyellow][I][movshare][/I][/COLOR]"
plugintools.add_item(action="movshare", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("movreel") >= 0:
server_url = "[COLOR lightyellow][I][movreel][/I][/COLOR]"
plugintools.add_item(action="movshare", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("gamovideo") >= 0:
server_url = "[COLOR lightyellow][I][gamovideo][/I][/COLOR]"
plugintools.add_item(action="gamovideo", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("videobam") >= 0:
server_url = "[COLOR lightyellow][I][videobam][/I][/COLOR]"
plugintools.add_item(action="videobam", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("videoweed") >= 0:
server_url = "[COLOR lightyellow][I][videoweed][/I][/COLOR]"
plugintools.add_item(action="videoweed", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("streamable") >= 0:
server_url = "[COLOR lightyellow][I][streamable][/I][/COLOR]"
plugintools.add_item(action="streamable", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("rocvideo") >= 0:
server_url = "[COLOR lightyellow][I][rocvideo][/I][/COLOR]"
plugintools.add_item(action="rocvideo", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("realvid") >= 0:
server_url = "[COLOR lightyellow][I][realvid][/I][/COLOR]"
plugintools.add_item(action="realvid", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("netu") >= 0:
server_url = "[COLOR lightyellow][I][netu][/I][/COLOR]"
plugintools.add_item(action="netu", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("videomega") >= 0:
server_url = "[COLOR lightyellow][I][videomega][/I][/COLOR]"
plugintools.add_item(action="videomega", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("video.tt") >= 0:
server_url = "[COLOR lightyellow][I][video.tt][/I][/COLOR]"
plugintools.add_item(action="videott", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
elif url.find("flashx.tv") >= 0:
server_url = "[COLOR lightyellow][I][flashx][/I][/COLOR]"
plugintools.add_item(action="flashx", title= '[COLOR white]'+str(i)+'. '+title_peli+'[/COLOR] '+lang_audio+' '+server_url , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumb_peli , folder = False, isPlayable = True)
i = i + 1
def oranline1(params):
plugintools.log('[%s %s] Oranline URL analyzer %s' % (addonName, addonVersion, repr(params)))
if url.find("allmyvideos") >= 0:
server_url = "[COLOR lightgreen][I][allmyvideos][/I][/COLOR]"
plugintools.add_item(action="allmyvideos", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("vidspot") >= 0:
server_url = "[COLOR lightgreen][I][vidspot][/I][/COLOR]"
plugintools.add_item(action="vidspot", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("played.to") >= 0:
server_url = "[COLOR lightgreen][I][played.to][/I][/COLOR]"
plugintools.add_item(action="playedto", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("nowvideo") >= 0:
server_url = "[COLOR lightgreen][I][nowvideo][/I][/COLOR]"
plugintools.add_item(action="nowvideo", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("streamin.to") >= 0:
server_url = "[COLOR lightgreen][I][streamin.to][/I][/COLOR]"
plugintools.add_item(action="streaminto", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("vk") >= 0:
server_url = "[COLOR lightgreen][I][vk][/I][/COLOR]"
plugintools.add_item(action="vk", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("tumi") >= 0:
server_url = "[COLOR lightgreen][I][tumi][/I][/COLOR]"
plugintools.add_item(action="tumi", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("streamcloud") >= 0:
server_url = "[COLOR lightgreen][I][streamcloud][/I][/COLOR]"
plugintools.add_item(action="streamcloud", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("veehd") >= 0:
server_url = "[COLOR lightgreen][I][veehd][/I][/COLOR]"
plugintools.add_item(action="veehd", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("novamov") >= 0:
server_url = "[COLOR lightgreen][I][novamov][/I][/COLOR]"
plugintools.add_item(action="novamov", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("moevideos") >= 0:
server_url = "[COLOR lightgreen][I][moevideos][/I][/COLOR]"
plugintools.add_item(action="moevideos", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("movshare") >= 0:
server_url = "[COLOR lightgreen][I][movshare][/I][/COLOR]"
plugintools.add_item(action="movshare", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("movreel") >= 0:
server_url = "[COLOR lightgreen][I][movreel][/I][/COLOR]"
plugintools.add_item(action="movshare", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("gamovideo") >= 0:
server_url = "[COLOR lightgreen][I][gamovideo][/I][/COLOR]"
plugintools.add_item(action="gamovideo", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("videobam") >= 0:
server_url = "[COLOR lightgreen][I][videobam][/I][/COLOR]"
plugintools.add_item(action="videobam", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("videoweed") >= 0:
server_url = "[COLOR lightgreen][I][videoweed][/I][/COLOR]"
plugintools.add_item(action="videoweed", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("streamable") >= 0:
server_url = "[COLOR lightgreen][I][streamable][/I][/COLOR]"
plugintools.add_item(action="streamable", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("rocvideo") >= 0:
server_url = "[COLOR lightgreen][I][rocvideo][/I][/COLOR]"
plugintools.add_item(action="rocvideo", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("realvid") >= 0:
server_url = "[COLOR lightgreen][I][realvid][/I][/COLOR]"
plugintools.add_item(action="realvid", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("netu") >= 0:
server_url = "[COLOR lightgreen][I][netu][/I][/COLOR]"
plugintools.add_item(action="netu", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("videomega") >= 0:
server_url = "[COLOR lightgreen][I][videomega][/I][/COLOR]"
plugintools.add_item(action="videomega", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("video.tt") >= 0:
server_url = "[COLOR lightgreen][I][video.tt][/I][/COLOR]"
plugintools.add_item(action="videott", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
elif url.find("flashx.tv") >= 0:
server_url = "[COLOR lightgreen][I][flashx][/I][/COLOR]"
plugintools.add_item(action="flashx", title= '[COLOR white]'+title_fixed+' [/COLOR][COLOR lightyellow][I]('+server_name+')[/I][/COLOR] [COLOR lightgreen][I]'+lang+'[/I][/COLOR]' , url = url, info_labels = datamovie, fanart = fanart , thumbnail = thumbnail , folder = False, isPlayable = True)
def categorias_flv(data):
plugintools.log("[tv.ultra.7k 0.3.4] SeriesFLV Categorias ")
params = plugintools.get_params()
thumbnail = params.get("thumbnail")
if thumbnail == "":
thumbnail = 'http://m1.paperblog.com/i/249/2490697/seriesflv-mejor-alternativa-series-yonkis-L-2whffw.jpeg'
fanart = 'http://www.nikopik.com/wp-content/uploads/2011/10/S%C3%A9ries-TV.jpg'
sinopsis = params.get("plot")
datamovie = {}
datamovie["Plot"]=sinopsis
sections = plugintools.find_single_match(data, '<div class="lang over font2 bold">(.*?)</div>')
#plugintools.log("sections= "+sections)
tipo_selected = plugintools.find_single_match(sections, 'class="select">(.*?)</a>')
plugintools.add_item(action="listado_seriesflv", title='[COLOR orange]Listado completo[/COLOR]' , url = "http://www.seriesflv.net/series/", extra = data , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
plugintools.add_item(action="lista_chapters", title='[COLOR orange]'+tipo_selected+'[/COLOR]' , url = "", extra = data , info_labels = datamovie, thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
tipos = plugintools.find_multiple_matches(sections, ';">(.*?)</a>')
for entry in tipos:
plugintools.add_item(action="lista_chapters", title='[COLOR orange]'+entry+'[/COLOR]' , url = "", thumbnail = thumbnail , extra = data , plot = datamovie["Plot"] , info_labels = datamovie, fanart = fanart , folder = True, isPlayable = False)
def lista_chapters(params):
plugintools.log("[tv.ultra.7k 0.3.4] SeriesFLV Lista_chapters "+repr(params))
url = params.get("url")
referer = 'http://www.seriesflv.com/'
data = gethttp_referer_headers(url, referer)
thumbnail = params.get("thumbnail")
if thumbnail == "":
thumbnail = 'http://m1.paperblog.com/i/249/2490697/seriesflv-mejor-alternativa-series-yonkis-L-2whffw.jpeg'
fanart = 'http://www.nikopik.com/wp-content/uploads/2011/10/S%C3%A9ries-TV.jpg'
sinopsis = params.get("plot")
datamovie = {}
datamovie["Plot"]=sinopsis
chapters = plugintools.find_multiple_matches(data, '<a href="http://www.seriesflv.net/ver/(.*?)</a>')
title = params.get("title")
for entry in chapters:
if title.find("Subtitulada") >= 0:
if entry.find('lang="sub"') >=0:
#plugintools.log("entry= "+entry)
entry_fixed = entry.split('"')
url_chapter = 'http://www.seriesflv.net/ver/'+entry_fixed[0]
#plugintools.log("url_chapter= "+url_chapter)
title_chapter = plugintools.find_single_match(entry, '<div class="i-title">(.*?)</div>')
#plugintools.log("title_chapter= "+title_chapter)
num_chapter = plugintools.find_single_match(entry, '<div class="box-tc">(.*?)</div>')
#plugintools.log("núm. capítulo= "+num_chapter)
i_time = plugintools.find_single_match(entry, '<div class="i-time">(.*?)</div>')
#plugintools.log("desde hace= "+i_time)
plugintools.add_item(action="chapter_urls", title='[COLOR orange]'+num_chapter+'[/COLOR]'+' [COLOR lightyellow][B]'+title_chapter+'[/B][/COLOR][COLOR lightgreen][I] ('+i_time+')[/I][/COLOR]' , info_labels = datamovie , plot = datamovie["Plot"], url = url_chapter , thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
if title.find("Español") >= 0:
if entry.find('lang="es"') >= 0:
#plugintools.log("entry= "+entry)
entry_fixed = entry.split('"')
url_chapter = 'http://www.seriesflv.net/ver/'+entry_fixed[0]
#plugintools.log("url_chapter= "+url_chapter)
title_chapter = plugintools.find_single_match(entry, '<div class="i-title">(.*?)</div>')
#plugintools.log("title_chapter= "+title_chapter)
num_chapter = plugintools.find_single_match(entry, '<div class="box-tc">(.*?)</div>')
#plugintools.log("núm. capítulo= "+num_chapter)
i_time = plugintools.find_single_match(entry, '<div class="i-time">(.*?)</div>')
#plugintools.log("desde hace= "+i_time)
plugintools.add_item(action="chapter_urls", title='[COLOR orange]'+num_chapter+'[/COLOR]'+' [COLOR lightyellow][B]'+title_chapter+'[/B][/COLOR][COLOR lightgreen][I] ('+i_time+')[/I][/COLOR]' , url = url_chapter , info_labels = datamovie , plot = datamovie["Plot"], thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
if title.find("Latino") >= 0:
if entry.find('lang="la"') >= 0:
#plugintools.log("entry= "+entry)
entry_fixed = entry.split('"')
url_chapter = 'http://www.seriesflv.net/ver/'+entry_fixed[0]
#plugintools.log("url_chapter= "+url_chapter)
title_chapter = plugintools.find_single_match(entry, '<div class="i-title">(.*?)</div>')
#plugintools.log("title_chapter= "+title_chapter)
num_chapter = plugintools.find_single_match(entry, '<div class="box-tc">(.*?)</div>')
#plugintools.log("núm. capítulo= "+num_chapter)
i_time = plugintools.find_single_match(entry, '<div class="i-time">(.*?)</div>')
#plugintools.log("desde hace= "+i_time)
plugintools.add_item(action="chapter_urls", title='[COLOR orange]'+num_chapter+'[/COLOR]'+' [COLOR lightyellow][B]'+title_chapter+'[/B][/COLOR][COLOR lightgreen][I] ('+i_time+')[/I][/COLOR]' , url = url_chapter , info_labels = datamovie , plot = datamovie["Plot"], thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
if title.find("Original") >= 0:
if entry.find('lang="en"') >= 0:
#plugintools.log("entry= "+entry)
entry_fixed = entry.split('"')
url_chapter = 'http://www.seriesflv.net/ver/'+entry_fixed[0]
#plugintools.log("url_chapter= "+url_chapter)
title_chapter = plugintools.find_single_match(entry, '<div class="i-title">(.*?)</div>')
#plugintools.log("title_chapter= "+title_chapter)
num_chapter = plugintools.find_single_match(entry, '<div class="box-tc">(.*?)</div>')
#plugintools.log("núm. capítulo= "+num_chapter)
i_time = plugintools.find_single_match(entry, '<div class="i-time">(.*?)</div>')
#plugintools.log("desde hace= "+i_time)
plugintools.add_item(action="chapter_urls", title='[COLOR orange]'+num_chapter+'[/COLOR]'+' [COLOR lightyellow][B]'+title_chapter+'[/B][/COLOR][COLOR lightgreen][I] ('+i_time+')[/I][/COLOR]' , url = url_chapter , info_labels = datamovie , plot = datamovie["Plot"], thumbnail = thumbnail , fanart = fanart , folder = True, isPlayable = False)
def oranline2(id_link, url):
plugintools.log("[%s %s] Oranline2 %s %s" % (addonName, addonVersion, id_link, url))
geturl = 'http://www.oranline.com/wp-content/themes/reviewit/enlace.php?id='+id_link
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14', 'Referer': url}
r = requests.get(geturl, headers=headers)
data = r.content
url = 'http'+plugintools.find_single_match(data, '<a href="http([^"]+)')
return url
def gethttp_referer_headers(url,referer):
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer", referer])
request_headers.append(["X-Requested-With", "XMLHttpRequest"])
request_headers.append(["Cookie:","__utma=253162379.286456173.1418323503.1421078750.1422185754.16; __utmz=253162379.1421070671.14.3.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=http%3A%2F%2Fwww.seriesflv.net%2Fserie%2Fhora-de-aventuras.html; __cfduid=daeed6a2aacaffab2433869fd863162821419890996; __utmb=253162379.4.10.1422185754; __utmc=253162379; __utmt=1"])
    body, response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    print response_headers
return body
| 0.034522 |
"""
Connection Pooling
"""
import datetime
import logging
import os
import threading
import time
import weakref
import psycopg2
LOGGER = logging.getLogger(__name__)
DEFAULT_IDLE_TTL = 60
DEFAULT_MAX_SIZE = int(os.environ.get('QUERIES_MAX_POOL_SIZE', 1))
class Connection(object):
"""Contains the handle to the connection, the current state of the
connection and methods for manipulating the state of the connection.
"""
_lock = threading.Lock()
def __init__(self, handle):
self.handle = handle
self.used_by = None
self.executions = 0
self.exceptions = 0
def close(self):
"""Close the connection
:raises: ConnectionBusyError
"""
LOGGER.debug('Connection %s closing', self.id)
if self.busy and not self.closed:
raise ConnectionBusyError(self)
with self._lock:
if not self.handle.closed:
try:
self.handle.close()
except psycopg2.InterfaceError as error:
LOGGER.error('Error closing socket: %s', error)
@property
def closed(self):
"""Return if the psycopg2 connection is closed.
:rtype: bool
"""
return self.handle.closed != 0
@property
def busy(self):
"""Return if the connection is currently executing a query or is locked
by a session that still exists.
:rtype: bool
"""
if self.handle.isexecuting():
return True
elif self.used_by is None:
return False
        return self.used_by() is not None
@property
def executing(self):
"""Return if the connection is currently executing a query
:rtype: bool
"""
return self.handle.isexecuting()
def free(self):
"""Remove the lock on the connection if the connection is not active
:raises: ConnectionBusyError
"""
LOGGER.debug('Connection %s freeing', self.id)
if self.handle.isexecuting():
raise ConnectionBusyError(self)
with self._lock:
self.used_by = None
LOGGER.debug('Connection %s freed', self.id)
@property
def id(self):
"""Return id of the psycopg2 connection object
:rtype: int
"""
return id(self.handle)
def lock(self, session):
"""Lock the connection, ensuring that it is not busy and storing
a weakref for the session.
:param queries.Session session: The session to lock the connection with
:raises: ConnectionBusyError
"""
if self.busy:
raise ConnectionBusyError(self)
with self._lock:
self.used_by = weakref.ref(session)
LOGGER.debug('Connection %s locked', self.id)
@property
def locked(self):
"""Return if the connection is currently exclusively locked
:rtype: bool
"""
return self.used_by is not None
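# Illustrative sketch only: a minimal Connection lifecycle, assuming a valid
# psycopg2 DSN and any live object standing in for a queries.Session. The names
# here are hypothetical and not part of the module's public API.
def _example_connection_lifecycle(dsn, session):
    handle = psycopg2.connect(dsn)
    conn = Connection(handle)
    conn.lock(session)   # the connection is now busy/locked on behalf of `session`
    conn.free()          # release the lock once the session is done with it
    conn.close()         # safe now that the connection is no longer busy
    return conn.closed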
class Pool(object):
"""A connection pool for gaining access to and managing connections"""
_lock = threading.Lock()
idle_start = None
idle_ttl = DEFAULT_IDLE_TTL
max_size = DEFAULT_MAX_SIZE
def __init__(self,
pool_id,
idle_ttl=DEFAULT_IDLE_TTL,
max_size=DEFAULT_MAX_SIZE,
time_method=None):
self.connections = {}
self._id = pool_id
self.idle_ttl = idle_ttl
self.max_size = max_size
self.time_method = time_method or time.time
def __contains__(self, connection):
"""Return True if the pool contains the connection"""
return id(connection) in self.connections
def __len__(self):
"""Return the number of connections in the pool"""
return len(self.connections)
def add(self, connection):
"""Add a new connection to the pool
:param connection: The connection to add to the pool
:type connection: psycopg2.extensions.connection
:raises: PoolFullError
"""
if id(connection) in self.connections:
raise ValueError('Connection already exists in pool')
if len(self.connections) == self.max_size:
LOGGER.warning('Race condition found when adding new connection')
try:
connection.close()
except (psycopg2.Error, psycopg2.Warning) as error:
                LOGGER.error('Error closing the connection that cannot be used: %s',
                             error)
raise PoolFullError(self)
with self._lock:
self.connections[id(connection)] = Connection(connection)
LOGGER.debug('Pool %s added connection %s', self.id, id(connection))
@property
def busy_connections(self):
"""Return a list of active/busy connections
:rtype: list
"""
return [c for c in self.connections.values()
if c.busy and not c.closed]
    def clean(self):
        """Clean the pool by removing any closed connections and, if the pool
        has been idle for longer than its idle TTL, by removing all
        connections.
        """
LOGGER.debug('Cleaning the pool')
for connection in [self.connections[k] for k in self.connections if
self.connections[k].closed]:
LOGGER.debug('Removing %s', connection.id)
self.remove(connection.handle)
if self.idle_duration > self.idle_ttl:
self.close()
LOGGER.debug('Pool %s cleaned', self.id)
def close(self):
"""Close the pool by closing and removing all of the connections"""
for cid in list(self.connections.keys()):
self.remove(self.connections[cid].handle)
LOGGER.debug('Pool %s closed', self.id)
@property
def closed_connections(self):
"""Return a list of closed connections
:rtype: list
"""
return [c for c in self.connections.values() if c.closed]
def connection_handle(self, connection):
"""Return a connection object for the given psycopg2 connection
:param connection: The connection to return a parent for
:type connection: psycopg2.extensions.connection
:rtype: Connection
"""
return self.connections[id(connection)]
@property
def executing_connections(self):
"""Return a list of connections actively executing queries
:rtype: list
"""
return [c for c in self.connections.values() if c.executing]
def free(self, connection):
"""Free the connection from use by the session that was using it.
:param connection: The connection to free
:type connection: psycopg2.extensions.connection
:raises: ConnectionNotFoundError
"""
LOGGER.debug('Pool %s freeing connection %s', self.id, id(connection))
try:
self.connection_handle(connection).free()
except KeyError:
raise ConnectionNotFoundError(self.id, id(connection))
if self.idle_connections == list(self.connections.values()):
with self._lock:
self.idle_start = self.time_method()
LOGGER.debug('Pool %s freed connection %s', self.id, id(connection))
def get(self, session):
"""Return an idle connection and assign the session to the connection
:param queries.Session session: The session to assign
:rtype: psycopg2.extensions.connection
:raises: NoIdleConnectionsError
"""
idle = self.idle_connections
if idle:
connection = idle.pop(0)
connection.lock(session)
if self.idle_start:
with self._lock:
self.idle_start = None
return connection.handle
raise NoIdleConnectionsError(self.id)
@property
def id(self):
"""Return the ID for this pool
:rtype: str
"""
return self._id
@property
def idle_connections(self):
"""Return a list of idle connections
:rtype: list
"""
return [c for c in self.connections.values()
if not c.busy and not c.closed]
@property
def idle_duration(self):
"""Return the number of seconds that the pool has had no active
connections.
:rtype: float
"""
if self.idle_start is None:
return 0
return self.time_method() - self.idle_start
@property
def is_full(self):
"""Return True if there are no more open slots for connections.
:rtype: bool
"""
return len(self.connections) >= self.max_size
def lock(self, connection, session):
"""Explicitly lock the specified connection
:type connection: psycopg2.extensions.connection
:param connection: The connection to lock
:param queries.Session session: The session to hold the lock
"""
cid = id(connection)
try:
self.connection_handle(connection).lock(session)
except KeyError:
raise ConnectionNotFoundError(self.id, cid)
else:
if self.idle_start:
with self._lock:
self.idle_start = None
LOGGER.debug('Pool %s locked connection %s', self.id, cid)
@property
def locked_connections(self):
"""Return a list of all locked connections
:rtype: list
"""
return [c for c in self.connections.values() if c.locked]
def remove(self, connection):
"""Remove the connection from the pool
:param connection: The connection to remove
:type connection: psycopg2.extensions.connection
:raises: ConnectionNotFoundError
:raises: ConnectionBusyError
"""
cid = id(connection)
if cid not in self.connections:
raise ConnectionNotFoundError(self.id, cid)
self.connection_handle(connection).close()
with self._lock:
del self.connections[cid]
LOGGER.debug('Pool %s removed connection %s', self.id, cid)
def report(self):
"""Return a report about the pool state and configuration.
:rtype: dict
"""
return {
'connections': {
'busy': len(self.busy_connections),
'closed': len(self.closed_connections),
'executing': len(self.executing_connections),
'idle': len(self.idle_connections),
                'locked': len(self.locked_connections)
},
'exceptions': sum([c.exceptions
for c in self.connections.values()]),
'executions': sum([c.executions
for c in self.connections.values()]),
'full': self.is_full,
'idle': {
'duration': self.idle_duration,
'ttl': self.idle_ttl
},
'max_size': self.max_size
}
def shutdown(self):
"""Forcefully shutdown the entire pool, closing all non-executing
connections.
:raises: ConnectionBusyError
"""
with self._lock:
for cid in list(self.connections.keys()):
if self.connections[cid].executing:
raise ConnectionBusyError(cid)
if self.connections[cid].locked:
self.connections[cid].free()
self.connections[cid].close()
del self.connections[cid]
def set_idle_ttl(self, ttl):
"""Set the idle ttl
:param int ttl: The TTL when idle
"""
with self._lock:
self.idle_ttl = ttl
def set_max_size(self, size):
"""Set the maximum number of connections
:param int size: The maximum number of connections
"""
with self._lock:
self.max_size = size
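# Illustrative sketch only: driving a Pool directly, assuming a valid DSN and any
# object usable as a session. Callers normally go through PoolManager below; this
# merely exercises the Pool API defined in this module.
def _example_pool_usage(dsn, session):
    pool = Pool(pool_id='example', idle_ttl=60, max_size=2)
    pool.add(psycopg2.connect(dsn))   # wrap and track a new connection
    handle = pool.get(session)        # lock an idle connection for the session
    # ... run queries against `handle` here ...
    pool.free(handle)                 # hand it back for reuse by other sessions
    pool.clean()                      # drop closed connections, close the pool if idle too long
    return pool.report()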
class PoolManager(object):
"""The connection pool object implements behavior around connections and
their use in queries.Session objects.
    We carry a pool id instead of the connection URI so that the URI is not
    kept in memory, which could otherwise pose a security issue.
"""
_lock = threading.Lock()
_pools = {}
def __contains__(self, pid):
"""Returns True if the pool exists
:param str pid: The pool id to check for
:rtype: bool
"""
return pid in self.__class__._pools
@classmethod
def instance(cls):
"""Only allow a single PoolManager instance to exist, returning the
handle for it.
:rtype: PoolManager
"""
if not hasattr(cls, '_instance'):
with cls._lock:
cls._instance = cls()
return cls._instance
@classmethod
def add(cls, pid, connection):
"""Add a new connection and session to a pool.
:param str pid: The pool id
:type connection: psycopg2.extensions.connection
:param connection: The connection to add to the pool
"""
with cls._lock:
cls._ensure_pool_exists(pid)
cls._pools[pid].add(connection)
@classmethod
def clean(cls, pid):
"""Clean the specified pool, removing any closed connections or
stale locks.
:param str pid: The pool id to clean
"""
with cls._lock:
try:
cls._ensure_pool_exists(pid)
except KeyError:
LOGGER.debug('Pool clean invoked against missing pool %s', pid)
return
cls._pools[pid].clean()
cls._maybe_remove_pool(pid)
@classmethod
def create(cls, pid, idle_ttl=DEFAULT_IDLE_TTL, max_size=DEFAULT_MAX_SIZE,
time_method=None):
"""Create a new pool, with the ability to pass in values to override
the default idle TTL and the default maximum size.
A pool's idle TTL defines the amount of time that a pool can be open
without any sessions before it is removed.
A pool's max size defines the maximum number of connections that can
be added to the pool to prevent unbounded open connections.
:param str pid: The pool ID
:param int idle_ttl: Time in seconds for the idle TTL
:param int max_size: The maximum pool size
:param callable time_method: Override the use of :py:meth:`time.time`
method for time values.
:raises: KeyError
"""
if pid in cls._pools:
raise KeyError('Pool %s already exists' % pid)
with cls._lock:
LOGGER.debug("Creating Pool: %s (%i/%i)", pid, idle_ttl, max_size)
cls._pools[pid] = Pool(pid, idle_ttl, max_size, time_method)
@classmethod
def free(cls, pid, connection):
"""Free a connection that was locked by a session
:param str pid: The pool ID
:param connection: The connection to remove
:type connection: psycopg2.extensions.connection
"""
with cls._lock:
LOGGER.debug('Freeing %s from pool %s', id(connection), pid)
cls._ensure_pool_exists(pid)
cls._pools[pid].free(connection)
@classmethod
def get(cls, pid, session):
"""Get an idle, unused connection from the pool. Once a connection has
been retrieved, it will be marked as in-use until it is freed.
:param str pid: The pool ID
:param queries.Session session: The session to assign to the connection
:rtype: psycopg2.extensions.connection
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return cls._pools[pid].get(session)
@classmethod
def get_connection(cls, pid, connection):
"""Return the specified :class:`~queries.pool.Connection` from the
pool.
:param str pid: The pool ID
:param connection: The connection to return for
:type connection: psycopg2.extensions.connection
:rtype: queries.pool.Connection
"""
with cls._lock:
return cls._pools[pid].connection_handle(connection)
@classmethod
def has_connection(cls, pid, connection):
"""Check to see if a pool has the specified connection
:param str pid: The pool ID
:param connection: The connection to check for
:type connection: psycopg2.extensions.connection
:rtype: bool
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return connection in cls._pools[pid]
@classmethod
def has_idle_connection(cls, pid):
"""Check to see if a pool has an idle connection
:param str pid: The pool ID
:rtype: bool
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return bool(cls._pools[pid].idle_connections)
@classmethod
def is_full(cls, pid):
"""Return a bool indicating if the specified pool is full
:param str pid: The pool id
:rtype: bool
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return cls._pools[pid].is_full
@classmethod
def lock(cls, pid, connection, session):
"""Explicitly lock the specified connection in the pool
:param str pid: The pool id
:type connection: psycopg2.extensions.connection
:param connection: The connection to add to the pool
:param queries.Session session: The session to hold the lock
"""
with cls._lock:
cls._ensure_pool_exists(pid)
cls._pools[pid].lock(connection, session)
@classmethod
def remove(cls, pid):
"""Remove a pool, closing all connections
:param str pid: The pool ID
"""
with cls._lock:
cls._ensure_pool_exists(pid)
cls._pools[pid].close()
del cls._pools[pid]
@classmethod
def remove_connection(cls, pid, connection):
"""Remove a connection from the pool, closing it if is open.
:param str pid: The pool ID
:param connection: The connection to remove
:type connection: psycopg2.extensions.connection
:raises: ConnectionNotFoundError
"""
cls._ensure_pool_exists(pid)
cls._pools[pid].remove(connection)
@classmethod
def set_idle_ttl(cls, pid, ttl):
"""Set the idle TTL for a pool, after which it will be destroyed.
:param str pid: The pool id
:param int ttl: The TTL for an idle pool
"""
with cls._lock:
cls._ensure_pool_exists(pid)
cls._pools[pid].set_idle_ttl(ttl)
@classmethod
def set_max_size(cls, pid, size):
"""Set the maximum number of connections for the specified pool
:param str pid: The pool to set the size for
:param int size: The maximum number of connections
"""
with cls._lock:
cls._ensure_pool_exists(pid)
cls._pools[pid].set_max_size(size)
@classmethod
def shutdown(cls):
"""Close all connections on in all pools"""
for pid in list(cls._pools.keys()):
cls._pools[pid].shutdown()
LOGGER.info('Shutdown complete, all pooled connections closed')
@classmethod
def size(cls, pid):
"""Return the number of connections in the pool
:param str pid: The pool id
        :rtype: int
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return len(cls._pools[pid])
@classmethod
def report(cls):
"""Return the state of the all of the registered pools.
:rtype: dict
"""
return {
'timestamp': datetime.datetime.utcnow().isoformat(),
'process': os.getpid(),
'pools': dict([(i, p.report()) for i, p in cls._pools.items()])
}
@classmethod
def _ensure_pool_exists(cls, pid):
"""Raise an exception if the pool has yet to be created or has been
removed.
:param str pid: The pool ID to check for
:raises: KeyError
"""
if pid not in cls._pools:
raise KeyError('Pool %s has not been created' % pid)
@classmethod
def _maybe_remove_pool(cls, pid):
"""If the pool has no open connections, remove it
:param str pid: The pool id to clean
"""
if not len(cls._pools[pid]):
del cls._pools[pid]
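# Illustrative sketch only: an end-to-end pass through the PoolManager classmethods.
# The pool id would normally be derived from the connection URI by the caller (e.g.
# a queries.Session); here it is a made-up literal and `session` is any live object.
def _example_pool_manager_usage(dsn, session):
    pid = 'example-pool'
    PoolManager.create(pid, idle_ttl=60, max_size=2)
    PoolManager.add(pid, psycopg2.connect(dsn))
    handle = PoolManager.get(pid, session)   # lock an idle connection for the session
    # ... execute queries with `handle` ...
    PoolManager.free(pid, handle)            # release the lock
    PoolManager.clean(pid)                   # prune closed connections and stale pools
    return PoolManager.report()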
class QueriesException(Exception):
"""Base Exception for all other Queries exceptions"""
pass
class ConnectionException(QueriesException):
def __init__(self, cid):
self.cid = cid
class PoolException(QueriesException):
def __init__(self, pid):
self.pid = pid
class PoolConnectionException(PoolException):
def __init__(self, pid, cid):
self.pid = pid
self.cid = cid
class ActivePoolError(PoolException):
"""Raised when removing a pool that has active connections"""
def __str__(self):
return 'Pool %s has at least one active connection' % self.pid
class ConnectionBusyError(ConnectionException):
"""Raised when trying to lock a connection that is already busy"""
def __str__(self):
return 'Connection %s is busy' % self.cid
class ConnectionNotFoundError(PoolConnectionException):
"""Raised if a specific connection is not found in the pool"""
def __str__(self):
return 'Connection %s not found in pool %s' % (self.cid, self.pid)
class NoIdleConnectionsError(PoolException):
"""Raised if a pool does not have any idle, open connections"""
def __str__(self):
return 'Pool %s has no idle connections' % self.pid
class PoolFullError(PoolException):
"""Raised when adding a connection to a pool that has hit max-size"""
def __str__(self):
return 'Pool %s is at its maximum capacity' % self.pid
| 0 |
from ryu.lib.packet import packet
from . import ids_utils
from . import BoyerMooreStringSearch
import MySQLdb as mdb
class icmp(object):
def __init__(self,packet_data):
self.packet_data = packet.Packet(packet_data.data)
self.dst_ip = ids_utils.get_packet_dst_ip_address(self.packet_data)
self.src_ip = ids_utils.get_packet_src_ip_address(self.packet_data)
self.rule_type = ids_utils.get_packet_type(self.packet_data)
self.src_port = ids_utils.get_packet_src_port(self.packet_data)
self.dst_port = ids_utils.get_packet_dst_port(self.packet_data)
def check_packet(self,mode,src_ip, src_port, dst_ip, dst_port,rule_type,pattern,depth,offset,flags,rule_msg):
for p in self.packet_data:
            if hasattr(p, 'protocol_name'):
#print p.protocol_name
if p.protocol_name == 'icmp':
#print 'p.data: ', p.data
#print p.data
match = self.check_ip_match(src_ip, dst_ip,rule_type)
#print 'From ICMP Class : Match'
#print match
pkt_contents=""
if match == True:
length = ids_utils.get_packet_length(self.packet_data)
#print 'length: '
#print length
#for p in self.packet_data.protocols:
#for p in self.packet_data:
#print 'p: '
#print p
#if hasattr(p, 'protocol_name') is False:
#if p.protocol_name == 'ipv4':
#print 'Before Call to Print Packet Data in ICMP'
#ids_utils.print_packet_data(str(p), length)
#ids_utils.print_packet_data(str(p.data), length)
#pkt_contents=ids_utils.get_packet_data(str(p),length)
contents = ids_utils.get_packet_data(str(p.data),length)
pkt_contents = str(contents)
#ignore the first 11 characters consisting of the string echo(data='
pkt_contents = pkt_contents[11:]
if offset is not None:
pkt_contents = pkt_contents[offset:]
if depth is not None:
pkt_contents = pkt_contents[:depth]
#print 'Pattern: '
#print pattern
if pattern is not None:
for p in pattern:
#match_content = BoyerMooreStringSearch.BMSearch(pkt_contents,pattern)
match_content = pkt_contents.find(p)
if match_content == -1:
break
else:
# if pattern is None just set the match_content to True Value(1)
match_content = 1
#print 'match_content: '
#print match_content
#if match_content == True:
if match_content != -1:
f = open('/home/ubuntu/RYU295/ryu/lib/ids/log.txt', 'a')
f.write("\n")
f.write(rule_msg)
f.close()
self.writeToDB('ICMP Attack Packet', 'icmp',rule_msg,
self.src_ip, self.dst_ip, self.src_port, self.dst_port)
#print 'After Call to Print Packet Data in TCP'
#if mode == 'alert' and match_content == True:
if mode == 'alert' and match_content != -1:
#print 'TCP Attack Packet'
alertmsg = rule_msg
return alertmsg
def check_ip_match(self,src_ip, dst_ip,rule_type):
#print 'packet source', self.src_ip
#print 'packet dst', self.dst_ip
#print 'packet rule ', self.rule_type
#print 'rule source', src_ip
#print 'rule dst', dst_ip
#print 'rule type ', rule_type
#print 'Print Message from ICMP Module'
        if ('any' in src_ip) or (self.src_ip in src_ip):
            if ('any' in dst_ip) or (self.dst_ip in dst_ip):
                if self.rule_type == rule_type:
                    return True
        return False
def writeToDB(self,name, protocol, msg, srcip, dstip,srcport, dstport):
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
#with dbcon:
cursor = dbcon.cursor()
try:
#print 'In Try Block of WriteToDB'
cursor.execute("INSERT INTO attacks(name,protocol, message, sourceip, destip, sourceport, destport)VALUES (%s, %s,%s, %s, %s, %s, %s)",(name, protocol, msg, srcip, dstip, srcport, dstport))
dbcon.commit()
        except mdb.Error:
            # Roll back the failed insert rather than silently swallowing every exception
            dbcon.rollback()
        finally:
            dbcon.close()
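# writeToDB above assumes an `attacks` table already exists in the `attackdb`
# database. Its exact schema is not part of this module; the DDL below is only a
# guess that matches the columns used in the INSERT statement.
ATTACKS_TABLE_DDL = """
CREATE TABLE IF NOT EXISTS attacks (
    id INT AUTO_INCREMENT PRIMARY KEY,
    name VARCHAR(255),
    protocol VARCHAR(16),
    message VARCHAR(255),
    sourceip VARCHAR(45),
    destip VARCHAR(45),
    sourceport VARCHAR(16),
    destport VARCHAR(16)
)
"""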
| 0.042962 |
from __future__ import unicode_literals
from werkzeug.exceptions import HTTPException
from jinja2 import DictLoader, Environment
SINGLE_ERROR_RESPONSE = u"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>{{error_type}}</Code>
<Message>{{message}}</Message>
{% block extra %}{% endblock %}
<RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
</Error>
"""
ERROR_RESPONSE = u"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Errors>
<Error>
<Code>{{error_type}}</Code>
<Message>{{message}}</Message>
{% block extra %}{% endblock %}
</Error>
</Errors>
<RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
</Response>
"""
ERROR_JSON_RESPONSE = u"""{
"message": "{{message}}",
"__type": "{{error_type}}"
}
"""
class RESTError(HTTPException):
code = 400
templates = {
'single_error': SINGLE_ERROR_RESPONSE,
'error': ERROR_RESPONSE,
'error_json': ERROR_JSON_RESPONSE,
}
def __init__(self, error_type, message, template='error', **kwargs):
super(RESTError, self).__init__()
env = Environment(loader=DictLoader(self.templates))
self.error_type = error_type
self.message = message
self.description = env.get_template(template).render(
error_type=error_type, message=message, **kwargs)
class DryRunClientError(RESTError):
code = 400
class JsonRESTError(RESTError):
def __init__(self, error_type, message, template='error_json', **kwargs):
super(JsonRESTError, self).__init__(
error_type, message, template, **kwargs)
def get_headers(self, *args, **kwargs):
return [('Content-Type', 'application/json')]
def get_body(self, *args, **kwargs):
return self.description
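# Illustrative sketch only: how a service-specific error might be layered on top of
# the classes above. The class name, error code and message are hypothetical and do
# not correspond to any real service implementation.
class ExampleResourceNotFoundError(JsonRESTError):
    code = 404

    def __init__(self, resource_id):
        super(ExampleResourceNotFoundError, self).__init__(
            'ResourceNotFoundException',
            'Resource {0} does not exist'.format(resource_id))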
| 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
import pytest_pootle
from pytest_pootle.utils import create_store
from import_export.exceptions import UnsupportedFiletypeError
from import_export.utils import import_file
from pootle_app.models.permissions import check_user_permission
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import NEW, OBSOLETE, PARSED, TRANSLATED
from pootle_store.models import Store, Unit
IMPORT_SUCCESS = "headers_correct.po"
IMPORT_UNSUPP_FILE = "tutorial.ts"
def _import_file(file_name, file_dir=None,
content_type="text/x-gettext-translation",
user=None):
if not file_dir:
file_dir = os.path.join(
os.path.dirname(pytest_pootle.__file__),
"data/po/tutorial/en")
with open(os.path.join(file_dir, file_name), "r") as f:
import_file(
SimpleUploadedFile(file_name, f.read(), content_type),
user=user)
@pytest.mark.django_db
def test_import_success(project0_nongnu, store0, admin):
store0.sync()
assert store0.state == NEW
_import_file(IMPORT_SUCCESS, user=admin)
store = Store.objects.get(pk=store0.pk)
assert store.state == PARSED
@pytest.mark.django_db
def test_import_failure(po_directory, en_tutorial_po,
file_import_failure, member):
filename, exception = file_import_failure
with pytest.raises(exception):
_import_file(filename, user=member)
@pytest.mark.django_db
def test_import_unsupported(po_directory, en_tutorial_ts,
ts_directory, member):
with pytest.raises(UnsupportedFiletypeError):
_import_file(IMPORT_UNSUPP_FILE,
file_dir=os.path.join(ts_directory, "tutorial/en"),
content_type="text/vnd.trolltech.linguist",
user=member)
@pytest.mark.django_db
def test_import_new_file(project0_nongnu, import_tps, site_users):
tp = import_tps
user = site_users["user"]
store_pootle_path = tp.pootle_path + "import_new_file.po"
filestore = create_store(store_pootle_path, "0",
[("Unit Source", "Unit Target", False)])
    # A new store can't be created via the current import command. This test will
    # need to be adjusted if we allow creating new stores via the import command.
from import_export.exceptions import FileImportError
with pytest.raises(FileImportError):
import_file(SimpleUploadedFile("import_new_file.po",
str(filestore),
"text/x-gettext-translation"), user)
@pytest.mark.django_db
def test_import_to_empty(project0_nongnu, import_tps, site_users):
from pytest_pootle.factories import StoreDBFactory
tp = import_tps
user = site_users["user"]
store = StoreDBFactory(translation_project=tp, name="import_to_empty.po")
filestore = create_store(store.pootle_path, "0",
[("Unit Source", "Unit Target", False)])
import_file(SimpleUploadedFile(store.name,
str(filestore),
"text/x-gettext-translation"), user)
allow_add_and_obsolete = ((tp.project.checkstyle == 'terminology'
or tp.is_template_project)
and check_user_permission(user,
'administrate',
tp.directory))
if allow_add_and_obsolete:
assert tp.stores.get(pootle_path=store.pootle_path).units.count() == 1
unit_source = store.units[0].unit_source.get()
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit_source.created_by == user
assert store.units[0].change.changed_with == SubmissionTypes.UPLOAD
assert store.units[0].change.submitted_by == user
else:
assert tp.stores.get(pootle_path=store.pootle_path).units.count() == 0
@pytest.mark.django_db
def test_import_add_and_obsolete_units(project0_nongnu, import_tps,
site_users):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
tp = import_tps
user = site_users["user"]
store = StoreDBFactory(translation_project=tp)
unit = UnitDBFactory(store=store, state=TRANSLATED)
filestore = create_store(
store.pootle_path,
"0",
[(unit.source_f + " REPLACED", unit.target_f + " REPLACED", False)])
import_file(SimpleUploadedFile("import_add_and_obsolete.po",
str(filestore),
"text/x-gettext-translation"), user)
allow_add_and_obsolete = ((tp.project.checkstyle == 'terminology'
or tp.is_template_project)
and check_user_permission(user,
'administrate',
tp.directory))
if allow_add_and_obsolete:
assert Unit.objects.filter(store=store, state=OBSOLETE).count() == 1
assert store.units.filter(state=TRANSLATED).count() == 1
unit_source = store.units[0].unit_source.get()
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit_source.created_by == user
assert store.units[0].change.changed_with == SubmissionTypes.UPLOAD
assert store.units[0].change.submitted_by == user
assert Unit.objects.filter(store=store).count() == 2
else:
assert store.units.all().count() == 1
| 0 |
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from oauth2client.client import GoogleCredentials
import installed_app
PROJECT = os.environ['GCLOUD_PROJECT']
CLIENT_SECRETS = os.environ['GOOGLE_CLIENT_SECRETS']
class Namespace(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def test_main(monkeypatch, capsys):
installed_app.CLIENT_SECRETS = CLIENT_SECRETS
# Replace the user credentials flow with Application Default Credentials.
# Unfortunately, there's no easy way to fully test the user flow.
def mock_run_flow(flow, storage, args):
return GoogleCredentials.get_application_default()
monkeypatch.setattr(installed_app.tools, 'run_flow', mock_run_flow)
args = Namespace(
project_id=PROJECT,
logging_level='INFO',
noauth_local_webserver=True)
installed_app.main(args)
out, _ = capsys.readouterr()
assert re.search(re.compile(
r'bigquery#datasetList', re.DOTALL), out)
| 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import json
from frappe.utils import flt, cstr, nowdate, nowtime
class InvalidWarehouseCompany(frappe.ValidationError): pass
def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date: posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
values.append(warehouse)
condition += " AND warehouse = %s"
if item_code:
values.append(item_code)
condition += " AND item_code = %s"
stock_ledger_entries = frappe.db.sql("""
SELECT item_code, stock_value
FROM `tabStock Ledger Entry`
WHERE posting_date <= %s {0}
ORDER BY timestamp(posting_date, posting_time) DESC, name DESC
""".format(condition), values, as_dict=1)
sle_map = {}
for sle in stock_ledger_entries:
sle_map.setdefault(sle.item_code, flt(sle.stock_value))
return sum(sle_map.values())
def get_stock_balance(item_code, warehouse, posting_date=None, posting_time=None, with_valuation_rate=False):
"""Returns stock balance quantity at given warehouse on given posting date or current date.
If `with_valuation_rate` is True, will return tuple (qty, rate)"""
from erpnext.stock.stock_ledger import get_previous_sle
if not posting_date: posting_date = nowdate()
if not posting_time: posting_time = nowtime()
last_entry = get_previous_sle({
"item_code": item_code,
"warehouse":warehouse,
"posting_date": posting_date,
"posting_time": posting_time })
if with_valuation_rate:
return (last_entry.qty_after_transaction, last_entry.valuation_rate) if last_entry else (0.0, 0.0)
else:
return last_entry.qty_after_transaction or 0.0
def get_latest_stock_balance():
bin_map = {}
for d in frappe.db.sql("""SELECT item_code, warehouse, stock_value as stock_value
FROM tabBin""", as_dict=1):
bin_map.setdefault(d.warehouse, {}).setdefault(d.item_code, flt(d.stock_value))
return bin_map
def get_bin(item_code, warehouse):
bin = frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse})
if not bin:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.insert()
else:
bin_obj = frappe.get_doc('Bin', bin)
bin_obj.flags.ignore_permissions = True
return bin_obj
def update_bin(args, allow_negative_stock=False, via_landed_cost_voucher=False):
is_stock_item = frappe.db.get_value('Item', args.get("item_code"), 'is_stock_item')
if is_stock_item:
bin = get_bin(args.get("item_code"), args.get("warehouse"))
bin.update_stock(args, allow_negative_stock, via_landed_cost_voucher)
return bin
else:
frappe.msgprint(_("Item {0} ignored since it is not a stock item").format(args.get("item_code")))
def get_incoming_rate(args):
"""Get Incoming Rate based on valuation method"""
from erpnext.stock.stock_ledger import get_previous_sle
in_rate = 0
if (args.get("serial_no") or "").strip():
in_rate = get_avg_purchase_rate(args.get("serial_no"))
else:
valuation_method = get_valuation_method(args.get("item_code"))
previous_sle = get_previous_sle(args)
if valuation_method == 'FIFO':
if not previous_sle:
return 0.0
previous_stock_queue = json.loads(previous_sle.get('stock_queue', '[]') or '[]')
in_rate = get_fifo_rate(previous_stock_queue, args.get("qty") or 0) if previous_stock_queue else 0
elif valuation_method == 'Moving Average':
in_rate = previous_sle.get('valuation_rate') or 0
return in_rate
def get_avg_purchase_rate(serial_nos):
"""get average value of serial numbers"""
serial_nos = get_valid_serial_nos(serial_nos)
return flt(frappe.db.sql("""select avg(ifnull(purchase_rate, 0)) from `tabSerial No`
where name in (%s)""" % ", ".join(["%s"] * len(serial_nos)),
tuple(serial_nos))[0][0])
def get_valuation_method(item_code):
"""get valuation method from item or default"""
val_method = frappe.db.get_value('Item', item_code, 'valuation_method')
if not val_method:
val_method = frappe.db.get_value("Stock Settings", None, "valuation_method") or "FIFO"
return val_method
def get_fifo_rate(previous_stock_queue, qty):
"""get FIFO (average) Rate from Queue"""
if qty >= 0:
total = sum(f[0] for f in previous_stock_queue)
return sum(flt(f[0]) * flt(f[1]) for f in previous_stock_queue) / flt(total) if total else 0.0
else:
available_qty_for_outgoing, outgoing_cost = 0, 0
qty_to_pop = abs(qty)
while qty_to_pop and previous_stock_queue:
batch = previous_stock_queue[0]
if 0 < batch[0] <= qty_to_pop:
# if batch qty > 0
# not enough or exactly same qty in current batch, clear batch
available_qty_for_outgoing += flt(batch[0])
outgoing_cost += flt(batch[0]) * flt(batch[1])
qty_to_pop -= batch[0]
previous_stock_queue.pop(0)
else:
# all from current batch
available_qty_for_outgoing += flt(qty_to_pop)
outgoing_cost += flt(qty_to_pop) * flt(batch[1])
batch[0] -= qty_to_pop
qty_to_pop = 0
return outgoing_cost / available_qty_for_outgoing
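# Illustrative note (added for clarity, not part of the original module): the queue is a
# list of [qty, rate] batches, oldest first; the figures below are hypothetical.
#   queue = [[10, 100.0], [5, 120.0]]
#   get_fifo_rate(queue, 12)   # incoming qty: plain average -> (10*100.0 + 5*120.0) / 15 = 106.67
#   get_fifo_rate(queue, -12)  # outgoing qty: consume 10 from the first batch and 2 from the
#                              # second -> (10*100.0 + 2*120.0) / 12 = 103.33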
def get_valid_serial_nos(sr_nos, qty=0, item_code=''):
"""split serial nos, validate and return list of valid serial nos"""
# TODO: remove duplicates in client side
serial_nos = cstr(sr_nos).strip().replace(',', '\n').split('\n')
valid_serial_nos = []
for val in serial_nos:
if val:
val = val.strip()
if val in valid_serial_nos:
frappe.throw(_("Serial number {0} entered more than once").format(val))
else:
valid_serial_nos.append(val)
if qty and len(valid_serial_nos) != abs(qty):
frappe.throw(_("{0} valid serial nos for Item {1}").format(abs(qty), item_code))
return valid_serial_nos
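# Illustrative note (added for clarity, not part of the original module): serial numbers may be
# separated by commas or newlines; the values below are hypothetical.
#   get_valid_serial_nos("SN001, SN002\nSN003", qty=3, item_code="ITEM-X")
#   # -> ['SN001', 'SN002', 'SN003']; a duplicate entry or a count different from abs(qty)
#   # is rejected via frappe.throw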
def validate_warehouse_company(warehouse, company):
warehouse_company = frappe.db.get_value("Warehouse", warehouse, "company")
if warehouse_company and warehouse_company != company:
frappe.throw(_("Warehouse {0} does not belong to company {1}").format(warehouse, company),
InvalidWarehouseCompany)
| 0.026411 |
# __METADATA__BEGIN__
# <?xml version="1.0" encoding="utf-8" ?>
# <metadata version="1.0">
# <description>description</description>
# <prerequisites>prerequisites</prerequisites>
# <parameters>
# </parameters>
# </metadata>
# __METADATA__END__
##
# This file is a part of the SIP Virtual Endpoint component for Testerman.
#
# (c) Denis Machard and other contributors.
##
"""
An Encoder/Decoder of SDP messages + a function to negotiate codecs
@date: 12/29/2008
"""
class Sdp:
def __init__ (self, versionSdp, versionIp, userName, sessionName):
"""
Initialize the class Sdp
@param versionSdp: 0 or more
@type versionSdp: string
@param versionIp: IP4 / IP6
@type versionIp: string
@param userName: owner
@type userName: string
@param sessionName: session name
@type sessionName: string
"""
self.versionIp = versionIp
self.versionSdp = versionSdp
self.sessionId = '1'
self.sessionVersion = '1'
self.userName = userName
self.sessionName = sessionName
self.codecMap = { 0: 'PCMU/8000',
3: 'GSM/8000',
4: 'G723/8000',
8: 'PCMA/8000',
18: 'G729/8000',
34: 'H263/90000',
97: 'H264/90000',
99: 'H263-1998/90000',
100: 'MP4V-ES/90000',
101: 'telephone-event/8000'
}
self.protos = ['RTP/AVP']
self.defaultCodec = 'PCMA/8000'
self.codecsSupported = None
# sdp template
self.sdpOffer = None
self.sdpOfferNull = None
self.sdpAnswer = None
self.sdpAnswerReInvite = None
#
self.sdpOfferReceived = False
self.sdpOfferSended = False
self.sdpAnswerReceived = False
self.sdpAnswerSended = False
self.sdpReceived = None
self.portUsed = None
self.ipUsed = None
def setSdpAnswerSended(self):
self.sdpAnswerSended = True
def setSdpOfferReceived(self):
self.sdpOfferReceived = True
def setSdpOfferSended(self):
self.sdpOfferSended = True
def setSdpTemplate(self, ip, port, codecs):
self.codecsSupported = codecs
self.portUsed = port
self.ipUsed = ip
self.sdpOffer = { 'ownerAddress': str(ip),
'connectionAddress': str(ip),
'sessionAttribute': 'sendrecv',
'mediaDescription': [ { '__type': 'audio',
'__port': int(port),
'__proto': 'RTP/AVP',
'__codecs': codecs
}
]
}
self.sdpOfferNull = { 'ownerAddress': str(ip),
'connectionAddress': '0.0.0.0',
'mediaDescription': [ { '__type': 'audio',
'__port': int(port),
'__proto': 'RTP/AVP',
'__codecs': codecs,
'__attribute':'sendonly'
}
]
}
self.sdpAnswer = { 'ownerAddress': str(ip),
'connectionAddress': str(ip),
'sessionAttribute': 'sendrecv',
'mediaDescription': []
}
self.sdpAnswerReInvite = { 'ownerAddress': str(ip),
'connectionAddress': str(ip),
'sessionAttribute': 'sendrecv',
'mediaDescription': []
}
def decodeSdpAndNegociateCodec(self, sdp):
self.resetSdpAnswer()
sdpNegociated = False
self.sdpAnswerReceived = True
self.sdpReceived = sdp
sdpDecoded = self.decode(sdp)
if sdpDecoded.has_key('mediaDescription'):
if len(sdpDecoded['mediaDescription']) > 0:
for media in sdpDecoded['mediaDescription']:
if sdpNegociated == False:
sdpNegociated = True
portToSend = media['__port']
codecChoosed = self.negotiatesCodec(self.codecsSupported, media['__codecs'])
if codecChoosed != None:
self.sdpAnswer['mediaDescription'].append({ '__type': 'audio',
'__port': self.portUsed,
'__proto': 'RTP/AVP',
'__codecs': [codecChoosed]
})
self.sdpAnswerReInvite['mediaDescription'].append({ '__type': 'audio',
'__port': self.portUsed,
'__proto': 'RTP/AVP',
'__codecs': [codecChoosed]
})
else:
return None, None, None
else:
self.sdpAnswer['mediaDescription'].append({ '__type': media['__type'],
'__port': 0,
'__proto': media['__proto'],
'__codecs': media['__codecs']
})
self.sdpAnswerReInvite['mediaDescription'].append({ '__type': media['__type'],
'__port': 0,
'__proto': media['__proto'],
'__codecs': media['__codecs']
})
else:
return None, None, None
else:
return None, None, None
if sdpNegociated == False:
return None, None, None
else:
return codecChoosed, sdpDecoded['connectionAddress'], portToSend
def getSdpOffer(self, type = None):
self.sdpOfferSended = True
if type == 'Null':
return self.encode(self.sdpOfferNull)
else:
return self.encode(self.sdpOffer)
def getSdpAnswer(self):
return self.encode(self.sdpAnswer)
def getSdpAnswerReInvite(self):
return self.encode(self.sdpAnswerReInvite)
def resetSdpAnswer(self):
self.sdpAnswer['mediaDescription'] = []
self.sdpAnswerReInvite['mediaDescription'] = []
def reset(self):
self.sdpOfferReceived = False
self.sdpOfferSended = False
self.sdpAnswerReceived = False
self.sdpAnswerSended = False
self.sdpReceived = None
def decode (self, sdp):
"""
Decode an SDP text message into a Python dictionary
@type sdp: string
@param sdp: sdp payload (raw text)
@return: an SDP message (Python dictionary)
@rtype: dict
"""
data = {}
media = False
mediaDescription = []
lines = sdp.splitlines()
i = 1
for line in lines:
if line.startswith('v='):
tmp = line[2:].split(' ')
if tmp[0] != self.versionSdp:
raise Exception('Sdp version not supported!')
if line.startswith('o='):
tmp = line[2:].split(' ')
if tmp[4] != self.versionIp:
raise Exception('Incorrect ip version!')
data.update({'ownerAddress': tmp[5]})
if line.startswith('c='):
tmp = line[2:].split(' ')
if tmp[1] != self.versionIp:
raise Exception('Incorrect ip version!')
data.update({'connectionAddress': tmp[2]})
if line.startswith('a='):
tmp = line[2:].split(' ')
if media == True:
if tmp[0] == 'sendonly' or tmp[0] == 'recvonly':
dico.update({'__attribute': tmp[0]})
else:
data.update({'sessionAttribute': tmp[0]})
if line.startswith('m='):
if media == True:
mediaDescription.append(dico)
media = True
tmp = line[2:].split(' ')
codec = tmp[3:]
codec = map(lambda x: int(x), codec)
isPresent = False
for proto in self.protos:
if tmp[2] == proto:
isPresent = True
if isPresent == False:
raise Exception('protocol not supported: %s' % str(tmp[2]))
dico = {'__type':tmp[0],'__port':tmp[1], '__codecs':codec,'__proto': tmp[2]}
if i == len(lines):
if media == True:
mediaDescription.append(dico)
media = False
data.update({'mediaDescription': mediaDescription})
i += 1
return data
def encode (self, sdp):
"""
Encode an SDP dictionary message to an SDP text message
@type sdp: dict
@param sdp: an SDP message (Python dictionary) \n
example: sdpDesc = { 'ownerAddress': '10.0.0.1', 'connectionAddress': '0.0.0.0', 'sessionAttribute':'directive:active',
'mediaDescription': [ {'__type': 'audio', '__port': 30900, '__codecs': [8,0,18], '__proto': 'RTP/AVP'} ] }
@return: payload SDP (text)
@rtype: string
"""
mediaDescription = ''
if not (isinstance(sdp,(dict))): raise Exception('A dictionary is required!')
if not sdp.has_key('mediaDescription'): raise Exception('<mediaDescription> is mandatory')
if not sdp.has_key('ownerAddress'): raise Exception('<ownerAddress> is mandatory')
if not sdp.has_key('connectionAddress'): raise Exception('<connectionAddress> is mandatory')
nbMedia = len(sdp['mediaDescription'])
i = 0
for media in sdp['mediaDescription'] :
i = i + 1
mediaAttribute = ''
mediaFormat = ''
if not media.has_key('__codecs'): raise Exception('<__codecs> is mandatory')
if not media.has_key('__proto'): raise Exception('<__proto> is mandatory')
if not media.has_key('__type'): raise Exception('<__type> is mandatory')
if not media.has_key('__port'): raise Exception('<__port> is mandatory')
isPresent = False
for proto in self.protos:
if media['__proto'] == proto:
isPresent = True
if isPresent == False:
raise Exception('protocol not supported: %s' % str(media['__proto']))
media['__codecs'] = map(lambda x: int(x), media['__codecs'])
for codec in media['__codecs'] :
mediaFormat += ' ' + str(codec)
if self.codecMap.has_key(codec):
details = self.codecMap[codec]
else:
details = self.defaultCodec
mediaAttribute += 'a=rtpmap:%s %s\r\n' % (str(codec), details)
if codec == 101:
mediaAttribute += 'a=fmtp:101 0-11,16\r\n'
mediaDescription += 'm=%s %s %s%s\r\n' % (media['__type'], media['__port'], media['__proto'], mediaFormat)
mediaDescription += mediaAttribute
if media.has_key('__attribute'):
mediaDescription += 'a=%s\r\n' % (media['__attribute'])
mediaDescription = mediaDescription.rstrip("\r\n")
if i != nbMedia:
mediaDescription += '\r\n'
sdpMessage = 'v=%s\r\n' % self.versionSdp
sdpMessage = sdpMessage + 'o=%s %s %s IN %s %s\r\n' % (self.userName, self.sessionId, self.sessionVersion, self.versionIp, sdp['ownerAddress'])
sdpMessage = sdpMessage + 's=%s\r\n' % self.sessionName
sdpMessage = sdpMessage + 'c=IN %s %s\r\n' % (self.versionIp,sdp['connectionAddress'])
sdpMessage = sdpMessage + 't=0 0\r\n'
if sdp.has_key('sessionAttribute'):
sdpMessage = sdpMessage + 'a=%s\r\n' % sdp['sessionAttribute']
sdpMessage = sdpMessage + '%s\r\n' % mediaDescription
return sdpMessage
def negotiatesCodec(self, codecsSupported, codecsReceived):
"""
This function negotiates a common codec between the supported and received codec lists
@param codecsSupported: example [8,0]
@type codecsSupported: list
@param codecsReceived: example [8, 18, 0]
@type codecsReceived: list
@return: the chosen codec (or None if no codec is shared)
@rtype: integer
"""
codecChoosed = None
for codecReceived in codecsReceived:
for codecSupported in codecsSupported:
if codecReceived == codecSupported:
codecChoosed = codecSupported
return codecChoosed
return codecChoosed
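# Minimal usage sketch appended for illustration only (not part of the original module);
# the IP address, port and codec list below are hypothetical.
if __name__ == '__main__':
    sdp = Sdp(versionSdp='0', versionIp='IP4', userName='tester', sessionName='demo')
    sdp.setSdpTemplate(ip='192.168.0.10', port=30000, codecs=[8, 0, 101])
    offer = sdp.getSdpOffer()
    # Feed the offer back in: the answer should pick PCMA (8), the first common codec.
    codec, remoteIp, remotePort = sdp.decodeSdpAndNegociateCodec(offer)
    print codec, remoteIp, remotePort  # expected: 8 192.168.0.10 30000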
| 0.043811 |
#!/usr/bin/env python
"""
General geometry support routines.
--------------------------------------------------------------------
This program is licensed under the GNU General Public License (GPL)
Version 3. See http://www.fsf.org for details of the license.
Rugged Circuits LLC
http://ruggedcircuits.com/gerbmerge
"""
import math
# Ensure all list elements are unique
def uniqueify(L):
return {}.fromkeys(L).keys()
# This function rounds an (X,Y) point to integer co-ordinates
def roundPoint(pt):
return (int(round(pt[0])),int(round(pt[1])))
# Returns True if the segment defined by endpoints p1 and p2 is vertical
def isSegmentVertical(p1, p2):
return p1[0]==p2[0]
# Returns True if the segment defined by endpoints p1 and p2 is horizontal
def isSegmentHorizontal(p1, p2):
return p1[1]==p2[1]
# Returns slope of a non-vertical line segment
def segmentSlope(p1, p2):
return float(p2[1]-p1[1])/(p2[0]-p1[0])
# Determine if the (X,Y) 'point' is on the line segment defined by endpoints p1
# and p2, both (X,Y) tuples. It's assumed that the point is on the line defined
# by the segment, but just may be beyond the endpoints. NOTE: No testing is
# performed to see if the point is actually on the line defined by the segment!
# This is assumed!
def isPointOnSegment(point, p1, p2):
if isSegmentVertical(p1,p2):
# Treat vertical lines by comparing Y-ordinates
return (point[1]-p2[1])*(point[1]-p1[1]) <= 0
else:
# Treat other lines, including horizontal lines, by comparing X-ordinates
return (point[0]-p2[0])*(point[0]-p1[0]) <= 0
# Returns (X,Y) point where the line segment defined by (X,Y) endpoints p1 and
# p2 intersects the line segment defined by endpoints q1 and q2. Only a single
# intersection point is allowed, so no coincident lines. If there is no point
# of intersection, None is returned.
def segmentXsegment1pt(p1, p2, q1, q2):
A,B = p1
C,D = p2
P,Q = q1
R,S = q2
# We have to consider special cases of one or other line segments being vertical
if isSegmentVertical(p1,p2):
if isSegmentVertical(q1,q2): return None
x = A
y = segmentSlope(q1,q2)*(A-P) + Q
elif isSegmentVertical(q1,q2):
x = P
y = segmentSlope(p1,p2)*(P-A) + B
else:
m1 = segmentSlope(p1,p2)
m2 = segmentSlope(q1,q2)
if m1==m2: return None
x = (A*m1 - B - P*m2 + Q) / (m1-m2)
y = m1*(x-A) + B
# Candidate point identified. Check to make sure it's on both line segments.
if isPointOnSegment((x,y), p1, p2) and isPointOnSegment((x,y), q1, q2):
return roundPoint((x,y))
else:
return None
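# Worked example (added for illustration; the coordinates are hypothetical): the
# diagonals of a 10x10 square cross at its centre.
#   segmentXsegment1pt((0,0), (10,10), (0,10), (10,0))  ->  (5, 5)
# Parallel (equal-slope) segments, or segments whose lines cross outside both
# segments, return None.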
# Returns True if the given (X,Y) 'point' is strictly within the rectangle
# defined by (LLX,LLY,URX,URY) co-ordinates (LL=lower left, UR=upper right).
def isPointStrictlyInRectangle(point, rect):
x,y = point
llx,lly,urx,ury = rect
return (llx < x < urx) and (lly < y < ury)
# This function takes two points which define the extents of a rectangle. The
# return value is a 5-tuple (ll, ul, ur, lr, rect) which comprises 4 points
# (lower-left, upper-left, upper-right, lower-right) and a rect object (minx,
# miny, maxx, maxy). If called with a single argument, it is expected to be
# a 4-tuple (x1,y1,x2,y2).
def canonicalizeExtents(pt1, pt2=None):
# First canonicalize lower-left and upper-right points
if pt2 is None:
maxx = max(pt1[0], pt1[2])
minx = min(pt1[0], pt1[2])
maxy = max(pt1[1], pt1[3])
miny = min(pt1[1], pt1[3])
else:
maxx = max(pt1[0], pt2[0])
minx = min(pt1[0], pt2[0])
maxy = max(pt1[1], pt2[1])
miny = min(pt1[1], pt2[1])
# Construct the four corners
llpt = (minx,miny)
urpt = (maxx,maxy)
ulpt = (minx,maxy)
lrpt = (maxx,miny)
# Construct a rect object for use by various functions
rect = (minx, miny, maxx, maxy)
return (llpt, ulpt, urpt, lrpt, rect)
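# Worked example (added for illustration; the extents are hypothetical):
#   canonicalizeExtents((5, 1, 2, 7))
#   -> ((2,1), (2,7), (5,7), (5,1), (2,1,5,7))
# i.e. the corners are re-ordered so that minx<maxx and miny<maxy no matter how the
# two defining points were given.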
# This function returns a list of intersection points of the line segment
# pt1-->pt2 and the box defined by corners llpt and urpt. These corners are
# canonicalized internally so they need not necessarily be lower-left and
# upper-right points.
#
# The return value may be a list of 0, 1, or 2 points. If the list has 2
# points, then the segment intersects the box in two points since both points
# are outside the box. If the list has 1 point, then the segment has one point
# inside the box and another point outside. If the list is empty, the segment
# has both points outside the box and there is no intersection, or has both
# points inside the box.
#
# Note that segments collinear with box edges produce no points of
# intersection.
def segmentXbox(pt1, pt2, llpt, urpt):
# First canonicalize lower-left and upper-right points
llpt, ulpt, urpt, lrpt, rect = canonicalizeExtents(llpt, urpt)
# Determine whether one point is inside the rectangle and the other is not.
# Note the XOR operator '^'
oneInOneOut = isPointStrictlyInRectangle(pt1,rect) ^ isPointStrictlyInRectangle(pt2,rect)
# Find all intersections of the segment with the 4 sides of the box,
# one side at a time. L will be the list of definitely-true intersections,
# while corners is a list of potential intersections. An intersection
# is potential if a) it is a corner, and b) there is another intersection
# of the line with the box somewhere else. This is how we handle
# corner intersections, which are sometimes legal (when one segment endpoint
# is inside the box and the other isn't, or when the segment intersects the
# box in two places) and sometimes not (when the segment is "tangent" to
# the box at the corner and the corner is the single point of intersection).
L = []
corners = []
# Do not allow intersection if segment is collinear with box sides. For
# example, a horizontal line collinear with the box top side should not
# return an intersection with the upper-left or upper-right corner.
# Similarly, a point of intersection that is a corner should only be
# allowed if one segment point is inside the box and the other is not,
# otherwise it means the segment is "tangent" to the box at that corner.
# There is a case, however, in which a corner is a point of intersection
# with both segment points outside the box, and that is if there are two
# points of intersection, i.e., the segment goes completely through the box.
def checkIntersection(corner1, corner2):
# Check intersection with side of box
pt = segmentXsegment1pt(pt1, pt2, corner1, corner2)
if pt in (corner1,corner2):
# Only allow this corner intersection point if line is not
# horizontal/vertical and one point is inside rectangle while other is
# not, or the segment intersects the box in two places. Since oneInOneOut
# calls isPointStrictlyInRectangle(), which automatically excludes points
# on the box itself, horizontal/vertical lines collinear with box sides
# will always lead to oneInOneOut==False (since both will be "out of
# box").
if oneInOneOut:
L.append(pt)
else:
corners.append(pt) # Potentially a point of intersection...we'll have to wait and
# see if there is one more point of intersection somewhere else.
else:
# Not a corner intersection, so it's valid
if pt is not None: L.append(pt)
# Check intersection with left side of box
checkIntersection(llpt, ulpt)
# Check intersection with top side of box
checkIntersection(ulpt, urpt)
# Check intersection with right side of box
checkIntersection(urpt, lrpt)
# Check intersection with bottom side of box
checkIntersection(llpt, lrpt)
# Ensure all points are unique. We may get a double hit at the corners
# of the box.
L = uniqueify(L)
corners = uniqueify(corners)
# If the total number of intersections len(L)+len(corners) is 2, the corner
# is valid. If there is only a single corner, it's a tangent and invalid.
# However, if both corners are on the same side of the box, it's not valid.
numPts = len(L)+len(corners)
assert numPts <= 2
if numPts == 2:
if len(corners)==2 and (isSegmentHorizontal(corners[0], corners[1]) or isSegmentVertical(corners[0],corners[1])):
return []
else:
L += corners
L.sort() # Just for stability in assertion checking
return L
else:
L.sort()
return L # Correct if numPts==1, since it will be empty or contain a single valid intersection
# Correct if numPts==0, since it will be empty
# This function determines if two rectangles defined by 4-tuples
# (minx, miny, maxx, maxy) have any rectangle in common. If so, it is
# returned as a 4-tuple, else None is returned. This function assumes
# the rectangles are canonical so that minx<maxx, miny<maxy. If the
# optional allowLines parameter is True, rectangles that overlap on
# a line are considered overlapping, otherwise they must overlap with
# a rectangle of at least width 1.
def areExtentsOverlapping(E1, E2, allowLines=False):
minX,minY,maxX,maxY = E1
minU,minV,maxU,maxV = E2
if allowLines:
if (minU > maxX) or (maxU < minX) or (minV > maxY) or (maxV < minY):
return False
else:
return True
else:
if (minU >= maxX) or (maxU <= minX) or (minV >= maxY) or (maxV <= minY):
return False
else:
return True
# Compute the intersection of two rectangles defined by 4-tuples E1 and E2,
# which are not necessarily canonicalized.
def intersectExtents(E1, E2):
ll1, ul1, ur1, lr1, rect1 = canonicalizeExtents(E1)
ll2, ul2, ur2, lr2, rect2 = canonicalizeExtents(E2)
if not areExtentsOverlapping(rect1, rect2):
return None
xll = max(rect1[0], rect2[0]) # Maximum of minx values
yll = max(rect1[1], rect2[1]) # Maximum of miny values
xur = min(rect1[2], rect2[2]) # Minimum of maxx values
yur = min(rect1[3], rect2[3]) # Minimum of maxy values
return (xll, yll, xur, yur)
# This function returns True if rectangle E1 is wholly contained within
# rectangle E2. Both E1 and E2 are 4-tuples (minx,miny,maxx,maxy), not
# necessarily canonicalized. This function is like a slightly faster
# version of "intersectExtents(E1, E2)==E1".
def isRect1InRect2(E1, E2):
ll1, ul1, ur1, lr1, rect1 = canonicalizeExtents(E1)
ll2, ul2, ur2, lr2, rect2 = canonicalizeExtents(E2)
return (ll1[0] >= ll2[0]) and (ll1[1] >= ll2[1]) \
and (ur1[0] <= ur2[0]) and (ur1[1] <= ur2[1])
# Return width of rectangle, which may be 0 if bottom-left and upper-right X
# positions are the same. The rectangle is a 4-tuple (minx,miny,maxx,maxy).
def rectWidth(rect):
return abs(rect[2]-rect[0])
# Return height of rectangle, which may be 0 if bottom-left and upper-right Y
# positions are the same. The rectangle is a 4-tuple (minx,miny,maxx,maxy).
def rectHeight(rect):
return abs(rect[3]-rect[1])
# Return center (X,Y) co-ordinates of rectangle.
def rectCenter(rect):
dx = rectWidth(rect)
dy = rectHeight(rect)
if dx & 1: # Odd width: center is (left+right)/2 + 1/2
X = (rect[0] + rect[2] + 1)/2
else: # Even width: center is (left+right)/2
X = (rect[0] + rect[2])/2
if dy & 1:
Y = (rect[1] + rect[3] + 1)/2
else:
Y = (rect[1] + rect[3])/2
return (X,Y)
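# Worked example (added for illustration; the rectangles are hypothetical):
#   rectCenter((0, 0, 5, 5))  ->  (3, 3)   # odd width/height: centre rounds up
#   rectCenter((0, 0, 4, 4))  ->  (2, 2)   # even width/height: exact centre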
if __name__=="__main__":
llpt = (1000,1000)
urpt = (5000,5000)
# A segment that cuts across the box and intersects in corners
assert segmentXbox((0,0), (6000,6000), llpt, urpt) == [(1000,1000), (5000,5000)] # Two valid corners
assert segmentXbox((0,6000), (6000,0), llpt, urpt) == [(1000,5000), (5000,1000)] # Two valid corners
assert segmentXbox((500,500), (2500, 2500), llpt, urpt) == [(1000,1000)] # One valid corner
assert segmentXbox((2500,2500), (5500, 5500), llpt, urpt) == [(5000,5000)] # One valid corner
# Segments collinear with box sides
assert segmentXbox((1000,0), (1000,6000), llpt, urpt) == [] # Box side contained in segment
assert segmentXbox((1000,0), (1000,3000), llpt, urpt) == [] # Box side partially overlaps segment
assert segmentXbox((1000,2000), (1000,4000), llpt, urpt) == [] # Segment contained in box side
# Segments fully contained within box
assert segmentXbox((1500,2000), (2000,2500), llpt, urpt) == []
# Segments with points on box sides
assert segmentXbox((2500,1000), (2700,1200), llpt, urpt) == [(2500,1000)] # One point on box side
assert segmentXbox((2500,1000), (2700,5000), llpt, urpt) == [(2500,1000), (2700,5000)] # Two points on box sides
# Segment intersects box at one point
assert segmentXbox((3500,5500), (3000, 2500), llpt, urpt) == [(3417, 5000)] # First point outside
assert segmentXbox((3500,1500), (3000, 6500), llpt, urpt) == [(3150, 5000)] # Second point outside
# Segment intersects box at two points, not corners
assert segmentXbox((500,3000), (1500,500), llpt, urpt) == [(1000,1750), (1300,1000)]
assert segmentXbox((2500,300), (5500,3500), llpt, urpt) == [(3156,1000), (5000,2967)]
assert segmentXbox((5200,1200), (2000,6000), llpt, urpt) == [(2667,5000), (5000, 1500)]
assert segmentXbox((3200,5200), (-10, 1200), llpt, urpt) == [(1000, 2459), (3040, 5000)]
assert segmentXbox((500,2000), (5500, 2000), llpt, urpt) == [(1000,2000), (5000, 2000)]
assert segmentXbox((5200,1250), (-200, 4800), llpt, urpt) == [(1000, 4011), (5000, 1381)]
assert segmentXbox((1300,200), (1300, 5200), llpt, urpt) == [(1300, 1000), (1300, 5000)]
assert segmentXbox((1200,200), (1300, 5200), llpt, urpt) == [(1216, 1000), (1296, 5000)]
assert intersectExtents( (100,100,500,500), (500,500,900,900) ) == None
assert intersectExtents( (100,100,500,500), (400,400,900,900) ) == (400,400,500,500)
assert intersectExtents( (100,100,500,500), (200,0,600,300) ) == (200,100,500,300)
assert intersectExtents( (100,100,500,500), (200,0,300,600) ) == (200,100,300,500)
assert intersectExtents( (100,100,500,500), (0,600,50,550) ) == None
assert intersectExtents( (100,100,500,500), (0,600,600,-10) ) == (100,100,500,500)
assert intersectExtents( (100,100,500,500), (0,600,600,200) ) == (100,200,500,500)
assert intersectExtents( (100,100,500,500), (0,600,300,300) ) == (100,300,300,500)
assert isRect1InRect2( (100,100,500,500), (0,600,50,550) ) == False
assert isRect1InRect2( (100,100,500,500), (0,600,600,-10) ) == True
assert isRect1InRect2( (100,100,500,500), (0,600,600,200) ) == False
assert isRect1InRect2( (100,100,500,500), (0,600,300,300) ) == False
assert isRect1InRect2( (100,100,500,500), (0,0,500,500) ) == True
print 'All tests pass'
| 0.030242 |
import numpy as np
import pylab as plt
import sys
plt.ion()
#plt.close('all')
#constants
TRAIN=0
TESTLOC=1
TESTHIC=2
D=10
a2str=['B','W','G','RS','RM','RL','S','M','L','XL']
a2data=np.array([[0,1,2,2,1,0,2,1,0,np.nan],
[2,1,0,2,1,0,2,1,0,np.nan],[np.nan,np.nan,np.nan,2,1,0,np.nan,2,1,0]])
data2a=np.zeros((3,D,3))
for i in range(3):
data2a[i,:,:] = np.int32(a2data==i).T
feedback=np.array([[1,0,0,0,0,1,0,0,1,np.nan],
[0,0,1,0,0,1,0,0,1,np.nan],[np.nan,np.nan,np.nan,0,0,1,0,0,0,1]])
w=np.array([1,1,1,0.5,0.5,0.5,0.5,0.5,0.5,0.5])
# functions
def getProb(a,d):
p=np.power(a,d)
p/=np.nansum(p)
return p
def chooseAction(p):
action=np.random.multinomial(1,p)
return action.nonzero()[0][0]
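# Illustrative note (added for clarity, not part of the original script; the numbers are
# hypothetical): getProb turns attention weights into choice probabilities with a power
# transform, and chooseAction samples one index from that distribution.
#   getProb(np.array([1., 1., 2.]), 1)  ->  array([0.25, 0.25, 0.5])
#   getProb(np.array([1., 1., 2.]), 0)  ->  uniform (each 1/3), i.e. a random choice
#   chooseAction(np.array([0.25, 0.25, 0.5]))  ->  0, 1 or 2, drawn at random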
class Model():
def __init__(self,q0=0.5,u0=0.5,d=1,g=0.7,h=0.5,m=1):
''' q0 - prior preference of color over length (0,1)
u0 - prior preference of rel. over abs. length (0,1)
d - decision consistency (0,inf), 0=random, 1=deterministic
h - learning from positive feedback (0,1);
1=current evidence (fast shifting), 0= prior(slow shifing)
g - learning from negative feedback (0,1);
m - attentional focus (0, inf); 0= uniform distribution
'''
self.q0=q0; self.u0=u0; self.d=d
self.g=g; self.h=h; self.m=m
def exp1run(self):
T=20
#initialize
q=np.zeros(T+1); q[0]=self.q0
u=np.zeros(T+1); u[0]=self.u0
a=np.zeros((T+1,D));self.f=[]
p=np.zeros((T+1,D));dat=np.zeros(T)
a[0,:]=np.ones(10)/3.0
a[0,-1]=np.nan
a[0,:3]*=q[0]
a[0,3:6]*=(1-q[0])*u[0]
a[0,6:]*=(1-q[0])*(1-u[0])
b=np.zeros(T)# observed behavior
phase=0
#print a[0,:]
for t in range(T):
if t>10: phase=1
else: phase=0
p[t,:]=getProb(a[t,:],self.d)
b[t]=chooseAction(p[t,:])
dat[t]=a2data[phase,b[t]]
m=data2a[dat[t],:,phase]
f=feedback[phase,b[t]]
w=np.power(a[t,:],self.m)
self.f.append(f)
if f==1:
s=m*w
a[t+1,:]= self.h*s/np.nansum(s) + (1-self.h)*a[t,:]
else:
s=(1-m)*w
a[t+1,:]= self.g*s/np.nansum(s) + (1-self.g)*a[t,:]
u[t+1]= np.nansum(a[t+1,3:6])/np.nansum(a[t+1,3:])
q[t+1]= np.nansum(a[t+1,:3])/np.nansum(a[t+1,:])
#(np.nansum(a[t+1,:3])+(1-u[t+1])*np.nansum(a[t+1,6:])+u[t+1]*np.nansum(a[t+1,3:6])
self.a=a
self.b=b
self.dat=dat
self.f=np.array(self.f)
return self.dat,self.f
def exp1computeLL(self,dat,f):
T=20
#initialize
q=np.zeros(T+1); q[0]=self.q0
u=np.zeros(T+1); u[0]=self.u0
a=np.zeros((T+1,D));self.f=[]
p=np.zeros((T+1,D));
a[0,:]=np.ones(10)/3.0
a[0,-1]=np.nan
a[0,:3]*=q[0]
a[0,3:6]*=(1-q[0])*u[0]
a[0,6:]*=(1-q[0])*(1-u[0])
phase=0
LL=0
#print a[0,:]
for t in range(T):
if t>10: phase=1
else: phase=0
p[t,:]=getProb(a[t,:],self.d)
m=data2a[dat[t],:,phase]
w=np.power(a[t,:],self.m)
loglik= np.nansum(np.log(np.maximum(0.001,p[t,m==f[t]])))
if f[t]==1:
s=m*w
a[t+1,:]= self.h*s/np.nansum(s) + (1-self.h)*a[t,:]
else:
s=(1-m)*w
a[t+1,:]= self.g*s/np.nansum(s) + (1-self.g)*a[t,:]
#print t,dat[t],f[t],np.nansum(p[t,m==f[t]]),loglik
#print 'm= ',m
#print 'p= ',p
LL+=loglik
return LL
def plothistory(self):
a=self.a
b=self.b
plt.figure(figsize=(12,6))
I=np.concatenate([a.T,np.array(np.nansum(a[:,:3],1),ndmin=2),
np.array(np.nansum(a[:,3:6],1),ndmin=2),np.array(np.nansum(a[:,6:],1),ndmin=2)],axis=0)
plt.plot(range(b.size),b,'rx',ms=8,mew=2)
plt.plot([10.5,10.5],[-1,I.shape[1]],'r',lw=2)
plt.imshow(I,interpolation='nearest',cmap='winter')
plt.colorbar()
ax=plt.gca()
ax.set_yticks(range(I.shape[0]))
ax.set_yticklabels(['']*a.shape[1]+['color','rel len','abs len'])
c1=plt.Circle((-1.5,0),radius=0.4,color='blue',clip_on=False)
c2=plt.Circle((-1.5,1),radius=0.4,color='white',clip_on=False)
c3=plt.Circle((-1.5,2),radius=0.4,color='yellow',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);
c1=plt.Rectangle((-2,3),1,0.2,color='white',clip_on=False)
c2=plt.Rectangle((-2.5,4),1.5,0.2,color='white',clip_on=False)
c3=plt.Rectangle((-3,5),2,0.2,color='white',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);
c1=plt.Rectangle((-2,6),1,0.2,color='gray',clip_on=False)
c2=plt.Rectangle((-2.5,7),1.5,0.2,color='gray',clip_on=False)
c3=plt.Rectangle((-3,8),2,0.2,color='gray',clip_on=False)
c4=plt.Rectangle((-3.5,9),2.5,0.2,color='gray',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);ax.add_patch(c4);
print I[-3,-1]
def LLsample(M,Y):
LL=0
for y in Y:
LL+= M.exp1computeLL(y[0],y[1])
return LL
def checkLL(M,n=50):
np.random.seed(4)
fname='LLRq%.2fu%.2fh%.2fm%.2fd%.2f'%(M.q0,M.u0,M.h,M.m,M.d)
Y=[]
for i in range(n):
dat,f=M.exp1run()
Y.append([dat,f])
#return np.array(Y)
#M.plothistory()
h= np.linspace(0,1,21)#np.array([1])
#g= np.linspace(0,1,21)
g=np.linspace(0,1,21)
import time
t0=time.time()
out=np.ones((h.size,g.size))
for hh in range(h.size):
print np.round(hh/float(h.size),2)
#for gg in range(g.size):
for gg in range(g.size):
M.h=h[hh];#M.g=g[gg]
M.g=g[gg]
out[hh,gg]=LLsample(M,Y)
print time.time()-t0
np.save(fname,out)
return out
def plotLL(fname='out4.npy'):
plt.figure()
h= np.linspace(0,1,21)
g= np.linspace(0,1,21)
m=np.linspace(0,2,21)
d=np.linspace(0,2,21)
out=np.load(fname)
print np.nanmax(out),np.nanmin(out)
rang=np.nanmax(out)-np.nanmin(out)
maxloc= np.squeeze(np.array((np.nanmax(out)==out).nonzero()))
H,G=np.meshgrid(h,g)
print maxloc
for mm in range(m.size/2):
for dd in range(d.size/2):
plt.subplot(10,10,(9-mm)*10+dd+1)
plt.pcolormesh(h,g,out[:,:,mm*2,dd*2].T,
vmax=np.nanmax(out),vmin=np.nanmax(out)-rang/4.)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
if mm==maxloc[2]/2 and dd==maxloc[3]/2:
plt.plot(h[maxloc[0]],g[maxloc[1]],'ow',ms=8)
if dd==0:
print mm,dd
plt.ylabel('%.1f'%m[mm*2])
if mm==0: plt.xlabel('%.1f'%d[dd*2])
plt.title(fname[:6])
if __name__ == '__main__':
ags=[]
#for i in range(1,len(sys.argv)): ags.append(float(sys.argv[i]))
np.random.seed(5)
M=Model(q0=0.9,u0=1,h=0.9,g=0.5,m=1,d=1)
out=checkLL(M)
| 0.063998 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012-2014 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class AccountVoucher(orm.Model):
_inherit = "account.voucher"
_columns = {
'line_total': fields.float(
'Lines Total', digits_compute=dp.get_precision('Account'),
readonly=True),
# exclude_write_off field will be used by modules like
# account_vat_on_payment and l10n_it_withholding_tax
'exclude_write_off': fields.boolean(
'Exclude write-off from tax on payment',
help="""Select this if you want, when closing the invoice, the
tax to be computed
based on the invoice's totals instead of the paid amount"""),
}
def balance_move(self, cr, uid, move_id, context=None):
currency_obj = self.pool.get('res.currency')
move = self.pool.get('account.move').browse(cr, uid, move_id, context)
amount = 0.0
for line in move.line_id:
amount += line.debit - line.credit
amount = currency_obj.round(
cr, uid, move.company_id.currency_id, amount)
# max_balance_diff improves the evaluation, distinguishing between an
# error in the procedure and a currency rounding error
max_balance_diff = move.company_id.max_balance_diff
balance_diff = abs(amount * 10 ** dp.get_precision('Account')(cr)[1])
# check if balance differs by more than 1 decimal according to account
# decimal precision
if (balance_diff > max_balance_diff):
raise orm.except_orm(
_('Error'),
_(
'The generated payment entry '
'is unbalanced for more than %d '
'decimal' % max_balance_diff
)
)
if not currency_obj.is_zero(
cr, uid, move.company_id.currency_id, amount
):
for line in move.line_id:
# adjust the first move line that's not receivable, payable or
# liquidity
if (
line.account_id.type != 'receivable' and
line.account_id.type != 'payable' and
line.account_id.type != 'liquidity'
):
if line.credit:
line.write({
'credit': line.credit + amount,
}, update_check=False)
elif line.debit:
line.write({
'debit': line.debit - amount,
}, update_check=False)
if line.tax_amount:
line.write({
'tax_amount': line.tax_amount + amount,
}, update_check=False)
break
return amount
def voucher_move_line_create(
self, cr, uid, voucher_id, line_total,
move_id, company_currency, current_currency, context=None
):
res = super(AccountVoucher, self).voucher_move_line_create(
cr, uid, voucher_id, line_total, move_id, company_currency,
current_currency, context)
self.write(cr, uid, voucher_id, {'line_total': res[0]}, context)
return res
def get_invoice_total(self, invoice):
res = 0.0
for inv_move_line in invoice.move_id.line_id:
if inv_move_line.account_id.type in ('receivable', 'payable'):
res += inv_move_line.debit or inv_move_line.credit
return res
def get_invoice_total_currency(self, invoice):
res = 0.0
for inv_move_line in invoice.move_id.line_id:
if inv_move_line.account_id.type in ('receivable', 'payable'):
res += abs(inv_move_line.amount_currency)
return res
def allocated_amounts_grouped_by_invoice(
self, cr, uid, voucher, context=None
):
'''
this method builds a dictionary in the following form
{
first_invoice_id: {
'allocated': 120.0,
'total': 120.0,
'total_currency': 0.0,
'write-off': -20.0,
'allocated_currency': 0.0,
'foreign_currency_id': False, # int
'currency-write-off': 0.0,
}
second_invoice_id: {
'allocated': 50.0,
'total': 100.0,
'total_currency': 0.0,
'write-off': 0.0,
'allocated_currency': 0.0,
'foreign_currency_id': False,
'currency-write-off': 0.0,
}
}
every amount is expressed in company currency.
In order to compute the cashed amount correctly, the write-off will be
subtracted from the reconciled amount.
If more than one invoice is paid with this voucher, we distribute
write-off equally (if allowed)
'''
res = {}
ctx = dict(context) or {}
company_currency = super(AccountVoucher, self)._get_company_currency(
cr, uid, voucher.id, context=ctx)
current_currency = super(AccountVoucher, self)._get_current_currency(
cr, uid, voucher.id, context=ctx)
for line in voucher.line_ids:
if line.amount and line.move_line_id and line.move_line_id.invoice:
if line.move_line_id.invoice.id not in res:
res[line.move_line_id.invoice.id] = {
'allocated': 0.0,
'total': 0.0,
'total_currency': 0.0,
'write-off': 0.0,
'allocated_currency': 0.0,
'foreign_currency_id': False,
'currency-write-off': 0.0,
}
current_amount = line.amount
if company_currency != current_currency:
ctx['date'] = voucher.date
current_amount = super(
AccountVoucher, self)._convert_amount(
cr, uid, line.amount, voucher.id, context=ctx)
res[line.move_line_id.invoice.id][
'allocated_currency'
] += line.amount
res[line.move_line_id.invoice.id][
'foreign_currency_id'
] = current_currency
res[line.move_line_id.invoice.id][
'total_currency'
] = self.get_invoice_total_currency(
line.move_line_id.invoice)
res[line.move_line_id.invoice.id][
'allocated'
] += current_amount
res[line.move_line_id.invoice.id][
'total'
] = self.get_invoice_total(line.move_line_id.invoice)
if res:
# we use line_total as it can be != writeoff_amount in case of
# multi currency
write_off_per_invoice = voucher.line_total / len(res)
if not voucher.company_id.allow_distributing_write_off and len(
res
) > 1 and write_off_per_invoice:
raise orm.except_orm(_('Error'), _(
'You are trying to pay with write-off more than one '
'invoice and distributing write-off is not allowed. '
'See company settings.'))
if voucher.type == 'payment' or voucher.type == 'purchase':
write_off_per_invoice = - write_off_per_invoice
for inv_id in res:
res[inv_id]['write-off'] = write_off_per_invoice
if company_currency != current_currency:
curr_write_off_per_invoice = voucher.writeoff_amount / len(res)
if voucher.type == 'payment' or voucher.type == 'purchase':
curr_write_off_per_invoice = - curr_write_off_per_invoice
for inv_id in res:
res[inv_id][
'currency-write-off'] = curr_write_off_per_invoice
return res
| 0 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import logging
from collections import defaultdict
from powerline.lib.threaded import ThreadedSegment
from powerline.lib.unicode import unicode
from powerline.lint.markedjson.markedvalue import MarkedUnicode
from powerline.lint.markedjson.error import DelayedEchoErr, Mark
from powerline.lint.selfcheck import havemarks
from powerline.lint.context import JStr, list_themes
from powerline.lint.imp import WithPath, import_function, import_segment
from powerline.lint.spec import Spec
from powerline.lint.inspect import getconfigargspec
list_sep = JStr(', ')
generic_keys = set((
'exclude_modes', 'include_modes',
'exclude_function', 'include_function',
'width', 'align',
'name',
'draw_soft_divider', 'draw_hard_divider',
'priority',
'after', 'before',
'display'
))
type_keys = {
'function': set(('function', 'args', 'draw_inner_divider')),
'string': set(('contents', 'type', 'highlight_groups', 'divider_highlight_group')),
'segment_list': set(('function', 'segments', 'args', 'type')),
}
required_keys = {
'function': set(('function',)),
'string': set(()),
'segment_list': set(('function', 'segments',)),
}
highlight_keys = set(('highlight_groups', 'name'))
def get_function_strings(function_name, context, ext):
if '.' in function_name:
module, function_name = function_name.rpartition('.')[::2]
else:
module = context[0][1].get(
'default_module', MarkedUnicode('powerline.segments.' + ext, None))
return module, function_name
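# Illustrative note (added for clarity; the plugin name below is hypothetical): a dotted
# function name carries its own module, otherwise the theme's default_module (falling back
# to 'powerline.segments.<ext>') is used.
#   get_function_strings('my_plugin.segments.cpu', context, 'vim') -> ('my_plugin.segments', 'cpu')
#   get_function_strings('cpu', context, 'vim') -> ('powerline.segments.vim', 'cpu')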
def check_matcher_func(ext, match_name, data, context, echoerr):
havemarks(match_name)
import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])]
match_module, separator, match_function = match_name.rpartition('.')
if not separator:
match_module = 'powerline.matchers.{0}'.format(ext)
match_function = match_name
with WithPath(import_paths):
try:
func = getattr(__import__(str(match_module), fromlist=[str(match_function)]), str(match_function))
except ImportError:
echoerr(context='Error while loading matcher functions',
problem='failed to load module {0}'.format(match_module),
problem_mark=match_name.mark)
return True, False, True
except AttributeError:
echoerr(context='Error while loading matcher functions',
problem='failed to load matcher function {0}'.format(match_function),
problem_mark=match_name.mark)
return True, False, True
if not callable(func):
echoerr(context='Error while loading matcher functions',
problem='loaded “function” {0} is not callable'.format(match_function),
problem_mark=match_name.mark)
return True, False, True
if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'):
if func.func_code.co_argcount != 1:
echoerr(
context='Error while loading matcher functions',
problem=(
'function {0} accepts {1} arguments instead of 1. '
'Are you sure it is the proper function?'
).format(match_function, func.func_code.co_argcount),
problem_mark=match_name.mark
)
return True, False, False
def check_ext(ext, data, context, echoerr):
havemarks(ext)
hadsomedirs = False
hadproblem = False
if ext not in data['lists']['exts']:
hadproblem = True
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='extension configuration does not exist')
else:
for typ in ('themes', 'colorschemes'):
if ext not in data['configs'][typ] and not data['configs']['top_' + typ]:
hadproblem = True
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='{0} configuration does not exist'.format(typ))
else:
hadsomedirs = True
return hadsomedirs, hadproblem
def check_config(d, theme, data, context, echoerr):
if len(context) == 4:
ext = context[-2][0]
else:
# local_themes
ext = context[-3][0]
if ext not in data['lists']['exts']:
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='extension configuration does not exist')
return True, False, True
if (
(ext not in data['configs'][d] or theme not in data['configs'][d][ext])
and theme not in data['configs']['top_' + d]
):
echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext),
problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme),
problem_mark=theme.mark)
return True, False, True
return True, False, False
def check_top_theme(theme, data, context, echoerr):
havemarks(theme)
if theme not in data['configs']['top_themes']:
echoerr(context='Error while checking extension configuration (key {key})'.format(key=context.key),
context_mark=context[-2][0].mark,
problem='failed to find top theme {0}'.format(theme),
problem_mark=theme.mark)
return True, False, True
return True, False, False
def check_color(color, data, context, echoerr):
havemarks(color)
if (color not in data['colors_config'].get('colors', {})
and color not in data['colors_config'].get('gradients', {})):
echoerr(
context='Error while checking highlight group in colorscheme (key {key})'.format(
key=context.key),
problem='found nonexistent color or gradient {0}'.format(color),
problem_mark=color.mark
)
return True, False, True
return True, False, False
def check_translated_group_name(group, data, context, echoerr):
return check_group(group, data, context, echoerr)
def check_group(group, data, context, echoerr):
havemarks(group)
if not isinstance(group, unicode):
return True, False, False
colorscheme = data['colorscheme']
ext = data['ext']
configs = None
if ext:
def listed_key(d, k):
try:
return [d[k]]
except KeyError:
return []
if colorscheme == '__main__':
colorscheme_names = set(data['ext_colorscheme_configs'][ext])
colorscheme_names.update(data['top_colorscheme_configs'])
colorscheme_names.discard('__main__')
configs = [
(
name,
listed_key(data['ext_colorscheme_configs'][ext], name)
+ listed_key(data['ext_colorscheme_configs'][ext], '__main__')
+ listed_key(data['top_colorscheme_configs'], name)
)
for name in colorscheme_names
]
else:
configs = [
(
colorscheme,
listed_key(data['ext_colorscheme_configs'][ext], colorscheme)
+ listed_key(data['ext_colorscheme_configs'][ext], '__main__')
+ listed_key(data['top_colorscheme_configs'], colorscheme)
)
]
else:
try:
configs = [(colorscheme, [data['top_colorscheme_configs'][colorscheme]])]
except KeyError:
pass
hadproblem = False
for new_colorscheme, config_lst in configs:
not_found = []
new_data = data.copy()
new_data['colorscheme'] = new_colorscheme
for config in config_lst:
havemarks(config)
try:
group_data = config['groups'][group]
except KeyError:
not_found.append(config.mark.name)
else:
proceed, echo, chadproblem = check_group(
group_data,
new_data,
context,
echoerr,
)
if chadproblem:
hadproblem = True
if not proceed:
break
if not_found and len(not_found) == len(config_lst):
echoerr(
context='Error while checking group definition in colorscheme (key {key})'.format(
key=context.key),
problem='name {0} is not present anywhere in {1} {2} {3} colorschemes: {4}'.format(
group, len(not_found), ext, new_colorscheme, ', '.join(not_found)),
problem_mark=group.mark
)
hadproblem = True
return True, False, hadproblem
def check_key_compatibility(segment, data, context, echoerr):
havemarks(segment)
segment_type = segment.get('type', MarkedUnicode('function', None))
havemarks(segment_type)
if segment_type not in type_keys:
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='found segment with unknown type {0}'.format(segment_type),
problem_mark=segment_type.mark)
return False, False, True
hadproblem = False
keys = set(segment)
if not ((keys - generic_keys) < type_keys[segment_type]):
unknown_keys = keys - generic_keys - type_keys[segment_type]
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem='found keys not used with the current segment type: {0}'.format(
list_sep.join(unknown_keys)),
problem_mark=list(unknown_keys)[0].mark
)
hadproblem = True
if not (keys >= required_keys[segment_type]):
missing_keys = required_keys[segment_type] - keys
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem='found missing required keys: {0}'.format(
list_sep.join(missing_keys))
)
hadproblem = True
if not (segment_type == 'function' or (keys & highlight_keys)):
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem=(
'found missing keys required to determine highlight group. '
'Either highlight_groups or name key must be present'
)
)
hadproblem = True
return True, False, hadproblem
def check_segment_module(module, data, context, echoerr):
havemarks(module)
with WithPath(data['import_paths']):
try:
__import__(str(module))
except ImportError as e:
if echoerr.logger.level >= logging.DEBUG:
echoerr.logger.exception(e)
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='failed to import module {0}'.format(module),
problem_mark=module.mark)
return True, False, True
return True, False, False
def check_full_segment_data(segment, data, context, echoerr):
if 'name' not in segment and 'function' not in segment:
return True, False, False
ext = data['ext']
theme_segment_data = context[0][1].get('segment_data', {})
main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
if not main_theme_name or data['theme'] == main_theme_name:
top_segment_data = {}
else:
top_segment_data = data['ext_theme_configs'].get(main_theme_name, {}).get('segment_data', {})
if segment.get('type', 'function') == 'function':
function_name = segment.get('function')
if function_name:
module, function_name = get_function_strings(function_name, context, ext)
names = [module + '.' + function_name, function_name]
else:
names = []
elif segment.get('name'):
names = [segment['name']]
else:
return True, False, False
segment_copy = segment.copy()
for key in ('before', 'after', 'args', 'contents'):
if key not in segment_copy:
for segment_data in [theme_segment_data, top_segment_data]:
for name in names:
try:
val = segment_data[name][key]
k = segment_data[name].keydict[key]
segment_copy[k] = val
except KeyError:
pass
return check_key_compatibility(segment_copy, data, context, echoerr)
highlight_group_spec = Spec().ident().copy
_highlight_group_spec = highlight_group_spec().context_message(
'Error while checking function documentation while checking theme (key {key})')
def check_hl_group_name(hl_group, context_mark, context, echoerr):
'''Check highlight group name: it should match naming conventions
:param str hl_group:
Checked group.
:param Mark context_mark:
Context mark. May be ``None``.
:param Context context:
Current context.
:param func echoerr:
Function used for error reporting.
:return: ``False`` if check succeeded and ``True`` if it failed.
'''
return _highlight_group_spec.match(hl_group, context_mark=context_mark, context=context, echoerr=echoerr)[1]
def check_segment_function(function_name, data, context, echoerr):
havemarks(function_name)
ext = data['ext']
module, function_name = get_function_strings(function_name, context, ext)
if context[-2][1].get('type', 'function') == 'function':
func = import_segment(function_name, data, context, echoerr, module=module)
if not func:
return True, False, True
hl_groups = []
divider_hl_group = None
hadproblem = False
if func.__doc__:
NO_H_G_USED_STR = 'No highlight groups are used (literal segment).'
H_G_USED_STR = 'Highlight groups used: '
LHGUS = len(H_G_USED_STR)
D_H_G_USED_STR = 'Divider highlight group used: '
LDHGUS = len(D_H_G_USED_STR)
pointer = 0
mark_name = '<{0} docstring>'.format(function_name)
for i, line in enumerate(func.__doc__.split('\n')):
if H_G_USED_STR in line:
idx = line.index(H_G_USED_STR) + LHGUS
if hl_groups is None:
idx -= LHGUS
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found highlight group definition in addition to sentence stating that '
'no highlight groups are used'
),
problem_mark=mark,
)
hadproblem = True
continue
hl_groups.append((
line[idx:],
(mark_name, i + 1, idx + 1, func.__doc__),
pointer + idx
))
elif D_H_G_USED_STR in line:
idx = line.index(D_H_G_USED_STR) + LDHGUS + 2
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
divider_hl_group = MarkedUnicode(line[idx:-3], mark)
elif NO_H_G_USED_STR in line:
idx = line.index(NO_H_G_USED_STR)
if hl_groups:
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found sentence stating that no highlight groups are used '
'in addition to highlight group definition'
),
problem_mark=mark,
)
hadproblem = True
continue
hl_groups = None
pointer += len(line) + len('\n')
if divider_hl_group:
r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found highlight group {0} not defined in the following colorschemes: {1}\n'
'(Group name was obtained from function documentation.)'
).format(divider_hl_group, list_sep.join(r)),
problem_mark=divider_hl_group.mark,
)
hadproblem = True
if check_hl_group_name(divider_hl_group, function_name.mark, context, echoerr):
hadproblem = True
if hl_groups:
greg = re.compile(r'``([^`]+)``( \(gradient\))?')
parsed_hl_groups = []
for line, mark_args, pointer in hl_groups:
for s in line.split(', '):
required_pack = []
sub_pointer = pointer
for subs in s.split(' or '):
match = greg.match(subs)
try:
if not match:
continue
hl_group = MarkedUnicode(
match.group(1),
Mark(*mark_args, pointer=sub_pointer + match.start(1))
)
if check_hl_group_name(hl_group, function_name.mark, context, echoerr):
hadproblem = True
gradient = bool(match.group(2))
required_pack.append((hl_group, gradient))
finally:
sub_pointer += len(subs) + len(' or ')
parsed_hl_groups.append(required_pack)
pointer += len(s) + len(', ')
del hl_group, gradient
for required_pack in parsed_hl_groups:
rs = [
hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False))
for hl_group, gradient in required_pack
]
if all(rs):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem=(
'found highlight groups list ({0}) with all groups not defined in some colorschemes\n'
'(Group names were taken from function documentation.)'
).format(list_sep.join((h[0] for h in required_pack))),
problem_mark=function_name.mark
)
for r, h in zip(rs, required_pack):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
h[0], list_sep.join(r))
)
hadproblem = True
elif hl_groups is not None:
r = hl_exists(function_name, data, context, echoerr, allow_gradients=True)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem=(
'found highlight group {0} not defined in the following colorschemes: {1}\n'
'(If not specified otherwise in documentation, '
'highlight group for function segments\n'
'is the same as the function name.)'
).format(function_name, list_sep.join(r)),
problem_mark=function_name.mark
)
hadproblem = True
return True, False, hadproblem
elif context[-2][1].get('type') != 'segment_list':
if function_name not in context[0][1].get('segment_data', {}):
main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
if data['theme'] == main_theme_name:
main_theme = {}
else:
main_theme = data['ext_theme_configs'].get(main_theme_name, {})
if (
function_name not in main_theme.get('segment_data', {})
and function_name not in data['ext_theme_configs'].get('__main__', {}).get('segment_data', {})
and not any(((function_name in theme.get('segment_data', {})) for theme in data['top_themes'].values()))
):
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='found useless use of name key (such name is not present in theme/segment_data)',
problem_mark=function_name.mark)
return True, False, False
def hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr):
havemarks(hl_group, cconfig)
if hl_group not in cconfig.get('groups', {}):
return False
elif not allow_gradients or allow_gradients == 'force':
group_config = cconfig['groups'][hl_group]
while isinstance(group_config, unicode):
try:
group_config = cconfig['groups'][group_config]
except KeyError:
# No such group. Error was already reported when checking
# colorschemes.
return True
havemarks(group_config)
hadgradient = False
for ckey in ('fg', 'bg'):
color = group_config.get(ckey)
if not color:
# No color. Error was already reported when checking
# colorschemes.
return True
havemarks(color)
# Gradients are only allowed for function segments. Note that
# whether *either* color or gradient exists should have been
# already checked
hascolor = color in data['colors_config'].get('colors', {})
hasgradient = color in data['colors_config'].get('gradients', {})
if hasgradient:
hadgradient = True
if allow_gradients is False and not hascolor and hasgradient:
echoerr(
context='Error while checking highlight group in theme (key {key})'.format(
key=context.key),
context_mark=hl_group.mark,
problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color),
problem_mark=color.mark
)
return False
if allow_gradients == 'force' and not hadgradient:
echoerr(
context='Error while checking highlight group in theme (key {key})'.format(
key=context.key),
context_mark=hl_group.mark,
				problem='group {0} should have at least one gradient color, but it has none'.format(hl_group),
problem_mark=group_config.mark
)
return False
return True
def hl_exists(hl_group, data, context, echoerr, allow_gradients=False):
havemarks(hl_group)
ext = data['ext']
if ext not in data['colorscheme_configs']:
# No colorschemes. Error was already reported, no need to report it
# twice
return []
r = []
found = False
for colorscheme, cconfig in data['colorscheme_configs'][ext].items():
if hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr):
found = True
else:
r.append(colorscheme)
if not found:
pass
return r
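# Illustration of hl_exists()'s return value (the colorscheme names below are hypothetical):
#   []                       -> the group is defined in every colorscheme for this ext
#   ['solarized', 'default'] -> the group is missing from exactly these colorschemes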
def check_highlight_group(hl_group, data, context, echoerr):
havemarks(hl_group)
r = hl_exists(hl_group, data, context, echoerr)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
hl_group, list_sep.join(r)),
problem_mark=hl_group.mark
)
return True, False, True
return True, False, False
def check_highlight_groups(hl_groups, data, context, echoerr):
havemarks(hl_groups)
rs = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups]
if all(rs):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format(
list_sep.join((unicode(h) for h in hl_groups))),
problem_mark=hl_groups.mark
)
for r, hl_group in zip(rs, hl_groups):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
hl_group, list_sep.join(r)),
problem_mark=hl_group.mark
)
return True, False, True
return True, False, False
def check_segment_data_key(key, data, context, echoerr):
havemarks(key)
has_module_name = '.' in key
found = False
for ext, theme in list_themes(data, context):
for segments in theme.get('segments', {}).values():
for segment in segments:
if 'name' in segment:
if key == segment['name']:
found = True
break
else:
function_name = segment.get('function')
if function_name:
module, function_name = get_function_strings(function_name, ((None, theme),), ext)
if has_module_name:
full_name = module + '.' + function_name
if key == full_name:
found = True
break
else:
if key == function_name:
found = True
break
if found:
break
if found:
break
else:
if data['theme_type'] != 'top':
echoerr(context='Error while checking segment data',
problem='found key {0} that cannot be associated with any segment'.format(key),
problem_mark=key.mark)
return True, False, True
return True, False, False
threaded_args_specs = {
'interval': Spec().cmp('gt', 0.0),
'update_first': Spec().type(bool),
'shutdown_event': Spec().error('Shutdown event must be set by powerline'),
}
def check_args_variant(func, args, data, context, echoerr):
havemarks(args)
argspec = getconfigargspec(func)
present_args = set(args)
all_args = set(argspec.args)
required_args = set(argspec.args[:-len(argspec.defaults)])
hadproblem = False
if required_args - present_args:
echoerr(
context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=args.mark,
problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args))
)
hadproblem = True
if not all_args >= present_args:
echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=args.mark,
problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)),
problem_mark=next(iter(present_args - all_args)).mark)
hadproblem = True
if isinstance(func, ThreadedSegment):
for key in set(threaded_args_specs) & present_args:
proceed, khadproblem = threaded_args_specs[key].match(
args[key],
args.mark,
data,
context.enter_key(args, key),
echoerr
)
if khadproblem:
hadproblem = True
if not proceed:
return hadproblem
return hadproblem
def check_args(get_functions, args, data, context, echoerr):
new_echoerr = DelayedEchoErr(echoerr)
count = 0
hadproblem = False
for func in get_functions(data, context, new_echoerr):
count += 1
shadproblem = check_args_variant(func, args, data, context, echoerr)
if shadproblem:
hadproblem = True
if not count:
hadproblem = True
if new_echoerr:
new_echoerr.echo_all()
else:
echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=context[-2][1].mark,
problem='no suitable segments found')
return True, False, hadproblem
def get_one_segment_function(data, context, echoerr):
ext = data['ext']
function_name = context[-2][1].get('function')
if function_name:
module, function_name = get_function_strings(function_name, context, ext)
func = import_segment(function_name, data, context, echoerr, module=module)
if func:
yield func
common_names = defaultdict(set)
def register_common_name(name, cmodule, cname):
s = cmodule + '.' + cname
cmodule_mark = Mark('<common name definition>', 1, 1, s, 1)
cname_mark = Mark('<common name definition>', 1, len(cmodule) + 1, s, len(cmodule) + 1)
common_names[name].add((MarkedUnicode(cmodule, cmodule_mark), MarkedUnicode(cname, cname_mark)))
def get_all_possible_functions(data, context, echoerr):
name = context[-2][0]
module, name = name.rpartition('.')[::2]
if module:
func = import_segment(name, data, context, echoerr, module=module)
if func:
yield func
else:
if name in common_names:
for cmodule, cname in common_names[name]:
cfunc = import_segment(cname, data, context, echoerr, module=MarkedUnicode(cmodule, None))
if cfunc:
yield cfunc
for ext, theme_config in list_themes(data, context):
for segments in theme_config.get('segments', {}).values():
for segment in segments:
if segment.get('type', 'function') == 'function':
function_name = segment.get('function')
current_name = segment.get('name')
if function_name:
module, function_name = get_function_strings(function_name, ((None, theme_config),), ext)
if current_name == name or function_name == name:
func = import_segment(function_name, data, context, echoerr, module=module)
if func:
yield func
def check_exinclude_function(name, data, context, echoerr):
ext = data['ext']
module, name = name.rpartition('.')[::2]
if not module:
module = MarkedUnicode('powerline.selectors.' + ext, None)
func = import_function('selector', name, data, context, echoerr, module=module)
if not func:
return True, False, True
return True, False, False
| 0.028012 |
# for training data bcf only
import sys
import os
import numpy as np
import scipy.misc
import scipy.io as sio
import cPickle as pickle
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'common'))
from w_util import getsubimg, rgbimg2vec, readLines, gray2vec, rgb2gray, ismember
import bcfstore as bcfs
import StringIO
def getStore(datadir):
return ILVRC2012_Set(datadir)
class ILVRC2012_Set:
def __init__(self, paramfile):
print "ilvrc2012_imagebbx_mv_bcfp with paramfile:", paramfile
self.param = {'paramfile': paramfile, 'imgsize': 224, 'test': 0}
plines=readLines(paramfile)
for l in plines:
l=l.rstrip().split()
self.param[l[0]]=l[1]
self.param['scale']=[float(_) for _ in self.param['scale'].split('+')]
self.param['crop']=self.param['crop'].split('+') #list of initial strings
self.param['imgsize']=int(self.param['imgsize'])
self.param['test']=int(self.param['test'])
assert(self.param['imgsize']==224)
print self.param
self.pertcomb=[]
for crop in self.param['crop']:
if crop[0]=='u':
nc=int(crop[1:])
crops=['u{}-{}'.format(int(np.sqrt(nc)), _) for _ in range(nc)]
elif crop[0:4]=='scan':
nc=int(crop[4:])
crops=['s{}-{}'.format(nc, _) for _ in range(nc) ]
else:
crops=[crop]
for c in crops:
for s in self.param['scale']:
self.pertcomb+=[[c, s]]
if c=='wh':
break
print 'image expanded with %d perturbation(s):' % len(self.pertcomb)
print self.pertcomb
sys.stdout.flush()
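		# Illustrative expansion (hypothetical values): with crop 'u4' and scale '1.0+0.5',
		# pertcomb becomes [['u2-0', 1.0], ['u2-0', 0.5], ['u2-1', 1.0], ['u2-1', 0.5],
		# ['u2-2', 1.0], ['u2-2', 0.5], ['u2-3', 1.0], ['u2-3', 0.5]];
		# a 'wh' crop is paired with the first scale only.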
if self.param['meanimg']=='-1':
print 'no meanimg specified, using 128 as mean'
self.meanImg = np.zeros([256, 256, 3])+128
else:
self.meanImg = np.load(self.param['meanimg'])
self.meanImg = self.meanImg[16:256-16,16:256-16,:]
if self.param['imgsize']!=224:
print 'reshape meanImg'
self.meanImg = scipy.misc.imresize(np.round(self.meanImg).astype(np.uint8), \
(self.param['imgsize'], self.param['imgsize']))
self.meanImg = self.meanImg.astype(float)
#read label, bounding box, and index
self.bcfstore=bcfs.bcf_store_file(self.param['imgfile'])
print "{} files found in bcf file".format(self.bcfstore.size())
bb=pickle.load(open(self.param['bbxfile'], 'rb'))
self.bbx=bb['bbx']
assert(self.bbx.shape[1]==4)
print '%d bounding boxes read' % (self.bbx.shape[0])
self.bcfList=bb['bcfidx']
self.rawsize=bb['imsize'] #[w, h]
if self.param['imglist']!='-1':
self.imgList=readLines(self.param['imglist'])
self.imgList=[int(_.rstrip()) for _ in self.imgList] #index in bcf
self.imgList=[np.where(self.bcfList==_)[0] for _ in self.imgList] #index in bbx, from 1
mask=np.array([len(_) for _ in self.imgList])
self.imgList=np.array(self.imgList)[mask==1]
self.imgList=[_[0]+1 for _ in self.imgList]
else:
self.imgList=range(1, 1+self.bbx.shape[0]) #index in bbx, starts from 1
self.imgNum = len(self.imgList)
print '%d images found' % self.imgNum
self.labels = np.zeros([max(self.imgList)+1, ])
		self.curidx = -1 #global index
self.curimgidx = -1
self.curimg = None
self.curbbx = None
def get_num_images(self):
return self.imgNum*len(self.pertcomb)
def get_num_classes(self):
return 1000
def get_input_dim(self):
return self.param['imgsize']*self.param['imgsize']*3
def get_output_dim(self):
return 4
def get_input(self, idx):
#print 'input idx:', idx
self.curidx = idx
crop, scale=self.pertcomb[idx%len(self.pertcomb)]
imgidx = self.imgList[idx/len(self.pertcomb)] #image index in the 544539-bbx
#print 'idx=%d, imgidx=%d' % (idx, imgidx)
if self.curimgidx==imgidx:
img = self.curimg
else:
img = scipy.misc.imread(StringIO.StringIO(self.bcfstore.get(self.bcfList[imgidx-1]-1)))
#print 'load bcf: ', imgidx-1
# convert to 3 channels
if len(img.shape) == 2:
newimg = np.zeros((img.shape)+(3,), dtype=img.dtype)
newimg[:,:,0] = img
newimg[:,:,1] = img
newimg[:,:,2] = img
img = newimg
else:
if img.shape[2] == 4:
img = img[:,:,:3]
self.curimg = img
self.curimgidx = imgidx
# crop image and find bbx
h, w, c = img.shape
if (self.param['test']==1):
b = np.array([1, 1, h, w], dtype=float)
else:
b = self.bbx[imgidx-1].astype(float)
# convert from raw coordinate
s=self.rawsize[imgidx-1]
#print "image converted from", s, "to", (w, h)
b=b*w/s[0]
b[0]=max(1, b[0]);
b[1]=max(1, b[1]);
b[2]=min(h, b[2]);
b[3]=min(w, b[3]);
if crop=='wh':
l = min(w, h)
dx = 0
dy = 0
elif crop[0]=='u': #uniform grid
l = int(scale*min(w, h))
if (self.param['test']==1): #all over the image
x0 = 0
x1 = w-l
y0 = 0
y1 = h-l
else: #at least cover half of ground truth b
bx = (b[1]+b[3])/2.0
by = (b[0]+b[2])/2.0
x0 = max(0, bx-l)
x1 = min(w-l, bx)
y0 = max(0, by-l)
y1 = min(h-l, by)
crop = [int(_) for _ in crop[1:].split('-')]
cidx = crop[1]%crop[0]
ridx = crop[1]/crop[0]
assert(crop[0]>0)
if (crop[0]>1):
dx = int(x0+(x1-x0)*cidx/(crop[0]-1))
dy = int(y0+(y1-y0)*ridx/(crop[0]-1))
else: #(crop[0]==1)
dx = int(x0+(x1-x0)/2)
dy = int(y0+(y1-y0)/2)
else:
print 'undefined CROP: %s' % crop
assert(0)
img = getsubimg(img, (dx+1, dy+1, dx+l, dy+l) )
# convert to imgsize [224x224]
ll = self.param['imgsize']
img = scipy.misc.imresize(img, (ll, ll))
if self.param['test']==1:
self.curbbx = np.array([[0, 0, 0, 0]])
else:
self.curbbx = (np.array([b.tolist()]) - np.array([[dy, dx, dy, dx]]))*(1.0*ll/l)
self.curbbx = np.round(self.curbbx).astype(int)
return img-self.meanImg
#output bounding box [cx offset, cy offset, width, height], in the transformed image plane
def get_output(self, idx):
if idx!=self.curidx:
self.get_input(idx)
return self.curbbx
def get_label(self, idx):
return self.labels[self.imgList[idx/len(self.pertcomb)]-1]
def get_meta(self, idx):
return None
def test(param):
ts = ILVRC2012_Set(param)
print "{} images in total".format(ts.get_num_images())
for i in range(0,500,500):
im=ts.get_input(i)
y=ts.get_label(i)
print "i={}, label={}".format(i, y)
print 'image shape:', np.shape(im)
b = []
for i in range(11000, 13000, 100):
im = ts.get_input(i)
bbx = ts.get_output(i)
print i, bbx[0], im.shape
b += [bbx[0]]
scipy.misc.imsave('./img/{}.jpg'.format(i), im)
sio.savemat('./img/bbx.mat', {'bbx': b})
if __name__ == '__main__':
print 'testing ilvrc2012_imagebbx_mv_bcfp.py!'
assert(len(sys.argv)==2)
test(sys.argv[1])
| 0.040617 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import unittest
from base64 import b64encode as b64e
from airflow.contrib.sensors.pubsub_sensor import PubSubPullSensor
from airflow.exceptions import AirflowSensorTimeout
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test-task-id'
TEST_PROJECT = 'test-project'
TEST_TOPIC = 'test-topic'
TEST_SUBSCRIPTION = 'test-subscription'
TEST_MESSAGES = [
{
'data': b64e(b'Hello, World!'),
'attributes': {'type': 'greeting'}
},
{'data': b64e(b'Knock, knock')},
{'attributes': {'foo': ''}}]
class PubSubPullSensorTest(unittest.TestCase):
def _generate_messages(self, count):
messages = []
for i in range(1, count + 1):
messages.append({
'ackId': '%s' % i,
'message': {
'data': b64e('Message {}'.format(i).encode('utf8')),
'attributes': {'type': 'generated message'}
}
})
return messages
@mock.patch('airflow.contrib.sensors.pubsub_sensor.PubSubHook')
def test_poke_no_messages(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION)
mock_hook.return_value.pull.return_value = []
self.assertEquals([], operator.poke(None))
@mock.patch('airflow.contrib.sensors.pubsub_sensor.PubSubHook')
def test_poke_with_ack_messages(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
ack_messages=True)
generated_messages = self._generate_messages(5)
mock_hook.return_value.pull.return_value = generated_messages
self.assertEquals(generated_messages, operator.poke(None))
mock_hook.return_value.acknowledge.assert_called_with(
TEST_PROJECT, TEST_SUBSCRIPTION, ['1', '2', '3', '4', '5']
)
@mock.patch('airflow.contrib.sensors.pubsub_sensor.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0)
generated_messages = self._generate_messages(5)
mock_hook.return_value.pull.return_value = generated_messages
response = operator.execute(None)
mock_hook.return_value.pull.assert_called_with(
TEST_PROJECT, TEST_SUBSCRIPTION, 5, False)
self.assertEquals(response, generated_messages)
@mock.patch('airflow.contrib.sensors.pubsub_sensor.PubSubHook')
def test_execute_timeout(self, mock_hook):
operator = PubSubPullSensor(task_id=TASK_ID, project=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
poke_interval=0, timeout=1)
mock_hook.return_value.pull.return_value = []
with self.assertRaises(AirflowSensorTimeout):
operator.execute(None)
mock_hook.return_value.pull.assert_called_with(
TEST_PROJECT, TEST_SUBSCRIPTION, 5, False)
| 0 |
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class SchedulerStatsClient(rest_client.RestClient):
api_version = "v2"
def list_pools(self, detail=False):
"""List all the volumes pools (hosts).
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/index.html#list-back-end-storage-pools
"""
url = 'scheduler-stats/get_pools'
if detail:
url += '?detail=True'
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
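    # Minimal usage sketch (illustrative only; assumes a configured tempest.lib auth
    # provider and the usual RestClient constructor arguments):
    #   client = SchedulerStatsClient(auth_provider, 'volumev2', 'RegionOne')
    #   pools = client.list_pools(detail=True)['pools']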
| 0 |
import numpy as np
from numpy import arange, array
import matplotlib as mp
import matplotlib.pyplot as pplot
from math import exp, log, pi, sin, cos, tan, tanh
black = [0,0,0]
lw_line = 2.0
lw_grid = 1.8
lw_tick = 1.8
tick_length = 6.0
style = {
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"grid.linewidth": lw_grid,
"grid.linestyle": "dotted"
}
mp.rcParams.update(style)
def ln(x):
return float("nan") if x<=0 else log(x)
def plot(a,X,Y,title,discont=None):
fig = pplot.figure()
ax = fig.add_subplot(1,1,1)
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.grid()
ax.yaxis.grid()
ax.xaxis.set_tick_params(width=lw_tick,length=tick_length)
ax.yaxis.set_tick_params(width=lw_tick,length=tick_length)
ax.set_aspect('equal')
ax.axis(X+Y)
ax.set_xticks(list(range(X[0],0,1))+list(range(1,X[1]+1,1)))
ax.set_yticks(list(range(Y[0],0,1))+list(range(1,Y[1]+1,1)))
x = arange(X[0],X[1],0.001)
if not isinstance(a,list): a=[a]
for f in a:
        y = array(list(map(f,x)))  # list() keeps this working under Python 3, where map() is lazy
if discont is not None:
y[:-1][np.diff(y) >= discont] = np.nan
ax.plot(x,y,color=black,linewidth=lw_line)
pplot.savefig(title,bbox_inches='tight')
# plot([exp,ln],X=[-5,5],Y=[-5,5],title="exp.pdf")
# plot(sin,X=[-1,9],Y=[-2,2],title="sin.pdf")
# plot(cos,X=[-1,9],Y=[-2,2],title="cos.pdf")
# plot(tan,X=[-1,9],Y=[-4,4],title="tan.pdf",discont=0.5)
plot(tanh,X=[-5,5],Y=[-2,2],title="tanh.pdf")
| 0.039433 |
#!/usr/bin/env python
# encoding: utf-8
# === IMPORTANT ====
# NOTE: In order to support non-ASCII file names,
# your system's locale MUST be set to 'utf-8'
# CAVEAT: DOESN'T work with proxy, the underlying reason being
# the 'requests' package used for http communication doesn't seem
# to work properly with proxies, reason unclear.
# NOTE: It seems Baidu doesn't handle MD5 quite right after combining files,
# so it may return erroneous MD5s. Performing a rapidupload again may fix the problem.
# That's why I changed default behavior to no-verification.
# NOTE: syncup / upload and syncdown / downdir partially duplicate each other.
# The difference: syncup / syncdown compare first and then perform the actions,
# while download / upload just proceed (though they still compare during the actions),
# so they are roughly the same, except that sync can also delete extra files
#
# TODO: Dry run?
# TODO: Use batch functions for better performance
'''
bypy -- Python client for Baidu Yun
---
Copyright 2013 Hou Tianze (GitHub: houtianze, Twitter: @ibic, G+: +TianzeHou)
Licensed under the GPLv3
https://www.gnu.org/licenses/gpl-3.0.txt
bypy is a Baidu Yun client written in Python (2.7).
(NOTE: You need to install the 'requests' library by running 'pip install requests')
It offers some file operations like: list, download, upload, syncup, syncdown, etc.
The main purpose is to utilize Baidu Yun in Linux environment (e.g. Raspberry Pi)
It uses a server for OAuth authorization, to conceal the Application's Secret Key.
Alternatively, you can create your own App at Baidu and replace the 'ApiKey' and 'SecretKey' with your copies,
and then, change 'ServerAuth' to 'False'
---
@author: Hou Tianze (GitHub: houtianze, Twitter: @ibic, G+: +TianzeHou)
@copyright: 2013 Hou Tianze. All rights reserved.
@license: GPLv3
@contact: None
@deffield updated: Updated
'''
# it takes days just to fix you, unicode ...
# some references
# https://stackoverflow.com/questions/4374455/how-to-set-sys-stdout-encoding-in-python-3
# https://stackoverflow.com/questions/492483/setting-the-correct-encoding-when-piping-stdout-in-python
# http://drj11.wordpress.com/2007/05/14/python-how-is-sysstdoutencoding-chosen/
# https://stackoverflow.com/questions/11741574/how-to-set-the-default-encoding-to-utf-8-in-python
# https://stackoverflow.com/questions/2276200/changing-default-encoding-of-python
from __future__ import unicode_literals
import os
import sys
#reload(sys)
#sys.setdefaultencoding(SystemEncoding)
import locale
SystemLanguageCode, SystemEncoding = locale.getdefaultlocale()
if SystemEncoding and not sys.platform.startswith('win32'):
sysenc = SystemEncoding.upper()
if sysenc != 'UTF-8' and sysenc != 'UTF8':
err = "You MUST set system locale to 'UTF-8' to support unicode file names.\n" + \
"Current locale is '{}'".format(SystemEncoding)
ex = Exception(err)
print(err)
raise ex
if not SystemEncoding:
# ASSUME UTF-8 encoding, if for whatever reason,
# we can't get the default system encoding
print("*WARNING*: Cannot detect the system encoding, assume it's 'UTF-8'")
SystemEncoding = 'utf-8'
import codecs
# no idea who is the asshole that screws the sys.stdout.encoding
# the locale is 'UTF-8', sys.stdin.encoding is 'UTF-8',
# BUT, sys.stdout.encoding is 'None' ...
if not (sys.stdout.encoding and sys.stdout.encoding.lower() == 'utf-8'):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
import signal
import time
import shutil
import posixpath
#import types
import traceback
import inspect
import logging
import httplib
import urllib
import json
import hashlib
import binascii
import re
import cPickle as pickle
import pprint
import socket
#from collections import OrderedDict
from os.path import expanduser
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
# Defines that should never be changed
OneK = 1024
OneM = OneK * OneK
OneG = OneM * OneK
OneT = OneG * OneK
OneP = OneT * OneK
OneE = OneP * OneK
# special variables
__all__ = []
__version__ = 0.1
__date__ = '2013-10-25'
__updated__ = '2014-01-13'
# ByPy default values
DefaultSliceInMB = 20
DefaultSliceSize = 20 * OneM
DefaultDlChunkSize = 20 * OneM
RetryDelayInSec = 10
# Baidu PCS constants
MinRapidUploadFileSize = 256 * OneK
MaxSliceSize = 2 * OneG
MaxSlicePieces = 1024
# return (error) codes
ENoError = 0 # plain old OK, fine, no error.
EIncorrectPythonVersion = 1
EApiNotConfigured = 10 # ApiKey, SecretKey and AppPcsPath not properly configured
EArgument = 10 # invalid program command argument
EAbort = 20 # aborted
EException = 30 # unhandled exception occurred
EParameter = 40 # invalid parameter passed to ByPy
EInvalidJson = 50
EHashMismatch = 60 # MD5 hashes of the local file and remote file don't match each other
EFileWrite = 70
EFileTooBig = 80 # file too big to upload
EFailToCreateLocalDir = 90
EFailToCreateLocalFile = 100
EFailToDeleteDir = 110
EFailToDeleteFile = 120
EFileNotFound = 130
EMaxRetry = 140
ERequestFailed = 150 # request failed
ECacheNotLoaded = 160
EFatal = -1 # No way to continue
# internal errors
IEMD5NotFound = 31079 # File md5 not found, you should use upload API to upload the whole file.
# PCS configuration constants
# ==== NOTE ====
# I use server auth, because it's the only possible method to protect the SecretKey.
# If you don't like that and want to perform local authorization using 'Device' method, you need to:
# - Change to: ServerAuth = False
# - Paste your own ApiKey and SecretKey.
# - Change the AppPcsPath to your own App's directory at Baidu PCS
# Then you are good to go
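# For example (illustrative placeholder values only -- substitute your own App's credentials):
#   ServerAuth = False
#   ApiKey = '<your-api-key>'
#   SecretKey = '<your-secret-key>'
#   AppPcsPath = '/apps/<your-app-directory>'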
ServerAuth = True # change it to 'False' if you use your own appid
GaeUrl = 'https://bypyoauth.appspot.com'
OpenShiftUrl = 'https://bypy-tianze.rhcloud.com'
JaeUrl = 'http://bypyoauth.jd-app.com'
GaeRedirectUrl = GaeUrl + '/auth'
GaeRefreshUrl = GaeUrl + '/refresh'
OpenShiftRedirectUrl = OpenShiftUrl + '/auth'
OpenShiftRefreshUrl = OpenShiftUrl + '/refresh'
JaeRedirectUrl = JaeUrl + '/auth'
JaeRefreshUrl = JaeUrl + '/refresh'
AuthServerList = [
	# url, retry?, message
(GaeRedirectUrl, False, "Authorizing with the GAE server ..."),
(OpenShiftRedirectUrl, True, "I think you are WALLed, so let's authorize with the OpenShift server ..."),
(JaeRedirectUrl, True, "OpenShift also failed. Last resort: authorizing with the JAE server (*** WARNING *** NON-encrypted http protocol) ..."),
]
RefreshServerList = [
	# url, retry?, message
(GaeRefreshUrl, False, "Refreshing with the GAE server ..."),
(OpenShiftRefreshUrl, True, "I think you are WALLed, so let's refresh with the OpenShift server ..."),
(JaeRefreshUrl, True, "OpenShift also failed. Last resort: refreshing with the JAE server (*** WARNING *** NON-encrypted http protocol) ..."),
]
ApiKey = 'q8WE4EpCsau1oS0MplgMKNBn' # replace with your own ApiKey if you use your own appid
SecretKey = '' # replace with your own SecretKey if you use your own appid
if not SecretKey:
ServerAuth = True
# NOTE: no trailing '/'
AppPcsPath = '/apps/bypy' # change this to the App's directory you specified when creating the app
AppPcsPathLen = len(AppPcsPath)
# Program setting constants
HomeDir = expanduser('~')
TokenFilePath = HomeDir + os.sep + '.bypy.json'
HashCachePath = HomeDir + os.sep + '.bypy.pickle'
#UserAgent = 'Mozilla/5.0'
#UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"
# According to seanlis@github, this User-Agent string affects the download.
UserAgent = None
DisableSslCheckOption = '--disable-ssl-check'
# Baidu PCS URLs etc.
OpenApiUrl = "https://openapi.baidu.com"
OpenApiVersion = "2.0"
OAuthUrl = OpenApiUrl + "/oauth/" + OpenApiVersion
ServerAuthUrl = OAuthUrl + "/authorize"
DeviceAuthUrl = OAuthUrl + "/device/code"
TokenUrl = OAuthUrl + "/token"
PcsUrl = 'https://pcs.baidu.com/rest/2.0/pcs/'
CPcsUrl = 'https://c.pcs.baidu.com/rest/2.0/pcs/'
DPcsUrl = 'https://d.pcs.baidu.com/rest/2.0/pcs/'
vi = sys.version_info
if vi.major != 2 or vi.minor < 7:
print("Error: Incorrect Python version. " + \
"You need 2.7 or above (but not 3)")
sys.exit(EIncorrectPythonVersion)
try:
# non-standard python library, needs 'pip install requests'
import requests
except:
print("Fail to import the 'requests' library\n" + \
"You need to install the 'requests' python library\n" + \
"You can install it by running 'pip install requests'")
raise
requests_version = requests.__version__.split('.')
if int(requests_version[0]) < 1:
print("You Python Requests Library version is to lower than 1.\n" + \
"You can run 'pip install requests' to upgrade it.")
raise
# non-standard python library, needs 'pip install requesocks'
#import requesocks as requests # if you need socks proxy
# when was your last time flushing a toilet?
__last_flush = time.time()
#__last_flush = 0
PrintFlushPeriodInSec = 5.0
# save cache if more than 10 minutes passed
last_cache_save = time.time()
CacheSavePeriodInSec = 10 * 60.0
# https://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
# https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
# 0 - black, 1 - red, 2 - green, 3 - yellow
# 4 - blue, 5 - magenta, 6 - cyan 7 - white
class TermColor:
NumOfColors = 8
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White = range(NumOfColors)
Nil = -1
def colorstr(msg, fg, bg):
CSI = '\x1b['
fgs = ''
bgs = ''
if fg >=0 and fg <= 7:
fgs = str(fg + 30)
if bg >= 0 and bg <=7:
bgs = str(bg + 40)
cs = ';'.join([fgs, bgs]).strip(';')
if cs:
return CSI + cs + 'm' + msg + CSI + '0m'
else:
return msg
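# A quick illustration of the escape sequences produced above (values follow from the 30/40 offsets):
#   colorstr('hi', TermColor.Red, TermColor.Nil)    -> '\x1b[31mhi\x1b[0m'
#   colorstr('hi', TermColor.White, TermColor.Blue) -> '\x1b[37;44mhi\x1b[0m'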
def prc(msg):
print(msg)
# we need to flush the output periodically to see the latest status
global __last_flush
now = time.time()
if now - __last_flush >= PrintFlushPeriodInSec:
sys.stdout.flush()
__last_flush = now
pr = prc
def prcolorc(msg, fg, bg):
if sys.stdout.isatty() and not sys.platform.startswith('win32'):
pr(colorstr(msg, fg, bg))
else:
pr(msg)
prcolor = prcolorc
def plog(tag, msg, showtime = True, showdate = False,
prefix = '', suffix = '', fg = TermColor.Nil, bg = TermColor.Nil):
if showtime or showdate:
now = time.localtime()
if showtime:
tag += time.strftime("[%H:%M:%S] ", now)
if showdate:
tag += time.strftime("[%Y-%m-%d] ", now)
if prefix:
prcolor("{}{}".format(tag, prefix), fg, bg)
prcolor("{}{}".format(tag, msg), fg, bg)
if suffix:
prcolor("{}{}".format(tag, suffix), fg, bg)
def perr(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<E> ', msg, showtime, showdate, prefix, suffix, TermColor.Red)
def pwarn(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<W> ', msg, showtime, showdate, prefix, suffix, TermColor.Yellow)
def pinfo(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<I> ', msg, showtime, showdate, prefix, suffix, TermColor.Green)
def pdbg(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<D> ', msg, showtime, showdate, prefix, suffix, TermColor.Cyan)
def askc(msg, enter = True):
pr(msg)
if enter:
pr('Press [Enter] when you are done')
return raw_input()
ask = askc
# print progress
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def pprgrc(finish, total, start_time = None, existing = 0,
prefix = '', suffix = '', seg = 20):
	# we don't want this to go to the log, so we use stderr
segth = seg * finish // total
percent = 100 * finish // total
eta = ''
now = time.time()
if start_time is not None and percent > 5 and finish > 0:
finishf = float(finish) - float(existing)
totalf = float(total)
remainf = totalf - float(finish)
elapsed = now - start_time
speed = human_speed(finishf / elapsed)
eta = 'ETA: ' + human_time(elapsed * remainf / finishf) + \
' (' + speed + ', ' + \
human_time(elapsed) + ' gone)'
msg = '\r' + prefix + '[' + segth * '=' + (seg - segth) * '_' + ']' + \
" {}% ({}/{})".format(percent, si_size(finish), si_size(total)) + \
' ' + eta + suffix
sys.stderr.write(msg + ' ') # space is used as a clearer
sys.stderr.flush()
pprgr = pprgrc
def si_size(num, precision = 3):
''' DocTests:
>>> si_size(1000)
u'1000B'
>>> si_size(1025)
u'1.001KB'
'''
numa = abs(num)
if numa < OneK:
return str(num) + 'B'
elif numa < OneM:
return str(round(float(num) / float(OneK), precision)) + 'KB'
elif numa < OneG:
return str(round(float(num) / float(OneM), precision)) + 'MB'
elif numa < OneT:
return str(round(float(num) / float(OneG), precision)) + 'GB'
elif numa < OneP:
return str(round(float(num) / float(OneT), precision)) + 'TB'
elif numa < OneE:
return str(round(float(num) / float(OneP), precision)) + 'PB'
else :
return str(num) + 'B'
si_table = {
'K' : OneK,
'M' : OneM,
'G' : OneG,
'T' : OneT,
'E' : OneE }
def interpret_size(si):
'''
>>> interpret_size(10)
10
>>> interpret_size('10')
10
>>> interpret_size('10b')
10
>>> interpret_size('10k')
10240
>>> interpret_size('10K')
10240
>>> interpret_size('10kb')
10240
>>> interpret_size('10kB')
10240
>>> interpret_size('a10')
Traceback (most recent call last):
ValueError
>>> interpret_size('10a')
Traceback (most recent call last):
KeyError: 'A'
'''
m = re.match(r"\s*(\d+)\s*([ac-z]?)(b?)\s*$", str(si), re.I)
if m:
if not m.group(2) and m.group(3):
times = 1
else:
times = si_table[m.group(2).upper()] if m.group(2) else 1
return int(m.group(1)) * times
else:
raise ValueError
def human_time(seconds):
''' DocTests:
>>> human_time(0)
u''
>>> human_time(122.1)
u'2m2s'
>>> human_time(133)
u'2m13s'
>>> human_time(12345678)
u'20W2D21h21m18s'
'''
isec = int(seconds)
s = isec % 60
m = isec / 60 % 60
h = isec / 60 / 60 % 24
d = isec / 60 / 60 / 24 % 7
w = isec / 60 / 60 / 24 / 7
result = ''
for t in [ ('W', w), ('D', d), ('h', h), ('m', m), ('s', s) ]:
if t[1]:
result += str(t[1]) + t[0]
return result
def human_speed(speed, precision = 0):
''' DocTests:
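	Illustrative values (they follow from the size thresholds below):
	>>> human_speed(512)
	u'512B/s'
	>>> human_speed(2048)
	u'2KB/s'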
'''
# https://stackoverflow.com/questions/15263597/python-convert-floating-point-number-to-certain-precision-then-copy-to-string/15263885#15263885
numfmt = '{{:.{}f}}'.format(precision)
if speed < OneK:
return numfmt.format(speed) + 'B/s'
elif speed < OneM:
return numfmt.format(speed / float(OneK)) + 'KB/s'
elif speed < OneG:
return numfmt.format(speed / float(OneM)) + 'MB/s'
elif speed < OneT:
return numfmt.format(speed / float(OneG)) + 'GB/s'
else:
return 'HAHA'
def remove_backslash(s):
return s.replace(r'\/', r'/')
def rb(s):
return s.replace(r'\/', r'/')
# no leading, trailing '/'
# remote path rule:
# - all public methods of ByPy shall accept remote path as "partial path"
# (before calling get_pcs_path())
# - all private methods of ByPy shall accept remote path as "full path"
# (after calling get_pcs_path())
def get_pcs_path(path):
if not path or path == '/' or path == '\\':
return AppPcsPath
return (AppPcsPath + '/' + path.strip('/')).rstrip('/')
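# For illustration (with the default AppPcsPath of '/apps/bypy'):
#   get_pcs_path('')              -> '/apps/bypy'
#   get_pcs_path('/photos/2013/') -> '/apps/bypy/photos/2013'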
# guarantee no-exception
def removefile(path, verbose = False):
result = ENoError
try:
if verbose:
pr("Removing local file '{}'".format(path))
if path:
os.remove(path)
except Exception:
perr("Fail to remove local fle '{}'.\nException:{}\n".format(path, traceback.format_exc()))
result = EFailToDeleteFile
return result
def removedir(path, verbose = False):
result = ENoError
try:
if verbose:
pr("Removing local directory '{}'".format(path))
if path:
shutil.rmtree(path)
except Exception:
perr("Fail to remove local directory '{}'.\nException:{}\n".format(path, traceback.format_exc()))
result = EFailToDeleteDir
return result
def makedir(path, verbose = False):
result = ENoError
try:
if verbose:
pr("Creating local directory '{}'".format(path))
if not (not path or path == '.'):
os.makedirs(path)
except os.error:
perr("Failed at creating local dir '{}'.\nException:\n'{}'".format(path, traceback.format_exc()))
result = EFailToCreateLocalDir
return result
# guarantee no-exception
def getfilesize(path):
size = -1
try:
size = os.path.getsize(path)
except os.error:
perr("Exception occured while getting size of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
return size
# guarantee no-exception
def getfilemtime(path):
mtime = -1
try:
mtime = os.path.getmtime(path)
except os.error:
perr("Exception occured while getting modification time of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
return mtime
# seems os.path.join() doesn't handle Unicode well
def joinpath(first, second, sep = os.sep):
head = ''
if first:
head = first.rstrip(sep) + sep
tail = ''
if second:
tail = second.lstrip(sep)
return head + tail
def donothing():
pass
# https://stackoverflow.com/questions/10883399/unable-to-encode-decode-pprint-output
class MyPrettyPrinter(pprint.PrettyPrinter):
def format(self, obj, context, maxlevels, level):
if isinstance(obj, unicode):
#return (obj.encode('utf8'), True, False)
return (obj, True, False)
if isinstance(obj, str):
convert = False
#for c in obj:
# if ord(c) >= 128:
# convert = True
# break
try:
codecs.decode(obj)
except:
convert = True
if convert:
return ("0x{}".format(binascii.hexlify(obj)), True, False)
return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
# there is room for more space optimization (like using the tree structure),
# but it's not added at the moment. for now, it's just simple pickle.
# SQLite might be better for portability
# NOTE: file names are case-sensitive
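# An illustrative sketch of the in-memory layout built below (field names follow __store();
# the concrete values here are made up):
#   cached.cache = {
#       '/abs/dir': {
#           'file.bin': {'size': 1048576, 'mtime': 1389600000.0,
#                        'md5': '<16-byte digest>', 'crc32': 305419896}
#       }
#   }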
class cached(object):
''' simple decorator for hash caching (using pickle) '''
usecache = True
verbose = False
debug = False
cache = {}
cacheloaded = False
dirty = False
	# we don't do cache loading / unloading here because it's a decorator,
	# and probably multiple instances are created for md5, crc32, etc.
	# it's a bit complex, and I thus don't have the confidence to do it in ctor/dtor
def __init__(self, f):
self.f = f
def __call__(self, *args):
assert len(args) > 0
result = None
path = args[0]
dir, file = os.path.split(path) # the 'filename' parameter
absdir = os.path.abspath(dir)
if absdir in cached.cache:
entry = cached.cache[absdir]
if file in entry:
info = entry[file]
if self.f.__name__ in info \
and info['size'] == getfilesize(path) \
and info['mtime'] == getfilemtime(path) \
and self.f.__name__ in info \
and cached.usecache:
result = info[self.f.__name__]
if cached.debug:
pdbg("Cache hit for file '{}',\n{}: {}\nsize: {}\nmtime: {}".format(
path, self.f.__name__,
result if isinstance(result, (int, long, float, complex)) else binascii.hexlify(result),
info['size'], info['mtime']))
else:
result = self.f(*args)
self.__store(info, path, result)
else:
result = self.f(*args)
entry[file] = {}
info = entry[file]
self.__store(info, path, result)
else:
result = self.f(*args)
cached.cache[absdir] = {}
entry = cached.cache[absdir]
entry[file] = {}
info = entry[file]
self.__store(info, path, result)
return result
def __store(self, info, path, value):
cached.dirty = True
info['size'] = getfilesize(path)
info['mtime'] = getfilemtime(path)
info[self.f.__name__] = value
if cached.debug:
situation = "Storing cache"
if cached.usecache:
situation = "Cache miss"
pdbg((situation + " for file '{}',\n{}: {}\nsize: {}\nmtime: {}").format(
path, self.f.__name__,
value if isinstance(value, (int, long, float, complex)) else binascii.hexlify(value),
info['size'], info['mtime']))
# periodically save to prevent loss in case of system crash
global last_cache_save
now = time.time()
if now - last_cache_save >= CacheSavePeriodInSec:
cached.savecache()
last_cache_save = now
if cached.debug:
pdbg("Periodically saving Hash Cash")
@staticmethod
def loadcache():
		# load the cache even if we don't use cached hash values,
		# because we will save the (possibly updated) hash values
if not cached.cacheloaded: # no double-loading
if cached.verbose:
pr("Loading Hash Cache File '{}'...".format(HashCachePath))
if os.path.exists(HashCachePath):
try:
with open(HashCachePath, 'rb') as f:
cached.cache = pickle.load(f)
cached.cacheloaded = True
if cached.verbose:
pr("Hash Cache File loaded.")
except pickle.PickleError:
perr("Fail to load the Hash Cache, no caching. Exception:\n{}".format(traceback.format_exc()))
cached.cache = {}
else:
if cached.verbose:
pr("Hash Cache File not found, no caching")
else:
if cached.verbose:
pr("Not loading Hash Cache since 'cacheloaded' is '{}'".format( cached.cacheloaded))
return cached.cacheloaded
@staticmethod
def savecache(force_saving = False):
saved = False
# even if we were unable to load the cache, we still save it.
if cached.dirty or force_saving:
if cached.verbose:
pr("Saving Hash Cache...")
try:
with open(HashCachePath, 'wb') as f:
pickle.dump(cached.cache, f)
if cached.verbose:
pr("Hash Cache saved.")
saved = True
cached.dirty = False
except Exception:
perr("Failed to save Hash Cache. Exception:\n".format(traceback.format_exc()))
else:
if cached.verbose:
pr("Not saving Hash Cache since 'dirty' is '{}' and 'force_saving' is '{}'".format(
cached.dirty, force_saving))
return saved
@staticmethod
def cleancache():
if cached.loadcache():
for absdir in cached.cache.keys():
if not os.path.exists(absdir):
if cached.verbose:
pr("Directory: '{}' no longer exists, removing the cache entries".format(absdir))
cached.dirty = True
del cached.cache[absdir]
else:
oldfiles = cached.cache[absdir]
files = {}
needclean = False
for f in oldfiles.keys():
#p = os.path.join(absdir, f)
p = joinpath(absdir, f)
if os.path.exists(p):
files[f] = oldfiles[f]
else:
if cached.verbose:
needclean = True
pr("File '{}' no longer exists, removing the cache entry".format(p))
if needclean:
cached.dirty = True
cached.cache[absdir] = files
cached.savecache()
@cached
def md5(filename, slice = OneM):
m = hashlib.md5()
with open(filename, "rb") as f:
while True:
buf = f.read(slice)
if buf:
m.update(buf)
else:
break
return m.digest()
# slice md5 for baidu rapidupload
@cached
def slice_md5(filename):
m = hashlib.md5()
with open(filename, "rb") as f:
buf = f.read(256 * OneK)
m.update(buf)
return m.digest()
@cached
def crc32(filename, slice = OneM):
with open(filename, "rb") as f:
buf = f.read(slice)
crc = binascii.crc32(buf)
while True:
buf = f.read(slice)
if buf:
crc = binascii.crc32(buf, crc)
else:
break
return crc & 0xffffffff
def enable_http_logging():
httplib.HTTPConnection.debuglevel = 1
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def ls_type(isdir):
return 'D' if isdir else 'F'
def ls_time(itime):
return time.strftime('%Y-%m-%d, %H:%M:%S', time.localtime(itime))
def print_pcs_list(json, foundmsg = "Found:", notfoundmsg = "Nothing found."):
list = json['list']
if list:
pr(foundmsg)
for f in list:
pr("{} {} {} {} {} {}".format(
ls_type(f['isdir']),
f['path'],
f['size'],
ls_time(f['ctime']),
ls_time(f['mtime']),
f['md5']))
else:
pr(notfoundmsg)
# tree represented using a dictionary (Obsolete: OrderedDict no longer required)
# NOTE: No own-name is kept, so the caller needs to keep track of that
# NOTE: Case-sensitive, as I don't want to waste time wrapping up a case-insensitive one
# singly-linked list, no backwards traversal capability
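# A minimal usage sketch (illustrative names only):
#   root = PathDictTree()
#   docs = root.add('docs', PathDictTree('D'))
#   docs.add('readme.txt', PathDictTree('F', size=10))
#   root.get('docs/readme.txt')  -> the 'readme.txt' node
#   root.allpath()               -> ['docs', 'docs/readme.txt']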
class PathDictTree(dict):
def __init__(self, type = 'D', **kwargs):
self.type = type
self.extra = {}
for k, v in kwargs.items():
self.extra[k] = v
super(PathDictTree, self).__init__()
def __str__(self):
return self.__str('')
def __str(self, prefix):
result = ''
for k, v in self.iteritems():
result += "{} - {}{} - size: {} - md5: {} \n".format(
v.type, prefix, k,
v.extra['size'] if 'size' in v.extra else '',
binascii.hexlify(v.extra['md5']) if 'md5' in v.extra else '')
for k, v in self.iteritems():
if v.type == 'D':
result += v.__str(prefix + '/' + k)
return result
def add(self, name, child):
self[name] = child
return child
# returns the child tree at the given path
# assume that path is only separated by '/', instead of '\\'
def get(self, path):
place = self
if path:
# Linux can have file / folder names with '\\'?
if sys.platform.startswith('win32'):
assert '\\' not in path
route = filter(None, path.split('/'))
for part in route:
if part in place:
sub = place[part]
assert place.type == 'D' # sanity check
place = sub
else:
return None
return place
# return a string list of all 'path's in the tree
def allpath(self):
result = []
for k, v in self.items():
result.append(k)
if v.type == 'D':
for p in self.get(k).allpath():
result.append(k + '/' + p)
return result
class ByPy(object):
'''The main class of the bypy program'''
# public static properties
HelpMarker = "Usage:"
ListFormatDict = {
'$t' : (lambda json: ls_type(json['isdir'])),
'$f' : (lambda json: json['path'].split('/')[-1]),
'$c' : (lambda json: ls_time(json['ctime'])),
'$m' : (lambda json: ls_time(json['mtime'])),
'$d' : (lambda json: str(json['md5'] if 'md5' in json else '')),
'$s' : (lambda json: str(json['size'])),
'$i' : (lambda json: str(json['fs_id'])),
'$b' : (lambda json: str(json['block_list'] if 'block_list' in json else '')),
'$u' : (lambda json: 'HasSubDir' if 'ifhassubdir' in json and json['ifhassubdir'] else 'NoSubDir'),
'$$' : (lambda json: '$')
}
def __init__(self,
slice_size = DefaultSliceSize,
dl_chunk_size = DefaultDlChunkSize,
verify = True,
retry = 5, timeout = None,
quit_when_fail = False,
listfile = None,
resumedownload = True,
extraupdate = lambda: (),
incregex = '',
ondup = '',
followlink = True,
checkssl = True,
verbose = 0, debug = False):
self.__slice_size = slice_size
self.__dl_chunk_size = dl_chunk_size
self.__verify = verify
self.__retry = retry
self.__quit_when_fail = quit_when_fail
self.__timeout = timeout
self.__listfile = listfile
self.__resumedownload = resumedownload
self.__extraupdate = extraupdate
self.__incregex = incregex
self.__incregmo = re.compile(incregex)
if ondup and len(ondup) > 0:
self.__ondup = ondup[0].upper()
else:
self.__ondup = 'O' # O - Overwrite* S - Skip P - Prompt
self.__followlink = followlink;
self.__checkssl = checkssl
self.Verbose = verbose
self.Debug = debug
# the prophet said: thou shalt initialize
self.__existing_size = 0
self.__json = {}
self.__access_token = ''
self.__remote_json = {}
self.__slice_md5s = []
if self.__listfile and os.path.exists(self.__listfile):
with open(self.__listfile, 'r') as f:
self.__list_file_contents = f.read()
else:
self.__list_file_contents = None
# only if user specifies '-ddd' or more 'd's, the following
# debugging information will be shown, as it's very talkative.
if self.Debug >= 3:
# these two lines enable debugging at httplib level (requests->urllib3->httplib)
# you will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# the only thing missing will be the response.body which is not logged.
enable_http_logging()
if not self.__load_local_json():
			# no need to call __load_local_json() again as __auth() will load the json & access token.
result = self.__auth()
if result != ENoError:
perr("Program authorization FAILED.\n" + \
"You need to authorize this program before using any PCS functions.\n" + \
"Quitting...\n")
onexit(result)
def pv(self, msg, **kwargs):
if self.Verbose:
pr(msg)
def pd(self, msg, level = 1, **kwargs):
if self.Debug >= level:
pdbg(msg, kwargs)
def shalloverwrite(self, prompt):
if self.__ondup == 'S':
return False
elif self.__ondup == 'P':
ans = ask(prompt, False).upper()
if not ans.startswith('Y'):
return False
return True
def __print_error_json(self, r):
try:
dj = r.json()
if 'error_code' in dj and 'error_msg' in dj:
ec = dj['error_code']
et = dj['error_msg']
msg = ''
if ec == IEMD5NotFound:
pf = pinfo
msg = et
else:
pf = perr
msg = "Error code: {}\nError Description: {}".format(ec, et)
pf(msg)
except Exception:
perr('Error parsing JSON Error Code from:\n{}'.format(rb(r.text)))
perr('Exception: {}'.format(traceback.format_exc()))
def __dump_exception(self, ex, url, pars, r, act):
if self.Debug or self.Verbose:
perr("Error accessing '{}'".format(url))
if ex and isinstance(ex, Exception) and self.Debug:
perr("Exception: {}".format(ex))
tb = traceback.format_exc()
if tb:
pr(tb)
perr("Function: {}".format(act.__name__))
perr("Website parameters: {}".format(pars))
if r:
perr("HTTP Status Code: {}".format(r.status_code))
self.__print_error_json(r)
perr("Website returned: {}".format(rb(r.text)))
# always append / replace the 'access_token' parameter in the https request
def __request_work(self, url, pars, act, method, actargs = None, addtoken = True, dumpex = True, **kwargs):
result = ENoError
r = None
self.__extraupdate()
parsnew = pars.copy()
if addtoken:
parsnew['access_token'] = self.__access_token
try:
self.pd(method + ' ' + url)
self.pd("actargs: {}".format(actargs))
self.pd("Params: {}".format(pars))
if method.upper() == 'GET':
r = requests.get(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
elif method.upper() == 'POST':
r = requests.post(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Request Headers: {}".format(
# pprint.pformat(r.request.headers)), 2)
sc = r.status_code
self.pd("HTTP Status Code: {}".format(sc))
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Header returned: {}".format(pprint.pformat(r.headers)), 2)
#self.pd("Website returned: {}".format(rb(r.text)), 3)
if sc == requests.codes.ok or sc == 206: # 206 Partial Content
if sc == requests.codes.ok:
self.pd("Request OK, processing action")
else:
self.pd("206 Partial Content")
result = act(r, actargs)
if result == ENoError:
self.pd("Request all goes fine")
else:
ec = 0
try:
j = r.json()
ec = j['error_code']
# error print is done in __dump_exception()
# self.__print_error_json(r)
except ValueError:
perr("Not valid error JSON")
# 6 (sc: 403): No permission to access user data
# 110 (sc: 401): Access token invalid or no longer valid
# 111 (sc: 401): Access token expired
if ec == 111 or ec == 110 or ec == 6: # and sc == 401:
self.pd("Need to refresh token, refreshing")
if ENoError == self.__refresh_token(): # refresh the token and re-request
# TODO: avoid dead recursive loops
# TODO: properly pass retry
result = self.__request(url, pars, act, method, actargs, True, addtoken, dumpex, **kwargs)
else:
result = EFatal
perr("FATAL: Token refreshing failed, can't continue.\nQuitting...\n")
onexit(result)
# File md5 not found, you should use upload API to upload the whole file.
elif ec == IEMD5NotFound: # and sc == 404:
self.pd("MD5 not found, rapidupload failed")
result = ec
# errors that make retrying meaningless
elif (
ec == 31061 or # sc == 400 file already exists
ec == 31062 or # sc == 400 file name is invalid
ec == 31063 or # sc == 400 file parent path does not exist
ec == 31064 or # sc == 403 file is not authorized
ec == 31065 or # sc == 400 directory is full
ec == 31066): # sc == 403 (indeed 404) file does not exist
result = ec
if dumpex:
self.__dump_exception(None, url, pars, r, act)
else:
result = ERequestFailed
if dumpex:
self.__dump_exception(None, url, pars, r, act)
except (requests.exceptions.RequestException,
socket.error) as ex:
result = ERequestFailed
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
except Exception as ex: # shall i quit? i think so.
result = EFatal
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
perr("Fatal Exception.\nQuitting...\n")
perr("If you see any 'InsecureRequestWarning' message in the error output, " + \
"I think in most of the cases, " + \
"you can disable the SSL check by running this program " + \
"with the '" + DisableSslCheckOption + "' option.")
onexit(result)
# we eat the exception, and use return code as the only
		# error notification method; we don't want to mix the two
#raise # must notify the caller about the failure
return result
def __request(self, url, pars, act, method, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
tries = 1
if retry:
tries = self.__retry
i = 0
result = ERequestFailed
# Change the User-Agent to avoid server fuss
kwnew = kwargs.copy()
if 'headers' not in kwnew:
kwnew['headers'] = { 'User-Agent': UserAgent }
else:
kwnew['headers']['User-Agent'] = UserAgent
while True:
result = self.__request_work(url, pars, act, method, actargs, addtoken, dumpex, **kwnew)
i += 1
			# only ERequestFailed needs a retry; other errors are returned directly
if result == ERequestFailed:
if i < tries:
# algo changed: delay more after each failure
delay = RetryDelayInSec * i
perr("Waiting {} seconds before retrying...".format(delay))
time.sleep(delay)
perr("Request Try #{} / {}".format(i + 1, tries))
else:
perr("Maximum number ({}) of tries failed.".format(tries))
if self.__quit_when_fail:
onexit(EMaxRetry)
break
else:
break
return result
def __get(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'GET', actargs, retry, addtoken, dumpex, **kwargs)
def __post(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'POST', actargs, retry, addtoken, dumpex, **kwargs)
# direction: True - upload, False - download
def __shallinclude(self, lpath, rpath, direction):
arrow = '==>' if direction else '<=='
checkpath = lpath if direction else rpath
# TODO: bad practice, see os.access() document for more info
if direction: # upload
if not os.path.exists(lpath):
perr("'{}' {} '{}' skipped since local path no longer exists".format(
lpath, arrow, rpath));
return False
else: # download
if os.path.exists(lpath) and (not os.access(lpath, os.R_OK)):
perr("'{}' {} '{}' skipped due to permission".format(
lpath, arrow, rpath));
return False
if '\\' in os.path.basename(checkpath):
perr("'{}' {} '{}' skipped due to problemic '\\' in the path".format(
lpath, arrow, rpath));
return False
include = (not self.__incregex) or self.__incregmo.match(checkpath)
if not include:
self.pv("'{}' {} '{}' skipped as it's not included in the regex pattern".format(
lpath, arrow, rpath));
return include
def __replace_list_format(self, fmt, j):
output = fmt
for k, v in ByPy.ListFormatDict.iteritems():
output = output.replace(k, v(j))
return output
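	# For instance (an illustrative example), with fmt '$t $f $s' and a JSON entry for a
	# 1024-byte file '/apps/bypy/report.txt', __replace_list_format would yield
	# 'F report.txt 1024'; any '$x' placeholder not in ListFormatDict is left untouched.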
def __load_local_json(self):
try:
with open(TokenFilePath, 'rb') as infile:
self.__json = json.load(infile)
self.__access_token = self.__json['access_token']
self.pd("Token loaded:")
self.pd(self.__json)
return True
except IOError:
perr('Error while loading baidu pcs token:')
perr(traceback.format_exc())
return False
def __store_json_only(self, j):
self.__json = j
self.__access_token = self.__json['access_token']
self.pd("access token: " + self.__access_token)
self.pd("Authorize JSON:")
self.pd(self.__json)
try:
with os.fdopen(os.open(TokenFilePath, os.O_WRONLY | os.O_CREAT, 0600),'wb') as outfile:
json.dump(self.__json, outfile)
return ENoError
except Exception:
perr("Exception occured while trying to store access token:\n" \
"Exception:\n{}".format(traceback.format_exc()))
return EFileWrite
def __store_json(self, r):
return self.__store_json_only(r.json())
def __server_auth_act(self, r, args):
return self.__store_json(r)
def __server_auth(self):
params = {
'client_id' : ApiKey,
'response_type' : 'code',
'redirect_uri' : 'oob',
'scope' : 'basic netdisk' }
pars = urllib.urlencode(params)
msg = 'Please visit:\n{}\nAnd authorize this app'.format(ServerAuthUrl + '?' + pars) + \
'\nPaste the Authorization Code here within 10 minutes.'
auth_code = ask(msg).strip()
self.pd("auth_code: {}".format(auth_code))
		pr('Authorizing, please be patient, it may take up to {} seconds...'.format(self.__timeout))
pars = {
'code' : auth_code,
'redirect_uri' : 'oob' }
result = None
for auth in AuthServerList:
(url, retry, msg) = auth
pr(msg)
result = self.__get(url, pars, self.__server_auth_act, retry = retry, addtoken = False)
if result == ENoError:
break
if result == ENoError:
pr("Successfully authorized")
else:
perr("Fatal: All server authorizations failed.")
return result
def __device_auth_act(self, r, args):
dj = r.json()
return self.__get_token(dj)
def __device_auth(self):
pars = {
'client_id' : ApiKey,
'response_type' : 'device_code',
'scope' : 'basic netdisk'}
return self.__get(DeviceAuthUrl, pars, self.__device_auth_act, addtoken = False)
def __auth(self):
if ServerAuth:
return self.__server_auth()
else:
return self.__device_auth()
def __get_token_act(self, r, args):
return self.__store_json(r)
def __get_token(self, deviceJson):
msg = 'Please visit:\n' + deviceJson['verification_url'] + \
'\nwithin ' + str(deviceJson['expires_in']) + ' seconds\n' + \
'Input the CODE: {}\n'.format(deviceJson['user_code']) + \
'and Authorize this little app.\n' + \
"Press [Enter] when you've finished\n"
ask(msg)
pars = {
'grant_type' : 'device_token',
'code' : deviceJson['device_code'],
'client_id' : ApiKey,
'client_secret' : SecretKey}
return self.__get(TokenUrl, pars, self.__get_token_act, addtoken = False)
def __refresh_token_act(self, r, args):
return self.__store_json(r)
def __refresh_token(self):
if ServerAuth:
pr('Refreshing, please be patient, it may take up to {} seconds...'.format(self.__timeout))
pars = {
'grant_type' : 'refresh_token',
'refresh_token' : self.__json['refresh_token'] }
result = None
for refresh in RefreshServerList:
(url, retry, msg) = refresh
pr(msg)
result = self.__get(url, pars, self.__refresh_token_act, retry = retry, addtoken = False)
if result == ENoError:
break
if result == ENoError:
pr("Token successfully refreshed")
else:
perr("Token-refreshing on all the servers failed")
return result
else:
pars = {
'grant_type' : 'refresh_token',
'refresh_token' : self.__json['refresh_token'],
'client_secret' : SecretKey,
'client_id' : ApiKey }
return self.__post(TokenUrl, pars, self.__refresh_token_act)
def __quota_act(self, r, args):
j = r.json()
pr('Quota: ' + si_size(j['quota']))
pr('Used: ' + si_size(j['used']))
return ENoError
def help(self, command): # this comes first to make it easy to spot
''' Usage: help command - provide some information for the command '''
for i, v in ByPy.__dict__.iteritems():
if callable(v) and v.__doc__ and v.__name__ == command :
help = v.__doc__.strip()
pos = help.find(ByPy.HelpMarker)
if pos != -1:
pr("Usage: " + help[pos + len(ByPy.HelpMarker):].strip())
def refreshtoken(self):
''' Usage: refreshtoken - refresh the access token '''
return self.__refresh_token()
def info(self):
return self.quota()
def quota(self):
''' Usage: quota/info - displays the quota information '''
pars = {
'method' : 'info' }
return self.__get(PcsUrl + 'quota', pars, self.__quota_act)
# return:
# 0: local and remote files are of same size
# 1: local file is larger
# 2: remote file is larger
# -1: inconclusive (probably invalid remote json)
def __compare_size(self, lsize, rjson):
if 'size' in rjson:
rsize = rjson['size']
if lsize == rsize:
return 0;
elif lsize > rsize:
return 1;
else:
return 2
else:
return -1
def __verify_current_file(self, j, gotlmd5):
# if we really don't want to verify
if self.__current_file == '/dev/null' and not self.__verify:
return ENoError
rsize = 0
rmd5 = 0
# always perform the size check, even if __verify is False
if 'size' in j:
rsize = j['size']
else:
perr("Unable to verify JSON: '{}', as no 'size' entry found".format(j))
return EHashMismatch
if 'md5' in j:
rmd5 = binascii.unhexlify(j['md5'])
#elif 'block_list' in j and len(j['block_list']) > 0:
# rmd5 = j['block_list'][0]
#else:
# # quick hack for meta's 'block_list' field
# pwarn("No 'md5' nor 'block_list' found in json:\n{}".format(j))
# pwarn("Assuming MD5s match, checking size ONLY.")
# rmd5 = self.__current_file_md5
else:
perr("Unable to verify JSON: '{}', as no 'md5' entry found".format(j))
return EHashMismatch
self.pd("Comparing local file '{}' and remote file '{}'".format(
self.__current_file, j['path']))
self.pd("Local file size : {}".format(self.__current_file_size))
self.pd("Remote file size: {}".format(rsize))
if self.__current_file_size == rsize:
self.pd("Local file and remote file sizes match")
if self.__verify:
if not gotlmd5:
self.__current_file_md5 = md5(self.__current_file)
self.pd("Local file MD5 : {}".format(binascii.hexlify(self.__current_file_md5)))
self.pd("Remote file MD5: {}".format(binascii.hexlify(rmd5)))
if self.__current_file_md5 == rmd5:
self.pd("Local file and remote file hashes match")
return ENoError
else:
pinfo("Local file and remote file hashes DON'T match")
return EHashMismatch
else:
return ENoError
else:
pinfo("Local file and remote file sizes DON'T match")
return EHashMismatch
def __get_file_info_act(self, r, args):
remotefile = args
j = r.json()
self.pd("List json: {}".format(j))
l = j['list']
for f in l:
if f['path'] == remotefile: # case-sensitive
self.__remote_json = f
self.pd("File info json: {}".format(self.__remote_json))
return ENoError;
return EFileNotFound
# the 'meta' command sucks, since it doesn't supply MD5 ...
# now the JSON is written to self.__remote_json, due to Python call-by-reference chaos
# https://stackoverflow.com/questions/986006/python-how-do-i-pass-a-variable-by-reference
# as if not enough confusion in Python call-by-reference
def __get_file_info(self, remotefile, **kwargs):
rdir, rfile = posixpath.split(remotefile)
self.pd("__get_file_info(): rdir : {} | rfile: {}".format(rdir, rfile))
if rdir and rfile:
pars = {
'method' : 'list',
'path' : rdir,
'by' : 'name', # sort in case we can use binary-search, etc. in the future.
'order' : 'asc' }
return self.__get(PcsUrl + 'file', pars, self.__get_file_info_act, remotefile, **kwargs)
else:
perr("Invalid remotefile '{}' specified.".format(remotefile))
return EArgument
def __list_act(self, r, args):
(remotedir, fmt) = args
j = r.json()
pr("{} ({}):".format(remotedir, fmt))
for f in j['list']:
pr(self.__replace_list_format(fmt, f))
return ENoError
def ls(self, remotepath = '',
fmt = '$t $f $s $m $d',
sort = 'name', order = 'asc'):
return self.list(remotepath, fmt, sort, order)
def list(self, remotepath = '',
fmt = '$t $f $s $m $d',
sort = 'name', order = 'asc'):
''' Usage: list/ls [remotepath] [format] [sort] [order] - list the 'remotepath' directory at Baidu PCS
remotepath - the remote path at Baidu PCS. default: root directory '/'
format - specifies how the list is displayed
$t - Type: Directory ('D') or File ('F')
$f - File name
$c - Creation time
$m - Modification time
$d - MD5 hash
$s - Size
$$ - The '$' sign
So '$t - $f - $s - $$' will display "Type - File - Size - $"
Default format: '$t $f $s $m $d'
sort - sorting by [name, time, size]. default: 'name'
order - sorting order [asc, desc]. default: 'asc'
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'list',
'path' : rpath,
'by' : sort,
'order' : order }
return self.__get(PcsUrl + 'file', pars, self.__list_act, (rpath, fmt))
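# A small usage sketch for the command above (paths and construction are
# illustrative; ByPy() is assumed to be constructible with its defaults):
#
#   by = ByPy()
#   by.list('/backup', '$t $f $s')          # type, name and size of '/backup'
#   by.list('', '$f $m', 'time', 'desc')    # root directory, newest first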
def __meta_act(self, r, args):
return self.__list_act(r, args)
# multi-file meta is not implemented for it's low usage
def meta(self, remotepath, fmt = '$t $u $f $s $c $m $i $b'):
''' Usage: meta <remotepath> [format] - \
get information of the given path (dir / file) at Baidu Yun.
remotepath - the remote path
format - specifies how the list is displayed
it supports all the format variables in the 'list' command, and additionally the following:
$i - fs_id
$b - MD5 block_list
$u - Has subdirectories or not
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'meta',
'path' : rpath }
return self.__get(PcsUrl + 'file', pars,
self.__meta_act, (rpath, fmt))
def __combine_file_act(self, r, args):
result = self.__verify_current_file(r.json(), False)
if result == ENoError:
self.pv("'{}' =C=> '{}' OK.".format(self.__current_file, args))
else:
perr("'{}' =C=> '{}' FAILED.".format(self.__current_file, args))
# save the md5 list, in case we add a resume function to this program later
self.__last_slice_md5s = self.__slice_md5s
self.__slice_md5s = []
return result
def __combine_file(self, remotepath, ondup = 'overwrite'):
pars = {
'method' : 'createsuperfile',
'path' : remotepath,
'ondup' : ondup }
# always print this, so that we can use these data to combine file later
pr("Combining the following MD5 slices:")
for m in self.__slice_md5s:
pr(m)
param = { 'block_list' : self.__slice_md5s }
return self.__post(PcsUrl + 'file',
pars, self.__combine_file_act,
remotepath,
data = { 'param' : json.dumps(param) } )
def __upload_slice_act(self, r, args):
j = r.json()
# slices must be verified and re-uploaded if MD5s don't match,
# otherwise, it makes the uploading slower at the end
rsmd5 = j['md5']
self.pd("Uploaded MD5 slice: " + rsmd5)
if self.__current_slice_md5 == binascii.unhexlify(rsmd5):
self.__slice_md5s.append(rsmd5)
self.pv("'{}' >>==> '{}' OK.".format(self.__current_file, args))
return ENoError
else:
perr("'{}' >>==> '{}' FAILED.".format(self.__current_file, args))
return EHashMismatch
def __upload_slice(self, remotepath):
pars = {
'method' : 'upload',
'type' : 'tmpfile'}
return self.__post(CPcsUrl + 'file',
pars, self.__upload_slice_act, remotepath,
# wants to be proper? properness doesn't work (search this sentence for more occurrences)
#files = { 'file' : (os.path.basename(self.__current_file), self.__current_slice) } )
files = { 'file' : ('file', self.__current_slice) } )
def __upload_file_slices(self, localpath, remotepath, ondup = 'overwrite'):
pieces = MaxSlicePieces
slice = self.__slice_size
if self.__current_file_size <= self.__slice_size * MaxSlicePieces:
# slice them using slice size
pieces = (self.__current_file_size + self.__slice_size - 1 ) / self.__slice_size
else:
# the following comparison is done in the caller:
# elif self.__current_file_size <= MaxSliceSize * MaxSlicePieces:
# no choice, but need to slice them to 'MaxSlicePieces' pieces
slice = (self.__current_file_size + MaxSlicePieces - 1) / MaxSlicePieces
self.pd("Slice size: {}, Pieces: {}".format(slice, pieces))
i = 0
ec = ENoError
with open(self.__current_file, 'rb') as f:
start_time = time.time()
while i < pieces:
self.__current_slice = f.read(slice)
m = hashlib.md5()
m.update(self.__current_slice)
self.__current_slice_md5 = m.digest()
self.pd("Uploading MD5 slice: {}, #{} / {}".format(
binascii.hexlify(self.__current_slice_md5),
i + 1, pieces))
j = 0
while True:
ec = self.__upload_slice(remotepath)
if ec == ENoError:
self.pd("Slice MD5 match, continuing next slice")
pprgr(f.tell(), self.__current_file_size, start_time)
break
elif j < self.__retry:
j += 1
# TODO: improve this, or merge it with the __request retry logic
perr("Slice MD5 mismatch, waiting {} seconds before retrying...".format(RetryDelayInSec))
time.sleep(RetryDelayInSec)
perr("Retrying #{} / {}".format(j + 1, self.__retry))
else:
self.__slice_md5s = []
break
i += 1
if ec != ENoError:
return ec
else:
#self.pd("Sleep 2 seconds before combining, just to be safer.")
#time.sleep(2)
return self.__combine_file(remotepath, ondup = 'overwrite')
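# Worked example of the slicing arithmetic above (numbers are illustrative):
# for a 1 GiB file with a 10 MiB slice size, and assuming 10 MiB * MaxSlicePieces
# still covers the file, the first branch gives
#   pieces = (1024 MiB + 10 MiB - 1) // 10 MiB = 103   (ceiling division)
# otherwise the slice size itself is grown so the file fits into exactly
# MaxSlicePieces pieces.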
def __rapidupload_file_act(self, r, args):
if self.__verify:
self.pd("Not strong-consistent, sleep 1 second before verification")
time.sleep(1)
return self.__verify_current_file(r.json(), True)
else:
return ENoError
def __rapidupload_file(self, localpath, remotepath, ondup = 'overwrite'):
self.__current_file_md5 = md5(self.__current_file)
self.__current_file_slice_md5 = slice_md5(self.__current_file)
self.__current_file_crc32 = crc32(self.__current_file)
md5str = binascii.hexlify(self.__current_file_md5)
slicemd5str = binascii.hexlify(self.__current_file_slice_md5)
crcstr = hex(self.__current_file_crc32)
pars = {
'method' : 'rapidupload',
'path' : remotepath,
'content-length' : self.__current_file_size,
'content-md5' : md5str,
'slice-md5' : slicemd5str,
'content-crc32' : crcstr,
'ondup' : ondup }
self.pd("RapidUploading Length: {} MD5: {}, Slice-MD5: {}, CRC: {}".format(
self.__current_file_size, md5str, slicemd5str, crcstr))
return self.__post(PcsUrl + 'file', pars, self.__rapidupload_file_act)
def __upload_one_file_act(self, r, args):
result = self.__verify_current_file(r.json(), False)
if result == ENoError:
self.pv("'{}' ==> '{}' OK.".format(self.__current_file, args))
else:
perr("'{}' ==> '{}' FAILED.".format(self.__current_file, args))
return result
def __upload_one_file(self, localpath, remotepath, ondup = 'overwrite'):
pars = {
'method' : 'upload',
'path' : remotepath,
'ondup' : ondup }
with open(localpath, "rb") as f:
return self.__post(CPcsUrl + 'file',
pars, self.__upload_one_file_act, remotepath,
# wants to be proper? properness doesn't work
# there seems to be a bug at Baidu's handling of http text:
# Content-Disposition: ... filename=utf-8''yourfile.ext
# (pass '-ddd' to this program to verify this)
# when you specify a unicode file name, which will be encoded
# using the utf-8'' syntax
# so, we put a work-around here: we always call our file 'file'
# NOTE: an empty file name '' doesn't seem to work, so we
# need to give it some arbitrary (non-empty) name.
# Apparently, Baidu PCS doesn't use this file name for
# checking / verification, so we are probably safe here.
#files = { 'file' : (os.path.basename(localpath), f) })
files = { 'file' : ('file', f) })
#TODO: upload empty directories as well?
def __walk_upload(self, localpath, remotepath, ondup, walk):
(dirpath, dirnames, filenames) = walk
rdir = os.path.relpath(dirpath, localpath)
if rdir == '.':
rdir = ''
else:
rdir = rdir.replace('\\', '/')
rdir = (remotepath + '/' + rdir).rstrip('/') # '/' bites
result = ENoError
for name in filenames:
#lfile = os.path.join(dirpath, name)
lfile = joinpath(dirpath, name)
self.__current_file = lfile
self.__current_file_size = getfilesize(lfile)
rfile = rdir + '/' + name.replace('\\', '/')
# if the corresponding file matches at Baidu Yun, then don't upload
upload = True
self.__remote_json = {}
subresult = self.__get_file_info(rfile, dumpex = False)
if subresult == ENoError: # same-name remote file exists
if ENoError == self.__verify_current_file(self.__remote_json, False):
# the two files are the same
upload = False
self.pv("Remote file '{}' already exists, skip uploading".format(rfile))
else: # the two files are different
if not self.shalloverwrite("Remote file '{}' exists but is different, ".format(rfile) + \
"do you want to overwrite it? [y/N]"):
upload = False
if upload:
fileresult = self.__upload_file(lfile, rfile, ondup)
if fileresult != ENoError:
result = fileresult # we still continue
else:
pinfo("Remote file '{}' exists but is different, skip uploading".format(rfile))
# next / continue
return result
def __upload_dir(self, localpath, remotepath, ondup = 'overwrite'):
self.pd("Uploading directory '{}' to '{}'".format(localpath, remotepath))
# it's so minor that we don't care about the return value
self.__mkdir(remotepath, dumpex = False)
for walk in os.walk(localpath, followlinks=self.__followlink):
self.__walk_upload(localpath, remotepath, ondup, walk)
def __upload_file(self, localpath, remotepath, ondup = 'overwrite'):
# TODO: this is a quick patch
if not self.__shallinclude(localpath, remotepath, True):
# since we are not going to upload it, there is no error
return ENoError
self.__current_file = localpath
self.__current_file_size = getfilesize(localpath)
result = ENoError
if self.__current_file_size > MinRapidUploadFileSize:
self.pd("'{}' is being RapidUploaded.".format(self.__current_file))
result = self.__rapidupload_file(localpath, remotepath, ondup)
if result == ENoError:
self.pv("RapidUpload: '{}' =R=> '{}' OK.".format(localpath, remotepath))
else:
self.pd("'{}' can't be RapidUploaded, now trying normal uploading.".format(
self.__current_file))
# rapid upload failed, we have to upload manually
if self.__current_file_size <= self.__slice_size:
self.pd("'{}' is being non-slicing uploaded.".format(self.__current_file))
# no-slicing upload
result = self.__upload_one_file(localpath, remotepath, ondup)
elif self.__current_file_size <= MaxSliceSize * MaxSlicePieces:
# slice them using slice size
self.pd("'{}' is being slicing uploaded.".format(self.__current_file))
result = self.__upload_file_slices(localpath, remotepath, ondup)
else:
result = EFileTooBig
perr("Error: size of file '{}' - {} is too big".format(
self.__current_file,
self.__current_file_size))
return result
else: # very small file, must be uploaded manually and no slicing is needed
self.pd("'{}' is small and being non-slicing uploaded.".format(self.__current_file))
return self.__upload_one_file(localpath, remotepath, ondup)
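# Summary of the decision flow above (the thresholds are the module-level
# constants used in the code; their concrete values are defined elsewhere):
#   size <= MinRapidUploadFileSize             -> single-shot upload only
#   size >  MinRapidUploadFileSize             -> try rapid upload first, then:
#       size <= slice_size                     -> single-shot upload
#       size <= MaxSliceSize * MaxSlicePieces  -> sliced upload + createsuperfile
#       otherwise                              -> EFileTooBig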
def upload(self, localpath = '', remotepath = '', ondup = "overwrite"):
''' Usage: upload [localpath] [remotepath] [ondup] - \
upload a file or directory (recursively)
localpath - local path; defaults to the current directory '.' if not specified
remotepath - remote path at Baidu Yun (after app root directory at Baidu Yun)
ondup - what to do upon duplication ('overwrite' or 'newcopy'), default: 'overwrite'
'''
# make copies, since arguments are passed by object reference in Python
# and we shall not modify the passed-in parameters
lpath = localpath.rstrip('\\/ ') # no trailing slashes
lpathbase = os.path.basename(lpath)
rpath = remotepath
if not lpath:
# so, if you don't specify the local path, it will always be the current directory
# and thus isdir(localpath) is always true
lpath = os.path.abspath(".")
self.pd("localpath not set, set it to current directory '{}'".format(localpath))
if os.path.isfile(lpath):
self.pd("Uploading file '{}'".format(lpath))
if not rpath or rpath == '/': # to root we go
rpath = lpathbase
if rpath[-1] == '/': # user intends to upload to this DIR
rpath = get_pcs_path(rpath + lpathbase)
else:
rpath = get_pcs_path(rpath)
# avoid uploading a file and destroy a directory by accident
subresult = self.__get_file_info(rpath)
if subresult == ENoError: # remote path exists, check whether it is a dir or a file
if self.__remote_json['isdir']: # do this only for dir
rpath += '/' + lpathbase # rpath is guaranteed not to end with '/'
self.pd("remote path is '{}'".format(rpath))
return self.__upload_file(lpath, rpath, ondup)
elif os.path.isdir(lpath):
self.pd("Uploading directory '{}' recursively".format(lpath))
rpath = get_pcs_path(rpath)
return self.__upload_dir(lpath, rpath, ondup)
else:
perr("Error: invalid local path '{}' for uploading specified.".format(localpath))
return EParameter
def combine(self, remotefile, localfile = '', *args):
''' Usage: combine <remotefile> [md5s] [localfile] - \
try to create a file at PCS by combining slices, having MD5s specified
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
md5s - MD5 digests of the slices, separated by spaces
if not specified, you must specify the 'listfile' using the '-l' or '--list-file' switch on the command line. the MD5 digests will be read from the (text) file, which can store the MD5 digests separated by newlines or spaces
localfile - local file for verification, if not specified, no verification is done
'''
self.__slice_md5s = []
if args:
for arg in args:
self.__slice_md5s.append(arg)
elif self.__list_file_contents:
digests = filter(None, self.__list_file_contents.split())
for d in digests:
self.__slice_md5s.append(d)
else:
perr("You MUST either provide the MD5s through the command line, "
"or using the '-l' ('--list-file') switch to specify "
"the 'listfile' to read MD5s from")
return EArgument
verify = self.__verify
if localfile:
self.__current_file = localfile
self.__current_file_size = getfilesize(localfile)
else:
self.__current_file = '/dev/null' # Force no verify
self.__verify = False
result = self.__combine_file(get_pcs_path(remotefile))
self.__verify = verify
return result
# no longer used
def __get_meta_act(self, r, args):
parse_ok = False
j = r.json()
if 'list' in j:
lj = j['list']
if len(lj) > 0:
self.__remote_json = lj[0] # TODO: ugly patch
# patch for inconsistency between 'list' and 'meta' json
#self.__remote_json['md5'] = self.__remote_json['block_list'].strip('[]"')
self.pd("self.__remote_json: {}".format(self.__remote_json))
parse_ok = True
return ENoError
if not parse_ok:
self.__remote_json = {}
perr("Invalid JSON: {}\n{}".format(j, traceback.format_exc()))
return EInvalidJson
# no longer used
def __get_meta(self, remotefile):
pars = {
'method' : 'meta',
'path' : remotefile }
return self.__get(
PcsUrl + 'file', pars,
self.__get_meta_act)
# NO LONGER IN USE
def __downfile_act(self, r, args):
rfile, offset = args
with open(self.__current_file, 'r+b' if offset > 0 else 'wb') as f:
if offset > 0:
f.seek(offset)
rsize = self.__remote_json['size']
start_time = time.time()
for chunk in r.iter_content(chunk_size = self.__dl_chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
pprgr(f.tell(), rsize, start_time)
# https://stackoverflow.com/questions/7127075/what-exactly-the-pythons-file-flush-is-doing
#os.fsync(f.fileno())
# No exception above, then everything goes fine
result = ENoError
if self.__verify:
self.__current_file_size = getfilesize(self.__current_file)
result = self.__verify_current_file(self.__remote_json, False)
if result == ENoError:
self.pv("'{}' <== '{}' OK".format(self.__current_file, rfile))
else:
perr("'{}' <== '{}' FAILED".format(self.__current_file, rfile))
return result
def __downchunks_act(self, r, args):
rfile, offset, rsize, start_time = args
expectedBytes = self.__dl_chunk_size
if rsize - offset < self.__dl_chunk_size:
expectedBytes = rsize - offset
if len(r.content) != expectedBytes:
return ERequestFailed
else:
with open(self.__current_file, 'r+b' if offset > 0 else 'wb') as f:
if offset > 0:
f.seek(offset)
f.write(r.content)
pos = f.tell()
pprgr(pos, rsize, start_time, existing = self.__existing_size)
if pos - offset == expectedBytes:
return ENoError
else:
return EFileWrite
# requirement: self.__remote_json has already been retrieved
def __downchunks(self, rfile, start):
rsize = self.__remote_json['size']
pars = {
'method' : 'download',
'path' : rfile }
offset = start
self.__existing_size = offset
start_time = time.time()
while True:
nextoffset = offset + self.__dl_chunk_size
if nextoffset < rsize:
headers = { "Range" : "bytes={}-{}".format(
offset, nextoffset - 1) }
else:
headers = { "Range" : "bytes={}-".format(offset) }
subresult = self.__get(DPcsUrl + 'file', pars,
self.__downchunks_act, (rfile, offset, rsize, start_time), headers = headers)
if subresult != ENoError:
return subresult
if nextoffset < rsize:
offset += self.__dl_chunk_size
else:
break
# No exception above, then everything goes fine
result = ENoError
if self.__verify:
self.__current_file_size = getfilesize(self.__current_file)
result = self.__verify_current_file(self.__remote_json, False)
if result == ENoError:
self.pv("'{}' <== '{}' OK".format(self.__current_file, rfile))
else:
perr("'{}' <== '{}' FAILED".format(self.__current_file, rfile))
return result
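# A standalone sketch of the Range-header chunked download pattern used above,
# assuming the 'requests' library this script already depends on; the URL and
# sizes below are hypothetical:
#
#   import requests
#   url = 'https://example.com/some/file'            # hypothetical URL
#   chunk, offset, total = 4 * OneM, 0, 10 * OneM    # OneM = 1 MiB elsewhere
#   with open('out.bin', 'wb') as f:
#       while offset < total:
#           end = min(offset + chunk, total) - 1
#           r = requests.get(url, headers = {'Range': 'bytes={}-{}'.format(offset, end)})
#           f.write(r.content)
#           offset = end + 1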
def __downfile(self, remotefile, localfile):
# TODO: this is a quick patch
if not self.__shallinclude(localfile, remotefile, False):
# since we are not going to download it, there is no error
return ENoError
result = ENoError
rfile = remotefile
self.__remote_json = {}
self.pd("Downloading '{}' as '{}'".format(rfile, localfile))
self.__current_file = localfile
#if self.__verify or self.__resumedownload:
self.pd("Getting info of remote file '{}' for later verification".format(rfile))
result = self.__get_file_info(rfile)
if result != ENoError:
return result
offset = 0
self.pd("Checking if we already have the copy locally")
if os.path.isfile(localfile):
self.pd("Same-name local file '{}' exists, checking if contents match".format(localfile))
self.__current_file_size = getfilesize(self.__current_file)
if ENoError == self.__verify_current_file(self.__remote_json, False):
self.pd("Same local file '{}' already exists, skip downloading".format(localfile))
return ENoError
else:
if not self.shalloverwrite("Same-name locale file '{}' exists but is different, ".format(localfile) + \
"do you want to overwrite it? [y/N]"):
pinfo("Same-name local file '{}' exists but is different, skip downloading".format(localfile))
return ENoError
if self.__resumedownload and \
self.__compare_size(self.__current_file_size, self.__remote_json) == 2:
# revert back at least one download chunk
pieces = self.__current_file_size // self.__dl_chunk_size
if pieces > 1:
offset = (pieces - 1) * self.__dl_chunk_size
elif os.path.isdir(localfile):
if not self.shalloverwrite("Same-name direcotry '{}' exists, ".format(localfile) + \
"do you want to remove it? [y/N]"):
pinfo("Same-name directory '{}' exists, skip downloading".format(localfile))
return ENoError
self.pv("Directory with the same name '{}' exists, removing ...".format(localfile))
result = removedir(localfile, self.Verbose)
if result == ENoError:
self.pv("Removed")
else:
perr("Error removing the directory '{}'".format(localfile))
return result
ldir, file = os.path.split(localfile)
if ldir and not os.path.exists(ldir):
result = makedir(ldir, self.Verbose)
if result != ENoError:
perr("Fail to make directory '{}'".format(ldir))
return result
return self.__downchunks(rfile, offset)
def downfile(self, remotefile, localpath = ''):
''' Usage: downfile <remotefile> [localpath] - \
download a remote file.
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
localpath - local path.
if it ends with '/' or '\\', it specifies the local directory
if it specifies an existing directory, it is the local directory
if not specified, the local directory is the current directory '.'
otherwise, it specifies the local file name
To stream a file using downfile, you can use the 'mkfifo' trick with omxplayer etc.:
mkfifo /tmp/omx
bypy.py downfile <remotepath> /tmp/omx &
omxplayer /tmp/omx
'''
localfile = localpath
if not localpath:
localfile = os.path.basename(remotefile)
elif localpath[-1] == '\\' or \
localpath[-1] == '/' or \
os.path.isdir(localpath):
#localfile = os.path.join(localpath, os.path.basename(remotefile))
localfile = joinpath(localpath, os.path.basename(remotefile))
else:
localfile = localpath
pcsrpath = get_pcs_path(remotefile)
return self.__downfile(pcsrpath, localfile)
def __stream_act_actual(self, r, args):
pipe, csize = args
with open(pipe, 'wb') as f:
for chunk in r.iter_content(chunk_size = csize):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
# https://stackoverflow.com/questions/7127075/what-exactly-the-pythons-file-flush-is-doing
#os.fsync(f.fileno())
def __streaming_act(self, r, args):
return self.__stream_act_actual(r, args)
# NOT WORKING YET
def streaming(self, remotefile, localpipe, fmt = 'M3U8_480_360', chunk = 4 * OneM):
''' Usage: stream <remotefile> <localpipe> [format] [chunk] - \
stream a video / audio file converted to M3U format at cloud side, to a pipe.
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
localpipe - the local pipe file to write to
format - output video format (M3U8_320_240 | M3U8_480_224 | \
M3U8_480_360 | M3U8_640_480 | M3U8_854_480). Default: M3U8_480_360
chunk - chunk (initial buffering) size for streaming (default: 4M)
To stream a file, you can use the 'mkfifo' trick with omxplayer etc.:
mkfifo /tmp/omx
bypy.py downfile <remotepath> /tmp/omx &
omxplayer /tmp/omx
*** NOT WORKING YET ****
'''
pars = {
'method' : 'streaming',
'path' : get_pcs_path(remotefile),
'type' : fmt }
return self.__get(PcsUrl + 'file', pars,
self.__streaming_act, (localpipe, chunk), stream = True)
def __walk_remote_dir_act(self, r, args):
dirjs, filejs = args
j = r.json()
#self.pd("Remote path content JSON: {}".format(j))
paths = j['list']
for path in paths:
if path['isdir']:
dirjs.append(path)
else:
filejs.append(path)
return ENoError
def __walk_remote_dir(self, remotepath, proceed, args = None):
pars = {
'method' : 'list',
'path' : remotepath,
'by' : 'name',
'order' : 'asc' }
# lists are mutable and passed by reference in Python, so they effectively act as 'out' parameters
dirjs = []
filejs = []
result = self.__get(PcsUrl + 'file', pars, self.__walk_remote_dir_act, (dirjs, filejs))
self.pd("Remote dirs: {}".format(dirjs))
self.pd("Remote files: {}".format(filejs))
if result == ENoError:
subresult = proceed(remotepath, dirjs, filejs, args)
if subresult != ENoError:
self.pd("Error: {} while proceeding remote path'{}'".format(
subresult, remotepath))
result = subresult # we continue
for dirj in dirjs:
subresult = self.__walk_remote_dir(dirj['path'], proceed, args)
if subresult != ENoError:
self.pd("Error: {} while sub-walking remote dirs'{}'".format(
subresult, dirjs))
result = subresult
return result
def __prepare_local_dir(self, localdir):
result = ENoError
if os.path.isfile(localdir):
result = removefile(localdir, self.Verbose)
if result == ENoError:
if localdir and not os.path.exists(localdir):
result = makedir(localdir, self.Verbose)
return result
def __proceed_downdir(self, remotepath, dirjs, filejs, args):
result = ENoError
rootrpath, localpath = args
rlen = len(remotepath) + 1 # '+ 1' for the trailing '/', it bites.
rootlen = len(rootrpath) + 1 # ditto
result = self.__prepare_local_dir(localpath)
if result != ENoError:
perr("Fail to create prepare local directory '{}' for downloading, ABORT".format(localpath))
return result
for dirj in dirjs:
reldir = dirj['path'][rlen:]
#ldir = os.path.join(localpath, reldir)
ldir = joinpath(localpath, reldir)
result = self.__prepare_local_dir(ldir)
if result != ENoError:
perr("Fail to create prepare local directory '{}' for downloading, ABORT".format(ldir))
return result
for filej in filejs:
rfile = filej['path']
relfile = rfile[rootlen:]
#lfile = os.path.join(localpath, relfile)
lfile = joinpath(localpath, relfile)
self.__downfile(rfile, lfile)
return result
def downdir(self, remotepath = None, localpath = None):
''' Usage: downdir <remotedir> [localdir] - \
download a remote directory (recursively)
remotedir - remote directory at Baidu Yun (after app root directory at Baidu Yun)
localdir - local directory. if not specified, it is set to the current directory
'''
rpath = get_pcs_path(remotepath)
lpath = localpath
if not lpath:
lpath = '' # empty string does it, no need '.'
lpath = lpath.rstrip('/\\ ')
return self.__walk_remote_dir(rpath, self.__proceed_downdir, (rpath, lpath))
def __mkdir_act(self, r, args):
if self.Verbose:
j = r.json()
pr("path, ctime, mtime, fs_id")
pr("{path}, {ctime}, {mtime}, {fs_id}".format(**j))
return ENoError
def __mkdir(self, rpath, **kwargs):
# TODO: this is a quick patch
# the code still works because Baidu Yun doesn't require the
# parent directory to exist remotely in order to upload / create a file
if not self.__shallinclude('.', rpath, True):
return ENoError
self.pd("Making remote directory '{}'".format(rpath))
pars = {
'method' : 'mkdir',
'path' : rpath }
return self.__post(PcsUrl + 'file', pars, self.__mkdir_act, **kwargs)
def mkdir(self, remotepath):
''' Usage: mkdir <remotedir> - \
create a directory at Baidu Yun
remotedir - the remote directory
'''
rpath = get_pcs_path(remotepath)
return self.__mkdir(rpath)
def __move_act(self, r, args):
j = r.json()
list = j['extra']['list']
fromp = list[0]['from']
to = list[0]['to']
self.pd("Remote move: '{}' =mm-> '{}' OK".format(fromp, to))
# aliases
def mv(self, fromp, to):
return self.move(fromp, to)
def rename(self, fromp, to):
return self.move(fromp, to)
def ren(self, fromp, to):
return self.move(fromp, to)
def move(self, fromp, to):
''' Usage: move/mv/rename/ren <from> <to> - \
move a file / dir remotely at Baidu Yun
from - source path (file / dir)
to - destination path (file / dir)
'''
frompp = get_pcs_path(fromp)
top = get_pcs_path(to)
pars = {
'method' : 'move',
'from' : frompp,
'to' : top }
self.pd("Remote moving: '{}' =mm=> '{}'".format(fromp, to))
return self.__post(PcsUrl + 'file', pars, self.__move_act)
def __copy_act(self, r, args):
j = r.json()
flist = j['extra']['list']
fromp = flist[0]['from']
to = flist[0]['to']
self.pd("Remote copy: '{}' =cc=> '{}' OK".format(fromp, to))
return ENoError
# alias
def cp(self, fromp, to):
return self.copy(fromp, to)
def copy(self, fromp, to):
''' Usage: copy/cp <from> <to> - \
copy a file / dir remotely at Baidu Yun
from - source path (file / dir)
to - destination path (file / dir)
'''
frompp = get_pcs_path(fromp)
top = get_pcs_path(to)
pars = {
'method' : 'copy',
'from' : frompp,
'to' : top }
self.pd("Remote copying '{}' =cc=> '{}'".format(frompp, top))
return self.__post(PcsUrl + 'file', pars, self.__copy_act)
def __delete_act(self, r, args):
rid = r.json()['request_id']
if rid:
pr("Deletion request '{}' OK".format(rid))
pr("Usage 'list' command to confirm")
return ENoError
else:
perr("Deletion failed")
return EFailToDeleteFile
def __delete(self, rpath):
pars = {
'method' : 'delete',
'path' : rpath }
self.pd("Remote deleting: '{}'".format(rpath))
return self.__post(PcsUrl + 'file', pars, self.__delete_act)
# aliases
def remove(self, remotepath):
return self.delete(remotepath)
def rm(self, remotepath):
return self.delete(remotepath)
def delete(self, remotepath):
''' Usage: delete/remove/rm <remotepath> - \
delete a file / dir remotely at Baidu Yun
remotepath - destination path (file / dir)
'''
rpath = get_pcs_path(remotepath)
return self.__delete(rpath)
def __search_act(self, r, args):
print_pcs_list(r.json())
return ENoError
def search(self, keyword, remotepath = None, recursive = True):
''' Usage: search <keyword> [remotepath] [recursive] - \
search for a file using keyword at Baidu Yun
keyword - the keyword to search
remotepath - remote path at Baidu Yun, if not specified, it's app's root directory
recursive - search recursively or not. default is true
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'search',
'path' : rpath,
'wd' : keyword,
're' : '1' if recursive else '0'}
self.pd("Searching: '{}'".format(rpath))
return self.__get(PcsUrl + 'file', pars, self.__search_act)
def __listrecycle_act(self, r, args):
print_pcs_list(r.json())
return ENoError
def listrecycle(self, start = 0, limit = 1000):
''' Usage: listrecycle [start] [limit] - \
list the recycle contents
start - starting point, default: 0
limit - maximum number of items to display. default: 1000
'''
pars = {
'method' : 'listrecycle',
'start' : start,
'limit' : limit }
self.pd("Listing recycle '{}'")
return self.__get(PcsUrl + 'file', pars, self.__listrecycle_act)
def __restore_act(self, r, args):
path = args
pr("'{}' found and restored".format(path))
return ENoError
def __restore_search_act(self, r, args):
path = args
flist = r.json()['list']
fsid = None
for f in flist:
if os.path.normpath(f['path'].lower()) == os.path.normpath(path.lower()):
fsid = f['fs_id']
self.pd("fs_id for restoring '{}' found".format(fsid))
break
if fsid:
pars = {
'method' : 'restore',
'fs_id' : fsid }
return self.__post(PcsUrl + 'file', pars, self.__restore_act, path)
else:
perr("'{}' not found in the recycle bin".format(path))
def restore(self, remotepath):
''' Usage: restore <remotepath> - \
restore a file from the recycle bin
remotepath - the remote path to restore
'''
rpath = get_pcs_path(remotepath)
# by default, only 1000 items, more than that sounds a bit crazy
pars = {
'method' : 'listrecycle' }
self.pd("Searching for fs_id to restore")
return self.__get(PcsUrl + 'file', pars, self.__restore_search_act, rpath)
def __proceed_local_gather(self, dirlen, walk):
#names.sort()
(dirpath, dirnames, filenames) = walk
files = []
for name in filenames:
#fullname = os.path.join(dirpath, name)
fullname = joinpath(dirpath, name)
files.append((name, getfilesize(fullname), md5(fullname)))
reldir = dirpath[dirlen:].replace('\\', '/')
place = self.__local_dir_contents.get(reldir)
for dir in dirnames:
place.add(dir, PathDictTree('D'))
for file in files:
place.add(file[0], PathDictTree('F', size = file[1], md5 = file[2]))
return ENoError
def __gather_local_dir(self, dir):
self.__local_dir_contents = PathDictTree()
for walk in os.walk(dir, followlinks=self.__followlink):
self.__proceed_local_gather(len(dir), walk)
self.pd(self.__local_dir_contents)
def __proceed_remote_gather(self, remotepath, dirjs, filejs, args = None):
# NOTE: the '+ 1' is due to the trailing slash '/'
# be careful about the trailing '/', it bit me once, bitterly
rootrdir = args
rootlen = len(rootrdir)
dlen = len(remotepath) + 1
for d in dirjs:
self.__remote_dir_contents.get(remotepath[rootlen:]).add(
d['path'][dlen:], PathDictTree('D', size = d['size'], md5 = binascii.unhexlify(d['md5'])))
for f in filejs:
self.__remote_dir_contents.get(remotepath[rootlen:]).add(
f['path'][dlen:], PathDictTree('F', size = f['size'], md5 = binascii.unhexlify(f['md5'])))
return ENoError
def __gather_remote_dir(self, rdir):
self.__remote_dir_contents = PathDictTree()
self.__walk_remote_dir(rdir, self.__proceed_remote_gather, rdir)
self.pd("---- Remote Dir Contents ---")
self.pd(self.__remote_dir_contents)
def __compare(self, remotedir = None, localdir = None):
if not localdir:
localdir = '.'
self.pv("Gathering local directory ...")
self.__gather_local_dir(localdir)
self.pv("Done")
self.pv("Gathering remote directory ...")
self.__gather_remote_dir(remotedir)
self.pv("Done")
self.pv("Comparing ...")
# list merge, where Python shines
commonsame = []
commondiff = []
localonly = []
remoteonly = []
# http://stackoverflow.com/questions/1319338/combining-two-lists-and-removing-duplicates-without-removing-duplicates-in-orig
lps = self.__local_dir_contents.allpath()
rps = self.__remote_dir_contents.allpath()
dps = set(rps) - set(lps)
allpath = lps + list(dps)
for p in allpath:
local = self.__local_dir_contents.get(p)
remote = self.__remote_dir_contents.get(p)
if local is None: # must be in the remote dir, since p is from allpath
remoteonly.append((remote.type, p))
elif remote is None:
localonly.append((local.type, p))
else: # all here
same = False
if local.type == 'D' and remote.type == 'D':
type = 'D'
same = True
elif local.type == 'F' and remote.type == 'F':
type = 'F'
if local.extra['size'] == remote.extra['size'] and \
local.extra['md5'] == remote.extra['md5']:
same = True
else:
same = False
else:
type = local.type + remote.type
same = False
if same:
commonsame.append((type, p))
else:
commondiff.append((type, p))
self.pv("Done")
return commonsame, commondiff, localonly, remoteonly
def compare(self, remotedir = None, localdir = None):
''' Usage: compare [remotedir] [localdir] - \
compare the remote directory with the local directory
remotedir - the remote directory at Baidu Yun (after app's directory). \
if not specified, it defaults to the root directory.
localdir - the local directory, if not specified, it defaults to the current directory.
'''
same, diff, local, remote = self.__compare(get_pcs_path(remotedir), localdir)
pr("==== Same files ===")
for c in same:
pr("{} - {}".format(c[0], c[1]))
pr("==== Different files ===")
for d in diff:
pr("{} - {}".format(d[0], d[1]))
pr("==== Local only ====")
for l in local:
pr("{} - {}".format(l[0], l[1]))
pr("==== Remote only ====")
for r in remote:
pr("{} - {}".format(r[0], r[1]))
pr("\nStatistics:")
pr("--------------------------------")
pr("Same: {}".format(len(same)));
pr("Different: {}".format(len(diff)));
pr("Local only: {}".format(len(local)));
pr("Remote only: {}".format(len(remote)));
def syncdown(self, remotedir = '', localdir = '', deletelocal = False):
''' Usage: syncdown [remotedir] [localdir] [deletelocal] - \
sync down from the remote directory to the local directory
remotedir - the remote directory at Baidu Yun (after app's directory) to sync from. \
if not specified, it defaults to the root directory
localdir - the local directory to sync to. if not specified, it defaults to the current directory.
deletelocal - delete local files that are not inside the Baidu Yun directory, default is False
'''
result = ENoError
rpath = get_pcs_path(remotedir)
same, diff, local, remote = self.__compare(rpath, localdir)
# clear the way
for d in diff:
t = d[0]
p = d[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'DF':
result = removedir(lcpath, self.Verbose)
subresult = self.__downfile(rcpath, lcpath)
if subresult != ENoError:
result = subresult
elif t == 'FD':
result = removefile(lcpath, self.Verbose)
subresult = makedir(lcpath, self.Verbose)
if subresult != ENoError:
result = subresult
else: # " t == 'F' " must be true
result = self.__downfile(rcpath, lcpath)
for r in remote:
t = r[0]
p = r[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'F':
subresult = self.__downfile(rcpath, lcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'D' " must be true
subresult = makedir(lcpath, self.Verbose)
if subresult != ENoError:
result = subresult
if deletelocal:
for l in local:
# use os.path.isfile()/isdir() instead of l[0], because we need to check file/dir existence.
# as we may have removed the parent dir previously during the iteration
#p = os.path.join(localdir, l[1])
p = joinpath(localdir, l[1])
if os.path.isfile(p):
subresult = removefile(p, self.Verbose)
if subresult != ENoError:
result = subresult
elif os.path.isdir(p):
subresult = removedir(p, self.Verbose)
if subresult != ENoError:
result = subresult
return result
def syncup(self, localdir = '', remotedir = '', deleteremote = False):
''' Usage: syncup [localdir] [remotedir] [deleteremote] - \
sync up from the local directory to the remote directory
localdir - the local directory to sync from. if not specified, it defaults to the current directory.
remotedir - the remote directory at Baidu Yun (after app's directory) to sync to. \
if not specified, it defaults to the root directory
deleteremote - delete remote files that are not inside the local directory, default is False
'''
result = ENoError
rpath = get_pcs_path(remotedir)
#rpartialdir = remotedir.rstrip('/ ')
same, diff, local, remote = self.__compare(rpath, localdir)
# clear the way
for d in diff:
t = d[0] # type
p = d[1] # path
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if self.shalloverwrite("Do you want to overwrite '{}' at Baidu Yun? [y/N]".format(p)):
# this path is before get_pcs_path() since delete() expects so.
#result = self.delete(rpartialdir + '/' + p)
result = self.__delete(rcpath)
if t == 'F' or t == 'FD':
subresult = self.__upload_file(lcpath, rcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'DF' " must be true
subresult = self.__mkdir(rcpath)
if subresult != ENoError:
result = subresult
else:
pinfo("Uploading '{}' skipped".format(lcpath))
for l in local:
t = l[0]
p = l[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'F':
subresult = self.__upload_file(lcpath, rcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'D' " must be true
subresult = self.__mkdir(rcpath)
if subresult != ENoError:
result = subresult
if deleteremote:
# I think the list is built top-down, so directories appearing later are either
# children or another set of directories
pp = '\\' # previous path; setting it to '\\' makes sure it won't be found in the first step
for r in remote:
#p = rpartialdir + '/' + r[1]
p = rpath + '/' + r[1]
if 0 != p.find(pp): # another path
#subresult = self.delete(p)
subresult = self.__delete(p)
if subresult != ENoError:
result = subresult
pp = p
return result
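# Hedged usage sketch for the sync commands above (paths are illustrative; the
# third positional argument maps to deleteremote / deletelocal as documented):
#   bypy.py syncup ./photos /photos          # mirror local -> remote, keep remote extras
#   bypy.py syncup ./photos /photos True     # also delete remote files missing locally
#   bypy.py syncdown /photos ./photos        # mirror remote -> local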
def dumpcache(self):
''' Usage: dumpcache - display file hash cache'''
if cached.cacheloaded:
#pprint.pprint(cached.cache)
MyPrettyPrinter().pprint(cached.cache)
return ENoError
else:
perr("Cache not loaded.")
return ECacheNotLoaded
def cleancache(self):
''' Usage: cleancache - remove invalid entries from hash cache file'''
if os.path.exists(HashCachePath):
try:
# backup first
backup = HashCachePath + '.lastclean'
shutil.copy(HashCachePath, backup)
self.pd("Hash Cache file '{}' backed up as '{}".format(
HashCachePath, backup))
cached.cleancache()
return ENoError
except:
perr("Exception:\n{}".format(traceback.format_exc()))
return EException
else:
return EFileNotFound
OriginalFloatTime = True
def onexit(retcode = ENoError):
# saving is the most important
# we save, but don't clean, why?
# think about unmount path, moved files,
# once we discard the information, they are gone.
# so unless the user specifically requests a clean,
# we don't act too smart.
#cached.cleancache()
cached.savecache()
os.stat_float_times(OriginalFloatTime)
# if we flush() on Ctrl-C, we get
# IOError: [Errno 32] Broken pipe
sys.stdout.flush()
sys.exit(retcode)
def sighandler(signum, frame):
pr("Signal {} received, Abort".format(signum))
pr("Frame:\n{}".format(frame))
onexit(EAbort)
def main(argv=None): # IGNORE:C0111
''' Main Entry '''
# *** IMPORTANT ***
# We must set this in order for cache to work,
# as we need to get integer file mtime, which is used as the key of Hash Cache
global OriginalFloatTime
OriginalFloatTime = os.stat_float_times()
os.stat_float_times(False)
# --- IMPORTANT ---
result = ENoError
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
if sys.platform == 'win32':
#signal.signal(signal.CTRL_C_EVENT, sighandler)
#signal.signal(signal.CTRL_BREAK_EVENT, sighandler)
# bug, see: http://bugs.python.org/issue9524
pass
else:
signal.signal(signal.SIGBUS, sighandler)
signal.signal(signal.SIGHUP, sighandler)
# https://stackoverflow.com/questions/108183/how-to-prevent-sigpipes-or-handle-them-properly
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
signal.signal(signal.SIGQUIT, sighandler)
signal.signal(signal.SIGSYS, sighandler)
signal.signal(signal.SIGABRT, sighandler)
signal.signal(signal.SIGFPE, sighandler)
signal.signal(signal.SIGILL, sighandler)
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGSEGV, sighandler)
signal.signal(signal.SIGTERM, sighandler)
#program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_longdesc = __import__('__main__').__doc__.split("---\n")[1]
try:
# +++ DEPRECATED +++
# check if ApiKey, SecretKey and AppPcsPath are correctly specified.
#if not ApiKey or not SecretKey or not AppPcsPath:
if False:
ApiNotConfigured = '''
*** ABORT *** Baidu API not properly configured
- Please go to 'http://developer.baidu.com/' and create an application.
- Get the ApiKey, SecretKey and configure the App Path (default: '/apps/bypy/')
- Update the corresponding variables at the beginning of this file, \
right after the '# PCS configuration constants' comment.
- Try to run this program again
*** ABORT ***
'''
pr(ApiNotConfigured)
return EApiNotConfigured
# --- DEPRECATED ---
# setup argument parser
epilog = "Commands:\n"
summary = []
for k, v in ByPy.__dict__.items():
if callable(v) and v.__doc__:
help = v.__doc__.strip()
pos = help.find(ByPy.HelpMarker)
if pos != -1:
pos_body = pos + len(ByPy.HelpMarker)
helpbody = help[pos_body:]
helpline = helpbody.split('\n')[0].strip() + '\n'
if helpline.find('help') == 0:
summary.insert(0, helpline)
else:
summary.append(helpline)
remaining = summary[1:]
remaining.sort()
summary = [summary[0]] + remaining
epilog += ''.join(summary)
parser = ArgumentParser(
description=program_shortdesc + '\n\n' + program_longdesc,
formatter_class=RawDescriptionHelpFormatter, epilog=epilog)
# special
parser.add_argument("--TESTRUN", dest="TESTRUN", action="store_true", default=False, help="Perform python doctest [default: %(default)s]")
parser.add_argument("--PROFILE", dest="PROFILE", action="store_true", default=False, help="Profile the code [default: %(default)s]")
# help, version, program information etc
parser.add_argument('-V', '--version', action='version', version=program_version_message)
#parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]", metavar="path", nargs='+')
# debug, logging
parser.add_argument("-d", "--debug", dest="debug", action="count", default=0, help="enable debugging & logging [default: %(default)s]")
parser.add_argument("-v", "--verbose", dest="verbose", default=0, action="count", help="set verbosity level [default: %(default)s]")
# program tuning, configuration (these will be passed to class ByPy)
parser.add_argument("-r", "--retry", dest="retry", default=5, help="number of retry attempts on network error [default: %(default)i times]")
parser.add_argument("-q", "--quit-when-fail", dest="quit", default=False, help="quit when maximum number of retry failed [default: %(default)s]")
parser.add_argument("-t", "--timeout", dest="timeout", default=60, help="network timeout in seconds [default: %(default)s]")
parser.add_argument("-s", "--slice", dest="slice", default=DefaultSliceSize, help="size of file upload slice (can use '1024', '2k', '3MB', etc) [default: {} MB]".format(DefaultSliceInMB))
parser.add_argument("--chunk", dest="chunk", default=DefaultDlChunkSize, help="size of file download chunk (can use '1024', '2k', '3MB', etc) [default: {} MB]".format(DefaultDlChunkSize / OneM))
parser.add_argument("-e", "--verify", dest="verify", action="store_true", default=False, help="Verify upload / download [default : %(default)s]")
parser.add_argument("-f", "--force-hash", dest="forcehash", action="store_true", default=False, help="force file MD5 / CRC32 calculation instead of using cached values [default: %(default)s]")
parser.add_argument("-l", "--list-file", dest="listfile", default=None, help="input list file (used by some of the commands only [default: %(default)s]")
parser.add_argument("--resume-download", dest="resumedl", default=True, help="resume instead of restarting when downloading if local file already exists [default: %(default)s]")
parser.add_argument("--include-regex", dest="incregex", default='', help="regular expression of files to include. if not specified (default), everything is included. for download, the regex applies to the remote files; for upload, the regex applies to the local files. to exclude files, think about your regex, some tips here: https://stackoverflow.com/questions/406230/regular-expression-to-match-string-not-containing-a-word [default: %(default)s]")
parser.add_argument("--on-dup", dest="ondup", default='overwrite', help="what to do when the same file / folder exists in the destination: 'overwrite', 'skip', 'prompt' [default: %(default)s]")
parser.add_argument("--no-symlink", dest="followlink", action="store_false", default=True, help="DON'T follow symbol links when uploading / syncing up [default: %(default)s]")
parser.add_argument(DisableSslCheckOption, dest="checkssl", action="store_false", default=True, help="DON'T verify host SSL certificate [default: %(default)s]")
# action
parser.add_argument("-c", "--clean", dest="clean", action="count", default=0, help="1: clean settings (remove the token file) 2: clean settings and hash cache [default: %(default)s]")
# the MAIN parameter - what command to perform
parser.add_argument("command", nargs='*', help = "operations (quota / list)")
# Process arguments
args = parser.parse_args()
try:
slice_size = interpret_size(args.slice)
except (ValueError, KeyError):
pr("Error: Invalid slice size specified '{}'".format(args.slice))
return EArgument
try:
chunk_size = interpret_size(args.chunk)
except (ValueError, KeyError):
pr("Error: Invalid slice size specified '{}'".format(args.slice))
return EArgument
if args.TESTRUN:
return TestRun()
if args.PROFILE:
return Profile()
pr("Token file: '{}'".format(TokenFilePath))
pr("Hash Cache file: '{}'".format(HashCachePath))
pr("App root path at Baidu Yun '{}'".format(AppPcsPath))
pr("sys.stdin.encoding = {}".format(sys.stdin.encoding))
pr("sys.stdout.encoding = {}".format(sys.stdout.encoding))
if args.verbose > 0:
pr("Verbose level = {}".format(args.verbose))
pr("Debug = {}".format(args.debug))
pr("----\n")
if os.path.exists(HashCachePath):
cachesize = getfilesize(HashCachePath)
if cachesize > 10 * OneM or cachesize == -1:
pr((
"*** WARNING ***\n"
"Hash Cache file '{0}' is very large ({1}).\n"
"This may affect program's performance (high memory consumption).\n"
"You can first try to run 'bypy.py cleancache' to slim the file.\n"
"But if the file size won't reduce (this warning persists),"
" you may consider deleting / moving the Hash Cache file '{0}'\n"
"*** WARNING ***\n\n\n").format(HashCachePath, si_size(cachesize)))
if args.clean >= 1:
result = removefile(TokenFilePath, args.verbose)
if result == ENoError:
pr("Token file '{}' removed. You need to re-authorize "
"the application upon next run".format(TokenFilePath))
else:
perr("Failed to remove the token file '{}'".format(TokenFilePath))
perr("You need to remove it manually")
if args.clean >= 2:
subresult = removefile(HashCachePath, args.verbose)
if subresult == ENoError:
pr("Hash Cache File '{}' removed.".format(HashCachePath))
else:
perr("Failed to remove the Hash Cache File '{}'".format(HashCachePath))
perr("You need to remove it manually")
result = subresult
return result
if len(args.command) <= 0 or \
(len(args.command) == 1 and args.command[0].lower() == 'help'):
parser.print_help()
return EArgument
elif args.command[0] in ByPy.__dict__: # dir(ByPy), dir(by)
timeout = None
if args.timeout:
timeout = float(args.timeout)
cached.usecache = not args.forcehash
cached.verbose = args.verbose
cached.debug = args.debug
cached.loadcache()
by = ByPy(slice_size = slice_size, dl_chunk_size = chunk_size,
verify = args.verify,
retry = int(args.retry), timeout = timeout,
quit_when_fail = args.quit,
listfile = args.listfile,
resumedownload = args.resumedl,
incregex = args.incregex,
ondup = args.ondup,
followlink = args.followlink,
checkssl = args.checkssl,
verbose = args.verbose, debug = args.debug)
uargs = []
for arg in args.command[1:]:
uargs.append(unicode(arg, SystemEncoding))
result = getattr(by, args.command[0])(*uargs)
else:
pr("Error: Command '{}' not available.".format(args.command[0]))
parser.print_help()
return EParameter
except KeyboardInterrupt:
### handle keyboard interrupt ###
pr("KeyboardInterrupt")
pr("Abort")
except Exception:
perr("Exception occurred:")
pr(traceback.format_exc())
pr("Abort")
# raise
onexit(result)
def TestRun():
import doctest
doctest.testmod()
return ENoError
def Profile():
import cProfile
import pstats
profile_filename = 'bypy_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(ENoError)
def unused():
''' just prevent unused warnings '''
inspect.stack()
if __name__ == "__main__":
main()
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
#!/usr/bin/env python
import sys, os
import pprint
import numpy as np
from optparse import OptionParser
getopt = OptionParser()
getopt.add_option('-d', '--inputdelimiter', dest='inputsep', help='specify the input delimiter used in trace files', default="\t")
getopt.add_option('-D', '--outputdelimiter', dest='outputsep', help='specify the output delimiter for merged traces', default="\t")
getopt.add_option('-H', '--headers', dest='headers', help='file has headers', action='store_true', default=False)
getopt.add_option('-c', '--column', dest='column', help='column of interest', default=2)
getopt.add_option('-w', '--window', dest='window', help='moving average time window', default=1)
getopt.add_option('-o', '--output', dest='output', help='output file name', default="result")
getopt.add_option('-t', '--timestamp', dest='timestamp', help='index of timestamp column', default=1)
getopt.add_option('-u', '--undefined', dest='undefined', help='string that is filled into undefined cells', default="")
getopt.add_option('-b', '--boundaries', dest='bounds', help='maximum fractional deviation of the measured from the expected mac counter for a sample to be considered valid', default=0.1)
#getopt.add_option('-v', '--verbosity', action='count', dest='verbosity', help='set verbosity level', default=1)
(sopts, sargs) = getopt.parse_args()
def print_err(msg):
print >>sys.stderr, "Error: %s" % msg
def near(a, b, eps = 0.0000001):
"""
returns whether numerical values a and b are within a specific epsilon environment
"""
diff = abs(a-b)
return diff < eps
# check for boundaries
try:
errorwindow = float(sopts.bounds)
except:
errorwindow = 0.1
# check if files are specified
try:
fname = sys.argv[-1]
if (not os.path.isfile(fname) or len(sys.argv) <= 1):
raise Exception("")
except:
getopt.print_help()
sys.exit(-1)
fh = open(fname,"r")
inputsep = str(sopts.inputsep)
outputsep = str(sopts.outputsep)
windowsize = 0
try:
windowsize = float(sopts.window)
sopts.column = int(sopts.column)
sopts.timestamp = int(sopts.timestamp)
except:
pass
lf = fh.readline()
if (sopts.headers):
lf = fh.readline()
#hist = {}
windowcount = 0
#fh_hist = open("%s-histogram.csv" % sopts.output, "w")
fh_mean = open("%s-aggregation.csv" % sopts.output, "w")
mmac = {'values' : []}
mtx = {'values' : []}
mrx = {'values' : []}
med = {'values' : []}
# write header colums
fh_mean.write("%s%s%s%s%s%s%s%s%s\n" % ("timestamp", outputsep, "mac_counter_diff", outputsep, "tx_counter_diff", outputsep, "rx_counter_diff", outputsep, "ed_counter_diff"))
def shift_window(ma, window, currentts):
global windowcount
shiftcount = 0
while (ma['from'] <= currentts):
ma['from'] += window
windowcount += 1
shiftcount += 1
ma['to'] = ma['from']
ma['from'] -= window
windowcount -= 1
res = shiftcount > 1
return res # shifted?
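# Worked example of the window arithmetic above (illustrative numbers): with
# window = 1.0 and ma['from'] = 10.0, a sample at currentts = 12.3 advances the
# window three times (to 11, 12, 13) and then steps back one, leaving
# ma['from'] = 12.0, ma['to'] = 13.0 and shiftcount = 3, so the function returns
# True and the caller flushes the sums accumulated for the previous window.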
while (lf != ""):
# values = lf.replace("\n", "").split(inputsep)
timestamp,mac_counter_diff,tx_counter_diff,rx_counter_diff,ed_counter_diff,noise,rssi,nav,tsf_upper,tsf_lower,phy_errors,potential_reset,expected_mac_count = lf.replace("\n", "").split(inputsep)
# currentval = float(values[sopts.column-1])
# if (currentval == sopts.undefined):
# lf = fh.readline()
# continue
# if ((bounds.has_key('min') and mac_counter_diff < bounds['min']) or (bounds.has_key('max') and mac_counter_diff > bounds['max'])):
    try:
        mac = float(mac_counter_diff)
        tx = float(tx_counter_diff)
        rx = float(rx_counter_diff)
        ed = float(ed_counter_diff)
        expected_mac = float(expected_mac_count)
    except:
        lf = fh.readline()
        continue
    # drop samples whose mac counter deviates from the expected count by more than the allowed error window
    if ((mac < expected_mac * (1 - errorwindow)) or (mac > expected_mac * (1 + errorwindow))):
        lf = fh.readline()
        continue
# if (not hist.has_key(currentval)):
# hist[currentval] = 1
# else:
# hist[currentval] += 1
ts = float(timestamp)
if (not mmac.has_key('from')): # start of first time window for moving average
mmac['from'] = ts
mmac['to'] = mmac['from'] + windowsize
if (shift_window(mmac, windowsize, ts)):
meanmac = sum(mmac['values'])
meantx = sum(mtx['values'])
meanrx = sum(mrx['values'])
meaned = sum(med['values'])
fh_mean.write("%f%s%f%s%f%s%f%s%f\n" % (windowcount * windowsize, outputsep, meanmac, outputsep, meantx, outputsep, meanrx, outputsep, meaned))
mmac['values'] = []
mtx = {'values' : []}
mrx = {'values' : []}
med = {'values' : []}
mmac['values'].append(mac)
mtx['values'].append(tx)
mrx['values'].append(rx)
med['values'].append(ed)
lf = fh.readline()
fh_mean.close()
# write histogram
#for skey in hist:
# fh_hist.write("%f%s%d\n" % (skey, outputsep, hist[skey]))
#fh_hist.close()
fh.close()
| 0.030415 |
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of async and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Simplistic implementation of a graph"""
__all__ = ('Node', 'Graph')
class Node(object):
"""A Node in the graph. They know their neighbours, and have an id which should
resolve into a string"""
__slots__ = ('in_nodes', 'out_nodes', 'id')
def __init__(self, id=None):
self.id = id
self.in_nodes = list()
self.out_nodes = list()
def __str__(self):
return str(self.id)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.id)
class Graph(object):
"""A simple graph implementation, keeping nodes and providing basic access and
editing functions. The performance is only suitable for small graphs of not
more than 10 nodes !"""
__slots__ = "nodes"
def __init__(self):
self.nodes = list()
def __del__(self):
"""Deletes bidericational dependencies"""
for node in self.nodes:
node.in_nodes = None
node.out_nodes = None
# END cleanup nodes
# otherwise the nodes would keep floating around
def add_node(self, node):
"""Add a new node to the graph
:return: the newly added node"""
self.nodes.append(node)
return node
def remove_node(self, node):
"""Delete a node from the graph
:return: self"""
try:
del(self.nodes[self.nodes.index(node)])
except ValueError:
return self
# END ignore if it doesn't exist
# clear connections
for outn in node.out_nodes:
del(outn.in_nodes[outn.in_nodes.index(node)])
for inn in node.in_nodes:
del(inn.out_nodes[inn.out_nodes.index(node)])
node.out_nodes = list()
node.in_nodes = list()
return self
def add_edge(self, u, v):
"""Add an undirected edge between the given nodes u and v.
:return: self
:raise ValueError: If the new edge would create a cycle"""
if u is v:
raise ValueError("Cannot connect a node with itself")
# are they already connected ?
if u in v.in_nodes and v in u.out_nodes or \
v in u.in_nodes and u in v.out_nodes:
return self
# END handle connection exists
# cycle check - if we can reach any of the two by following either ones
# history, its a cycle
for start, end in ((u, v), (v,u)):
if not start.in_nodes:
continue
nodes = start.in_nodes[:]
seen = set()
# depth first search - its faster
while nodes:
n = nodes.pop()
if n in seen:
continue
seen.add(n)
if n is end:
raise ValueError("Connecting u with v would create a cycle")
nodes.extend(n.in_nodes)
# END while we are searching
# END for each direction to look
# connection is valid, set it up
u.out_nodes.append(v)
v.in_nodes.append(u)
return self
def input_inclusive_dfirst_reversed(self, node):
"""Return all input nodes of the given node, depth first,
It will return the actual input node last, as it is required
like that by the pool"""
stack = [node]
seen = set()
# depth first
out = list()
while stack:
n = stack.pop()
if n in seen:
continue
seen.add(n)
out.append(n)
# only proceed in that direction if visitor is fine with it
stack.extend(n.in_nodes)
# END call visitor
# END while walking
out.reverse()
return out
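# Hedged usage sketch (illustration only, not part of the original async API);
# it exercises just the Node and Graph classes defined above.
if __name__ == '__main__':
    g = Graph()
    a, b, c = g.add_node(Node('a')), g.add_node(Node('b')), g.add_node(Node('c'))
    g.add_edge(a, b).add_edge(b, c)
    try:
        g.add_edge(c, a)    # closing the cycle a -> b -> c -> a is rejected
    except ValueError:
        pass
    # the queried node comes last, preceded by its (transitive) inputs
    assert [n.id for n in g.input_inclusive_dfirst_reversed(c)] == ['a', 'b', 'c']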
| 0.044745 |
#!/usr/bin/env python
from nose.tools import *
from networkx import *
import networkx as nx
from networkx.convert import *
from networkx.algorithms.operators import *
from networkx.generators.classic import barbell_graph,cycle_graph
from networkx.testing import *
class TestRelabel():
def test_convert_node_labels_to_integers(self):
# test that empty graph converts fine for all options
G=empty_graph()
H=convert_node_labels_to_integers(G,100)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(H.nodes(), [])
assert_equal(H.edges(), [])
for opt in ["default", "sorted", "increasing degree",
"decreasing degree"]:
G=empty_graph()
H=convert_node_labels_to_integers(G,100, ordering=opt)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(H.nodes(), [])
assert_equal(H.edges(), [])
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
G.name="paw"
H=convert_node_labels_to_integers(G)
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
H=convert_node_labels_to_integers(G,1000)
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(H.nodes(), [1000, 1001, 1002, 1003])
H=convert_node_labels_to_integers(G,ordering="increasing degree")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 1)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 3)
H=convert_node_labels_to_integers(G,ordering="decreasing degree")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 3)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 1)
H=convert_node_labels_to_integers(G,ordering="increasing degree",
label_attribute='label')
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 1)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 3)
# check mapping
assert_equal(H.node[3]['label'],'C')
assert_equal(H.node[0]['label'],'D')
assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A')
assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B')
def test_convert_to_integers2(self):
G=empty_graph()
G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
G.name="paw"
H=convert_node_labels_to_integers(G,ordering="sorted")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
H=convert_node_labels_to_integers(G,ordering="sorted",
label_attribute='label')
assert_equal(H.node[0]['label'],'A')
assert_equal(H.node[1]['label'],'B')
assert_equal(H.node[2]['label'],'C')
assert_equal(H.node[3]['label'],'D')
@raises(nx.NetworkXError)
def test_convert_to_integers_raise(self):
G = nx.Graph()
H=convert_node_labels_to_integers(G,ordering="increasing age")
def test_relabel_nodes_copy(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_function(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
# function mapping no longer encouraged but works
def mapping(n):
return ord(n)
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), [65, 66, 67, 68])
def test_relabel_nodes_graph(self):
G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_digraph(self):
G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_multigraph(self):
G=MultiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_edges_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_nodes_multidigraph(self):
G=MultiDiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
@raises(KeyError)
def test_relabel_nodes_missing(self):
G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={0:'aardvark'}
G=relabel_nodes(G,mapping,copy=False)
def test_relabel_toposort(self):
K5=nx.complete_graph(4)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
def test_relabel_selfloop(self):
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert_equal(sorted(G.nodes()),['One','Three','Two'])
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert_equal(sorted(G.nodes()),['One','Three','Two'])
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert_equal(G.nodes(), [0])
| 0.030253 |
import matplotlib.pyplot as mpl
import numpy as np
from matplotlib.dates import *
import os
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter, DayLocator, HourLocator, WeekdayLocator
class Plot:
def __init__(self, file):
self.file = file
self._setDefaults()
self.showSkill = False
self.dst = False
def setDst(self, flag):
self.dst = flag
def _setDefaults(self):
self.ms = 4.0 # Marker size
self.lw = 1
self.green = [0,1,0]
self.blue = [0,0,1]
self.red = [1,0,0]
self.imgRes = 100
self.showX = 1
self.showTitle = 1
self.fs = 10
self.labelFs = 10
self.showGrid = 1
self.minOffset = np.nan;
self.maxOffset = np.nan;
def setOffsets(self, offsets):
self.offsets = offsets
def plot(self, ax):
self.plotCore(ax)
if(self.showGrid):
mpl.grid('on')
else:
mpl.grid('off')
def plotCore(self, ax):
assert False, "Not implemented"
def labelAxes(self):
mpl.xlabel("Time (PDT)", fontsize=self.labelFs, position=[0.5,0.1])
def disableX(self):
self.showX = 0
def disableTitle(self):
self.showTitle = 0;
# When set to true, will colour the ensemble dots based on how skillful they are predicted to be.
# Useful for analogs
def setShowSkill(self, b):
self.showSkill = b;
def setFontSize(self, fs):
self.fs = fs
# Fill an area along x, between yLower and yUpper
    # Both yLower and yUpper must correspond to points in x (i.e. be in the same order)
def _fill(self, x, yLower, yUpper, col, alpha=1, zorder=0):
# This approach doesn't work, because it doesn't remove points with missing x or y
#X = np.hstack((x, x[::-1]))
#Y = np.hstack((yLower, yUpper[::-1]))
# Populate a list of non-missing points
X = list()
Y = list()
for i in range(0,len(x)):
if(not( np.isnan(x[i]) or np.isnan(yLower[i]))):
X.append(x[i])
Y.append(yLower[i])
for i in range(len(x)-1, -1, -1):
if(not (np.isnan(x[i]) or np.isnan(yUpper[i]))):
X.append(x[i])
Y.append(yUpper[i])
mpl.fill(X, Y, facecolor=col, alpha=alpha,linewidth=0, zorder=zorder)
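    # Hedged worked example (illustrative): _fill([1, 2, 3], [0, 0, 0],
    # [1, np.nan, 1], 'b') builds the polygon X=[1, 2, 3, 3, 1],
    # Y=[0, 0, 0, 1, 1]; the NaN in yUpper is skipped rather than breaking
    # the filled area.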
# Generic (abstract) plot with time as x-axis
class TimePlot(Plot):
def __init__(self, file):
Plot.__init__(self, file)
self.shortRange = True
def setShortRange(self, flag):
self.shortRange = flag
def _xAxis(self, ax):
# X-axis labels
# Don't create ticks when the x-axis range is too big. Likely this is because of
# a problem with the input data. Some versions of python crash when trying to
# create too many ticks
range = mpl.xlim()
if(range[1] - range[0] < 100):
if(self.shortRange):
mpl.gca().xaxis.set_major_locator(DayLocator(interval=1))
mpl.gca().xaxis.set_minor_locator(HourLocator(interval=6))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n %a %d %b %Y'))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('%H'))
else:
mpl.gca().xaxis.set_major_locator(WeekdayLocator(byweekday=(MO,TU,WE,TH,FR)))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n%Y-%m-%d'))
mpl.gca().xaxis.set_minor_locator(WeekdayLocator(byweekday=(SA,SU)))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('\n%Y-%m-%d'))
mpl.xticks(rotation=90)
if(self.showX):
mpl.xlabel('Date', fontsize=self.labelFs)
majlabels = [tick.label1 for tick in mpl.gca().xaxis.get_major_ticks()]
for i in majlabels:
# Don't show the last label, since it will be outside the range
if(i == majlabels[len(majlabels)-1]):
i.set_visible(0)
if(not self.showX):
i.set_visible(0);
else:
if(self.shortRange):
i.set_horizontalalignment('left')
i.set_position((0,-0.035))
else:
i.set_horizontalalignment('right')
i.set_rotation(30);
i.set_verticalalignment('top')
i.set_fontsize(self.fs)
i.set_position((0,-0.035))
minlabels = [tick.label1 for tick in mpl.gca().xaxis.get_minor_ticks()]
for i in minlabels:
if(not self.showX):
i.set_visible(0);
else:
if(self.shortRange):
i.set_horizontalalignment('center')
i.set_rotation(0);
i.set_color("k")
else:
i.set_horizontalalignment('right')
i.set_rotation(30);
i.set_color((1,0,1)) # Weekend days are magenta
i.set_verticalalignment('top')
i.set_fontsize(self.fs)
ylabels = [tick.label1 for tick in mpl.gca().yaxis.get_major_ticks()]
for i in ylabels:
i.set_fontsize(self.fs)
# Gridlines
mpl.gca().xaxis.grid(True, which='major', color='k', zorder=-10, linestyle='-')
if(self.shortRange):
mpl.gca().xaxis.grid(True, which='minor', color='k', zorder=0, linestyle=':')
else:
mpl.gca().xaxis.grid(True, which='minor', color=(1,0,1), zorder=0, linestyle='-')
minOffset = min(self.file.getOffsets())
maxOffset = max(self.file.getOffsets())
if(not np.isnan(self.maxOffset)):
maxOffset = minOffset + self.maxOffset/24.0
mpl.xlim(minOffset, maxOffset)
def setOffsetRange(self, min, max):
self.minOffset = min;
self.maxOffset = max;
def getMarkerSize(self, i):
if(not self.showSkill):
mss = 6
elif(i == 0):
mss = self.ms
elif(i == 1):
mss = self.ms*28/36
elif(i == 2):
mss = self.ms*16/36
else:
mss = self.ms*10/36
return mss
def getMarkerColor(self, i):
if(not self.showSkill):
col = 'k'
elif(i == 0):
col = [0,0,0.6]
elif(i == 1):
col = [0.3,0.3,1]
elif(i == 2):
col = [0.7,0.7,1]
else:
col = self.red
return col
def getMarkerEdgeColor(self, i):
if(not self.showSkill):
mec = 'w'
elif(i < 3):
mec = 'k'
else:
mec = self.getMarkerColor(i)
return mec
def getMarkerStyle(self, i):
if(not self.showSkill):
mstyle = '.'
else:
mstyle = 'o'
return mstyle
###########################
# Single click meteograms #
###########################
class MeteoPlot(TimePlot):
def __init__(self, file):
Plot.__init__(self, file)
# Default colours (not used, always overridden)
self.col = [0,0,0]
self.shading = [0.1,0.1,0.1]
# Opaqueness of the shading
self.alpha = 0.3
# Set the size and position of the axis in the figure
mpl.gca().get_axes().set_position([0.1, 0.2, 0.87, 0.75])
self.setStyle("ubc")
# Set the style of the plots:
# cmbc:
def setStyle(self, style):
if(not (style == "ubc" or style == "cmbc")):
            raise ValueError("Unknown meteogram style: %s" % style)
self.style = style
if(style == "ubc"):
self.gridStyle = '--';
elif(style == "cmbc"):
self.gridStyle = '-';
# Set colour of lines and shadings for current plot
def setCol(self, col):
self.col = col;
shade = 0.6
# Shading should be: [1 shade shade] or [shade shade 1]
self.shading = [(col[0]>0)*(1-shade)+shade, (col[1]>0)*(1-shade)+shade, (col[2]>0)*(1-shade)+shade]
def plotCore(self,ax):
ens = self.file.getEnsemble()
obs = self.file.getObs()
dets = self.file.getDeterministic()
# Plots mean
mpl.plot(ens['offsets'], dets['values'], '-', color=self.col);
# Plot shading
self._fill(ens['offsets'], ens['values'][:,0], ens['values'][:,2], self.shading,
self.alpha, zorder=-20)
# Plot obs
mpl.plot(obs['offsets'], obs['values'],'.', color=self.col);
var = self.file.getVariable()
mpl.ylabel(var['name'] + " (" + var['units'] + ")", fontsize=self.labelFs)
self._xAxis(ax)
self._yAxis(ax)
def _yAxis(self, ax):
if(self.style == "cmbc"):
#mpl.gca().yaxis.set_major_locator(MultipleLocator(2))
[y_start, y_end] = mpl.ylim();
'''
y_start = min(ylims[:,0]);
y_end = max(ylims[:,1]);
# Determine ylimits
if(y_start == -999):
y_start = -20
else:
y_start = np.floor(y_start/2)*2
if(y_end == -999):
y_end = 10
else:
y_end = np.ceil(y_end/2)*2
# Always show at least down to -5
if(y_start > -5):
y_start = -5;
mpl.ylim([y_start, y_end]);
'''
[y_start, y_end] = mpl.ylim();
# Format x-axis
def _xAxis(self, ax):
# Set range
mpl.xlim(np.floor(min(self.file.getOffsets())), np.floor(max(self.file.getOffsets())))
# X-axis labels
mpl.gca().xaxis.set_major_locator(DayLocator(interval=1))
mpl.gca().xaxis.set_minor_locator(HourLocator(interval=6))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n %a %d %b %Y'))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('%H'))
# Hour labels
minlabels = [tick.label1 for tick in mpl.gca().xaxis.get_minor_ticks()]
for i in minlabels:
i.set_fontsize(12)
# Date labels
majlabels = [tick.label1 for tick in mpl.gca().xaxis.get_major_ticks()]
counter = 0
numLabels = 4;
for i in majlabels:
if(counter < numLabels):
i.set_horizontalalignment('left')
i.set_verticalalignment('top')
i.set_fontsize(12)
i.set_position((0,-0.035)) # Moves major labels to the top of the graph
# The x-coordinate seems to be irrelevant. When y-coord
# is 1, the label is near the top. For 1.1 it is above the graph
else:
i.set_visible(0) # Turn off the last date label, since it is outside the graph
counter = counter + 1
mpl.gca().xaxis.grid(True, which='major', color='k', zorder=-10, linestyle='-', linewidth=2)
mpl.gca().xaxis.grid(True, which='minor', color='k', zorder=0, linestyle=self.gridStyle)
mpl.gca().yaxis.grid(True, which='major', color='k', zorder=0)
if(self.dst):
tzLabel = "PDT"
else:
tzLabel = "PST"
mpl.xlabel("Past Time (" + tzLabel + ") Future", fontsize=15, position=[0.5, 0.1])
mpl.gcf().set_size_inches(12,4)
class CdfPlot(TimePlot):
def __init__(self, file):
TimePlot.__init__(self, file)
self._showEns = True
self._showProb = True
self._showObs = True
def setShowEns(self, flag):
self._showEns = flag
def setShowProb(self, flag):
self._showProb = flag
def setShowObs(self, flag):
self._showObs = flag
def plotCore(self, ax):
ens = self.file.getEnsemble()
self._plotObs(ax)
self._plotDeterministic(ax)
if(self._showEns):
self._plotEnsemble(ax)
if(self._showProb):
self._plotProb(ax)
var = self.file.getVariable()
mpl.ylabel(var['name'] + " (" + var['units'] + ")", fontsize=self.labelFs)
self._xAxis(ax)
if(self.showTitle):
loc = self.file.getLocation()
mpl.title('Meteogram for ' + "%d %2.2f %2.2f" % (loc['id'],loc['lat'], loc['lon']), fontsize=self.fs);
def _plotObs(self, ax):
if(self._showObs):
obs = self.file.getObs()
mpl.plot(obs['offsets'], obs['values'], 'o-', mfc='w', mew=2, color=self.red,
mec=self.red, ms=self.ms*3/4, lw=self.lw, label="Obs", zorder=5)
# Draw one dot for each ensemble member
def _plotEnsemble(self, ax):
ens = self.file.getEnsemble()
nMembers = ens['values'].shape[1]
for i in range(0,nMembers):
col = self.getMarkerColor(i)
mss = self.getMarkerSize(i)
mec = self.getMarkerEdgeColor(i)
mstyle = self.getMarkerStyle(i)
if(i == 0):
mpl.plot(ens['offsets'], ens['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col,
label="Ens members");
else:
mpl.plot(ens['offsets'], ens['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col);
def plotMember(self, ax, member, col, name=""):
ens = self.file.getEnsemble()
mss = 5
mec = 'k'
mpl.plot(ens['offsets'], ens['values'][:,member], '-s', lw=2, mfc=col, color=col, mec=mec, ms=mss, label=name);
# Plots CDF lines
def _plotProb(self, ax):
cdf = self.file.getCdfs()
nLines = cdf['values'].shape[1]
for i in range(nLines-1,-1,-1):
if(i < (nLines-1)/2.0):
var = 1-float(i)/((nLines-1)/2);
ec = [0,0,var]; # Edgecolour
faceCol = var
else:
var = (i - (nLines-1)/2.0)/(float(nLines-1)/2+1)
ec = [var,0,0];
faceCol = (i+1- (nLines-1)/2.0)/(float(nLines-1)/2)
if(i == (nLines-1)/2.0):
ec = [0,1,0]
col = [faceCol,faceCol,faceCol];
if(i == 0 or i == nLines-1):
mstyle = '--'
else:
mstyle = '-'
lbl = "%d" % (round(cdf['cdfs'][i]*100.0)) + "%"
mpl.plot(cdf['offsets'], cdf['values'][:,i], mstyle, color=ec, lw=self.lw,
label=lbl, zorder=-10);
if(i < nLines-1):
# Only plot if not all values are missing
if(sum(np.isnan(cdf['values'][:,i])) < len(cdf['values'][:,0])):
self._fill(cdf['offsets'], cdf['values'][:,i], cdf['values'][:,i+1], col,
zorder=-20)
def _plotDeterministic(self, ax):
dets = self.file.getDeterministic()
mpl.plot(dets['offsets'], dets['values'], 'o-', mfc=[1,1,1], mew=2,
color=self.green, mec=self.green, ms=self.ms*3/4, lw=self.lw,
label="Deterministic");
class DiscretePlot(TimePlot):
def __init__(self, file):
Plot.__init__(self, file)
self.invertY = 0;
def setInvertY(self, flag):
self.invertY = flag
def plotCore(self, ax):
self._plotProb(ax)
var = self.file.getVariable()
if(var['name'] == "Precip24"):
ylab = "Prob of Precip (%)"
else:
ylab = "Probability (%)"
mpl.ylabel(ylab, fontsize=self.labelFs)
self._xAxis(ax)
mpl.ylim([0,100]);
if(self.showTitle):
mpl.title('Meteogram for ' + str(self.file.getLocation()['id']), fontsize=self.fs);
# Plots CDF lines
def _plotProb(self, ax):
p0 = self.file.getLowerDiscrete()
y = p0['values'][:]
if(self.invertY):
y = 1 - y;
mpl.plot(p0['offsets'], 100*y, 'k-', mew=2);
# Shows which dates were used to construct ensemble. Useful for analogs.
class DatesPlot(TimePlot):
def plotCore(self, ax):
dates = self.file.getDates()
nMembers = dates['values'].shape[1]
# Only draw if there are valid points.
# mpl crashes otherwise
if(dates['values'].size - np.isnan(dates['values']).sum() > 0):
mpl.gca().yaxis.set_major_locator(YearLocator())
mpl.gca().yaxis.set_major_formatter(DateFormatter('\n%b/%Y'))
mpl.gca().yaxis.set_minor_locator(MonthLocator(interval=1))
for i in range(0, nMembers):
col = self.getMarkerColor(i)
mss = self.getMarkerSize(i)
mec = self.getMarkerEdgeColor(i)
mstyle = self.getMarkerStyle(i)
mpl.plot(dates['offsets'], dates['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col);
self._xAxis(ax)
mpl.title('Dates used to construct ensembles', fontsize=self.fs)
| 0.030657 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import Lock
from copy import deepcopy
from time import sleep
from functools import wraps
from powerline.renderer import Renderer
from powerline.lib.config import ConfigLoader
from powerline import Powerline
from tests.lib import Args, replace_attr
class TestHelpers(object):
def __init__(self, config):
self.config = config
self.access_log = []
self.access_lock = Lock()
def loader_condition(self, path):
return (path in self.config) and path
def find_config_files(self, cfg_path, config_loader, loader_callback):
if cfg_path.endswith('.json'):
cfg_path = cfg_path[:-5]
if cfg_path.startswith('/'):
cfg_path = cfg_path.lstrip('/')
with self.access_lock:
self.access_log.append('check:' + cfg_path)
if cfg_path in self.config:
yield cfg_path
else:
if config_loader:
config_loader.register_missing(self.loader_condition, loader_callback, cfg_path)
raise IOError(('fcf:' if cfg_path.endswith('raise') else '') + cfg_path)
def load_json_config(self, config_file_path, *args, **kwargs):
if config_file_path.endswith('.json'):
config_file_path = config_file_path[:-5]
if config_file_path.startswith('/'):
config_file_path = config_file_path.lstrip('/')
with self.access_lock:
self.access_log.append('load:' + config_file_path)
try:
return deepcopy(self.config[config_file_path])
except KeyError:
raise IOError(config_file_path)
def pop_events(self):
with self.access_lock:
r = self.access_log[:]
self.access_log = []
return r
def log_call(func):
@wraps(func)
def ret(self, *args, **kwargs):
self._calls.append((func.__name__, args, kwargs))
return func(self, *args, **kwargs)
return ret
class TestWatcher(object):
events = set()
lock = Lock()
def __init__(self):
self._calls = []
@log_call
def watch(self, file):
pass
@log_call
def __call__(self, file):
with self.lock:
if file in self.events:
self.events.remove(file)
return True
return False
def _reset(self, files):
with self.lock:
self.events.clear()
self.events.update(files)
@log_call
def unsubscribe(self):
pass
class Logger(object):
def __init__(self):
self.messages = []
self.lock = Lock()
def _add_msg(self, attr, msg):
with self.lock:
self.messages.append(attr + ':' + msg)
def _pop_msgs(self):
with self.lock:
r = self.messages
self.messages = []
return r
def __getattr__(self, attr):
return lambda *args, **kwargs: self._add_msg(attr, *args, **kwargs)
class SimpleRenderer(Renderer):
def hlstyle(self, fg=None, bg=None, attr=None):
return '<{fg} {bg} {attr}>'.format(fg=fg and fg[0], bg=bg and bg[0], attr=attr)
class EvenSimplerRenderer(Renderer):
def hlstyle(self, fg=None, bg=None, attr=None):
return '{{{fg}{bg}{attr}}}'.format(
fg=fg and fg[0] or '-',
bg=bg and bg[0] or '-',
attr=attr if attr else '',
)
class TestPowerline(Powerline):
_created = False
def __init__(self, _helpers, **kwargs):
super(TestPowerline, self).__init__(**kwargs)
self._helpers = _helpers
self.find_config_files = _helpers.find_config_files
@staticmethod
def get_local_themes(local_themes):
return local_themes
@staticmethod
def get_config_paths():
return ['']
def _will_create_renderer(self):
return self.cr_kwargs
def _pop_events(self):
return self._helpers.pop_events()
renderer = EvenSimplerRenderer
class TestConfigLoader(ConfigLoader):
def __init__(self, _helpers, **kwargs):
watcher = TestWatcher()
super(TestConfigLoader, self).__init__(
load=_helpers.load_json_config,
watcher=watcher,
watcher_type='test',
**kwargs
)
def get_powerline(config, **kwargs):
helpers = TestHelpers(config)
return get_powerline_raw(
helpers,
TestPowerline,
_helpers=helpers,
ext='test',
renderer_module='tests.lib.config_mock',
logger=Logger(),
**kwargs
)
def select_renderer(simpler_renderer=False):
global renderer
renderer = EvenSimplerRenderer if simpler_renderer else SimpleRenderer
def get_powerline_raw(helpers, PowerlineClass, **kwargs):
if not isinstance(helpers, TestHelpers):
helpers = TestHelpers(helpers)
select_renderer(kwargs.pop('simpler_renderer', False))
pl = PowerlineClass(
config_loader=TestConfigLoader(
_helpers=helpers,
run_once=kwargs.get('run_once')
),
**kwargs
)
pl._watcher = pl.config_loader.watcher
return pl
def swap_attributes(config, powerline_module):
return replace_attr(powerline_module, 'os', Args(
path=Args(
isfile=lambda path: path.lstrip('/').replace('.json', '') in config,
join=os.path.join,
expanduser=lambda path: path,
realpath=lambda path: path,
dirname=os.path.dirname,
),
environ={},
))
def add_watcher_events(p, *args, **kwargs):
if isinstance(p._watcher, TestWatcher):
p._watcher._reset(args)
while not p._will_create_renderer():
sleep(kwargs.get('interval', 0.1))
if not kwargs.get('wait', True):
return
| 0.029295 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Jan 24, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jan 24, 2012"
import os
import unittest
from pymatgen.core.structure import Structure
from pymatgen.io.cssr import Cssr
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
class CssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.cssr = Cssr(p.structure)
def test_str(self):
expected_string = """10.4118 6.0672 4.7595
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.2187 0.7500 0.4749
2 Fe 0.2813 0.2500 0.9749
3 Fe 0.7187 0.7500 0.0251
4 Fe 0.7813 0.2500 0.5251
5 P 0.0946 0.2500 0.4182
6 P 0.4054 0.7500 0.9182
7 P 0.5946 0.2500 0.0818
8 P 0.9054 0.7500 0.5818
9 O 0.0434 0.7500 0.7071
10 O 0.0966 0.2500 0.7413
11 O 0.1657 0.0461 0.2854
12 O 0.1657 0.4539 0.2854
13 O 0.3343 0.5461 0.7854
14 O 0.3343 0.9539 0.7854
15 O 0.4034 0.7500 0.2413
16 O 0.4566 0.2500 0.2071
17 O 0.5434 0.7500 0.7929
18 O 0.5966 0.2500 0.7587
19 O 0.6657 0.0461 0.2146
20 O 0.6657 0.4539 0.2146
21 O 0.8343 0.5461 0.7146
22 O 0.8343 0.9539 0.7146
23 O 0.9034 0.7500 0.2587
24 O 0.9566 0.2500 0.2929"""
self.assertEqual(str(self.cssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "Si.cssr")
cssr = Cssr.from_file(filename)
self.assertIsInstance(cssr.structure, Structure)
if __name__ == "__main__":
unittest.main()
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from six.moves import urllib
from girder.api.rest import getApiUrl, RestException
from .base import ProviderBase
from .. import constants
class Globus(ProviderBase):
_AUTH_URL = 'https://auth.globus.org/v2/oauth2/authorize'
_AUTH_SCOPES = ['urn:globus:auth:scope:auth.globus.org:view_identities',
'openid', 'profile', 'email']
_TOKEN_URL = 'https://auth.globus.org/v2/oauth2/token'
_API_USER_URL = 'https://auth.globus.org/v2/oauth2/userinfo'
def getClientIdSetting(self):
return self.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_ID)
def getClientSecretSetting(self):
return self.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_SECRET)
@classmethod
def getUrl(cls, state):
clientId = cls.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_ID)
if clientId is None:
raise Exception('No Globus client ID setting is present.')
callbackUrl = '/'.join((getApiUrl(), 'oauth', 'globus', 'callback'))
query = urllib.parse.urlencode({
'response_type': 'code',
'access_type': 'online',
'client_id': clientId,
'redirect_uri': callbackUrl,
'state': state,
'scope': ' '.join(cls._AUTH_SCOPES)
})
return '%s?%s' % (cls._AUTH_URL, query)
def getToken(self, code):
params = {
'grant_type': 'authorization_code',
'code': code,
'client_id': self.clientId,
'client_secret': self.clientSecret,
'redirect_uri': self.redirectUri
}
resp = self._getJson(method='POST', url=self._TOKEN_URL,
data=params,
headers={'Accept': 'application/json'})
if 'error' in resp:
raise RestException(
'Got an error exchanging token from provider: "%s".' % resp,
code=502)
return resp
def getUser(self, token):
headers = {
'Authorization': 'Bearer {}'.format(token['access_token'])
}
resp = self._getJson(method='GET', url=self._API_USER_URL,
headers=headers)
oauthId = resp.get('sub')
if not oauthId:
raise RestException(
'Globus identity did not return a valid ID.', code=502)
email = resp.get('email')
if not email:
raise RestException(
'Globus identity did not return a valid email.', code=502)
name = resp['name'].split()
firstName = name[0]
lastName = name[-1]
return self._createOrReuseUser(oauthId, email, firstName, lastName)
| 0 |
from collections import Counter
from common.string import strip_html
from common.string import unescapeHTMLEntities
from common.funcfun import lmap, nothing
from functools import reduce
from operator import add
from preprocess.words import getstem, getWordsWithoutStopWords, stemAndRemoveAccents, stripAccents
from preprocess.html_remover import HTMLRemover
from common.string import normalize_text
from retrieval.ranking import document_score
import os
def getDocsStats(documents):
counters = list(enumerate(map(lambda x: Counter(x), documents)))
allwords = reduce(add, documents, [])
allwords_counter = Counter(allwords)
words = sorted(set(allwords))
occurencesIndex = lmap(lambda x: ((x, allwords_counter[x]), occurences(counters, x)), words)
wordscount = lmap(len, documents)
return {'allwords' : allwords_counter, 'occurences' : occurencesIndex, 'wordscount' : wordscount}
def occurences(counters, word):
return lmap(lambda x: (x[0], x[1][word]), filter(lambda x: word in x[1], counters))
def groupByKeylen(database, keylen):
dic = {}
for record in database:
key = record[0][0][:keylen]
if key in dic:
dic[key].append(record)
else:
dic[key] = [record]
return dic
def getKeywords(documents, index, elapsed, lang):
keywords = []
for doc in documents:
elapsed('getting keywords from ' + doc['url'])
distContent = set(doc['content']) #{getstem(x, lang) for x in set(doc['content'])}
keyValues = {}
for stem in distContent:
keyValues[stem] = round(document_score([stem], doc['id'], index, doc['words']), 8)
foo = sorted(keyValues.items(), key=lambda x: x[1], reverse = True)
keywords.append(foo)
return keywords
def toIndex(documents, stopwords, keylen, lang, elapsed = nothing):
htmlrem = HTMLRemover()
compiledDocuments = []
docID = 0
allRealWords = set()
for doc in documents:
try:
elapsed('parsing: ' + doc['url'])
if doc['type'] in ['html', 'txt']:
if doc['type'] == 'html':
content = unescapeHTMLEntities(doc['content'])
try:
content = htmlrem.getText(content)
except Exception:
content = strip_html(content)
title = htmlrem.title
description = htmlrem.description
if not title:
title = os.path.basename(doc['url'])
if doc['type'] == 'txt':
content = doc['content']
title = doc.get('title', os.path.basename(doc['url']))
description = doc.get('description', '')
words = getWordsWithoutStopWords(normalize_text(content), stopwords)
allRealWords |= stripAccents(words)
if words:
compiledDocuments.append({
'pureContent':words,
'content':stemAndRemoveAccents(words, lang),
'title':title,
'url':doc['url'],
'id':docID,
'description':description,
})
docID += 1
except Exception as err:
print('Cannot parse ' + str(doc['url']))
print(str(err))
if not compiledDocuments:
raise Exception('No document parsed')
elapsed('Collecting documents...')
sitesStats = getDocsStats([x['content'] for x in compiledDocuments])
for doc, wordscount in zip(compiledDocuments, sitesStats['wordscount']):
doc['words'] = wordscount
index = groupByKeylen(sitesStats['occurences'], keylen)
return {'index': index, 'allwords':sitesStats['allwords'],
'documents':compiledDocuments, 'allRealWords':allRealWords}
 | 0.036527 |
import CommonFunctions as common
import urllib
import urllib2
import os
import sys
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
import urlfetch
import re
import random
import time
from BeautifulSoup import BeautifulSoup
__settings__ = xbmcaddon.Addon(id='plugin.video.vietmovie')
__language__ = __settings__.getLocalizedString
home = __settings__.getAddonInfo('path')
icon = xbmc.translatePath( os.path.join( home, 'icon.png' ) )
thumbnails = xbmc.translatePath( os.path.join( home, 'thumbnails\\' ) )
__thumbnails = []
def get_thumbnail_url():
global __thumbnails
url = ''
try:
if len(__thumbnails) == 0:
content = make_request('https://raw.github.com/onepas/xbmc-addons/master/thumbnails/thumbnails.xml')
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
__thumbnails = soup.findAll('thumbnail')
url = random.choice(__thumbnails).text
except:
pass
return url
def _makeCookieHeader(cookie):
cookieHeader = ""
for value in cookie.values():
cookieHeader += "%s=%s; " % (value.key, value.value)
return cookieHeader
def make_request(url, params = None, headers=None):
if headers is None:
headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20100101 Firefox/15.0.1',
'Referer' : 'http://www.google.com'}
try:
data = None
if params is not None:
data = urllib.urlencode(params)
req = urllib2.Request(url,data,headers)
f = urllib2.urlopen(req)
body=f.read()
return body
except urllib2.URLError, e:
print 'We failed to open "%s".' % url
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
if hasattr(e, 'code'):
print 'We failed with error code - %s.' % e.code
def build_menu():
content = make_request('http://megabox.vn/')
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
items = soup.find('div',{'class' : 'navTop'}).findAll('div')
add_dir('[COLOR red]Nổi bật/Mới nhất[/COLOR]','', 4, get_thumbnail_url())
for item in items:
if item.parent.a.text != u'Thiết Bị' and item.parent.a.text != u'Videos':
prefix = item.parent.a.text
subItems = item.findAll('li')
for subItem in subItems:
if subItem.get('style') is None and subItem.a.get('style') is None:
title = '(' + prefix + ') ' + subItem.a.text
url = 'http://megabox.vn/' + subItem.a.get('href')
if 'the-loai' in url:
add_dir(title, url, 3, get_thumbnail_url())
else:
add_dir(title, url, 1, get_thumbnail_url())
add_dir('Tìm kiếm','', 100, get_thumbnail_url())
def home_hot():
add_dir('Dành cho bạn','http://megabox.vn/danh-cho-ban.html', 1, get_thumbnail_url())
add_dir('Mới ra lò','http://megabox.vn/moi-ra-lo.html', 1, get_thumbnail_url())
add_dir('Xem nhiều nhất','http://megabox.vn/hot.html', 1, get_thumbnail_url())
add_dir('Phim lẻ mới nhất','http://megabox.vn/phim-le/moi-nhat.html', 1, get_thumbnail_url())
add_dir('Phim bộ mới nhất','http://megabox.vn/phim-bo/moi-nhat.html', 1, get_thumbnail_url())
def search():
query = common.getUserInput('Tìm kiếm Phim', '')
if query is None:
return
content = make_request('http://megabox.vn/home/search/index',{'key':query})
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
items = soup.findAll('div',{'class' : 'picAll'})
for item in items:
#espt = item.find('div',{'class':['espT', 'espT MHD']})
espt = item.find('div',{'class':'espT'})
        mode = 2 # TV series (phim bo)
if espt is None:
            mode = 10 # single movie (phim le)
espt = item.find('div',{'class':'espT MHD'})
if espt is not None:
espt = '['+ espt.text.replace(' ','') + ']-'
else:
espt = ''
if len(espt) > 0:
title = item.h4.text
href = item.a.get('href')
itemStr = str(item)
thumbIndex1 = itemStr.find('data-original')
lenPrefix = len('data-original="')
thumbIndex2 = itemStr.find('"',thumbIndex1+lenPrefix)
thumb = itemStr[thumbIndex1+lenPrefix:thumbIndex2]
d1 = itemStr.find('<p>')
d2 = itemStr.find('</p>',d1)
plot = itemStr[d1+3:d2]
add_dir(espt + title, href, mode, thumb + '?f.png',plot=plot)
def list_movies(url):
content = make_request(url)
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
items = soup.findAll('div',{'class' : 'picAll'})
for item in items:
espt = item.find('div',{'class':'espT'})
        mode = 2 # TV series (phim bo)
if espt is None:
            mode = 10 # single movie (phim le)
espt = item.find('div',{'class':'espT MHD'})
if espt is not None:
espt = '['+ espt.b.text.replace(' ','') + ']-'
else:
mode = 1
espt = ''
title = item.find('div',{'class':'infoC'})
if title is not None:
title = title.h4.text
else:
title = item.find('div',{'class':'loadtxtP'}).a.get('title')
href = item.a.get('href')
thumb = item.find('img').get('src')
itemStr = str(item)
d1 = itemStr.find('<p>')
d2 = itemStr.find('</p>',d1)
plot = itemStr[d1+3:d2]
if '(0)' not in title:
add_dir(espt + title, href, mode, thumb + '?f.png',plot=plot)
# used for the-loai.html (category) links
def list_movies_category(url):
content = make_request(url)
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
items = soup.find('div',{'class' : 'contentSHL showsBoxEps categoriesBBox'}).findAll('div',{'class' : 'picAll'})
for item in items:
thumb = 'http://megabox.vn/' + item.img.get('data-original')
infoC = item.find('div',{'class' : 'infoC'})
href = infoC.a.get('href')
title = infoC.find('h4').text
if '(0)' not in title:
add_dir(title, href, 1, thumb + '?f.png')
def play_movie(url,p=True):
content = make_request(url)
soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
eps = soup.find('div',{'class' : 'contentBox showsBoxEps'})
    # TV series (phim bo): list the episodes
if not p and eps is not None:
items = eps.findAll('div',{'class' : 'picAll'})
for item in items:
thumb = item.find('img').get('data-original')
infoC = item.find('div',{'class' : 'infoC'})
href = 'http://megabox.vn/' + item.a.get('href')
title = '[' + infoC.find('h4').text + '] ' + item.find('img').get('title')
add_dir(title, href, 10, thumb + '?f.png')
else:
m = re.findall('file:\W+http://(.*?)index.m3u8',content,re.DOTALL)
if (len(m) > 0):
url = 'http://' + m[0] + 'index.m3u8'
listitem = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
else:
bannerSlide = soup.find('div',{'class' : 'bannerSlide'})
if bannerSlide is not None:
url = bannerSlide.find('a').get('href')
content = make_request(url)
m = re.findall('file:\W+http://(.*?)index.m3u8',content,re.DOTALL)
if (len(m) > 0):
url = 'http://' + m[0] + 'index.m3u8'
listitem = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
else:
xbmcgui.Dialog().ok("Oops!","Không tìm thấy phim.","Link giả?")
else:
xbmcgui.Dialog().ok("Oops!","Không tìm thấy phim.","Link giả?")
def add_link(date, name, duration, href, thumb, desc):
u=sys.argv[0]+"?url="+urllib.quote_plus(href)+"&mode=10"
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=thumb)
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Duration": duration})
liz.setProperty('IsPlayable', 'true')
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
def add_dir(name,url,mode,iconimage,query='',type='f',page=0,plot=''):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&query="+str(query)+"&type="+str(type)+"&page="+str(page)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, "plot":plot } )
isFolder = True;
if mode == 10:
liz.setProperty('IsPlayable', 'true')
isFolder = False
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=isFolder)
return ok
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
params=get_params()
url=''
name=None
mode=None
query=None
type='f'
page=0
try:
type=urllib.unquote_plus(params["type"])
except:
pass
try:
page=int(urllib.unquote_plus(params["page"]))
except:
pass
try:
query=urllib.unquote_plus(params["query"])
except:
pass
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
print "type: "+str(type)
print "page: "+str(page)
print "query: "+str(query)
if mode==None:
build_menu()
elif mode == 1:
list_movies(url)
elif mode == 2:
play_movie(url,False)
elif mode == 3:
list_movies_category(url)
elif mode == 4:
home_hot()
elif mode == 10:
play_movie(url)
elif mode == 100:
search()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
 | 0.030922 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard.contrib.sahara import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:data_processing.clusters:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.clusters:details', args=['id'])
class DataProcessingClusterTests(test.TestCase):
@test.create_stubs({api.sahara: ('cluster_list',)})
def test_index(self):
api.sahara.cluster_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.clusters.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(
res, 'project/data_processing.clusters/clusters.html')
self.assertContains(res, 'Clusters')
self.assertContains(res, 'Name')
@test.create_stubs({api.sahara: ('cluster_template_list', 'image_list')})
def test_launch_cluster_get_nodata(self):
api.sahara.cluster_template_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.sahara.image_list(IsA(http.HttpRequest)) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse(
'horizon:project:data_processing.clusters:configure-cluster')
res = self.client.get("%s?plugin_name=shoes&hadoop_version=1.1" % url)
self.assertContains(res, "No Images Available")
self.assertContains(res, "No Templates Available")
@test.create_stubs({api.sahara: ('cluster_get',)})
def test_event_log_tab(self):
cluster = self.clusters.list()[-1]
api.sahara.cluster_get(IsA(http.HttpRequest),
"cl2", show_progress=True).AndReturn(cluster)
self.mox.ReplayAll()
url = reverse(
'horizon:project:data_processing.clusters:events', args=["cl2"])
res = self.client.get(url)
data = json.loads(res.content)
self.assertIn("provision_steps", data)
self.assertEqual(data["need_update"], False)
step_0 = data["provision_steps"][0]
self.assertEqual(2, step_0["completed"])
self.assertEqual(2, len(step_0["events"]))
for evt in step_0["events"]:
self.assertEqual(True, evt["successful"])
step_1 = data["provision_steps"][1]
self.assertEqual(3, step_1["completed"])
self.assertEqual(0, len(step_1["events"]))
@test.create_stubs({api.sahara: ('cluster_list',
'cluster_delete')})
def test_delete(self):
cluster = self.clusters.first()
api.sahara.cluster_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.clusters.list())
api.sahara.cluster_delete(IsA(http.HttpRequest), cluster.id)
self.mox.ReplayAll()
form_data = {'action': 'clusters__delete__%s' % cluster.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
| 0 |
#!/usr/bin/env python
"""
This module provides simple wrapping functions for topia.
"""
from topia.termextract import extract
from data_parser import DataParser
from util.data_dumper import DataDumper
import numpy as np
import traceback
class KeywordExtractor(object):
def __init__(self, raw_labels, raw_data, dataset):
self.dataset = dataset
self.raw_labels = raw_labels
self.raw_data = raw_data
self.document_count = len(raw_data)
print("*** TRYING TO LOAD TFIDF MATRIX ***")
data_dumper = DataDumper(dataset)
found, self.tf_idf = data_dumper.load_matrix()
found = False
if found is False:
self.word_count = 0
self.words = {}
self.words_inverted = {}
documents_to_words = self.extract_word_distribution()
self.term_frequency = self.build_document_word_matrix(documents_to_words)
self.tf_idf = self.term_frequency * self.calculate_idf()
#data_dumper.save_matrix(self.tf_idf)
self.analyze_frequencies(self.term_frequency)
def calculate_idf(self):
## calculate idf for every word.
idf_arr = []
for i in range(self.term_frequency.shape[1]):
word_count = (self.term_frequency[:, i] != 0).sum() + 1
            idf = np.log(float(self.document_count) / word_count)
idf_arr.append(idf)
return idf_arr
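    # Hedged worked example (illustrative, not from the original code): with
    # self.document_count == 100 and a term occurring in 9 documents,
    # word_count == 10 and idf == np.log(100 / 10) ~= 2.30, while terms that
    # appear in nearly every document get an idf close to zero, so they are
    # down-weighted in self.tf_idf = self.term_frequency * self.calculate_idf().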
def analyze_frequencies(self, term_frequency):
sum_dict = {}
word_dict = {}
for i in range(term_frequency.shape[1]):
column_sum = term_frequency[:, i].sum()
if column_sum in sum_dict:
sum_dict[column_sum] = sum_dict[column_sum] + 1
else:
sum_dict[column_sum] = 1
word_dict[column_sum] = self.words_inverted[i]
for column_sum, count in sum_dict.iteritems():
try:
print("Sum: {}, Count: {}, Word: {}".format(column_sum, count, word_dict[column_sum]))
except Exception:
traceback.print_exc()
def get_tfidf(self):
return self.tf_idf
def build_document_word_matrix(self, documents_to_words):
document_word_matrix = np.zeros((self.document_count, self.word_count))
for i, document_to_word in enumerate(documents_to_words):
for entry in document_to_word:
document_word_matrix[i][entry[0]] = entry[1]
print("Extracted words")
print(document_word_matrix.shape)
return document_word_matrix
def extract_word_distribution(self):
extractor = extract.TermExtractor()
indices = []
i = 0
for label in self.raw_labels:
if label in ["raw.abstract", "title", 'raw.title']:
indices.append(i)
i += 1
if len(indices) > 2:
indices = indices[1:]
total = 0
documents_to_words = []
for paper_data in self.raw_data:
paper_text = ''
for index in indices:
paper_text += paper_data[index]
total += len(paper_data[index])
document_to_words = []
keywords = extractor(paper_text)
for keyword in keywords:
if keyword[2] > 3:
break
word_id = self.insert_word(keyword[0])
word_count = keyword[1]
self.words_inverted[word_id] = keyword[0]
document_to_words.append((word_id, word_count))
documents_to_words.append(document_to_words)
print("EXtracted total {}".format(total))
return documents_to_words
def insert_word(self, word):
if word in self.words:
return self.words[word]
self.words[word] = self.word_count
self.word_count += 1
return self.word_count - 1
def get_word_id(self, word):
if word in self.words:
return self.words[word]
return -1
| 0.030285 |
# Warning - contains lots of spoilers
import magic
from grammar import Noun, ProperNoun
from level import Rarity, Item
Items = []
lastItem = CrookedStaff = magic.Staff(
Noun('a', 'crooked staff', 'crooked staves'),
damage = 2,
minMana = 50,
maxMana = 100,
weight = 10,
rarity = Rarity( worth = 20, freq = 1, minLevel = 1, maxLevel = 7 )
)
Items.append( lastItem )
lastItem = CrookedStaff = magic.Staff(
Noun('an', 'ornate staff', 'ornate staves'),
damage = 2,
minMana = 100,
maxMana = 200,
weight = 10,
rarity = Rarity( worth = 20, freq = 1, minLevel = 5 )
)
Items.append( lastItem )
lastItem = SteelStaff = magic.Staff(
Noun('a', 'steel staff', 'steel staves'),
damage = 5,
minMana = 0,
maxMana = 0,
weight = 15,
rarity = Rarity( worth = 5, freq = 1, minLevel = 1, maxLevel = 3 )
)
Items.append( lastItem )
lastItem = CrookedStaff = magic.Staff(
Noun('a', 'magnificent staff', 'magnificent staves'),
damage = 2,
minMana = 200,
maxMana = 300,
weight = 10,
rarity = Rarity( worth = 20, freq = 1, minLevel = 8 )
)
Items.append( lastItem )
lastItem = Tome = magic.Tome(
Noun( "a", "scroll of magic", "scrolls of magic" ),
rarity = Rarity( worth = 15,freq = 4 ),
weight = 5,
)
Items.append( lastItem )
lastItem = Tome = magic.TrapTalisman(
Noun( "a", "talisman of perception", "talismans of perception" ),
weight = 3,
rarity = Rarity( freq = 2, worth = 7 )
)
Items.append( lastItem )
lastItem = HealthTalisman = magic.HealthTalisman(
Noun( "a", "talisman of health", "talismans of health" ),
weight = 3,
rarity = Rarity(worth = 20, freq = 1 , minLevel = 5)
)
Items.append( lastItem )
lastItem = Treasure = magic.Treasure(
Noun( "a", "heavy spellbook", "heavy spellbooks" ),
weight = 10,
rarity = Rarity(worth = 10, freq = 1, minLevel = 4)
)
Items.append( lastItem )
lastItem = HealPotion = Item(
# restores a single HP -- you're meant to use magic.
Noun( "a", "vial of healing essences", "vials of healing essences" ),
'!',
'magenta',
itemType = "healing",
weight = 3,
rarity = Rarity( worth = 8, freq = 4 ),
)
Items.append( lastItem )
lastItem = MacGuffinMale = Item(
ProperNoun( "Professor Nislakh", "male" ),
'h',
'black',
itemType = "macguffin",
weight = 30, # mcg + n heavy books should be full capacity (no staff etc.) - might be a nice challenge for completists
rarity = Rarity( freq = 0 ), # should be freq 0! unique, generated when you bump into Nislakh.
isMacGuffin = True,
)
lastItem = MacGuffinFemale = Item(
ProperNoun( "Professor Nislene", "female" ),
'h',
'black',
itemType = "macguffin",
weight = 30, # mcg + n heavy books should be full capacity (no staff etc.) - might be a nice challenge for completists
    rarity = Rarity( freq = 0 ), # should be freq 0! unique, generated when you bump into Nislene.
isMacGuffin = True,
)
if __name__ == '__main__':
from level import DungeonDepth
for protorune in magic.generateProtorunes():
protorune.identify()
Items.append( protorune )
for dlevel in range(1,DungeonDepth+1):
items = [ item for item in Items if item.rarity.eligible( dlevel ) ]
totalWeight = sum( map( lambda item : item.rarity.frequency, items ) )
print "Dungeon level %d: %d items" % (dlevel, len(items))
for item in items:
print "\t%.2lf%%: %s" % (item.rarity.frequency * 100.0 / totalWeight, item.name.plural)
print
| 0.058642 |
import collections
import logging
from boto.exception import BotoServerError
from retrying import retry
import cfnparams.exceptions
def with_retry(cls, methods):
"""
    Wraps the given methods of a class with an exponential back-off
    retry mechanism.
"""
retry_with_backoff = retry(
retry_on_exception=lambda e: isinstance(e, BotoServerError),
wait_exponential_multiplier=1000,
wait_exponential_max=10000
)
for method in methods:
m = getattr(cls, method, None)
if isinstance(m, collections.Callable):
setattr(cls, method, retry_with_backoff(m))
return cls
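# Hedged usage note (illustrative): with_retry(cfn_connection, ['describe_stacks'])
# re-invokes describe_stacks with exponential back-off (starting around 1 s and
# capped at 10 s) whenever boto raises a BotoServerError, and leaves any method
# not named in the list untouched.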
class Stack(object):
def __init__(self, cfn_stack):
"""
Creates a nicer representation of a boto.cloudformation.stack.Stack.
"""
self.stack_id = cfn_stack.stack_id
self.stack_name = cfn_stack.stack_name
self.outputs = {o.key: o.value for o in cfn_stack.outputs}
self.tags = cfn_stack.tags
class Resolver(object):
def __init__(self, cfn, strategy, filters):
self.cfn = with_retry(cfn, ['list_stacks', 'describe_stacks'])
self.strategy = strategy
self.filters = filters
def __call__(self, name, output):
logging.debug('Attempting to resolve output "{}" from stack "{}"'
.format(output, name))
for stack in self.strategy(self.cfn, name):
all_filters_match = all(
item in stack.tags.items()
for item in self.filters.items()
)
contains_output = output in stack.outputs
if all_filters_match and contains_output:
logging.debug('Found output "{}" in stack "{}"'
.format(output, stack.stack_name))
# return first match, ignoring any other possible matches
return stack.outputs[output]
else:
logging.debug('Did not find output "{}" in stack "{}"'
.format(output, stack.stack_name))
raise cfnparams.exceptions.ResolutionError(name, output)
class ResolveByName(object):
"""
Resolution strategy which will match stacks against their stack name.
"""
def __init__(self):
self.cache = collections.defaultdict(dict)
def __call__(self, cfn, name):
if name in self.cache:
for stack in self.cache[name].values():
logging.debug('Retrieved stack "{}" from cache'.format(name))
yield stack
else:
next_token = None
keep_listing = True
while keep_listing:
                # describe_stacks accepts the stack name directly, so only the
                # matching stack(s) are paged through here
resp = cfn.describe_stacks(name, next_token)
for summary in resp:
stack = Stack(summary)
                    self.cache[name][stack.stack_id] = stack
logging.debug('Retrieved stack "{}"'.format(name))
yield stack
next_token = resp.next_token
keep_listing = next_token is not None
            return
class ResolveByTag(object):
"""
Resolution strategy which will match stacks against the value of the tag
provided.
"""
valid_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_COMPLETE',
'UPDATE_ROLLBACK_COMPLETE'
]
def __init__(self, tag):
self.tag = tag
self.cache = collections.defaultdict(dict)
def __call__(self, cfn, name):
# Optimistically serve stack from cache
if name in self.cache:
for stack in self.cache[name].values():
logging.debug('Retrieved stack "{}" from cache'.format(name))
yield stack
# Maybe it's not in the cache yet? Walk through the full list again
next_token = None
keep_listing = True
while keep_listing:
# Use list stacks which is more efficient than describe_stacks
# when a name is not specified
resp = cfn.list_stacks(self.valid_states, next_token)
for summary in resp:
# Already yielded this stack, skip it
if summary.stack_id in self.cache[name]:
logging.debug('Skipping already yielded stack "{}"'
.format(summary.stack_name))
continue
# First time we have seen this stack, lookup all details
stack = cfn.describe_stacks(summary.stack_id)
s = Stack(stack[0])
tagged_name = s.tags.get(self.tag)
if self.tag in s.tags:
self.cache[tagged_name][s.stack_id] = s
if tagged_name == name:
logging.debug('Retrieved stack "{}"'.format(name))
yield s
next_token = resp.next_token
keep_listing = next_token is not None
| 0 |
import numpy as N
import wave, subprocess
def nat_to_vn(nat):
try:
integer = int(nat)
except ValueError:
return ""
if integer < 0:
return ""
emptyset = "{}"
if integer == 0:
return "{}"
if integer > 0:
l = ["" for i in xrange(integer)]
l[0] = "{}"
for i in xrange(integer-1):
l[i+1] = "{%s}" % ",".join(l[0:i+1])
return "{" + ",".join(l) + "}"
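# Worked examples of the von Neumann encoding produced above:
#   nat_to_vn(0) -> "{}"
#   nat_to_vn(1) -> "{{}}"
#   nat_to_vn(2) -> "{{},{{}}}"
# i.e. each natural number is the set of all smaller naturals.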
def ordered_pair(p,q):
    '''Wiener definition: (a,b) = {{{a},{}},{{b}}}'''
return "{{{%s},{}},{{%s}}}" % (p,q)
def int_to_vn(integer):
'''{p,q} is p~q, where p and q are ordered pairs of the form (a,b) where a,b are natural numbers and a-b = N'''
#N = 0 means (0,0) ~ {1,1}
if integer == 0:
p = ordered_pair(nat_to_vn(0),nat_to_vn(0))
q = ordered_pair(nat_to_vn(1),nat_to_vn(1))
return "{%s,%s}" % (p,q)
#Positive N is (N,0) ~ (N+1,1)
if integer > 0:
p = ordered_pair(nat_to_vn(integer),nat_to_vn(0))
q = ordered_pair(nat_to_vn(integer+1),nat_to_vn(1))
return "{%s,%s}" % (p,q)
#Negative N is (0,N) ~ (1,N+1)
if integer < 0:
p = ordered_pair(nat_to_vn(0),nat_to_vn(-1 * integer))
q = ordered_pair(nat_to_vn(1),nat_to_vn((-1 * integer) + 1))
return "{%s,%s}" % (p,q)
def q_to_vn(q1,q2):
'''{m,n} is m~n where m and n are ordered pairs such that m1*n2 - m2*n1 = 0 for integer m,n'''
    #Division by zero is undefined
if q2 == 0 and q1 !=0:
return ""
#Q = 0 means (0,0) ~ {1,1} | 0*1 - 1*0 = 0
if q1 == 0:
m = ordered_pair(int_to_vn(0),int_to_vn(0))
n = ordered_pair(int_to_vn(1),int_to_vn(1))
return "{%s,%s}" % (m,n)
#Otherwise q1,q2 = m1,m2, (q1,q2) ~ (-q1,q2) | q1*-q2 - -q1*q2 = 0
else:
m = ordered_pair(int_to_vn(q1),int_to_vn(q2))
n = ordered_pair(int_to_vn(-q1),int_to_vn(-q2))
return "{%s,%s}" % (m,n)
##############
class SoundFile:
def __init__ (self):
self.file = wave.open('temp.wav', 'wb')
self.sr = 44100
self.file.setparams((1, 2, self.sr, 44100*4, 'NONE', 'noncompressed'))
def write(self,signal):
self.file.writeframes(signal)
def close(self):
self.file.close()
def prepare_signal(freq):
def sine(x):
return VOL * N.sin(x)
def organ(x, no):
base = 0
for k in xrange(no):
M = 2**(k)
base = base + VOL/M * N.sin(M * xaxis)
return base
def evenorgan(x, no):
base = 0
for k in xrange(no):
if k%2:
M = 2**(k)
base = base + VOL/M * N.sin(M * xaxis)
return base
def oddorgan(x, no):
base = 0
for k in xrange(no):
if not k%2:
M = 2**(k)
base = base + VOL/M * N.sin(M * xaxis)
return base
def saworgan(x, no):
base = 0
for k in xrange(no):
M = 2**(k)
base = base + VOL/(N.exp(xaxis/2)*M) * N.sin(M * xaxis)
return base
# let's prepare signal
duration = NOTELENGTH
samplerate = 44100 # Hz
    samples = int(duration*samplerate)
frequency = freq # 440 Hz
period = samplerate / float(frequency) # in sample points
omega = N.pi * 2 / period
xaxis = N.arange(int(period),dtype = N.float) * omega
    timbredict = {"sine": sine(xaxis),
                  "organ": organ(xaxis,TIMBRETERMS),
                  "oddorgan": oddorgan(xaxis,TIMBRETERMS),
                  "evenorgan": evenorgan(xaxis,TIMBRETERMS),
                  "saworgan": saworgan(xaxis,TIMBRETERMS)}
ydata = timbredict[TIMBRE]
signal = N.resize(ydata, (samples,))
ssignal = ''
for i in range(len(signal)):
ssignal += wave.struct.pack('h',signal[i]) # transform to binary
return ssignal
def go_up(last):
out = last*(UP)**NUMHTONES
return out
def go_down(last):
out = last*(DOWN)**NUMHTONES
return out
def repeat(last):
return last
BASE = 110 #Hz
TET = 12
NUMHTONES = 2
NOTELENGTH = 0.2 #secs
TIMBRE = "saworgan"
TIMBRETERMS = 12
VOL = 16384
UP = float(2) ** (float(1)/float(TET))
DOWN = float(1)/UP #1/(2^(1/12))
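# Note on the pitch constants: UP = 2**(1/TET) is one equal-tempered semitone,
# so with TET = 12 and NUMHTONES = 2 every go_up()/go_down() step moves the
# pitch by a whole tone, i.e. a frequency factor of 2**(2/12) ~= 1.1225.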
tones = [BASE]
ruledict = {"u":go_up, "d":go_down, "r":repeat }
num = raw_input("What number would you like to musicalise?\nPlease input an integer or rational number (n/m)\n> ")
if "/" in num:
nums = num.split("/")
nume, denom = int(nums[0]), int(nums[1])
if nume > 10:
        print("Numerator too large (greater than 10)")
quit()
elif denom > 10:
        print("Denominator too large (greater than 10)")
quit()
else:
print("lol")
notestring = q_to_vn(nume,denom)
else:
if int(num) > 10:
print("Too large (greater than 10)")
quit()
if int(num) < -10:
print("Too small (less than -10)")
quit()
elif int(num) == 0:
print("Ok.\n(no sound 4 u)")
quit()
else:
notestring = int_to_vn(int(num))
dirlist = ["r"]
for note in notestring:
if note == "{":
dirlist = dirlist + ["u"]
if note == "}":
dirlist = dirlist + ["d"]
if note == ",":
dirlist = dirlist + ["r"]
else:
pass
tone = BASE
for rule in dirlist:
tone = ruledict[rule](tone)
tones.append(tone)
wav = SoundFile()
for tone in tones:
wav.write(prepare_signal(tone))
wav.close()
if "/" in num:
tnum = "%sover%s" % (nume,denom)
else:
tnum = num
wav = 'temp.wav'
mp3 = 'peano-%s-%sTET-%sHz-%sst-%ss-%s.mp3' % (tnum, TET, BASE, NUMHTONES, NOTELENGTH, TIMBRE)
cmd = 'lame --preset fast medium %s %s' % (wav, mp3)
clr = 'rm %s' % wav
try:
subprocess.check_output(cmd, shell=True)
subprocess.call(clr, shell=True)
except:
print("Could not convert to mp3. Please try installing lame (and/or lunix lol) and see temp.wav for output. NB. Filename would have been %s" % mp3)
print notestring
| 0.04614 |
# Copyright (C) 2019 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""API to the hp-conrep utility to list the bios settings"""
import os
import pprint
import sys
import tempfile
import xml.etree.ElementTree as ET
from hardware.detect_utils import cmd
def get_hp_conrep(hwlst):
for i in hwlst:
if i[0:3] == ('system', 'product', 'vendor'):
if i[3] not in ['HPE', 'HP']:
return True, ""
output_file = next(tempfile._get_candidate_names())
status, output = cmd("hp-conrep --save -f {}".format(output_file))
if status != 0:
sys.stderr.write("Unable to run hp-conrep: %s\n" % output)
return False, ""
return_value = open(output_file).read()
os.remove(output_file)
return True, return_value
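# The hwlst entries checked above are expected to be 4-tuples such as
# ('system', 'product', 'vendor', 'HP'); the vendor value shown is only an
# illustrative example of what hardware detection produces.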
def dump_hp_bios(hwlst):
    # handle output injection for testing purposes
valid, hpconfig = get_hp_conrep(hwlst)
if not valid:
return False
if hpconfig:
xml = ET.fromstring(hpconfig)
root = xml.iter("Section")
for child in root:
hwlst.append(('hp', 'bios', child.attrib['name'], child.text))
return True
if __name__ == "__main__":
hwlst = []
dump_hp_bios(hwlst)
pprint.pprint(hwlst)
| 0 |
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, Ref
from .validators import boolean, integer
from . import cloudformation
EC2_INSTANCE_LAUNCH = "autoscaling:EC2_INSTANCE_LAUNCH"
EC2_INSTANCE_LAUNCH_ERROR = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"
EC2_INSTANCE_TERMINATE = "autoscaling:EC2_INSTANCE_TERMINATE"
EC2_INSTANCE_TERMINATE_ERROR = "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
TEST_NOTIFICATION = "autoscaling:TEST_NOTIFICATION"
# Termination Policy constants
Default = 'Default'
OldestInstance = 'OldestInstance'
NewestInstance = 'NewestInstance'
OldestLaunchConfiguration = 'OldestLaunchConfiguration'
ClosestToNextInstanceHour = 'ClosestToNextInstanceHour'
class Tag(AWSHelperFn):
    def __init__(self, key, value, propagate):
self.data = {
'Key': key,
'Value': value,
            'PropagateAtLaunch': propagate,
}
def JSONrepr(self):
return self.data
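# Example (sketch): Tag("Name", "web", True).JSONrepr() yields
#   {'Key': 'Name', 'Value': 'web', 'PropagateAtLaunch': True}
# which is the shape CloudFormation expects for auto scaling group tags.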
class NotificationConfiguration(AWSProperty):
props = {
'TopicARN': (basestring, True),
'NotificationTypes': (list, True),
}
class MetricsCollection(AWSProperty):
props = {
'Granularity': (basestring, True),
'Metrics': (list, False),
}
class Metadata(AWSHelperFn):
def __init__(self, init, authentication=None):
self.validate(init, authentication)
        # get keys and values from init and authentication;
        # popitem() is safe because each data dict always holds exactly one key
initKey, initValue = init.data.popitem()
self.data = {initKey: initValue}
if authentication:
authKey, authValue = authentication.data.popitem()
self.data[authKey] = authValue
def validate(self, init, authentication):
if not isinstance(init, cloudformation.Init):
raise ValueError(
'init must be of type cloudformation.Init'
)
is_instance = isinstance(authentication, cloudformation.Authentication)
if authentication and not is_instance:
raise ValueError(
'authentication must be of type cloudformation.Authentication'
)
def JSONrepr(self):
return self.data
class AutoScalingGroup(AWSObject):
type = "AWS::AutoScaling::AutoScalingGroup"
props = {
'AvailabilityZones': (list, True),
'Cooldown': (integer, False),
'DesiredCapacity': (integer, False),
'HealthCheckGracePeriod': (int, False),
'HealthCheckType': (basestring, False),
'InstanceId': (basestring, False),
'LaunchConfigurationName': (basestring, True),
'LoadBalancerNames': (list, False),
'MaxSize': (integer, True),
'MetricsCollection': ([MetricsCollection], False),
'MinSize': (integer, True),
'NotificationConfiguration': (NotificationConfiguration, False),
'PlacementGroup': (basestring, False),
'Tags': (list, False), # Although docs say these are required
'TerminationPolicies': ([basestring], False),
'VPCZoneIdentifier': (list, False),
}
def validate(self):
if 'UpdatePolicy' in self.resource:
update_policy = self.resource['UpdatePolicy']
isMinRef = isinstance(update_policy.MinInstancesInService, Ref)
isMaxRef = isinstance(self.MaxSize, Ref)
if not (isMinRef or isMaxRef):
minCount = int(update_policy.MinInstancesInService)
maxCount = int(self.MaxSize)
if minCount >= maxCount:
raise ValueError(
"The UpdatePolicy attribute "
"MinInstancesInService must be less than the "
"autoscaling group's MaxSize")
return True
class LaunchConfiguration(AWSObject):
type = "AWS::AutoScaling::LaunchConfiguration"
props = {
'AssociatePublicIpAddress': (boolean, False),
'BlockDeviceMappings': (list, False),
'EbsOptimized': (boolean, False),
'IamInstanceProfile': (basestring, False),
'ImageId': (basestring, True),
'InstanceId': (basestring, False),
'InstanceMonitoring': (boolean, False),
'InstanceType': (basestring, True),
'KernelId': (basestring, False),
'KeyName': (basestring, False),
'Metadata': (Metadata, False),
'RamDiskId': (basestring, False),
'SecurityGroups': (list, False),
'SpotPrice': (basestring, False),
'UserData': (basestring, False),
}
class ScalingPolicy(AWSObject):
type = "AWS::AutoScaling::ScalingPolicy"
props = {
'AdjustmentType': (basestring, True),
'AutoScalingGroupName': (basestring, True),
'Cooldown': (integer, False),
'ScalingAdjustment': (basestring, True),
}
class ScheduledAction(AWSObject):
type = "AWS::AutoScaling::ScheduledAction"
props = {
'AutoScalingGroupName': (basestring, True),
'DesiredCapacity': (integer, False),
'EndTime': (basestring, True),
'MaxSize': (integer, False),
'MinSize': (integer, False),
'Recurrence': (basestring, True),
'StartTime': (basestring, True),
}
class Trigger(AWSObject):
type = "AWS::AutoScaling::Trigger"
props = {
'AutoScalingGroupName': (basestring, True),
'BreachDuration': (integer, True),
'Dimensions': (list, True),
'LowerBreachScaleIncrement': (integer, False),
'LowerThreshold': (integer, True),
'MetricName': (basestring, True),
'Namespace': (basestring, True),
'Period': (integer, True),
'Statistic': (basestring, True),
'Unit': (basestring, False),
'UpperBreachScaleIncrement': (integer, False),
'UpperThreshold': (integer, True),
}
class EBSBlockDevice(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
props = {
'DeleteOnTermination': (boolean, False),
'Iops': (integer, False),
'SnapshotId': (basestring, False),
'VolumeSize': (integer, False),
'VolumeType': (basestring, False),
}
class BlockDeviceMapping(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
props = {
'DeviceName': (basestring, True),
'Ebs': (EBSBlockDevice, False),
'NoDevice': (boolean, False),
'VirtualName': (basestring, False),
}
| 0 |
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import warnings
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
from pip._vendor.six.moves.urllib import parse as urllib_parse
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, read_text_file, FakeFile, _make_build_dir,
)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel, wheel_ext
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self.url = url
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
        # This holds the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
self._temp_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.pycompile = pycompile
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False):
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
url=url,
editable_options=editable_options,
isolated=isolated)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(cls, name, comes_from=None, isolated=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
url = None
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif (os.path.isdir(path)
and (os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(path):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not "
"found." % name
)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warning(
'Requirement %r looks like a filename, but the file does '
'not exist',
name
)
link = Link(path_to_url(name))
# it's a local file, dir, or url
if link:
url = link.url
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', url):
url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
# wheel file
if link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
return cls(req, comes_from, url=url, markers=markers,
isolated=isolated)
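    # Illustrative inputs accepted by from_line() (paths/URLs are examples only):
    #   InstallRequirement.from_line("requests==2.5.1")          # name + specifier
    #   InstallRequirement.from_line("./downloads/foo.whl")      # local wheel file
    #   InstallRequirement.from_line("http://example.com/foo.tar.gz#egg=foo")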
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
@property
def specifier(self):
return self.req.specifier
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def correct_build_location(self):
"""If the build location was a temporary directory, this will move it
to a new more permanent location"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
old_location = self._temp_build_dir
new_build_dir = self._ideal_build_dir
del self._ideal_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
new_location = os.path.join(new_build_dir, name)
if not os.path.exists(new_build_dir):
logger.debug('Creating directory %s', new_build_dir)
_make_build_dir(new_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def url_name(self):
if self.req is None:
return None
return urllib_parse.quote(self.req.project_name.lower())
@property
def setup_py(self):
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.url,
)
with indent_log():
# if it's distribute>=0.7, it won't contain an importable
# setuptools, and having an egg-info dir blocks the ability of
# setup.py to find setuptools plugins, so delete the egg-info dir
# if no setuptools. it will get recreated by the run of egg_info
# NOTE: this self.name check only works when installing from a
# specifier (not archive path/urls)
# TODO: take this out later
if (self.name == 'distribute'
and not os.path.isdir(
os.path.join(self.source_dir, 'setuptools'))):
rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
filter_stdout=self._filter_install,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self.correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
)
or os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep)
+ (os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(self.name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
        # we want to return its version.
if dist:
return dist.version
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.url:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.url.startswith('file:'):
# Static paths don't get updated
return
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
if develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.egg-info'):
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip8Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
            logger.error(
                "Can't rollback %s, nothing uninstalled.", self.name,
            )
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
            logger.error(
                "Can't commit %s, nothing uninstalled.", self.name,
            )
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=(), root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
# FIXME: I'm not sure if this is a reasonable location;
# probably not but we can't put it in the default location, as
# that is a virtualenv symlink that isn't writable
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
filter_stdout=self._filter_install,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
                # command so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
make_path_relative(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
os.rmdir(temp_location)
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
]
+ list(global_options)
+ ['develop', '--no-deps']
+ list(install_options),
cwd=cwd, filter_stdout=self._filter_install,
show_stdout=False)
self.install_succeeded = True
def _filter_install(self, line):
level = logging.INFO
for regex in [
r'^running .*',
r'^writing .*',
'^creating .*',
'^[Cc]opying .*',
r'^reading .*',
r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
r'^SyntaxError:',
r'^SyntaxWarning:',
r'^\s*Skipping implicit fixer: ',
r'^\s*(warning: )?no previously-included (files|directories) ',
r'^\s*warning: no files found matching \'.*\'',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if not line or re.search(regex, line.strip()):
level = logging.DEBUG
break
return (level, line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately."""
if self.req is None:
return False
try:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# if we've already set distribute as a conflict to setuptools
# then this check has already run before. we don't want it to
# run again, and return False, since it would block the uninstall
# TODO: remove this later
if (self.req.project_name == 'setuptools'
and self.conflicts_with
and self.conflicts_with.project_name == 'distribute'):
return True
else:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv()
and dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.url and '.whl' in self.url
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
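# For example, _strip_postfix("django-dev") -> "django" and
# _strip_postfix("foo-0.2") -> "foo"; names without such a postfix are
# returned unchanged.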
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
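# For example, "git+https://host/repo.git#egg=Foobar&subdirectory=src" yields
# {'egg': 'Foobar', 'subdirectory': 'src'}; a URL with no such options
# returns None.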
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
- an URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return (
None,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return None, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
| 0 |
from Utils import runCommand
from Config import Config
import os
import threading
import subprocess
BUILDURL="http://koji.fedoraproject.org/koji/taskinfo?taskID=%s"
# DRY MODE
# - don't run any command changing a state
#
# DECOMPOSITION
# - low level commands
# - simple commands (wrappers over low level commands)
# - multi commands (running simple commands over chosen branches)
# mappings of branches to build candidates
branch2bc = {
'f20': 'f20-candidate',
'f21': 'f21-candidate',
'f22': 'f22-candidate',
'f23': 'f23-candidate',
'el6': 'el6-candidate'
}
branch2build = {
'f20': 'f20-build',
'f21': 'f21-build',
'f22': 'f22-build',
'f23': 'f23-build',
'el6': 'dist-6E-epel-build'
}
branch2tag = {
'f20': 'fc20',
'f21': 'fc21',
'f22': 'fc22',
'f23': 'fc23',
'el6': 'el6'
}
class LowLevelCommand:
def __init__(self, dry=False, debug=False):
self.dry = dry
self.debug = debug
def runFedpkgSrpm(self):
"""
Run 'fedpkg srpm'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'fedpkg srpm'"
if self.dry == True:
so = "Wrote: gofed-test-0.6.2-0.3.git89088df.fc20.src.rpm"
se = ""
rc = 0
return so, se, rc
else:
return runCommand("fedpkg srpm")
def runFedpkgScratchBuild(self, srpm):
"""
Run 'fedpkg scratch-build --nowait --srpm=SRPM'
"""
if self.debug == True:
print "Running 'fedpkg scratch-build --nowait --srpm=SRPM'"
if self.dry == True:
so = "Created task: 1"
se = ""
rc = 0
return so, se, rc
else:
return runCommand("fedpkg scratch-build --nowait --srpm=%s" % srpm)
def runFedpkgBuild(self):
"""
Run 'fedpkg build --nowait'
"""
if self.debug == True:
print "Running 'fedpkg build --nowait'"
if self.dry == True:
so = "Created task: 1"
se = ""
rc = 0
return so, se, rc
else:
return runCommand("fedpkg build --nowait")
def runGitPull(self):
"""
Run 'git pull'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'git pull'"
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("git pull")
def runFedpkgPush(self):
"""
Run 'fedpkg push'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'fedpkg push'"
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("fedpkg push")
def runFedpkgUpdate(self):
"""
Run 'fedpkg update'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'fedpkg update'"
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
subprocess.call("fedpkg update", shell=True)
return ""
def runFedpkgSwitchBranch(self, branch):
"""
Run 'fedpkg switch-branch'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'fedpkg switch-branch'"
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("fedpkg switch-branch %s" % branch)
def runGitCherryPick(self, commit="master"):
"""
Run 'git cherry-pick COMMIT'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'git cherry-pick %s'" % commit
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("git cherry-pick %s" % commit)
def runGitReset(self, branch):
"""
Run 'git reset --hard remotes/origin/BRANCH'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'git reset --hard remotes/origin/%s'" % branch
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("git reset --hard remotes/origin/%s" % branch)
def runGitMerge(self, branch):
"""
Run 'git merge BRANCH'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'git merge %s'" % branch
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("git merge %s" % branch)
def runBodhiOverride(self, branch, name):
"""
Run 'bodhi --buildroot-override=BUILD for TAG --duration=DURATION --notes=NOTES'.
It returns so, se, rc triple.
"""
build = "%s.%s" % (name, branch2tag[branch])
long_tag = branch2bc[branch]
build_tag = branch2build[branch]
if self.debug == True:
			print "Running 'bodhi --buildroot-override=%s for %s --duration=20 --notes='temp non-stable dependency waiting for stable''" % (build, long_tag)
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
			return runCommand("bodhi --buildroot-override=%s for %s --duration=20 --notes='temp non-stable dependency waiting for stable'" % (build, long_tag))
def runKojiWaitOverride(self, branch, name):
"""
Run 'koji wait-repo TAG --build=BUILD'.
It returns so, se, rc triple.
"""
build = "%s.%s" % (name, branch2tag[branch])
build_tag = branch2build[branch]
if self.debug == True:
print "Running 'koji wait-repo %s --build=%s'" % (build_tag, build)
if self.dry == True:
so = ""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("koji wait-repo %s --build=%s" % (build_tag, build))
def runGitLog(self, depth):
"""
Run 'git log --pretty=format:"%%H" -n DEPTH'.
It returns so, se, rc triple.
"""
if self.debug == True:
print "Running 'git log --pretty=format:\"%%H\" -n %s'" % depth
if self.dry == True:
so = """4e604fecc22b498e0d46854ee4bfccdfc1932b12
c46cdd60710b184f834b54cff80502027b66c5e0
6170e22ecb5923bbd22a311f172fcf59c5f16c08
0fc92e675c90e7b9e1eaba0c4837093b9b365317
21cf1880e696fd7047f8b0f5605ffa72dde6c504
8025678aab1c404aecdd6d7e5b3afaf9942ef6c6
6ed91011294946fd7ca6e6382b9686e12deda9be
ec0ebc48684bccbd4793b83edf14c59076edb1eb
adf728db9355a86332e17436a78f54a769e194be"""
se = ""
rc = 0
return so, se, rc
else:
return runCommand("git log --pretty=format:\"%%H\" -n %s" % depth)
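# All run* helpers above return a (stdout, stderr, returncode) triple; with
# dry=True a canned triple is returned instead of touching the system, e.g.
# (sketch):
#
#   llc = LowLevelCommand(dry=True)
#   so, se, rc = llc.runFedpkgSrpm()  # so names a .src.rpm, rc == 0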
class SimpleCommand:
def __init__(self, dry=False, debug=False):
self.dry = dry
self.debug = debug
self.llc = LowLevelCommand(dry=self.dry, debug=self.debug)
def makeSRPM(self):
so, _, rc = self.llc.runFedpkgSrpm()
if rc != 0:
return ""
for line in so.split("\n"):
line = line.strip()
if line == "":
continue
parts = line.split(" ")
if len(parts) != 2:
continue
return parts[1]
return ""
def runBuild(self):
so, _, rc = self.llc.runFedpkgBuild()
if rc != 0:
return -1
task_lines = filter(lambda l: l.startswith("Created task:"), so.split("\n"))
if len(task_lines) != 1:
return -1
task_id = task_lines[0].strip().split(" ")[-1]
if task_id.isdigit():
return int(task_id)
return -1
def runScratchBuild(self, srpm):
so, _, rc = self.llc.runFedpkgScratchBuild(srpm)
if rc != 0:
return -1
task_lines = filter(lambda l: l.startswith("Created task:"), so.split("\n"))
if len(task_lines) != 1:
return -1
task_id = task_lines[0].strip().split(" ")[-1]
if task_id.isdigit():
return int(task_id)
return -1
def pullBranch(self, branch):
so, se, rc = self.llc.runGitPull()
if rc != 0:
return se
return ""
def pushBranch(self, branch):
so, se, rc = self.llc.runFedpkgPush()
if rc != 0:
return se
return ""
def updateBranch(self, branch):
self.llc.runFedpkgUpdate()
return ""
def mergeMaster(self):
so, se, rc = self.llc.runGitMerge("master")
if rc != 0 or se != "":
return se
return ""
def overrideBuild(self, branch, name):
so, se, rc = self.llc.runBodhiOverride(branch, name)
if rc != 0 or se != "":
return se
return ""
def waitForOverrideBuild(self, branch, name):
so, se, rc = self.llc.runKojiWaitOverride(branch, name)
if rc != 0:
return se
return ""
def getGitCommits(self, depth):
		so, se, rc = self.llc.runGitLog(depth)
if rc != 0:
return "Unable to list commits: %s" % se, []
commits = []
for line in so.split("\n"):
line = line.strip()
if line == "":
continue
commits.append(line)
return "", commits
class WatchTaskThread(threading.Thread):
def __init__(self, task_id):
super(WatchTaskThread, self).__init__()
self.task_id = task_id
self.err = ""
def run(self):
runCommand("koji watch-task %s --quiet" % self.task_id)
def getError(self):
return self.err
class WaitTaskThread(threading.Thread):
def __init__(self, task_id):
super(WaitTaskThread, self).__init__()
self.task_id = task_id
self.state = False
self.err = ""
def run(self):
so, se, rc = runCommand("koji taskinfo %s" % self.task_id)
if rc != 0:
			self.err = "Unable to get taskinfo for task %s: %s" % (self.task_id, se)
return
state_lines = filter(lambda l: l.startswith("State"), so.split("\n"))
state = state_lines[0].split(" ")[1]
if state == "closed":
self.state = True
def getState(self):
return self.state
def getError(self):
return self.err
class MultiCommand:
def __init__(self, dry=False, debug=False):
self.dry = dry
self.debug = debug
self.sc = SimpleCommand(debug=self.debug, dry=self.dry)
self.llc = LowLevelCommand(debug=self.debug, dry=self.dry)
def _buildBranches(self, branches, scratch=True):
task_ids = {}
# init [scratch] builds
for branch in branches:
print "Branch %s" % branch
so, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
print "Unable to switch to %s branch" % branch
continue
srpm = ""
if scratch:
srpm = self.sc.makeSRPM()
if srpm == "":
print "Unable to create srpm"
continue
task_id = -1
if scratch:
task_id = self.sc.runScratchBuild(srpm)
else:
task_id = self.sc.runBuild()
if task_id == -1:
print "Unable to initiate task"
continue
task_ids[branch] = task_id
if scratch:
print "Scratch build %s initiated" % (BUILDURL % task_id)
else:
print "Build %s initiated" % (BUILDURL % task_id)
return task_ids
def _waitForTasks(self, task_ids):
thread_list = {}
for branch in task_ids:
task_id = task_ids[branch]
print "Watching %s branch, %s" % (branch, BUILDURL % task_id)
if self.dry == False:
thread_list[branch] = WatchTaskThread(task_id)
thread_list[branch].start()
if self.dry == False:
for branch in task_ids:
thread_list[branch].join()
err = thread_list[branch].getError()
if err != "":
print err
def _checkTasks(self, task_ids):
all_done = True
print "Checking finished tasks..."
thread_list = {}
if self.dry == False:
for branch in task_ids:
task_id = task_ids[branch]
thread_list[branch] = WaitTaskThread(task_id)
thread_list[branch].start()
for branch in task_ids:
thread_list[branch].join()
for branch in task_ids:
if self.dry == True:
print "%s: closed" % branch
continue
if thread_list[branch].getState():
print "%s: closed" % branch
else:
all_done = False
print "%s: failed" % branch
return all_done
def scratchBuildBranches(self, branches):
# init [scratch] builds
task_ids = self._buildBranches(branches)
print ""
# wait for builds
self._waitForTasks(task_ids)
print ""
# check out builds
return self._checkTasks(task_ids)
def buildBranches(self, branches):
# init [scratch] builds
task_ids = self._buildBranches(branches, scratch=False)
print ""
# wait for builds
self._waitForTasks(task_ids)
print ""
# check out builds
return self._checkTasks(task_ids)
def pullBranches(self, branches):
print "Pulling from branches: %s" % ", ".join(branches)
all_done = True
for branch in branches:
print "Branch %s" % branch
so, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
print "Unable to switch to %s branch" % branch
all_done = False
continue
err = self.sc.pullBranch(branch)
if err != "":
print "%s: %s" % (branch, err)
all_done = False
return all_done
def pushBranches(self, branches):
print "Pushing to branches: %s" % ",".join(branches)
all_done = True
for branch in branches:
print "Branch %s" % branch
so, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
print "Unable to switch to %s branch" % branch
all_done = False
continue
err = self.sc.pushBranch(branch)
if err != "":
print "%s: %s" % (branch, err)
all_done = False
return all_done
def updateBranches(self, branches):
print "Updating branches: %s" % ",".join(branches)
all_done = True
for branch in branches:
print "Branch %s" % branch
so, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
print "Unable to switch to %s branch" % branch
all_done = False
continue
err = self.sc.updateBranch(branch)
if err != "":
print "%s: %s" % (branch, err)
all_done = False
return all_done
def overrideBuilds(self, branches, name):
print "Overriding builds for branches: %s" % ",".join(branches)
all_done = True
for branch in branches:
print "Branch %s" % branch
print "Overriding..."
err = self.sc.overrideBuild(branch, name)
if err != "":
print "%s: %s" % (branch, err)
all_done = False
return all_done
def waitForOverrides(self, branches, name):
		print "Waiting for overridden builds for branches: %s" % ",".join(branches)
all_done = True
for branch in branches:
print "Branch %s" % branch
print "Waiting..."
err = self.sc.waitForOverrideBuild(branch, name)
if err != "":
print "%s: %s" % (branch, err)
all_done = False
return all_done
def cherryPickMaster(self, branches, verbose=True, start_commit="", depth=20):
err = []
gcp_commits = ["master"]
if start_commit != "":
if verbose:
print "Switching to master branch"
_, _, rc = self.llc.runFedpkgSwitchBranch('master')
if rc != 0:
err.append("Unable to switch to master branch")
return err
if verbose:
print "Searching for %s commit in the last %s commits" % (start_commit, depth)
e, commits = self.sc.getGitCommits(depth)
if e != "":
err.append(e)
return err
try:
index = commits.index(start_commit)
gcp_commits = commits[:index + 1]
gcp_commits = gcp_commits[::-1]
except ValueError:
err.append("Commit %s not found in the last %s commits" % (start_commit, depth))
return err
if verbose:
print "Commits found:"
for commit in gcp_commits:
print commit
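		# Assumed completion (sketch): the original method stops after listing
		# the commits.  Replay the collected commits onto every requested
		# branch, mirroring the error handling of mergeMaster() below.
		for branch in branches:
			if branch == "master":
				continue
			_, _, rc = self.llc.runFedpkgSwitchBranch(branch)
			if rc != 0:
				err.append("Unable to switch to %s branch" % branch)
				err.append("Skipping %s branch" % branch)
				if verbose:
					print "\n".join(err)
				continue
			if verbose:
				print "Switched to %s branch" % branch
			for commit in gcp_commits:
				_, se, rc = self.llc.runGitCherryPick(commit)
				if rc != 0:
					err.append("%s: unable to cherry-pick %s: %s" % (branch, commit, se))
					if verbose:
						print err[-1]
					break
		return err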
def mergeMaster(self, branches, verbose=True):
print "Merging branches: %s" % ",".join(branches)
err = []
for branch in branches:
if branch == "master":
continue
_, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
err.append("Unable to switch to %s branch" % branch)
err.append("Skipping %s branch" % branch)
if verbose:
print "\n".join(err)
continue
if verbose:
print "Switched to %s branch" % branch
se = self.sc.mergeMaster()
if se != "":
err.append("%s: unable to git merge master: %s" % (branch, se))
if verbose:
print err[-1]
return err
return err
def resetBranchesToOrigin(self, branches):
for branch in branches:
_, _, rc = self.llc.runFedpkgSwitchBranch(branch)
if rc != 0:
print "Warning: unable to switch to %s branch" % branch
print "Skipping %s branch" % branch
continue
print "Switched to %s branch" % branch
so, se, rc = self.llc.runGitReset(branch)
STEP_CLONE_REPO=1
STEP_DOWNLOAD_SRPM=2
STEP_IMPORT_SRPM=3
STEP_HAS_RESOLVES=4
STEP_CLONE_TO_BRANCHES=5
STEP_SCRATCH_BUILD=6
STEP_PUSH=7
STEP_BUILD=8
STEP_UPDATE=9
STEP_OVERRIDE=10
STEP_END=10
class PhaseMethods:
def __init__(self, dry=False, debug=False):
self.phase = STEP_END
self.mc = MultiCommand(dry=dry, debug=debug)
self.branches = Config().getBranches()
def setBranches(self, branches):
self.branches = branches
def startWithScratchBuild(self):
self.phase = STEP_SCRATCH_BUILD
def startWithPush(self):
self.phase = STEP_PUSH
def startWithBuild(self):
self.phase = STEP_BUILD
def startWithUpdate(self):
self.phase = STEP_UPDATE
def runPhase(self, phase):
if phase == STEP_SCRATCH_BUILD:
return self.mc.scratchBuildBranches(self.branches)
if phase == STEP_PUSH:
return self.mc.pushBranches(self.branches)
if phase == STEP_BUILD:
return self.mc.buildBranches(self.branches)
if phase == STEP_UPDATE:
branches = Config().getUpdates()
branches = list(set(branches) & set(self.branches))
return self.mc.updateBranches(branches)
return 1
def getPhaseName(self, phase):
if phase == STEP_SCRATCH_BUILD:
return "Scratch build phase"
if phase == STEP_PUSH:
return "Push phase"
if phase == STEP_BUILD:
return "Build phase"
if phase == STEP_UPDATE:
return "Update phase"
return ""
def run(self):
for i in range(1, STEP_END):
if i < self.phase:
continue
phase_name = self.getPhaseName(i)
if phase_name == "":
print "Phase %s unknown" % i
break
print 60*"#"
sl = len(phase_name)
print ((60-sl)/2)*" " + phase_name
print 60*"#"
print ""
if not self.runPhase(i):
break
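# --- Illustrative usage sketch (editor addition, not part of the original script) ---
# Assuming Config() provides the branch list and the fedpkg/bodhi environment is
# set up, a run that skips the earlier import steps and starts at the scratch
# build phase could look like this (branch names are made-up examples):
#
#   pm = PhaseMethods(dry=True, debug=True)
#   pm.setBranches(["f24", "f25"])
#   pm.startWithScratchBuild()
#   pm.run()
#
# run() then executes the remaining phases in order and stops at the first
# phase whose runPhase() call reports failure.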
| 0.036892 |
# -*- coding: utf-8 -*-
"""
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
date: Mon Jan 19 12:32:47 2015
@author: daniel
"""
class GameState:
def __init__(self):
self.start_time = 0
self.is_won = False
self.is_game_over = False
self.click_count = 0
self.quit = False
self.is_started = False
self.is_restart = False
def __str__(self):
return """
Start time : {0}
Is won? : {1}
Is game over?: {2}
Click count : {3}
Time to quit?: {4}
Started? : {5}""".format(self.start_time, self.is_won,
self.is_game_over, self.click_count,
self.quit, self.is_started)
def start(self, time):
self.start_time = time
self.is_started = True
def get_current_time(self, time):
return (time - self.start_time) // 1000
def left_click(self):
self.click_count += 1
def right_click(self, time):
pass
def game_over(self):
self.is_game_over = True
def win(self):
self.is_won = True
def restart(self, time):
self.start_time = time
self.is_game_over = False
self.is_won = False
self.is_started = False
self.click_count = 0
self.is_restart = True
| 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import json
import copy
import tempfile
import shutil
import zipfile
import six
import requests
import datapackage.schema
from .resource import Resource
from .exceptions import (
DataPackageException,
)
class DataPackage(object):
'''Class for loading, validating and working with a Data Package.
Args:
metadata (dict, str or file-like object, optional): The contents of the
`datapackage.json` file. It can be a ``dict`` with its contents,
a ``string`` with the local path for the file or its URL, or a
file-like object. It also can point to a `ZIP` file with one and
only one `datapackage.json` (it can be in a subfolder). If
you're passing a ``dict``, it's a good practice to also set the
``default_base_path`` parameter to the absolute `datapackage.json`
path.
schema (dict or str, optional): The schema to be used to validate this
data package. It can be a ``dict`` with the schema's contents or a
``str``. The string can contain the schema's ID if it's in the
registry, a local path, or an URL.
default_base_path (str, optional): The default path to be used to load
resources located on the local disk that don't define a base path
themselves. This will usually be the path for the
`datapackage.json` file. If the :data:`metadata` parameter was the
path to the `datapackage.json`, this will automatically be set to
its base path.
Raises:
DataPackageException: If the :data:`metadata` couldn't be loaded or was
invalid.
SchemaError: If the :data:`schema` couldn't be loaded or was invalid.
RegistryError: If there was some problem loading the :data:`schema`
from the registry.
'''
def __init__(self, metadata=None, schema='base', default_base_path=None):
metadata = self._extract_zip_if_possible(metadata)
self._metadata = self._load_metadata(metadata)
self._schema = self._load_schema(schema)
self._base_path = self._get_base_path(metadata, default_base_path)
self._resources = self._load_resources(self.metadata,
self.base_path)
def __del__(self):
self._remove_tempdir_if_exists()
@property
def metadata(self):
'''dict: The metadata of this data package. Its attributes can be
changed.'''
return self._metadata
@property
def schema(self):
''':class:`.Schema`: This data package's schema.
Check https://github.com/okfn/datapackage-validate-py for documentation
on its attributes.
'''
return self._schema
@property
def base_path(self):
'''str: The base path of this Data Package (can be None).'''
return self._base_path
@property
def resources(self):
'''The resources defined in this data package (can be empty).
To add or remove resources, alter the `resources` attribute of the
:data:`metadata`.
:returns: The resources.
:rtype: tuple of :class:`.Resource`
'''
self._resources = self._update_resources(self._resources,
self.metadata,
self.base_path)
return self._resources
@property
def attributes(self):
'''tuple: The union of the attributes defined in the schema and the
data package (can be empty).'''
attributes = set(self.to_dict().keys())
try:
attributes.update(self.schema.properties.keys())
except AttributeError:
pass
return tuple(attributes)
@property
def required_attributes(self):
'''tuple: The schema's required attributed (can be empty).'''
required = ()
try:
if self.schema.required is not None:
required = tuple(self.schema.required)
except AttributeError:
pass
return required
def to_dict(self):
'''dict: Convert this Data Package to dict.'''
return copy.deepcopy(self.metadata)
def to_json(self):
'''str: Convert this Data Package to a JSON string.'''
return json.dumps(self.metadata)
def safe(self):
'''bool: Return if it's safe to load this datapackage's resources.
A Data Package is safe if it has no resources, or if all of its
resources are either:
* Inline;
* Remote;
* Local relative to the Data Package's base path.
Even though we don't check the remote resources' URLs, keep in mind
that they can be an attack vector as well. For example, a malicious
user may set a resource URL to an address only accessible by the
machine that's parsing the datapackage. That might be a problem or not,
depending on your specific usage.
'''
local_resources = [resource for resource in self.resources
if resource.local_data_path]
if not self.base_path:
return len(local_resources) == 0
else:
for resource in local_resources:
if not resource.local_data_path.startswith(self.base_path):
return False
return True
def save(self, file_or_path):
'''Validates and saves this Data Package contents into a zip file.
It creates a zip file into ``file_or_path`` with the contents of this
Data Package and its resources. Every resource which content lives in
the local filesystem will be copied to the zip file. Consider the
following Data Package descriptor::
{
"name": "gdp",
"resources": [
{"name": "local", "format": "CSV", "path": "data.csv"},
{"name": "inline", "data": [4, 8, 15, 16, 23, 42]},
{"name": "remote", "url": "http://someplace.com/data.csv"}
]
}
The final structure of the zip file will be::
./datapackage.json
./data/local.csv
With the contents of `datapackage.json` being the same as returned by
:func:`to_json`.
The resources' file names are generated based on their `name` and
`format` fields if they exist. If the resource has no `name`, it'll be
used `resource-X`, where `X` is the index of the resource in the
`resources` list (starting at zero). If the resource has `format`,
it'll be lowercased and appended to the `name`, becoming
"`name.format`".
Args:
file_or_path (string or file-like object): The file path or a
file-like object where the contents of this Data Package will
be saved into.
Raises:
ValidationError: If the Data Package is invalid.
DataPackageException: If there was some error writing the package.
'''
self.validate()
def arcname(resource):
basename = resource.metadata.get('name')
resource_format = resource.metadata.get('format')
if not basename:
index = self.resources.index(resource)
basename = 'resource-{index}'.format(index=index)
if resource_format:
basename = '.'.join([basename, resource_format.lower()])
return os.path.join('data', basename)
try:
with zipfile.ZipFile(file_or_path, 'w') as z:
metadata = json.loads(self.to_json())
for i, resource in enumerate(self.resources):
path = resource.local_data_path
if path:
path_inside_dp = arcname(resource)
z.write(path, path_inside_dp)
metadata['resources'][i]['path'] = path_inside_dp
z.writestr('datapackage.json', json.dumps(metadata))
except (IOError,
zipfile.BadZipfile,
zipfile.LargeZipFile) as e:
six.raise_from(DataPackageException(e), e)
def validate(self):
'''Validate this Data Package.
Raises:
ValidationError: If the Data Package is invalid.
'''
self.schema.validate(self.to_dict())
def iter_errors(self):
'''Lazily yields each ValidationError for the received data dict.
Returns:
iter: ValidationError for each error in the data.
'''
return self.schema.iter_errors(self.to_dict())
def _extract_zip_if_possible(self, metadata):
'''str: Path to the extracted datapackage.json if metadata points to
ZIP, or the unaltered metadata otherwise.'''
result = metadata
try:
if isinstance(metadata, six.string_types):
res = requests.get(metadata)
res.raise_for_status()
result = res.content
except (IOError,
ValueError,
requests.exceptions.RequestException):
pass
try:
the_zip = result
if isinstance(the_zip, bytes):
try:
os.path.isfile(the_zip)
except (TypeError, ValueError):
# the_zip contains the zip file contents
the_zip = io.BytesIO(the_zip)
if zipfile.is_zipfile(the_zip):
with zipfile.ZipFile(the_zip, 'r') as z:
self._validate_zip(z)
descriptor_path = [f for f in z.namelist()
if f.endswith('datapackage.json')][0]
self._tempdir = tempfile.mkdtemp('-datapackage')
z.extractall(self._tempdir)
result = os.path.join(self._tempdir, descriptor_path)
else:
result = metadata
except (TypeError,
zipfile.BadZipfile):
pass
if hasattr(metadata, 'seek'):
# Rewind metadata if it's a file, as we read it for testing if it's
# a zip file
metadata.seek(0)
return result
def _validate_zip(self, the_zip):
datapackage_jsons = [f for f in the_zip.namelist()
if f.endswith('datapackage.json')]
if len(datapackage_jsons) != 1:
msg = 'DataPackage must have only one "datapackage.json" (had {n})'
raise DataPackageException(msg.format(n=len(datapackage_jsons)))
def _load_metadata(self, metadata):
the_metadata = metadata
if the_metadata is None:
the_metadata = {}
if isinstance(the_metadata, six.string_types):
try:
if os.path.isfile(the_metadata):
with open(the_metadata, 'r') as f:
the_metadata = json.load(f)
else:
req = requests.get(the_metadata)
req.raise_for_status()
the_metadata = req.json()
except (IOError,
ValueError,
requests.exceptions.RequestException) as e:
msg = 'Unable to load JSON at \'{0}\''.format(metadata)
six.raise_from(DataPackageException(msg), e)
if hasattr(the_metadata, 'read'):
try:
the_metadata = json.load(the_metadata)
except ValueError as e:
six.raise_from(DataPackageException(str(e)), e)
if not isinstance(the_metadata, dict):
msg = 'Data must be a \'dict\', but was a \'{0}\''
raise DataPackageException(msg.format(type(the_metadata).__name__))
return the_metadata
def _load_schema(self, schema):
return datapackage.schema.Schema(schema)
def _get_base_path(self, metadata, default_base_path):
base_path = default_base_path
if isinstance(metadata, six.string_types):
if os.path.exists(metadata):
base_path = os.path.dirname(os.path.abspath(metadata))
else:
# suppose metadata is a URL
base_path = os.path.dirname(metadata)
return base_path
def _load_resources(self, metadata, base_path):
return self._update_resources((), metadata, base_path)
def _update_resources(self, current_resources, metadata, base_path):
resources_dicts = metadata.get('resources')
new_resources = []
if resources_dicts is not None:
for resource_dict in resources_dicts:
resource = [res for res in current_resources
if res.metadata == resource_dict]
if not resource:
resource = [Resource.load(resource_dict, base_path)]
new_resources.append(resource[0])
return tuple(new_resources)
def _remove_tempdir_if_exists(self):
if hasattr(self, '_tempdir') and os.path.exists(self._tempdir):
shutil.rmtree(self._tempdir, ignore_errors=True)
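# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# The descriptor below is a made-up example. save() produces a zip holding
# datapackage.json plus a data/ entry for every resource whose content lives on
# the local filesystem, as documented above:
#
#   dp = DataPackage({
#       'name': 'example',
#       'resources': [{'name': 'inline', 'data': [4, 8, 15, 16, 23, 42]}],
#   })
#   dp.validate()           # raises ValidationError if the descriptor is invalid
#   dp.save('example.zip')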
| 0 |
# Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import random
from veles.compatibility import pep487
from veles.compatibility.int_bytes import int_to_bytes
class NodeID(pep487.NewObject):
WIDTH = 24
_NULL_VAL = b'\x00'*WIDTH
_ROOT_VAL = b'\xff'*WIDTH
_rand = random.SystemRandom()
def __init__(self, value=None):
if value is None:
value = int_to_bytes(
self._rand.getrandbits(192), self.WIDTH, 'little')
if isinstance(value, bytearray):
value = bytes(value)
if not isinstance(value, bytes):
raise TypeError('wrong type provided')
if len(value) != self.WIDTH or value == self._NULL_VAL:
raise ValueError('value is not valid id')
self._bytes = value
@staticmethod
def from_hex(value):
return NodeID(binascii.a2b_hex(value))
@property
def bytes(self):
return self._bytes
def __str__(self):
if self == self.root_id:
return 'root'
return binascii.b2a_hex(self.bytes).decode('ascii')
def __repr__(self):
if self == self.root_id:
return 'NodeID.root_id'
return 'NodeID.from_hex("{}")'.format(str(self))
def __eq__(self, other):
if isinstance(other, NodeID):
return self.bytes == other.bytes
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.bytes)
NodeID.root_id = NodeID(NodeID._ROOT_VAL)
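# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A NodeID constructed without a value draws 24 random bytes; str() yields its
# hex form ('root' for the sentinel) and from_hex() round-trips it:
#
#   nid = NodeID()
#   assert NodeID.from_hex(str(nid)) == nid   # holds unless nid is the root sentinel
#   assert str(NodeID.root_id) == 'root'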
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# client.py - i2p clients share what they know to a centralized server.
# Author: Chris Barry <chris@barry.im>
# License: This is free and unencumbered software released into the public domain.
import argparse
import i2py.netdb
import i2py.control
import i2py.control.pyjsonrpc
import collections
import os
import random
import pyasn
import time
import pprint
asndb = ''
routers = []
asn = collections.defaultdict(int)
VERSION = 1
# Aggregates a bunch of data
def print_entry(ent):
n = ent.dict()
country = '??'
ipv6 = False
firewalled = False
for a in n['addrs']:
if a.location and a.location.country:
country = a.location.country
ipv6 = 1 if ':' in a.location.ip else 0
firewalled = 1 if a.firewalled else 0
break
#for a in n['addrs']:
for a in n['addrs']:
try:
asn[asndb.lookup(a.location.ip)[0]] += 1
except:
# This generally happens when a node is firewalled.
continue
routers.append({
'public_key' : n['pubkey'],
'sign_key' : n['cert']['signature_type'],
'crypto_key' : n['cert']['crypto_type'],
'version' : n['options']['router.version'],
'caps' : n['options']['caps'],
'country' : country,
'ipv6' : ipv6,
'firewalled' : firewalled,
})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cron', help='prints cron entry', action='store_true')
parser.add_argument('-d', '--debug', help='prints request json instead of sending', action='store_true')
parser.add_argument('-i', '--i2p-directory', help='I2P home',type=str, default=os.path.join(os.environ['HOME'],'.i2p','netDb'))
parser.add_argument('-s', '--server', help='where to send data',type=str, default='tuuql5avhexhn7oq4lhyfythxejgk4qpavxvtniu3u3hwfwkogmq.b32.i2p')
parser.add_argument('-p', '--port', help='where to send data',type=int, default='80')
parser.add_argument('-t', '--token', help='token to prove yourself',type=str, default='')
args = parser.parse_args()
if args.cron:
# Assumes you are running the command from client.
print('{} * * * * http_proxy=\'http://127.0.0.1:4444\' python {}/client.py --token $TOKEN'.format(random.randrange(0,55),os.getcwd()))
raise SystemExit(1)
if not args.token:
print('Use a token. See --help for usage.')
raise SystemExit(1)
rpc = i2py.control.pyjsonrpc.HttpClient(
url = ''.join(['http://',args.server,':',str(args.port)]),
gzipped = True
)
asndb = pyasn.pyasn('ipasn_20150224.dat')
# Local router stuff
try:
a = i2py.control.I2PController()
except:
print('I2PControl not installed, or router is down.')
raise SystemExit(1)
ri_vals = a.get_router_info()
this_router = {
'activepeers' : ri_vals['i2p.router.netdb.activepeers'],
'fastpeers' : ri_vals['i2p.router.netdb.fastpeers'],
'tunnelsparticipating' : ri_vals['i2p.router.net.tunnels.participating'],
'decryptFail' : a.get_rate(stat='crypto.garlic.decryptFail', period=3600),
# TODO: This is being all weird.
#'peer.failedLookupRate' : a.get_rate(stat='peer.failedLookupRate', period=3600),
'failedLookupRate' : 0,
'streamtrend' : a.get_rate(stat='stream.trend', period=3600),
'windowSizeAtCongestion' : a.get_rate(stat='stream.con.windowSizeAtCongestion', period=3600),
}
# NetDB Stuff
i2py.netdb.inspect(hook=print_entry,netdb_dir=args.i2p_directory)
if args.debug:
# To check the approximate size of a request, run this. No network call is sent. Results in bytes.
# $ python client.py | gzip --stdout | wc --bytes
print(i2py.control.pyjsonrpc.create_request_json('collect', token=args.token, netdb=routers, local=this_router, version=VERSION))
else:
# Try submitting 5 times, delay 10s if it fails.
for i in range(5):
try:
rpc.collect(token=args.token, netdb=routers, local=this_router, asn=asn, version=VERSION)
# only reaches if submits
break
except i2py.control.pyjsonrpc.JsonRpcError, err:
print('Error code {}: {} -- {}'.format(err.code, err.message, err.data))
break
except:
print('retrying')
time.sleep(10)
else:
print('Could not submit due to other error.')
| 0.035595 |
import datetime
from django.contrib.auth.models import User
from django.db import models
from tidings.models import NotificationsMixin
from kitsune import kbforums
from kitsune.sumo.helpers import urlparams, wiki_to_html
from kitsune.sumo.models import ModelBase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.models import Document
def _last_post_from(posts, exclude_post=None):
"""Return the most recent post in the given set, excluding the given post.
If there are none, return None.
"""
if exclude_post:
posts = posts.exclude(id=exclude_post.id)
posts = posts.order_by('-created')
try:
return posts[0]
except IndexError:
return None
class ThreadLockedError(Exception):
"""Trying to create a post in a locked thread."""
class Thread(NotificationsMixin, ModelBase):
title = models.CharField(max_length=255)
document = models.ForeignKey(Document)
created = models.DateTimeField(default=datetime.datetime.now,
db_index=True)
creator = models.ForeignKey(User, related_name='wiki_thread_set')
last_post = models.ForeignKey('Post', related_name='last_post_in',
null=True)
replies = models.IntegerField(default=0)
is_locked = models.BooleanField(default=False)
is_sticky = models.BooleanField(default=False, db_index=True)
class Meta:
ordering = ['-is_sticky', '-last_post__created']
permissions = (
('lock_thread', 'Can lock KB threads'),
('sticky_thread', 'Can sticky KB threads'),
)
@property
def last_page(self):
"""Returns the page number for the last post."""
return self.replies / kbforums.POSTS_PER_PAGE + 1
def __unicode__(self):
return self.title
def new_post(self, creator, content):
"""Create a new post, if the thread is unlocked."""
if self.is_locked:
raise ThreadLockedError
return self.post_set.create(creator=creator, content=content)
def get_absolute_url(self):
return reverse('wiki.discuss.posts',
locale=self.document.locale,
kwargs={'document_slug': self.document.slug,
'thread_id': self.id})
def update_last_post(self, exclude_post=None):
"""Set my last post to the newest, excluding the given post."""
last = _last_post_from(self.post_set, exclude_post=exclude_post)
self.last_post = last
# If self.last_post is None, and this was called from Post.delete,
# then Post.delete will erase the thread, as well.
class Post(ModelBase):
thread = models.ForeignKey(Thread)
content = models.TextField()
creator = models.ForeignKey(User, related_name='wiki_post_set')
created = models.DateTimeField(default=datetime.datetime.now,
db_index=True)
updated = models.DateTimeField(default=datetime.datetime.now,
db_index=True)
updated_by = models.ForeignKey(User,
related_name='wiki_post_last_updated_by',
null=True)
class Meta:
ordering = ['created']
def __unicode__(self):
return self.content[:50]
def save(self, *args, **kwargs):
"""
Override save method to update parent thread info and take care of
created and updated.
"""
new = self.id is None
now = datetime.datetime.now()
if new:
self.created = now
self.updated = now
super(Post, self).save(*args, **kwargs)
if new:
self.thread.replies = self.thread.post_set.count() - 1
self.thread.last_post = self
self.thread.save()
def delete(self, *args, **kwargs):
"""Override delete method to update parent thread info."""
thread = Thread.objects.get(pk=self.thread.id)
if thread.last_post_id and thread.last_post_id == self.id:
thread.update_last_post(exclude_post=self)
thread.replies = thread.post_set.count() - 2
thread.save()
super(Post, self).delete(*args, **kwargs)
# If I was the last post in the thread, delete the thread.
if thread.last_post is None:
thread.delete()
@property
def page(self):
"""Get the page of the thread on which this post is found."""
t = self.thread
earlier = t.post_set.filter(created__lte=self.created).count() - 1
if earlier < 1:
return 1
return earlier / kbforums.POSTS_PER_PAGE + 1
def get_absolute_url(self):
query = {}
if self.page > 1:
query['page'] = self.page
url_ = reverse('wiki.discuss.posts',
locale=self.thread.document.locale,
kwargs={'document_slug': self.thread.document.slug,
'thread_id': self.thread.id})
return urlparams(url_, hash='post-%s' % self.id, **query)
@property
def content_parsed(self):
return wiki_to_html(self.content)
| 0 |
import forge
from forge.models.packages import Package
from forge.models.groups import Group
class Add(object):
def __init__(self,json_args,session):
if type(json_args) != type({}):
raise TypeError("JSON Arg must be dict type")
if not all(k in json_args for k in ('name', 'group', 'distro')):
raise forge.ArgumentError()
self.name = json_args['name']
self.group = json_args['group']
self.distro = json_args['distro']
self.session = session
def call(self):
group = self.session.query(Group).filter(Group.name == self.group).filter(Group.distribution == self.distro).one()
if not group:
raise LookupError("No Group: %s - %s"%self.group,self.distro)
package = Package(self.name)
group.packages.append(package)
self.session.add(package)
self.session.commit()
return {'name':self.name,'group':self.group,'distro':self.distro}
| 0.026265 |
#
# Python wrapper for the CUDA NCS kernel.
#
# Hazen 08/19
#
import numpy
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
import warnings
import time
import pyCUDANCS
#
# CUDA setup.
#
kernel_code = pyCUDANCS.loadNCSKernel()
mod = SourceModule(kernel_code, **pyCUDANCS.src_module_args)
ncsReduceNoise = mod.get_function("ncsReduceNoise")
class NCSCUDAException(Exception):
pass
class NCSCUDA(object):
def __init__(self, strict = True, **kwds):
super().__init__(**kwds)
self.size = 16
self.strict = strict
def reduceNoise(self, images, alpha, verbose = False):
"""
Ideally you process lots of images at the same time for
optimal GPU utilization.
Note: Any zero or negative values in the image should be
set to a small positive value like 1.0.
images - A list of images to run NCS on (in units of e-).
alpha - NCS alpha term.
"""
s_size = self.size - 2
im0_shape = images[0].shape
# First, figure out how many sub-regions in total.
pad_image = numpy.pad(images[0], 1, 'edge')
pad_gamma = numpy.pad(self.gamma, 1, 'edge').astype(numpy.float32)
num_sr = 0
for i in range(0, pad_image.shape[0], s_size):
for j in range(0, pad_image.shape[1], s_size):
num_sr += 1
num_sr = num_sr * len(images)
if verbose:
print("Creating", num_sr, "sub-regions.")
# Now chop up the images into lots of sub-regions.
data_in = numpy.zeros((num_sr, self.size, self.size), dtype = numpy.float32)
gamma = numpy.zeros((num_sr, self.size, self.size), dtype = numpy.float32)
data_out = numpy.zeros((num_sr, self.size, self.size), dtype = numpy.float32)
iters = numpy.zeros(num_sr, dtype = numpy.int32)
status = numpy.zeros(num_sr, dtype = numpy.int32)
# These store where the sub-regions came from.
im_i = numpy.zeros(num_sr, dtype = numpy.int32)
im_bx = numpy.zeros(num_sr, dtype = numpy.int32)
im_ex = numpy.zeros(num_sr, dtype = numpy.int32)
im_by = numpy.zeros(num_sr, dtype = numpy.int32)
im_ey = numpy.zeros(num_sr, dtype = numpy.int32)
counter = 0
for h in range(len(images)):
if (images[h].shape[0] != im0_shape[0]) or (images[h].shape[1] != im0_shape[1]):
raise NCSCUDAException("All images must be the same size!")
pad_image = numpy.pad(images[h], 1, 'edge')
for i in range(0, pad_image.shape[0], s_size):
if ((i + self.size) > pad_image.shape[0]):
bx = pad_image.shape[0] - self.size
else:
bx = i
ex = bx + self.size
for j in range(0, pad_image.shape[1], s_size):
if ((j + self.size) > pad_image.shape[1]):
by = pad_image.shape[1] - self.size
else:
by = j
ey = by + self.size
data_in[counter,:,:] = pad_image[bx:ex,by:ey].astype(numpy.float32)
gamma[counter,:,:] = pad_gamma[bx:ex,by:ey]
im_i[counter] = h
im_bx[counter] = bx
im_ex[counter] = ex
im_by[counter] = by
im_ey[counter] = ey
counter += 1
assert (counter == num_sr)
assert (data_in.dtype == numpy.float32)
assert (gamma.dtype == numpy.float32)
# Run NCS noise reduction kernel on the sub-regions.
#
# FIXME: We could probably do a better job measuring the elapsed time.
#
start_time = time.time()
ncsReduceNoise(drv.In(data_in),
drv.In(gamma),
drv.In(self.otf_mask),
drv.Out(data_out),
drv.Out(iters),
drv.Out(status),
numpy.float32(alpha),
block = (16,1,1),
grid = (num_sr,1))
e_time = time.time() - start_time
if verbose:
print("Processed {0:d} sub-regions in {1:.6f} seconds.".format(num_sr, e_time))
# Check status.
failures = {}
if (numpy.count_nonzero(status != 0) > 0):
n_fails = numpy.count_nonzero(status != 0)
n_maxp = numpy.count_nonzero(status == -5)
if (n_maxp != n_fails):
warnings.warn("Noise reduction failed on {0:d} sub-regions.".format(n_fails))
# Count number of failures of each type.
for i in range(-5,0):
nf = numpy.count_nonzero(status == i)
if (nf != 0):
failures[i] = nf
# FIXME: This needs to be kept in sync with the OpenCL.
failure_types = {-1 : "Unstarted",
-2 : "Reached maximum iterations",
-3 : "Increasing gradient",
-4 : "Reached minimum step size",
-5 : "Reached machine precision"}
if verbose:
print("Minimum iterations: {0:d}".format(numpy.min(iters)))
print("Maximum iterations: {0:d}".format(numpy.max(iters)))
print("Median iterations: {0:.3f}".format(numpy.median(iters)))
if bool(failures):
for key in failures.keys():
print(failures[key], "failures of type '" + failure_types[key] + "'")
print()
# Assemble noise corrected images.
nc_images = []
cur_i = -1
cur_image = None
for i in range(num_sr):
if (cur_i != im_i[i]):
cur_i = im_i[i]
cur_image = numpy.zeros(im0_shape, dtype = numpy.float32)
nc_images.append(cur_image)
cur_image[im_bx[i]:im_ex[i]-2,im_by[i]:im_ey[i]-2] = data_out[i, 1:-1,1:-1]
return nc_images
def setGamma(self, gamma):
"""
The assumption is that this is the same for all the images.
gamma - CMOS variance (in units of e-).
"""
self.gamma = gamma.astype(numpy.float32)
def setOTFMask(self, otf_mask):
# Checks.
if self.strict:
if (otf_mask.shape[0] != otf_mask.shape[1]):
raise NCSCUDAException("OTF must be square!")
if (otf_mask.size != self.size*self.size):
raise NCSCUDAException("OTF size must match sub-region size!")
if not checkOTFMask(otf_mask):
raise NCSCUDAException("OTF does not have the expected symmetry!")
self.otf_mask = numpy.fft.fftshift(otf_mask).astype(numpy.float32)
def checkOTFMask(otf_mask):
"""
Verify that the OTF mask has the correct symmetries.
"""
otf_mask_fft = numpy.fft.ifft2(numpy.fft.fftshift(otf_mask))
if (numpy.max(numpy.imag(otf_mask_fft)) > 1.0e-12):
return False
else:
return True
def reduceNoise(images, gamma, otf_mask, alpha, strict = True, verbose = False):
"""
Run NCS on an image using OpenCL.
Note: All zero and negative values in the images should
be replaced with a small positive value like 1.0.
images - The image to run NCS on (in units of e-).
gamma - CMOS variance (in units of e-).
otf_mask - 16 x 16 array containing the OTF mask.
alpha - NCS alpha term.
"""
ncs = NCSCUDA()
ncs.setOTFMask(otf_mask)
ncs.setGamma(gamma)
return ncs.reduceNoise(images, alpha, verbose = verbose)
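# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Requires a CUDA-capable GPU. The names and values below are made-up examples:
# 'raw_frames' is a list of camera frames in e- whose non-positive pixels get
# clamped to 1.0, 'var_map' is the per-pixel CMOS variance in e-, and 'otf16'
# is a 16x16 OTF mask that passes checkOTFMask().
#
#   frames = [numpy.clip(f, 1.0, None) for f in raw_frames]
#   denoised = reduceNoise(frames, gamma=var_map, otf_mask=otf16,
#                          alpha=0.1, verbose=True)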
| 0.025493 |
from django.db import models
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
User = get_user_model()
HIRED_STATUS=(
('1', 'espera'),
('2', 'aceptado'),
('3', 'finalizado'),
)
# Create your models here.
class Auto(models.Model):
user = models.ForeignKey("auth.User",null=True)
modelo= models.CharField(max_length=264)
plazas= models.CharField(max_length=264)
aire_acondicionado= models.CharField(max_length=264)
tipo_transmision= models.CharField(max_length=264)
lugar_recogida= models.CharField(max_length=264)
precio= models.FloatField()
jornada= models.IntegerField()
is_alquiled= models.BooleanField()
def __str__(self):
return self.modelo
def get_absolute_url(self):
return reverse("auto:auto_list")
class Contrato(models.Model):
User_alquila=models.ForeignKey("auth.User",null=True,related_name="user_alquila")
User_contrata=models.ForeignKey("auth.User",null=True,related_name="user_contrata")
auto =models.ForeignKey(Auto,null=True,related_name="auto_contrata")
hired_status=models.CharField(max_length = 1, choices = HIRED_STATUS,
null = False, default = HIRED_STATUS[0][0])
#class Meta:
# unique_together = (('User_alquila', 'User_contrata','auto'),) | 0.046549 |
"""Find duplicate files inside a directory tree."""
from os import walk, remove, stat
from os.path import join as joinpath
from md5 import md5
def find_duplicates( rootdir ):
"""Find duplicate files in directory tree."""
filesizes = {}
# Build up dict with key as filesize and value is list of filenames.
for path, dirs, files in walk( rootdir ):
for filename in files:
filepath = joinpath( path, filename )
filesize = stat( filepath ).st_size
filesizes.setdefault( filesize, [] ).append( filepath )
unique = set()
duplicates = []
# We are only interested in lists with more than one entry.
for files in [ flist for flist in filesizes.values() if len(flist)>1 ]:
for filepath in files:
with open( filepath ) as openfile:
filehash = md5( openfile.read() ).hexdigest()
if filehash not in unique:
unique.add( filehash )
else:
duplicates.append( filepath )
return duplicates
if __name__ == '__main__':
from argparse import ArgumentParser
PARSER = ArgumentParser( description='Finds duplicate files.' )
PARSER.add_argument( 'root', metavar='R', help='Dir to search.' )
PARSER.add_argument( '-remove', action='store_true',
help='Delete duplicate files.' )
ARGS = PARSER.parse_args()
DUPS = find_duplicates( ARGS.root )
print '%d Duplicate files found.' % len(DUPS)
for f in sorted(DUPS):
if ARGS.remove == True:
remove( f )
print '\tDeleted '+ f
else:
print '\t'+ f
| 0.026618 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
def set_new_name(doc):
"""Sets the `name`` property for the document based on various rules.
1. If amened doc, set suffix.
3. If `autoname` method is declared, then call it.
4. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
2. If `name` is already defined, use that name
5. If no rule defined, use hash.
#### Note:
:param doc: Document to be named."""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif hasattr(doc, "autoname"):
doc.run_method("autoname")
elif autoname:
if autoname.startswith('field:'):
fieldname = autoname[6:]
doc.name = (doc.get(fieldname) or "").strip()
if not doc.name:
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
raise Exception, 'Name is required'
if autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif "#" in autoname:
doc.name = make_autoname(autoname)
elif autoname=='Prompt':
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via Prompt"))
if not doc.name:
doc.name = make_autoname('hash', doc.doctype)
doc.name = validate_name(doc.doctype, doc.name)
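# Illustrative note (editor addition): with the rules above, a DocType whose
# autoname is "field:title" takes doc.title as the name, one whose autoname is
# "naming_series:" uses the document's naming_series prefix, and an autoname of
# "PROJ-.#####" (a made-up prefix) is handed to make_autoname() to produce
# PROJ-00001, PROJ-00002, ... as the series advances.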
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+'.#####')
def make_autoname(key, doctype=''):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key=="hash":
return frappe.generate_hash(doctype, 10)
if not "#" in key:
key = key + ".#####"
elif not "." in key:
frappe.throw(_("Invalid naming series (. missing)") + (_(" for {0}").format(doctype) if doctype else ""))
n = ''
l = key.split('.')
series_set = False
today = now_datetime()
for e in l:
en = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
en = getseries(n, digits, doctype)
series_set = True
elif e=='YY':
en = today.strftime('%y')
elif e=='MM':
en = today.strftime('%m')
elif e=='DD':
en = today.strftime("%d")
elif e=='YYYY':
en = today.strftime('%Y')
else: en = e
n+=en
return n
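# Illustrative examples (editor addition): following the rules documented in
# make_autoname(), 'ABC.#####' yields ABC00001, ABC00002, ... as the tabSeries
# counter for that prefix advances, and date parts are filled from the current
# date, e.g. 'INV-.YY.-.MM.-.#####' (a made-up key) evaluated in January 2015
# would give INV-15-01-00001.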
def getseries(key, digits, doctype=''):
# series created ?
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name: return 'No Name Specified for %s' % doctype
if name.startswith('New '+doctype):
frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
if case=='Title Case': name = name.title()
if case=='UPPER CASE': name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name!="DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
return name
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(doc.amended_from.split('-')[:-1]) # except the last hyphen
doc.name = am_prefix + '-' + str(am_id)
return doc.name
def append_number_if_name_exists(doc):
if frappe.db.exists(doc.doctype, doc.name):
last = frappe.db.sql("""select name from `tab{}`
where name regexp '{}-[[:digit:]]+'
order by length(name) desc, name desc limit 1""".format(doc.doctype, doc.name))
if last:
count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
else:
count = "1"
doc.name = "{0}-{1}".format(doc.name, count)
def de_duplicate(doctype, name):
original_name = name
count = 0
while True:
if frappe.db.exists(doctype, name):
count += 1
name = "{0}-{1}".format(original_name, count)
else:
break
return name
| 0.030442 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
Addon module that allows creating a JavaScript function from a map
that generates rules.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from simplejson import dumps
except ImportError:
def dumps(*args):
raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.templates import Template
from werkzeug.routing import NumberConverter
_javascript_routing_template = Template(u'''\
<% if name_parts %>\
<% for idx in xrange(0, len(name_parts) - 1) %>\
if (typeof ${'.'.join(name_parts[:idx + 1])} === 'undefined') \
${'.'.join(name_parts[:idx + 1])} = {};
<% endfor %>\
${'.'.join(name_parts)} = <% endif %>\
(function (server_name, script_name, subdomain, url_scheme) {
var converters = ${', '.join(converters)};
var rules = $rules;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})''')
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you can use a name like 'obj.url_for').
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in rule._converters.iteritems():
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return _javascript_routing_template.render({
'name_parts': name and name.split('.') or [],
'rules': dumps(rules),
'converters': converters
})
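# Illustrative usage sketch (editor addition): the rule names below are made-up.
# generate_map() is normally paired with generate_adapter() (defined next) so
# the browser gets both the rules and a bound url_for() helper:
#
#   from werkzeug.routing import Map, Rule
#   url_map = Map([Rule('/downloads/<int:id>', endpoint='downloads/show')])
#   js = generate_map(url_map, name='url_map')
#   js += generate_adapter(url_map.bind('example.org'), name='url_for')
#
# In the page, url_for('downloads/show', {id: 42}) then builds the URL
# client-side.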
def generate_adapter(adapter, name='url_for', map_name='url_map'):
"""Generates the url building function for a map."""
values = {
u'server_name': dumps(adapter.server_name),
u'script_name': dumps(adapter.script_name),
u'subdomain': dumps(adapter.subdomain),
u'url_scheme': dumps(adapter.url_scheme),
u'name': name,
u'map_name': map_name
}
return u'''\
var %(name)s = %(map_name)s(
%(server_name)s,
%(script_name)s,
%(subdomain)s,
%(url_scheme)s
);''' % values
def js_to_url_function(converter):
"""Get the JavaScript converter function from a rule."""
if hasattr(converter, 'js_to_url_function'):
data = converter.js_to_url_function()
else:
for cls in getmro(type(converter)):
if cls in js_to_url_functions:
data = js_to_url_functions[cls](converter)
break
else:
return 'encodeURIComponent'
return '(function(value) { %s })' % data
def NumberConverter_js_to_url(conv):
if conv.fixed_digits:
return u'''\
var result = value.toString();
while (result.length < %s)
result = '0' + result;
return result;''' % conv.fixed_digits
return u'return value.toString();'
js_to_url_functions = {
NumberConverter: NumberConverter_js_to_url
}
| 0 |
import logging
import traceback
from datetime import date
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import connection, transaction
# NOTE: This import is just so _fire_task gets registered with celery.
import tidings.events # noqa
from celery import task
from multidb.pinning import pin_this_thread, unpin_this_thread
from django_statsd.clients import statsd
from zendesk import ZendeskError
from kitsune.kbadge.utils import get_or_create_badge
from kitsune.questions.config import ANSWERS_PER_PAGE
from kitsune.questions.marketplace import submit_ticket
from kitsune.search.es_utils import ES_EXCEPTIONS
from kitsune.search.tasks import index_task
from kitsune.sumo.decorators import timeit
log = logging.getLogger('k.task')
@task(rate_limit='1/s')
@timeit
def update_question_votes(question_id):
from kitsune.questions.models import Question
log.debug('Got a new QuestionVote for question_id=%s.' % question_id)
statsd.incr('questions.tasks.update')
# Pin to master db to avoid lag delay issues.
pin_this_thread()
try:
q = Question.objects.get(id=question_id)
q.sync_num_votes_past_week()
q.save(force_update=True)
except Question.DoesNotExist:
log.info('Question id=%s deleted before task.' % question_id)
unpin_this_thread()
@task(rate_limit='4/s')
@timeit
def update_question_vote_chunk(data):
"""Update num_votes_past_week for a number of questions."""
# First we recalculate num_votes_past_week in the db.
log.info('Calculating past week votes for %s questions.' % len(data))
ids = ','.join(map(str, data))
sql = """
UPDATE questions_question q
SET num_votes_past_week = (
SELECT COUNT(created)
FROM questions_questionvote qv
WHERE qv.question_id = q.id
AND qv.created >= DATE(SUBDATE(NOW(), 7))
)
WHERE q.id IN (%s);
""" % ids
cursor = connection.cursor()
cursor.execute(sql)
if not transaction.get_connection().in_atomic_block:
transaction.commit()
# Next we update our index with the changes we made directly in
# the db.
if data and settings.ES_LIVE_INDEXING:
# Get the data we just updated from the database.
sql = """
SELECT id, num_votes_past_week
FROM questions_question
WHERE id in (%s);
""" % ids
cursor = connection.cursor()
cursor.execute(sql)
# Since this returns (id, num_votes_past_week) tuples, we can
# convert that directly to a dict.
id_to_num = dict(cursor.fetchall())
try:
# Fetch all the documents we need to update.
from kitsune.questions.models import QuestionMappingType
from kitsune.search import es_utils
es_docs = es_utils.get_documents(QuestionMappingType, data)
# For each document, update the data and stick it back in the
# index.
for doc in es_docs:
# Note: Need to keep this in sync with
# Question.extract_document.
num = id_to_num[int(doc[u'id'])]
doc[u'question_num_votes_past_week'] = num
QuestionMappingType.index(doc, id_=doc['id'])
except ES_EXCEPTIONS:
# Something happened with ES, so let's push index updating
# into an index_task which retries when it fails because
# of ES issues.
index_task.delay(QuestionMappingType, id_to_num.keys())
@task(rate_limit='4/m')
@timeit
def update_answer_pages(question):
log.debug('Recalculating answer page numbers for question %s: %s' %
(question.pk, question.title))
i = 0
answers = question.answers.using('default').order_by('created')
for answer in answers.filter(is_spam=False):
answer.page = i / ANSWERS_PER_PAGE + 1
answer.save(no_notify=True)
i += 1
@task()
@timeit
def maybe_award_badge(badge_template, year, user):
"""Award the specific badge to the user if they've earned it."""
badge = get_or_create_badge(badge_template, year)
# If the user already has the badge, there is nothing else to do.
if badge.is_awarded_to(user):
return
# Count the number of replies tweeted in the current year.
from kitsune.questions.models import Answer
qs = Answer.objects.filter(
creator=user,
created__gte=date(year, 1, 1),
created__lt=date(year + 1, 1, 1))
# If the count is 30 or higher, award the badge.
if qs.count() >= 30:
badge.award_to(user)
return True
class PickleableZendeskError(Exception):
"""Zendesk error that captures information and can be pickled
This is like kitsune/search/tasks.py:IndexingTaskError and is
totally goofy.
"""
def __init__(self):
super(PickleableZendeskError, self).__init__(traceback.format_exc())
@task()
@timeit
def escalate_question(question_id):
"""Escalate a question to zendesk by submitting a ticket."""
from kitsune.questions.models import Question
question = Question.objects.get(id=question_id)
url = 'https://{domain}{url}'.format(
domain=Site.objects.get_current().domain,
url=question.get_absolute_url())
try:
submit_ticket(
email='support@mozilla.com',
category='Escalated',
subject=u'[Escalated] {title}'.format(title=question.title),
body=u'{url}\n\n{content}'.format(url=url,
content=question.content),
tags=[t.slug for t in question.tags.all()])
except ZendeskError:
# This is unpickleable, so we need to unwrap it a bit
raise PickleableZendeskError()
| 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, nowdate, fmt_money
from frappe import msgprint, _
from frappe.model.document import Document
form_grid_templates = {
"journal_entries": "templates/form_grid/bank_reconciliation_grid.html"
}
class BankReconciliation(Document):
def get_payment_entries(self):
if not (self.bank_account and self.from_date and self.to_date):
msgprint("Bank Account, From Date and To Date are Mandatory")
return
condition = ""
if not self.include_reconciled_entries:
condition = "and (clearance_date is null or clearance_date='0000-00-00')"
journal_entries = frappe.db.sql("""
select
"Journal Entry" as payment_document, t1.name as payment_entry,
t1.cheque_no as cheque_number, t1.cheque_date,
t2.debit_in_account_currency as debit, t2.credit_in_account_currency as credit,
t1.posting_date, t2.against_account, t1.clearance_date, t2.account_currency
from
`tabJournal Entry` t1, `tabJournal Entry Account` t2
where
t2.parent = t1.name and t2.account = %s and t1.docstatus=1
and t1.posting_date >= %s and t1.posting_date <= %s
and ifnull(t1.is_opening, 'No') = 'No' {0}
order by t1.posting_date ASC, t1.name DESC
""".format(condition), (self.bank_account, self.from_date, self.to_date), as_dict=1)
payment_entries = frappe.db.sql("""
select
"Payment Entry" as payment_document, name as payment_entry,
reference_no as cheque_number, reference_date as cheque_date,
if(paid_from=%(account)s, paid_amount, "") as credit,
if(paid_from=%(account)s, "", received_amount) as debit,
posting_date, ifnull(party,if(paid_from=%(account)s,paid_to,paid_from)) as against_account, clearance_date,
if(paid_to=%(account)s, paid_to_account_currency, paid_from_account_currency) as account_currency
from `tabPayment Entry`
where
(paid_from=%(account)s or paid_to=%(account)s) and docstatus=1
and posting_date >= %(from)s and posting_date <= %(to)s {0}
order by
posting_date ASC, name DESC
""".format(condition),
{"account":self.bank_account, "from":self.from_date, "to":self.to_date}, as_dict=1)
entries = sorted(list(payment_entries)+list(journal_entries),
key=lambda k: k['posting_date'] or getdate(nowdate()))
self.set('payment_entries', [])
self.total_amount = 0.0
for d in entries:
row = self.append('payment_entries', {})
d.amount = fmt_money(d.debit if d.debit else d.credit, 2, d.account_currency) + " " + (_("Dr") if d.debit else _("Cr"))
d.pop("credit")
d.pop("debit")
d.pop("account_currency")
row.update(d)
self.total_amount += flt(d.amount)
def update_clearance_date(self):
clearance_date_updated = False
for d in self.get('payment_entries'):
if d.clearance_date:
if not d.payment_document:
frappe.throw(_("Row #{0}: Payment document is required to complete the trasaction"))
if d.cheque_date and getdate(d.clearance_date) < getdate(d.cheque_date):
frappe.throw(_("Row #{0}: Clearance date {1} cannot be before Cheque Date {2}")
.format(d.idx, d.clearance_date, d.cheque_date))
if d.clearance_date or self.include_reconciled_entries:
if not d.clearance_date:
d.clearance_date = None
frappe.db.set_value(d.payment_document, d.payment_entry, "clearance_date", d.clearance_date)
frappe.db.sql("""update `tab{0}` set clearance_date = %s, modified = %s
where name=%s""".format(d.payment_document),
(d.clearance_date, nowdate(), d.payment_entry))
clearance_date_updated = True
if clearance_date_updated:
self.get_payment_entries()
msgprint(_("Clearance Date updated"))
else:
msgprint(_("Clearance Date not mentioned"))
| 0.028844 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import fnmatch
import IECore
import Gaffer
import GafferUI
## This class forms the base class for all uis for nodes.
class NodeUI( GafferUI.Widget ) :
def __init__( self, node, topLevelWidget, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__node = node
self.__readOnly = False
## Returns the node the ui represents.
def node( self ) :
return self.__node
## Should be implemented by derived classes to return
# a PlugValueWidget they are using to represent the
# specified plug. Since many UIs are built lazily on
# demand, this may return None unless lazy=False is
# passed to force creation of parts of the UI that
# otherwise are not yet visible to the user.
def plugValueWidget( self, plug, lazy=True ) :
return None
## Can be called to make the UI read only - must
# be implemented appropriately by derived classes.
def setReadOnly( self, readOnly ) :
assert( isinstance( readOnly, bool ) )
self.__readOnly = readOnly
def getReadOnly( self ) :
return self.__readOnly
## Creates a NodeUI instance for the specified node.
@classmethod
def create( cls, node ) :
nodeHierarchy = IECore.RunTimeTyped.baseTypeIds( node.typeId() )
for typeId in [ node.typeId() ] + nodeHierarchy :
nodeUI = cls.__nodeUIs.get( typeId, None )
if nodeUI is not None :
return nodeUI( node )
assert( 0 )
__nodeUIs = {}
## Registers a subclass of NodeUI to be used with a specific node type.
@classmethod
def registerNodeUI( cls, nodeClassOrTypeId, nodeUICreator ) :
assert( callable( nodeUICreator ) )
if isinstance( nodeClassOrTypeId, IECore.TypeId ) :
nodeTypeId = nodeClassOrTypeId
else :
nodeTypeId = nodeClassOrTypeId.staticTypeId()
cls.__nodeUIs[nodeTypeId] = nodeUICreator
GafferUI.Nodule.registerNodule( Gaffer.Node, "user", lambda plug : None )
Gaffer.Metadata.registerPlugValue( Gaffer.Node, "user", "nodeUI:section", "User" )
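# Illustrative usage sketch (editor addition): a module providing a UI for a
# custom node type registers a creator against the node class; NodeUI.create()
# then walks the node's type hierarchy and instantiates the most specific
# registered UI. MyNode and MyNodeUI are made-up names, not part of Gaffer:
#
#   GafferUI.NodeUI.registerNodeUI( MyNode, lambda node : MyNodeUI( node ) )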
| 0.027858 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from six.moves import BaseHTTPServer
from thrift.server import TServer
from thrift.transport import TTransport
class ResponseException(Exception):
"""Allows handlers to override the HTTP response
Normally, THttpServer always sends a 200 response. If a handler wants
to override this behavior (e.g., to simulate a misconfigured or
overloaded web server during testing), it can raise a ResponseException.
The function passed to the constructor will be called with the
RequestHandler as its only argument.
"""
def __init__(self, handler):
self.handler = handler
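# Hedged usage sketch (not part of the original module): a processor handler
# can raise ResponseException to take over the HTTP reply instead of the
# default 200 response.  The 503 status and the helper names below are
# illustrative assumptions only.
def _example_raise_service_unavailable():
    def overloaded(request_handler):
        request_handler.send_response(503)
        request_handler.end_headers()
    raise ResponseException(overloaded)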
class THttpServer(TServer.TServer):
"""A simple HTTP-based Thrift server
This class is not very performant, but it is useful (for example) for
acting as a mock version of an Apache-based PHP Thrift endpoint.
"""
def __init__(self,
processor,
server_address,
inputProtocolFactory,
outputProtocolFactory=None,
server_class=BaseHTTPServer.HTTPServer):
"""Set up protocol factories and HTTP server.
See BaseHTTPServer for server_address.
See TServer for protocol factories.
"""
if outputProtocolFactory is None:
outputProtocolFactory = inputProtocolFactory
TServer.TServer.__init__(self, processor, None, None, None,
inputProtocolFactory, outputProtocolFactory)
thttpserver = self
        class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
# Don't care about the request path.
itrans = TTransport.TFileObjectTransport(self.rfile)
otrans = TTransport.TFileObjectTransport(self.wfile)
itrans = TTransport.TBufferedTransport(
itrans, int(self.headers['Content-Length']))
otrans = TTransport.TMemoryBuffer()
iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)
oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)
try:
thttpserver.processor.process(iprot, oprot)
except ResponseException as exn:
exn.handler(self)
else:
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.end_headers()
self.wfile.write(otrans.getvalue())
        self.httpd = server_class(server_address, RequestHandler)
def serve(self):
self.httpd.serve_forever()
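# Hedged usage sketch (an assumption, not from the original file): serving a
# Thrift processor over HTTP on port 9090.  MyService and Handler stand for
# a user's generated service module and handler implementation.
#
#   from thrift.protocol import TJSONProtocol
#   processor = MyService.Processor(Handler())
#   server = THttpServer(processor, ('', 9090),
#                        TJSONProtocol.TJSONProtocolFactory())
#   server.serve()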
| 0 |
"""If you want your program to be translated into multiple languages you need
to do the following:
- Pass all strings that should be translated through the '_' function, eg:
print _('Hello World!')
- Create a Messages subdirectory in your application.
- Run 'pygettext *.py' to extract all the marked strings.
- Copy messages.pot as Messages/<lang>.po and edit (see ROX-Lib2's README).
- Use msgfmt to convert the .po files to .gmo files.
- In your application, use the rox.i18n.translation() function to set the _ function:
__builtins__._ = rox.i18n.translation(os.path.join(rox.app_dir, 'Messages'))
(for libraries, just do '_ ='; don't mess up the builtins)
Note that the marked strings must be fixed. If you're using formats, mark up the
format, eg:
print _('You have %d lives remaining') % lives
You might like to look at the scripts in ROX-Lib2's Messages directory for
more help.
"""
import os
def _expand_lang(locale):
from locale import normalize
locale = normalize(locale)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = locale.find('@')
if pos >= 0:
modifier = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = locale.find('.')
if pos >= 0:
codeset = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = locale.find('_')
if pos >= 0:
territory = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = locale
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
def expand_languages(languages = None):
# Get some reasonable defaults for arguments that were not supplied
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
return nelangs
# Locate a .mo file using the ROX strategy
def find(messages_dir, languages = None):
"""Look in messages_dir for a .gmo file for the user's preferred language
(or override this with the 'languages' argument). Returns the filename, or
None if there was no translation."""
# select a language
for lang in expand_languages(languages):
if lang == 'C':
break
mofile = os.path.join(messages_dir, '%s.gmo' % lang)
if os.path.exists(mofile):
return mofile
return None
def translation(messages_dir, languages = None):
"""Load the translation for the user's language and return a function
which translates a string into its unicode equivalent."""
mofile = find(messages_dir, languages)
if not mofile:
return lambda x: x
import gettext
return gettext.GNUTranslations(file(mofile)).ugettext
langs = expand_languages()
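# Hedged usage sketch (an assumption, not part of ROX-Lib2): install ``_``
# for an application whose translations live in <app_dir>/Messages, falling
# back to the identity function when no matching .gmo file is found.
def _example_install_translation(app_dir):
    import __builtin__
    __builtin__._ = translation(os.path.join(app_dir, 'Messages'))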
| 0.029951 |
#!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
import urllib2
from core import color
def startu(__version__):
url = 'https://raw.githubusercontent.com/Ali-Razmjoo/ZCR-Shellcoder-Archive/master/last_version'
up_url = 'https://raw.githubusercontent.com/Ali-Razmjoo/ZCR-Shellcoder-Archive/master/'
err = 0
try:
last_version = urllib2.urlopen(url).read()
last_version = last_version.rsplit()[0]
except:
print '%sConnection Error!%s\n\n'%(color.color('red'),color.color('reset'))
err = 1
    if err == 0:
update = True
if str(last_version) == str(__version__):
print '%syou are using the last version of software : %s%s%s'%(color.color('green'),color.color('red'),last_version,color.color('reset'))
update = False
if update is True:
print '%syour software version: %s%s%s\nlast version released: %s%s%s\n\nDownloading %szcr_shellcoder_%s%s%s.zip%s\n\n\n'%(color.color('green'),color.color('cyan'),str(__version__),color.color('green'),color.color('red'),str(last_version),color.color('green'),color.color('yellow'),color.color('red'),str(last_version),color.color('yellow'),color.color('reset'))
up_url = up_url + 'zcr_shellcoder_%s.zip'%(last_version)
try:
file_name = up_url.split('/')[-1]
u = urllib2.urlopen(up_url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "%sDownloading: %s%s%s Bytes: %s%s%s" % (color.color('white'),color.color('yellow'),file_name,color.color('white'),color.color('red'), file_size,color.color('blue'))
file_size_dl = 0
block_sz = 10
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
print '%sFile Downloaded: %s%s%s\n\n'%(color.color('cyan'),color.color('yellow'),file_name,color.color('reset'))
except:
print '%sConnection Error!%s\n\n'%(color.color('red'),color.color('reset'))
| 0.037401 |
import sys
PY3 = sys.version_info[0] == 3
if PY3:
MAXSIZE = sys.maxsize
def bytes_to_str(b):
if isinstance(b, bytes):
return str(b, 'utf8')
return b
def str_to_bytes(s):
if isinstance(s, bytes):
return s
return s.encode('utf8')
import urllib.parse
unquote_plus = urllib.parse.unquote_plus
else:
if sys.platform == "java":
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def bytes_to_str(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
def str_to_bytes(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
import urllib
unquote_plus = urllib.unquote_plus
| 0 |
try:
import requests
except ImportError:
message = ('Missing "requests", please install it using pip:\n'
'pip install requests')
raise ImportError(message)
try:
import json
except ImportError:
message = ('Missing "json", please install it using pip:\n'
'pip install json')
raise ImportError(message)
from octopus_error import OctopusError
from st2actions.runners.pythonrunner import Action
__all__ = [
'OctopusDeployAction',
]
class OctopusDeployAction(Action):
def __init__(self, config):
super(OctopusDeployAction, self).__init__(config)
self.client = self._init_client()
def _init_client(self):
api_key = self.config['api_key']
host = self.config['host']
port = self.config['port']
return OctopusDeployClient(api_key=api_key, host=host, port=port)
def _build_uri(self):
# big assumption but it'll cover 99% case,
# as octopus runs https by default
start = "http://" if self.client.port is 80 else "https://"
return start + self.client.host + ":" + str(self.client.port) + "/api/"
def make_post_request(self, action, payload):
response = requests.post(self._build_uri() + action,
data=json.dumps(payload), verify=False,
headers=self.client.headers)
if response.status_code != 200:
raise OctopusError(response.text, response.status_code)
return response.json()
def make_get_request(self, action, params=None):
response = requests.get(self._build_uri() + action,
verify=False,
params=params,
headers=self.client.headers)
if response.status_code != 200:
raise OctopusError(response.text, response.status_code)
return response.json()
class OctopusDeployClient(object):
def __init__(self, api_key, host, port):
self.api_key = api_key
self.host = host
self.port = port
self.headers = {'X-Octopus-ApiKey': self.api_key,
'Content-type': 'application/json',
'Accept': 'text/plain'}
| 0 |
#!/env/python3
import psycopg2
import argparse
from progress.bar import Bar
from pysam import VariantFile
DBNAME = "regovar-dev"
USER = "regovar"
HOST = "localhost"
PASSWORD = "regovar"
SHEMA_FILE = "poc_005.sql"
#========================================================
def create_schema(conn):
print("create schema ...")
cursor = conn.cursor()
cursor.execute(str(open(SHEMA_FILE, "r").read()))
conn.commit()
cursor.close()
#========================================================
def connect_database():
try:
conn = psycopg2.connect("dbname={} user={} host={} password={}".format(DBNAME,USER,HOST,PASSWORD))
print("connection success ...")
    except psycopg2.Error:
        print("Unable to connect to postgreSQL")
        raise
    return conn
#========================================================
def import_vcf(conn, filename):
print("import vcf ...")
    # Create the schema. If it already exists, everything in it is dropped first. The schema lives in the regovar namespace, so don't worry, it will not touch your other tables.
create_schema(conn)
print("count records ...")
count = sum([1 for i in VariantFile(filename)])
print(count,"records found")
bar = Bar('Importing variants... ', max = count, suffix='%(percent)d%%')
bcf_in = VariantFile(filename)
curr = conn.cursor()
variant_ids = dict()
samples_vars = dict()
    # Loop over the variants
for rec in bcf_in.fetch():
chrom = str(rec.chrom)
pos = int(rec.pos)
ref = str(rec.ref)
alt = str(rec.alts[0])
try:
curr.execute("INSERT INTO regovar.variants (bin,chr,pos,ref,alt) VALUES (%s,%s,%s,%s,%s) RETURNING id", (0,chrom,pos,ref,alt)) # it's a tupple ( x,)
last_id = curr.fetchone()[0]
except Exception as e :
last_id = None
        # If the insert succeeded, save the identifier. Tuples are hashable, so we take advantage of that.
if last_id is not None:
variant_ids[(chrom,pos,ref,alt)] = last_id
            # Keep track of the samples in memory
for name in rec.samples:
if name not in samples_vars:
samples_vars[name] = list()
samples_vars[name].append(last_id)
bar.next()
bar.finish()
conn.commit()
    # Insert the file name
curr.execute("INSERT INTO regovar.files (path) VALUES (%s) RETURNING id", (filename,)) # it's a tupple ( x,)
file_id = curr.fetchone()[0]
for name in samples_vars.keys():
bar = Bar('Importing sample: {} '.format(name), max = len(samples_vars[name]), suffix='%(percent)d%%')
curr.execute("INSERT INTO regovar.samples (name,description,file_id) VALUES (%s,%s,%s) RETURNING id", (name,"",file_id)) # it's a tupple ( x,)
sample_id = curr.fetchone()[0]
        # Insert into sample_has_variants
for var_id in samples_vars[name]:
curr.execute("INSERT INTO regovar.sample_has_variants (sample_id,variant_id,genotype) VALUES (%s,%s,%s) RETURNING id", (sample_id,var_id,-1)) # it's a tupple ( x,)
bar.next()
bar.finish()
conn.commit()
    # Import the samples
#========================================================
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create', help='create database schema', action='store_true')
parser.add_argument('-i', '--input',help='import vcf file', default = None)
args = parser.parse_args()
if args.create is True :
conn = connect_database()
create_schema(conn)
conn.close()
else:
if args.create is False and args.input is not None:
conn = connect_database()
import_vcf(conn, args.input)
else:
parser.print_help()
| 0.041329 |
# Copyright (C) 2010-2015 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import obnamlib
class ForgetPolicySyntaxError(obnamlib.ObnamError):
msg = 'Forget policy syntax error: {policy}'
class DuplicatePeriodError(obnamlib.ObnamError):
msg = 'Forget policy may not duplicate period ({period}): {policy}'
class SeparatorError(obnamlib.ObnamError):
msg = ('Forget policy must have rules separated by commas, '
'see position {position}: {policy}')
class ForgetPolicy(object):
'''Parse and interpret a policy for what to forget and what to keep.
See documentation for the --keep option for details.
'''
periods = {
'h': 'hourly',
'd': 'daily',
'w': 'weekly',
'm': 'monthly',
'y': 'yearly',
}
rule_pat = re.compile(r'(?P<count>\d+)(?P<period>(h|d|w|m|y))')
def parse(self, optarg):
'''Parse the argument of --keep.
Return a dictionary indexed by 'hourly', 'daily', 'weekly',
'monthly', 'yearly', and giving the number of generations
to keep for each time period.
'''
remaining = optarg
m = self.rule_pat.match(remaining)
if not m:
raise ForgetPolicySyntaxError(policy=optarg)
result = dict((y, None) for x, y in self.periods.iteritems())
while m:
count = int(m.group('count'))
period = self.periods[m.group('period')]
if result[period] is not None:
raise DuplicatePeriodError(period=period, policy=optarg)
result[period] = count
remaining = remaining[m.end():]
if not remaining:
break
if not remaining.startswith(','):
position = len(optarg) - len(remaining) + 1
raise SeparatorError(position=position, policy=optarg)
remaining = remaining[1:]
m = self.rule_pat.match(remaining)
result.update((x, 0) for x, y in result.iteritems() if y is None)
return result
def last_in_each_period(self, period, genlist):
formats = {
'hourly': '%Y-%m-%d %H',
'daily': '%Y-%m-%d',
'weekly': '%Y-%W',
'monthly': '%Y-%m',
'yearly': '%Y',
}
matches = []
for genid, dt in genlist:
formatted = dt.strftime(formats[period])
if not matches:
matches.append((genid, formatted))
elif matches[-1][1] == formatted:
matches[-1] = (genid, formatted)
else:
matches.append((genid, formatted))
return [genid for genid, formatted in matches]
def match(self, rules, genlist):
'''Match a parsed ruleset against a list of generations and times.
The ruleset should be of the form returned by the parse method.
genlist should be a list of generation identifiers and timestamps.
Identifiers can be anything, timestamps should be an instance
of datetime.datetime, with no time zone (it is ignored).
genlist should be in ascending order by time: oldest one first.
Return value is all those pairs from genlist that should be
kept (i.e., which match the rules).
'''
result_ids = set()
for period in rules:
genids = self.last_in_each_period(period, genlist)
if rules[period]:
for genid in genids[-rules[period]:]:
result_ids.add(genid)
return [(genid, dt) for genid, dt in genlist
if genid in result_ids]
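# Hedged usage sketch (not part of obnam): parsing a typical --keep value.
# For '72h,7d,5w,12m' the returned dictionary keeps 72 hourly, 7 daily,
# 5 weekly and 12 monthly generations, and 0 yearly ones.
def _example_parse_keep_policy():
    policy = ForgetPolicy()
    return policy.parse('72h,7d,5w,12m')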
| 0 |
import random
import datetime
import sys
import crs
import encdec
import prover
import verifier
system_random = random.SystemRandom()
def secure_shuffle(lst):
random.shuffle(lst, random=system_random.random)
def random_permutation(n):
s = list(range(n))
secure_shuffle(s)
return s
def initialize(n):
gk = crs.mk_gk()
Chi = crs.mk_Chi(gk.q)
CRS, td = crs.mk_crs(n, gk, Chi)
return gk, Chi, CRS
def mk_s_randoms(n, q):
return [[q.random() for j in range(2)] for i in range(n)]
def demo(n, messages):
gk, Chi, CRS = initialize(n)
secret = Chi.gamma
pk1 = CRS.pk1
pk2 = CRS.pk2
ciphertexts = encrypt_messages(gk.q, pk1, pk2, messages)
start = datetime.datetime.now()
sigma = random_permutation(n)
print("SIGMA = %s" % sigma)
s_randoms = mk_s_randoms(n, gk.q)
proof = prover.prove(n, CRS, ciphertexts, sigma, s_randoms)
shuffled_ciphertexts, \
(A1, A2), pi_1sp, pi_c1_1, pi_c1_2, pi_c2_1, pi_c2_2 = proof
perm_ok, valid, consistent = verifier.verify(
n, CRS, ciphertexts, shuffled_ciphertexts,
A1, A2, pi_1sp, pi_c1_1, pi_c1_2, pi_c2_1, pi_c2_2)
print("VERIFY: %s %s %s" % (perm_ok, valid, consistent))
end = datetime.datetime.now()
TABLES = encdec.make_tables(pk1, pk2, n)
shuffled_ms = decrypt_messages(gk.q, secret, TABLES, shuffled_ciphertexts)
print(shuffled_ms)
print("elapsed: %s" % (end - start))
def encrypt_messages(order, pk1, pk2, messages):
return [encdec.encrypt(order, pk1, pk2, message) for message in messages]
def decrypt_messages(order, secret, tables, ciphertexts):
return [encdec.decrypt(order, cs, secret, tables) for cs in ciphertexts]
if __name__ == '__main__':
n = 10
if len(sys.argv) >= 2:
n = int(sys.argv[1])
messages = list(range(n))
demo(len(messages), messages)
| 0 |
#
# Code by Alexander Pruss and under the MIT license
#
import mcpi.minecraft as minecraft
import mcpi.block as block
import lsystem
mc = minecraft.Minecraft()
playerPos = mc.player.getPos()
DIRECTIONS = ((1,0),(0,1),(-1,0),(0,-1))
pos = (int(playerPos.x),int(playerPos.y),int(playerPos.z))
direction = 0
def go():
global pos
dx = DIRECTIONS[direction][0]
dz = DIRECTIONS[direction][1]
# draw a wall
mc.setBlocks(pos,pos[0]+dx*4,pos[1]+4,pos[2]+dz*4,block.BRICK_BLOCK)
# draw a door in it
mc.setBlocks(pos[0]+dx*2,pos[1],pos[2]+dz*2,pos[0]+dx*2,pos[1]+1,pos[2]+dz*2,block.AIR)
pos = (pos[0]+dx*4, pos[1], pos[2]+dz*4)
def left():
global direction
direction -= 1
if direction == -1:
direction = 3
def right():
global direction
direction += 1
if direction == 4:
direction = 0
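# These are the production rules of the dragon-curve L-system; every 'F'
# symbol draws one wall segment with a doorway via go(), so the expanded
# string lays out a dragon-curve shaped maze of brick walls.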
rules = {'X':'X+YF+', 'Y':'-FX-Y'}
dictionary = { '+': left,
'-': right,
'F': go }
lsystem.lsystem('FX', rules, dictionary, 14)
#go()
| 0.027205 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test `astroquery.utils.timer`.
.. note::
The tests only compare rough estimates as
performance is machine-dependent.
"""
# STDLIB
import time
# THIRD-PARTY
import pytest
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from astropy.modeling.fitting import ModelsError
# LOCAL
from ..timer import RunTimePredictor
def func_to_time(x):
"""This sleeps for y seconds for use with timing tests.
.. math::
y = 5 * x - 10
"""
y = 5.0 * np.asarray(x) - 10
time.sleep(y)
return y
def test_timer():
"""Test function timer."""
p = RunTimePredictor(func_to_time)
# --- These must run before data points are introduced. ---
with pytest.raises(ValueError):
p.do_fit()
with pytest.raises(RuntimeError):
p.predict_time(100)
# --- These must run next to set up data points. ---
with pytest.warns(AstropyUserWarning, match="ufunc 'multiply' did not "
"contain a loop with signature matching types"):
p.time_func([2.02, 2.04, 2.1, 'a', 2.3])
p.time_func(2.2) # Test OrderedDict
assert p._funcname == 'func_to_time'
assert p._cache_bad == ['a']
k = list(p.results.keys())
v = list(p.results.values())
np.testing.assert_array_equal(k, [2.02, 2.04, 2.1, 2.3, 2.2])
np.testing.assert_allclose(v, [0.1, 0.2, 0.5, 1.5, 1.0])
# --- These should only run once baseline is established. ---
with pytest.raises(ModelsError):
a = p.do_fit(model='foo')
with pytest.raises(ModelsError):
a = p.do_fit(fitter='foo')
a = p.do_fit()
assert p._power == 1
# Perfect slope is 5, with 10% uncertainty
assert 4.5 <= a[1] <= 5.5
# Perfect intercept is -10, with 1-sec uncertainty
assert -11 <= a[0] <= -9
# --- These should only run once fitting is completed. ---
# Perfect answer is 490, with 10% uncertainty
t = p.predict_time(100)
assert 441 <= t <= 539
# Repeated call to access cached run time
t2 = p.predict_time(100)
assert t == t2
| 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api, exceptions, _
class L10nEsAeatMod111Report(models.Model):
_description = 'AEAT 111 report'
_inherit = 'l10n.es.aeat.report.tax.mapping'
_name = 'l10n.es.aeat.mod111.report'
def _get_export_conf(self):
try:
return self.env.ref(
'l10n_es_aeat_mod111.aeat_mod111_main_export_config').id
except ValueError:
return self.env['aeat.model.export.config']
number = fields.Char(default='111')
export_config = fields.Many2one(default=_get_export_conf)
casilla_01 = fields.Integer(
string='[01] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [01]: Rendimientos del trabajo - '
'Rendimientos dinerarios - Nº de perceptores')
casilla_02 = fields.Float(
string='[02] Importe de las percepciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [02]: Rendimientos del trabajo - '
'Rendimientos dinerarios - Importe de las percepciones')
casilla_03 = fields.Float(
string='[03] Importe de las retenciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [03]: Rendimientos del trabajo - '
'Rendimientos dinerarios - Importe de las retenciones')
casilla_04 = fields.Integer(
string='[04] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [04]: Rendimientos del trabajo - '
'Rendimientos en especie - Nº de perceptores')
casilla_05 = fields.Float(
string='[05] Valor percepciones en especie', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [05]: Rendimientos del trabajo - '
'Rendimientos en especie - Valor percepciones en especie')
casilla_06 = fields.Float(
string='[06] Importe de los ingresos a cuenta', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [06]: Rendimientos del trabajo - '
'Rendimientos en especie - Importe de los ingresos a cuenta')
casilla_07 = fields.Integer(
string='[07] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [07]: Rendimientos de actividades económicas - '
'Rendimientos dinerarios - Nº de perceptores')
casilla_08 = fields.Float(
string='[08] Importe de las percepciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [08]: Rendimientos de actividades económicas - '
'Rendimientos dinerarios - Importe de las percepciones')
casilla_09 = fields.Float(
string='[09] Importe de las retenciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [09]: Rendimientos de actividades económicas - '
'Rendimientos dinerarios - Importe de las retenciones')
casilla_10 = fields.Integer(
string='[10] Nº de perceptores ', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [10]: Rendimientos de actividades económicas - '
'Rendimientos en especie - Nº de perceptores')
casilla_11 = fields.Float(
string='[11] Valor percepciones en especie', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [11]: Rendimientos de actividades económicas - '
'Rendimientos en especie - Valor percepciones en especie')
casilla_12 = fields.Float(
string='[12] Importe de los ingresos a cuenta', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [12]: Rendimientos de actividades económicas - '
'Rendimientos en especie - Importe de los ingresos a cuenta')
casilla_13 = fields.Integer(
string='[13] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [13]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en metálico - Nº de perceptores')
casilla_14 = fields.Float(
string='[14] Importe de las percepciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [14]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en metálico - Importe de las percepciones')
casilla_15 = fields.Float(
string='[15] Importe de las retenciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [15]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en metálico - Importe de las retenciones')
casilla_16 = fields.Integer(
string='[16] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [16]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en especie - Nº de perceptores')
casilla_17 = fields.Float(
string='[17] Valor percepciones en especie', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [17]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en especie - Valor percepciones en especie')
casilla_18 = fields.Float(
string='[18] Importe de los ingresos a cuenta', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [18]: Premios por la participación en juegos, '
'concursos, rifas o combinaciones aleatorias - '
'Premios en especie - Importe de los ingresos a cuenta')
casilla_19 = fields.Integer(
string='[19] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [19]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones dinerarias - Nº de perceptores')
casilla_20 = fields.Float(
string='[20] Importe de las percepciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [20]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones dinerarias - Importe de las percepciones')
casilla_21 = fields.Float(
string='[21] Importe de las retenciones', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [21]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones dinerarias - Importe de las retenciones')
casilla_22 = fields.Integer(
string='[22] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [22]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones en especie - Nº de perceptores')
casilla_23 = fields.Float(
string='[23] Valor percepciones en especie', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [23]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones en especie - Valor percepciones en especie')
casilla_24 = fields.Float(
string='[24] Importe de los ingresos a cuenta', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [24]: Ganancias patrimoniales derivadas de los '
'aprovechamientos forestales de los vecinos en montes públicos - '
'Percepciones en especie - Importe de los ingresos a cuenta')
casilla_25 = fields.Integer(
string='[25] Nº de perceptores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [25]: Contraprestaciones por la cesión de derechos de '
'imagen: ingresos a cuenta previstos en el artículo 92.8 de la '
'Ley del Impuesto - Contraprestaciones dinerarias o en especie '
'- Nº de perceptores')
casilla_26 = fields.Float(
string='[26] Contraprestaciones satisfechas', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [26]: Contraprestaciones por la cesión de derechos de '
'imagen: ingresos a cuenta previstos en el artículo 92.8 de la '
'Ley del Impuesto - Contraprestaciones dinerarias o en especie '
'- Contraprestaciones satisfechas')
casilla_27 = fields.Float(
string='[27] Importe de los ingresos a cuenta', readonly=True,
states={'calculated': [('readonly', False)]},
help='Casilla [27]: Contraprestaciones por la cesión de derechos de '
'imagen: ingresos a cuenta previstos en el artículo 92.8 de la '
'Ley del Impuesto - Contraprestaciones dinerarias o en especie '
'- Importe de los ingresos a cuenta')
casilla_28 = fields.Float(
string='[28] Suma de retenciones',
readonly=True, compute='_compute_28',
help='Total liquidación - Suma de retenciones e ingresos a cuenta: '
'([03] + [06] + [09] + [12] + [15] + [18] + [21] + [24] + [27])')
casilla_29 = fields.Float(
string='[29] Resultados a ingresar anteriores', readonly=True,
states={'calculated': [('readonly', False)]},
help='Total liquidación - A deducir (exclusivamente en caso de '
'autoliquidación complementaria): Resultados a ingresar de '
'anteriores autoliquidaciones por el mismo concepto, ejercicio '
'y período')
casilla_30 = fields.Float(
string='[30] Resultado a ingresar',
readonly=True, compute='_compute_30',
help='Total liquidación - Resultado a ingresar: ([28] - [29])')
codigo_electronico_anterior = fields.Char(
string='Código electrónico', size=16, readonly=True,
states={'draft': [('readonly', False)]},
help='Código electrónico de la declaración anterior (si se presentó '
'telemáticamente). A cumplimentar sólo en el caso de declaración '
'complementaria.')
currency_id = fields.Many2one(
comodel_name='res.currency', string='Moneda', readonly=True,
related='company_id.currency_id', store=True)
tipo_declaracion = fields.Selection(
[('I', 'Ingreso'), ('U', 'Domiciliación'),
('G', 'Ingreso a anotar en CCT'), ('N', 'Negativa')],
string='Tipo de declaración', readonly=True,
states={'draft': [('readonly', False)]}, required=True)
contact_mobile_phone = fields.Char(
string="Mobile Phone", size=9,
states={'calculated': [('required', True)],
'confirmed': [('readonly', True)]})
colegio_concertado = fields.Boolean(
string='Colegio concertado', readonly=True,
states={'draft': [('readonly', False)]}, default=False)
move_lines_02 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line02_rel',
column1='mod111', column2='account_move_line')
move_lines_03 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line03_rel',
column1='mod111', column2='account_move_line')
move_lines_05 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line05_rel',
column1='mod111', column2='account_move_line')
move_lines_06 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line06_rel',
column1='mod111', column2='account_move_line')
move_lines_08 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line08_rel',
column1='mod111', column2='account_move_line')
move_lines_09 = fields.Many2many(
comodel_name='account.move.line',
relation='mod111_account_move_line09_rel',
column1='mod111', column2='account_move_line')
@api.one
@api.constrains('codigo_electronico_anterior', 'previous_number')
def _check_complementary(self):
if (self.type == 'C' and
not self.codigo_electronico_anterior and
not self.previous_number):
raise exceptions.Warning(
_('Si se marca la casilla de liquidación complementaria,'
' debe rellenar el código electrónico o'
' el número de justificante de la declaración anterior.'))
def __init__(self, pool, cr):
self._aeat_number = '111'
super(L10nEsAeatMod111Report, self).__init__(pool, cr)
@api.multi
def _get_move_line_domain(self, codes, periods=None,
include_children=True):
domain = super(L10nEsAeatMod111Report, self)._get_move_line_domain(
codes, periods=periods, include_children=include_children)
if self.env.context.get('no_partner'):
return filter(lambda line: line[0] != 'partner_id', domain)
return domain
@api.multi
def calculate(self):
self.ensure_one()
        # I. Employment income (Rendimientos del trabajo)
move_lines02 = self._get_tax_code_lines(
['IRPATBI'], periods=self.periods)
move_lines03 = self._get_tax_code_lines(
['IRPATC'], periods=self.periods)
move_lines05 = self._get_tax_code_lines(
['IRPTBIE'], periods=self.periods)
move_lines06 = self._get_tax_code_lines(
['IRPATCE'], periods=self.periods)
self.move_lines_02 = move_lines02.ids
self.move_lines_03 = move_lines03.ids
self.move_lines_05 = move_lines05.ids
self.move_lines_06 = move_lines06.ids
self.casilla_01 = len(
(move_lines02 + move_lines03).mapped('partner_id'))
self.casilla_02 = sum(move_lines02.mapped('tax_amount'))
self.casilla_03 = sum(move_lines03.mapped('tax_amount'))
self.casilla_04 = len(
(move_lines05 + move_lines06).mapped('partner_id'))
self.casilla_05 = sum(move_lines05.mapped('tax_amount'))
self.casilla_06 = sum(move_lines06.mapped('tax_amount'))
        # II. Income from economic activities (Rendimientos de actividades económicas)
move_lines08 = self._get_tax_code_lines(
['IRPBIAE'], periods=self.periods)
move_lines09 = self._get_tax_code_lines(
['ITRPCAE'], periods=self.periods)
self.move_lines_08 = move_lines08.ids
self.move_lines_09 = move_lines09.ids
self.casilla_08 = sum(move_lines08.mapped('tax_amount'))
self.casilla_09 = sum(move_lines09.mapped('tax_amount'))
self.casilla_07 = len(
(move_lines08 + move_lines09).mapped('partner_id'))
        # III. Prizes from participation in games, contests, raffles or
        # random combinations (Premios por la participación en juegos,
        # concursos, rifas o combinaciones aleatorias).
        # The user enters these amounts manually after the calculation.
        # IV. Capital gains from residents' forestry exploitation of public
        # woodland (Ganancias patrimoniales derivadas de los aprovechamientos
        # forestales de los vecinos en montes públicos).
        # The user enters these amounts manually after the calculation.
        # V. Payments for the transfer of image rights: advance payments
        # provided for in article 92.8 of the Income Tax Law
        # (Contraprestaciones por la cesión de derechos de imagen).
        # The user enters these amounts manually after the calculation.
@api.one
@api.depends('casilla_03', 'casilla_06', 'casilla_09', 'casilla_12',
'casilla_15', 'casilla_18', 'casilla_21', 'casilla_24',
'casilla_27')
def _compute_28(self):
self.casilla_28 = (
self.casilla_03 + self.casilla_06 + self.casilla_09 +
self.casilla_12 + self.casilla_15 + self.casilla_18 +
self.casilla_21 + self.casilla_24 + self.casilla_27
)
@api.one
@api.depends('casilla_28', 'casilla_29')
def _compute_30(self):
self.casilla_30 = self.casilla_28 - self.casilla_29
@api.multi
def show_move_lines(self):
move_lines = []
if self.env.context.get('move_lines_08', False):
move_lines = self.move_lines_08.ids
elif self.env.context.get('move_lines_09', False):
move_lines = self.move_lines_09.ids
elif self.env.context.get('move_lines_02', False):
move_lines = self.move_lines_02.ids
elif self.env.context.get('move_lines_03', False):
move_lines = self.move_lines_03.ids
elif self.env.context.get('move_lines_05', False):
move_lines = self.move_lines_05.ids
elif self.env.context.get('move_lines_06', False):
move_lines = self.move_lines_06.ids
view_id = self.env.ref('l10n_es_aeat.view_move_line_tree')
return {'type': 'ir.actions.act_window',
'name': _('Account Move Lines'),
'view_mode': 'tree,form',
'view_type': 'form',
'views': [(view_id.id, 'tree')],
'view_id': False,
'res_model': 'account.move.line',
'domain': [('id', 'in', move_lines)]
}
| 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields
class ProductProductType(models.Model):
_name = 'product.product.type'
name = fields.Char(string='Product Type', translate=True)
class ProductTemplate(models.Model):
_inherit = 'product.template'
product_type = fields.Many2one(comodel_name='product.product.type',
string='Product Type')
| 0 |
#!/usr/bin/env python
from struct import *;
from collections import namedtuple;
import getopt;
import sys;
class MdbShmReader:
def __init__(self,filename):
self.filename = filename;
self.readFile();
def readFile(self):
try:
self.fd = open(self.filename,"r");
except:
print "open file failed"
exit(-1);
def getPoolInfo(self):
try:
self.fd.seek(0);
buf = self.fd.read(20);
poolinfo = namedtuple('poolinfo','inited page_size total_pgaes free_pages current_page');
self.mpool = poolinfo._make(unpack('iiiii',buf));
#print "\033[0;32mMdb Info:\033[0;m"
print " "
print " inited :",self.mpool.inited
print " page_size :",self.mpool.page_size
print " total_pgaes :",self.mpool.total_pgaes
print " free_pages :",self.mpool.free_pages
print " current_page :",self.mpool.current_page
print " "
except:
print "getPoolInfo failed"
pass;
def getCacheInfo(self):
try:
self.fd.seek(524288);
buf = self.fd.read(16);
cacheinfo = namedtuple('cacheinfo','inited max_slab_id base_size factor');
self.cinfo = cacheinfo._make(unpack('iiif',buf));
#print self.cinfo;
print " cache info:"
print " inited :",self.cinfo.inited;
print " max_slab_id :",self.cinfo.max_slab_id;
print " base_size :",self.cinfo.base_size;
print " "
except:
pass;
def getSlabInfo(self):
#try:
self.fd.seek(524288+16);
print "id -- size -- perslab -- item_count--evict_count--full_pages--partial_pages--free_pages"
for i in range(0,self.cinfo.max_slab_id+1):
self.fd.read(16); #Pool & Cache pointer
buf = self.fd.read(16);
slabinfo = namedtuple('slabinfo','slabid slabsize perslab page_size');
sinfo = slabinfo._make(unpack('iiii',buf));
#print sinfo;
ilist = namedtuple('ilist','head tail');
buf = self.fd.read(16);
ilist_e = ilist._make(unpack('LL',buf));
self.fd.read(32752);
#self.fd.read(32768); #list & info
buf = self.fd.read(28);
slabinfo_2 = namedtuple('slabinfo_2','item_count evict_count full_pages partial_pages free_pages');
sinfo_2 = slabinfo_2._make(unpack('LLiii',buf));
#print sinfo_2;
self.fd.read(36); #list etc. and partial_pages_bucket_num/index/count, and the 4 pack bytes
print "%2d"%sinfo.slabid,"%8d"%sinfo.slabsize,"%8d"%sinfo.perslab,"%12d"%sinfo_2.item_count,\
"%12d"%sinfo_2.evict_count,"%8d"%sinfo_2.full_pages,"%10d"%sinfo_2.partial_pages,"%10d"%sinfo_2.free_pages,"%10d"%ilist_e.head,"%10d"%ilist_e.tail
partial_len = ((sinfo.perslab + 9) / 10 ) * 4;
self.fd.read(partial_len);
#except:
# pass
def getHashInfo(self):
try:
self.fd.seek(16384);
buf = self.fd.read(16);
hashinfo = namedtuple('hashinfo','inited bucket_size item_count start_page');
self.hinfo = hashinfo._make(unpack('iiii',buf));
#print self.hinfo;
print " Hash Info:"
print " inited :",self.hinfo.inited;
print " bucket_size :",self.hinfo.bucket_size;
print " item_count :",self.hinfo.item_count;
print " start_page :",self.hinfo.start_page;
print " "
except:
pass;
def getBitMapInfo(self):
try:
self.fd.seek(20);
buf = self.fd.read(8192);
start=0;
end=0;
prev=();
for b in buf:
t = unpack('c',b);
if prev==():
pass;
elif prev == t:
end += 1;
else:
print start,"-",end,":",prev.__str__()[4:6]
end += 1;
start = end;
prev = t;
print start,"-",end,":",prev.__str__()[4:6];
except:
pass;
def getHashTable(self):
#try:
self.getHashInfo();
self.fd.seek(self.mpool.page_size * self.hinfo.start_page);
bucket = None;
for i in range (0,self.hinfo.bucket_size):
buf = self.fd.read(8);
bucket = unpack('L',buf);
if bucket[0] == 0:
continue;
print "bucket","%8d"%i,
old_pos = self.fd.tell();
while bucket[0] != 0:
print "->","%20d"%bucket[0],
page_id,slab_size,page_offset = MdbShmReader.idToDetail(bucket[0]);
self.fd.seek(page_id * self.mpool.page_size + slab_size * page_offset + 24);
buf = self.fd.read(8); #h_next
bucket = unpack('L',buf);
print ""
self.fd.seek(old_pos);
#except:
# pass;
def getStatInfo(self):
try:
self.fd.seek(32768);
print "area quota datasize itemcount hit get put rem evict"
for i in range(0,1024):
buf = self.fd.read(72);
statinfo = namedtuple('statinfo','quota data_size space_size item_count hit_count get_count put_count remove_count evict_count');
sinfo = statinfo._make(unpack('LLLLLLLLL',buf));
if sinfo.quota != 0 or sinfo.item_count != 0:
print "%4d"%i,"%12d"%sinfo.quota,"%12d"%sinfo.data_size,"%12d"%sinfo.item_count,"%12d"%sinfo.hit_count,"%12d"%sinfo.get_count,"%12d"%sinfo.put_count,"%12d"%sinfo.remove_count,"%12d"%sinfo.evict_count
except:
print "except"
pass;
@staticmethod
def idToDetail(id):
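        # Unpack the packed 64-bit item id used by mdb: bits 0-19 hold the
        # slab size, bits 20-35 the item offset inside its page and bits
        # 36-51 the page id (see the shift/mask expressions below).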
#try:
# self.cinfo
#except NameError:
# self.cinfo = None;
#if self.cinfo is None:
# self.getCacheInfo();
page_id=((id>>36) & ((1<<16)-1));
slab_size=((id)&((1<<20)-1));
page_offset=((id>>20)&((1<<16)-1));
return (page_id,slab_size,page_offset);
def whichSlab(size):
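    # Rebuild the mdb slab size table (base size 62 bytes, growth factor 1.1,
    # 8-byte aligned, capped below half of a 1 MB page, plus one near-full-page
    # slab) and print the index and size of the first slab large enough for an
    # item of ``size`` bytes plus 48 bytes of per-item overhead.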
start=62
factor=1.1
pagesize=1048576
slab_array=[]
i=0
while (i<100 and start < pagesize/2):
slab_array.append(start);
start = int(start * factor);
start = ((start + 7) & (~0x7));
slab_array.append(1048552);
#print slab_array;
i=0
while size+46+2 > slab_array[i]:
i+=1;
print i,":",slab_array[i]
def main():
try:
opts,args = getopt.getopt(sys.argv[1:],"f:i:s:");
    except getopt.GetoptError, err:
exit(-1);
viewid=False;
id=None;
filename = None;
slabsize = None;
for o,a in opts:
if o == "-i":
viewid=True;
try:
id=int(a,10);
except ValueError:
id=int(a,16);
elif o == "-f":
filename = a;
elif o == "-s":
slabsize = int(a);
if filename is None and id is None and slabsize is None:
usage();
exit(-1);
if viewid:
page_id,slab_size,page_offset = MdbShmReader.idToDetail(id);
print "page_id:",page_id,"slab_size:",slab_size,"page_offset:",page_offset
elif slabsize:
whichSlab(slabsize);
else:
reader = MdbShmReader(filename);
reader.getPoolInfo();
reader.getCacheInfo();
reader.getHashInfo();
reader.getStatInfo();
#reader.getBitMapInfo();
reader.getSlabInfo();
#reader.getHashTable();
def usage():
print "mdbshm_reader.py -f shm file"
print " -i id"
print " -s size"
if __name__ == "__main__":
main();
| 0.058639 |
from test.integration.base import DBTIntegrationTest, use_profile
import random
import time
class TestBaseBigQueryRun(DBTIntegrationTest):
@property
def schema(self):
return "bigquery_test_022"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'macro-paths': ['macros'],
'seeds': {
'quote_columns': False,
},
}
@property
def profile_config(self):
return self.bigquery_profile()
def assert_nondupes_pass(self):
# The 'dupe' model should fail, but all others should pass
test_results = self.run_dbt(['test'], expect_pass=False)
for result in test_results:
if 'dupe' in result.node.name:
self.assertEqual(result.status, 'fail')
self.assertFalse(result.skipped)
self.assertTrue(int(result.message) > 0)
# assert that actual tests pass
else:
self.assertEqual(result.status, 'pass')
self.assertFalse(result.skipped)
# message = # of failing rows
self.assertEqual(int(result.message), 0)
class TestSimpleBigQueryRun(TestBaseBigQueryRun):
@use_profile('bigquery')
def test__bigquery_simple_run(self):
# make sure seed works twice. Full-refresh is a no-op
self.run_dbt(['seed'])
self.run_dbt(['seed', '--full-refresh'])
results = self.run_dbt()
# Bump expected number of results when adding new model
self.assertEqual(len(results), 11)
self.assert_nondupes_pass()
class TestUnderscoreBigQueryRun(TestBaseBigQueryRun):
prefix = "_test{}{:04}".format(int(time.time()), random.randint(0, 9999))
@use_profile('bigquery')
def test_bigquery_run_twice(self):
self.run_dbt(['seed'])
results = self.run_dbt()
self.assertEqual(len(results), 11)
results = self.run_dbt()
self.assertEqual(len(results), 11)
self.assert_nondupes_pass()
| 0 |
#!/usr/bin/env python
from numpy import array,hstack
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat]]
def transfer_multitask_l12_logistic_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
from modshogun import BinaryLabels, RealFeatures, Task, TaskGroup, MultitaskL12LogisticRegression
features = RealFeatures(hstack((traindat,traindat)))
labels = BinaryLabels(hstack((label_train,label_train)))
n_vectors = features.get_num_vectors()
task_one = Task(0,n_vectors//2)
task_two = Task(n_vectors//2,n_vectors)
task_group = TaskGroup()
task_group.append_task(task_one)
task_group.append_task(task_two)
mtlr = MultitaskL12LogisticRegression(0.1,0.1,features,labels,task_group)
mtlr.set_tolerance(1e-2) # use 1e-2 tolerance
mtlr.set_max_iter(10)
mtlr.train()
mtlr.set_current_task(0)
out = mtlr.apply_regression().get_labels()
return out
if __name__=='__main__':
print('TransferMultitaskL12LogisticRegression')
transfer_multitask_l12_logistic_regression(*parameter_list[0])
| 0.03028 |
# Copyright 2013 Huawei Technologies Co.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from tempest.api.volume import base
from tempest import config
from tempest import test
CONF = config.CONF
class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
@classmethod
def skip_checks(cls):
super(SnapshotV2MetadataTestJSON, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder snapshot feature disabled")
@classmethod
def setup_clients(cls):
super(SnapshotV2MetadataTestJSON, cls).setup_clients()
cls.client = cls.snapshots_client
@classmethod
def resource_setup(cls):
super(SnapshotV2MetadataTestJSON, cls).resource_setup()
# Create a volume
cls.volume = cls.create_volume()
# Create a snapshot
cls.snapshot = cls.create_snapshot(volume_id=cls.volume['id'])
cls.snapshot_id = cls.snapshot['id']
def tearDown(self):
# Update the metadata to {}
self.client.update_snapshot_metadata(self.snapshot_id, metadata={})
super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.idempotent_id('a2f20f99-e363-4584-be97-bc33afb1a56c')
def test_create_get_delete_snapshot_metadata(self):
# Create metadata for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
expected = {"key2": "value2",
"key3": "value3"}
body = self.client.create_snapshot_metadata(
self.snapshot_id, metadata)['metadata']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
# Delete one item metadata of the snapshot
self.client.delete_snapshot_metadata_item(
self.snapshot_id, "key1")
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertThat(body.items(), matchers.ContainsAll(expected.items()))
self.assertNotIn("key1", body)
@test.idempotent_id('bd2363bc-de92-48a4-bc98-28943c6e4be1')
def test_update_snapshot_metadata(self):
# Update metadata for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
update = {"key3": "value3_update",
"key4": "value4"}
# Create metadata for the snapshot
body = self.client.create_snapshot_metadata(
self.snapshot_id, metadata)['metadata']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
# Update metadata item
body = self.client.update_snapshot_metadata(
self.snapshot_id, metadata=update)['metadata']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertEqual(update, body)
@test.idempotent_id('e8ff85c5-8f97-477f-806a-3ac364a949ed')
def test_update_snapshot_metadata_item(self):
# Update metadata item for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
update_item = {"key3": "value3_update"}
expect = {"key1": "value1",
"key2": "value2",
"key3": "value3_update"}
# Create metadata for the snapshot
body = self.client.create_snapshot_metadata(
self.snapshot_id, metadata)['metadata']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
# Update metadata item
body = self.client.update_snapshot_metadata_item(
self.snapshot_id, "key3", meta=update_item)['meta']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
self.assertThat(body.items(), matchers.ContainsAll(expect.items()))
class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
_api_version = 1
| 0 |
# -*- coding: utf-8 -*-
#from DevTemplate import Ui_Form
from PyQt4 import QtCore, QtGui
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
from acq4.pyqtgraph.parametertree import *
import collections
class CameraDeviceGui(QtGui.QWidget):
def __init__(self, dev, win):
#pdb.set_trace()
QtGui.QWidget.__init__(self)
self.dev = dev
self.win = win
#self.cam = self.dev.cam
#self.ui = Ui_Form()
#self.ui.setupUi(self)
self.layout = QtGui.QGridLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.params = self.dev.listParams()
self.stateGroup = WidgetGroup([])
#self.labels = {}
params = []
for k, p in self.params.iteritems():
try:
val = self.dev.getParam(k)
except:
continue
if not p[1]: ## read-only param
params.append({'name': k, 'readonly': True, 'value': val, 'type': 'str'})
#w = QtGui.QLabel()
#w.setText(str(val))
#self.labels[k] = w
else: ## parameter is writable
if type(p[0]) is tuple:
if len(p[0]) == 3:
(mn, mx, step) = p[0]
elif len(p[0]) == 2:
(mn, mx) = p[0]
step = 1
elif len(p[0]) == 0:
(mn,mx) = (10.E-6,100.)
step = 1
if type(mx) in [int, long] and type(mn) in [int, long]:
params.append({'name': k, 'type': 'int', 'value': val, 'limits': (mn, mx), 'step': step})
else:
params.append({'name': k, 'type': 'float', 'value': val, 'limits': (mn, mx), 'dec': True, 'step': 1})
if k == 'exposure':
params[-1]['suffix'] = 's'
params[-1]['siPrefix'] = True
params[-1]['minStep'] = 1e-6
elif type(p[0]) is list:
#print k, val, p
params.append({'name': k, 'type': 'list', 'value': val, 'values': p[0]})
#elif 'BOOL' in typ:
# w = QtGui.QCheckBox()
# w.setChecked(val)
else:
print " Ignoring parameter '%s': %s" % (k, str(p))
continue
#self.stateGroup.addWidget(w, k)
#self.ui.formLayout_2.addRow(k, w)
#self.stateGroup.sigChanged.connect(self.stateChanged)
self.paramSet = Parameter(name='cameraParams', type='group', children=params)
self.paramWidget = ParameterTree()
self.paramWidget.setParameters(self.paramSet, showTop=False)
self.layout.addWidget(self.paramWidget)
self.paramSet.sigTreeStateChanged.connect(self.stateChanged)
#self.ui.reconnectBtn.clicked.connect(self.reconnect)
self.dev.sigParamsChanged.connect(self.paramsChanged)
#for k, p in self.params.iteritems():
##p = self.params[k]
##print p
##if not p[1]:
##continue
#try:
#val = self.dev.getParam(k)
#except:
#continue
##typ = self.cam.getParamTypeName(p)
#if not p[1]: ## read-only param
#w = QtGui.QLabel()
#w.setText(str(val))
#self.labels[k] = w
#else: ## parameter is writable
#if type(p[0]) is tuple:
#if len(p[0]) == 3:
#(mn, mx, step) = p[0]
#elif len(p[0]) == 2:
#(mn, mx) = p[0]
#step = 1
#if type(mx) in [int, long] and type(mn) in [int, long]:
#w = QtGui.QSpinBox()
#intmax = (2**16)-1
#if mx is None or mx > intmax:
#mx = intmax
#mn = int(mn)
#mx = int(mx)
#step = int(step)
#w.setRange(mn, mx)
#w.setSingleStep(step)
##print k, "val:", val, type(val)
#w.setValue(val)
#else:
#w = SpinBox()
#w.setOpts(value=val, range=(mn, mx), dec=True, step=1)
#elif type(p[0]) is list:
#w = QtGui.QComboBox()
##(opts, vals) = self.cam.getEnumList(p)
#for i in range(len(p[0])):
#w.addItem(str(p[0][i]))
#if p[0][i] == val:
#w.setCurrentIndex(i)
##elif 'BOOL' in typ:
## w = QtGui.QCheckBox()
## w.setChecked(val)
#else:
#print " Ignoring parameter '%s': %s" % (k, str(p))
#continue
#self.stateGroup.addWidget(w, k)
#self.ui.formLayout_2.addRow(k, w)
##QtCore.QObject.connect(self.stateGroup, QtCore.SIGNAL('changed'), self.stateChanged)
#self.stateGroup.sigChanged.connect(self.stateChanged)
##QtCore.QObject.connect(self.ui.reconnectBtn, QtCore.SIGNAL('clicked()'), self.reconnect)
#self.ui.reconnectBtn.clicked.connect(self.reconnect)
##QtCore.QObject.connect(self.dev, QtCore.SIGNAL('paramsChanged'), self.paramsChanged)
#self.dev.sigParamsChanged.connect(self.paramsChanged)
##print "Done with UI"
def stateChanged(self, param, changes):
#print "tree state changed:"
## called when state is changed by user
vals = collections.OrderedDict()
for param, change, data in changes:
if change == 'value':
#print param.name(), param.value()
vals[param.name()] = param.value()
self.dev.setParams(vals)
def paramsChanged(self, params):
#print "Camera param changed:", params
## Called when state of camera has changed
for p in params.keys()[:]: ## flatten out nested dicts
if isinstance(params[p], dict):
for k in params[p]:
params[k] = params[p][k]
try: ## need to ignore tree-change signals while updating it.
self.paramSet.sigTreeStateChanged.disconnect(self.stateChanged)
for k, v in params.iteritems():
self.paramSet[k] = v
for p2 in self.params[k][3]: ## Update bounds if needed
newBounds = self.dev.listParams([p2])[p2][0]
self.paramSet.param(p2).setLimits(newBounds)
finally:
self.paramSet.sigTreeStateChanged.connect(self.stateChanged)
#self.stateGroup.blockSignals(True)
#self.stateGroup.setState(params)
#print "State:", self.stateGroup.state()
#for p in params:
#if not self.params[p][1]:
#self.labels[p].setText(str(params[p])) ## Update read-only labels
#else:
#for p2 in self.params[p][3]: ## Update bounds if needed
#newBounds = self.dev.listParams([p2])[p2][0]
#w = self.stateGroup.findWidget(p2)
##print "Update bounds for %s: %s" % (p2, str(newBounds))
#if type(newBounds) is tuple:
#(mn, mx, step) = newBounds
#if isinstance(w, QtGui.QSpinBox):
#intmax = (2**16)-1
#if mx is None or mx > intmax:
#mx = intmax
#mn = int(mn)
#mx = int(mx)
#step = int(step)
#w.setRange(mn, mx)
#w.setSingleStep(step)
#else:
#w.setOpts(range=(mn, mx))
##elif type(newBounds) is list:
##w.clear()
##for i in range(len(newBounds)):
##w.addItem(str(p[0][i]))
##if p[0][i] == val:
##w.setCurrentIndex(i)
#self.stateGroup.blockSignals(False)
def reconnect(self):
self.dev.reconnect()
| 0.02653 |
import isodate
from django import forms
from django.utils.timezone import utc
OPERATORS = (
'__true__', '__null__', '$', '~', '^', '=', '<=', '>=', '<', '>',
'!__true__', '!__null__', '!$', '!~', '!^', '!=', '!'
)
def split_on_operator(value):
for operator in sorted(OPERATORS, key=len, reverse=True):
if value.startswith(operator):
value = value[len(operator):]
return (operator, value)
return (None, value)
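# Hedged illustration (not part of the original module): split_on_operator
# peels the longest matching operator prefix off the raw value, so '>=13'
# yields ('>=', '13') while a plain '13' yields (None, '13').
def _example_split_on_operator():
    assert split_on_operator('>=13') == ('>=', '13')
    assert split_on_operator('13') == (None, '13')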
class PrefixedField(object):
"""Special field that accepts an operator as prefix in the value.
Removes the prefix from the initial value before the validation process
starts, and puts it back in a different attribute once the validation
process is finished. The cleaned value is the one without the prefix,
which allows the real value to be used and its type to be checked.
The validated, prefixed value is available in `prefixed_value` as a string,
and the prefix is in `operator`.
This is needed to allow fields like IntegerField to accept values
containing an operator. For example, a value such as '>13' will raise
a ValidationError in the basic django IntegerField. Using a PrefixedField
based IntegerField, this value is perfectly valid.
"""
operator = None
prefixed_value = None
def to_python(self, value):
if isinstance(value, basestring):
self.operator, value = split_on_operator(value)
return super(PrefixedField, self).to_python(value)
def clean(self, *args, **kwargs):
cleaned_value = super(PrefixedField, self).clean(*args, **kwargs)
self.prefixed_value = self.value_to_string(cleaned_value)
if self.operator is not None and self.prefixed_value is not None:
self.prefixed_value = self.operator + self.prefixed_value
return cleaned_value
def value_to_string(self, value):
"""Return the value as a string. """
if value is None:
return None
return unicode(value)
class MultipleValueField(forms.MultipleChoiceField):
"""This is the same as a MultipleChoiceField except choices don't matter
as no validation will be done. The advantage is that it will take a list
as input, and output a list as well, allowing several values to be passed.
In the end, it's like a CharField that can take a list of values. It is
used as the default field for supersearch.
"""
def validate(self, value):
pass
class MultiplePrefixedValueField(PrefixedField):
"""Special field that uses a SelectMultiple widget to deal with multiple
values. """
def __init__(self, *args, **kwargs):
kwargs['widget'] = forms.SelectMultiple
super(MultiplePrefixedValueField, self).__init__(*args, **kwargs)
def clean(self, values, *args, **kwargs):
cleaned_values = []
prefixed_values = []
if values is None:
# call the parent class's clean() to run the usual verifications
return super(MultiplePrefixedValueField, self).clean(
values,
*args,
**kwargs
)
for value in values:
cleaned_value = super(MultiplePrefixedValueField, self).clean(
value,
*args,
**kwargs
)
if cleaned_value is not None:
cleaned_values.append(cleaned_value)
prefixed_values.append(self.prefixed_value)
self.prefixed_value = prefixed_values
return cleaned_values
class IntegerField(MultiplePrefixedValueField, forms.IntegerField):
pass
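# Hedged usage sketch (hypothetical form code, not from the original module):
# an operator-prefixed value passes integer validation because only the bare
# number reaches IntegerField, while the prefixed string is kept around.
#   field = IntegerField(required=False)
#   field.clean(['>13', '5'])   # -> [13, 5]
#   field.prefixed_value        # -> ['>13', '5']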
class IsoDateTimeField(forms.DateTimeField):
def to_python(self, value):
if value:
try:
return isodate.parse_datetime(value).replace(tzinfo=utc)
except (ValueError, isodate.isoerror.ISO8601Error):
# let the super method deal with that
pass
return super(IsoDateTimeField, self).to_python(value)
class DateTimeField(MultiplePrefixedValueField, IsoDateTimeField):
def value_to_string(self, value):
if value:
return value.isoformat()
class StringField(MultipleValueField):
"""A CharField with a different name, to be considered as a string
by the dynamic_form.js library. This basically enables string operators
on that field ("contains", "starts with"... ).
"""
pass
class BooleanField(forms.CharField):
def to_python(self, value):
"""Return None if the value is None. Return 'true' if the value is one
of the accepted values. Return 'false' otherwise.
Return boolean values as a string so the middleware doesn't exclude
the field if the value is False.
"""
if value is None:
return None
if str(value).lower() in ('__true__', 'true', 't', '1', 'y', 'yes'):
return 'true'
return 'false'
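# Illustrative examples (added for clarity): any accepted truthy token maps to
# the string 'true', everything else to 'false', and None is passed through.
#   BooleanField().to_python('yes') -> 'true'
#   BooleanField().to_python('0')   -> 'false'
#   BooleanField().to_python(None)  -> None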
| 0 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os.path
import yaml
from murano.dsl import class_loader
from murano.dsl import exceptions
from murano.dsl import murano_package
from murano.dsl import namespace_resolver
from murano.engine.system import yaql_functions
from murano.engine import yaql_yaml_loader
class TestClassLoader(class_loader.MuranoClassLoader):
_classes_cache = {}
def __init__(self, directory, package_name, parent_loader=None):
self._package = murano_package.MuranoPackage()
self._package.name = package_name
self._parent = parent_loader
if directory in TestClassLoader._classes_cache:
self._classes = TestClassLoader._classes_cache[directory]
else:
self._classes = {}
self._build_index(directory)
TestClassLoader._classes_cache[directory] = self._classes
self._functions = {}
super(TestClassLoader, self).__init__()
def find_package_name(self, class_name):
if class_name in self._classes:
return self._package.name
if self._parent:
return self._parent.find_package_name(class_name)
return None
def load_package(self, class_name):
return self._package
def load_definition(self, name):
try:
return self._classes[name]
except KeyError:
if self._parent:
return self._parent.load_definition(name)
raise exceptions.NoClassFound(name)
def _build_index(self, directory):
yamls = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(directory)
for f in fnmatch.filter(files, '*.yaml')]
for class_def_file in yamls:
self._load_class(class_def_file)
def _load_class(self, class_def_file):
with open(class_def_file) as stream:
data = yaml.load(stream, yaql_yaml_loader.YaqlYamlLoader)
if 'Name' not in data:
return
for name, method in (data.get('Methods') or data.get(
'Workflow') or {}).iteritems():
if name.startswith('test'):
method['Usage'] = 'Action'
ns = namespace_resolver.NamespaceResolver(data.get('Namespaces', {}))
class_name = ns.resolve_name(data['Name'])
self._classes[class_name] = data
def create_root_context(self):
context = super(TestClassLoader, self).create_root_context()
yaql_functions.register(context)
for name, func in self._functions.iteritems():
context.register_function(func, name)
return context
def register_function(self, func, name):
self._functions[name] = func
| 0 |
import requests
from collections import OrderedDict
from ..exceptions import ElevationApiError
from ..geometry import Point, LineString
def elevation(path, api_key=None, sampling=50):
"""
Google elevation API backend
"""
url = 'https://maps.googleapis.com/maps/api/elevation/json'
params = {}
points = []
# add api key if present
if api_key:
params['key'] = api_key
# convert path in list of Point objects
for latlng in path.split('|'):
latlng = latlng.split(',')
points.append(Point(float(latlng[1]), float(latlng[0])))
if len(points) > 1:
# approximate length of the path in meters (GEOS length is in degrees)
length = LineString(points).length * 100000
# get 1 point every x meters, where x is defined in
# ELEVATION_DEFAULT_SAMPLING
samples = int(round(length / sampling))
# use the automatically calculated value as long as it is compatible
# with the API usage limits
if samples > 512:
samples = 512
# at least 2 samples
elif samples < 2:
samples = 2
params['samples'] = samples
params['path'] = path
else:
params['locations'] = path
# send request to Google Elevation API
response = requests.get(url, params=params)
data = response.json()
# if ok convert to GeoJSON
if 'status' in data and data['status'] == 'OK':
# if more than one result use LineString
if len(data['results']) > 1:
geometry = 'LineString'
# else use Point
else:
geometry = 'Point'
# lng, lat, z coordinates
coordinates = []
for point in data['results']:
coordinates.append([point['location']['lng'],
point['location']['lat'],
point['elevation']])
return OrderedDict((
('type', 'Feature'),
('geometry', OrderedDict((
('type', geometry),
('coordinates', coordinates)
)))
))
# else return original response
else:
raise ElevationApiError(
"Google Elevation API error:\n\n{0}".format(response.content))
| 0 |
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import warnings
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
## \todo Support cascading menus using "/" in labels. Rework API to
# better match the rest of GafferUI - ditch index based methods, and
# just have setCurrent(), getCurrent() passing strings, and a
# currentChangedSignal(). Maybe use list style methods for managing
# entries (append [] del etc).
class SelectionMenu( GafferUI.Widget ) :
__palette = None
def __init__( self, **kw ) :
warnings.warn( "GafferUI.SelectionMenu is deprecated, use MultiSelectionMenu instead.", DeprecationWarning, 2 )
GafferUI.Widget.__init__( self, QtWidgets.QComboBox(), **kw )
self._qtWidget().currentIndexChanged.connect( Gaffer.WeakMethod( self.__changed ) )
self._qtWidget().activated.connect( Gaffer.WeakMethod( self.__selected ) )
self.__currentIndexChangedSignal = GafferUI.WidgetSignal()
self.__selectedSignal = GafferUI.WidgetSignal()
# combo boxes appear to totally ignore the etch-disabled-text stylesheet option,
# and we really don't like the etching. the only effective way of disabling it
# seems to be to apply this palette which makes the etched text transparent.
## \todo When we extend the Style classes to deal with Widgets, this should be
# done there. The same code exists in the Button class too.
if SelectionMenu.__palette is None :
SelectionMenu.__palette = QtGui.QPalette( QtWidgets.QApplication.instance().palette( self._qtWidget() ) )
SelectionMenu.__palette.setColor( QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor( 0, 0, 0, 0 ) )
self._qtWidget().setPalette( SelectionMenu.__palette )
self._qtWidget().setView( QtWidgets.QListView() )
def selectedSignal( self ):
return self.__selectedSignal
def currentIndexChangedSignal( self ):
return self.__currentIndexChangedSignal
def __changed( self, index ):
self.currentIndexChangedSignal()( self )
def __selected( self, index ):
self.selectedSignal()( self )
def addItem( self, itemName, imageOrImageFileName=None ):
self._qtWidget().addItem(itemName)
if imageOrImageFileName is not None:
self.setIcon( self.getTotal()-1, imageOrImageFileName )
def insertItem( self, index, itemName, imageOrImageFileName=None ):
self._qtWidget().insertItem( index, itemName )
if imageOrImageFileName is not None:
self.setIcon( index, imageOrImageFileName )
def setIcon( self, index, imageOrImageFileName ):
icon = None
assert( isinstance( imageOrImageFileName, ( six.string_types, GafferUI.Image, type( None ) ) ) )
if isinstance( imageOrImageFileName, six.string_types ) :
icon = GafferUI.Image( imageOrImageFileName )
else :
icon = imageOrImageFileName
if icon is not None :
self._qtWidget().setItemIcon( index, QtGui.QIcon(icon._qtPixmap() ) )
self._qtWidget().setIconSize( icon._qtPixmap().size() )
def getIcon( self, index ):
return self._qtWidget().itemIcon( index )
def getCurrentIndex( self ):
return self._qtWidget().currentIndex()
def getCurrentItem( self ):
return str( self._qtWidget().currentText() )
def getTotal( self ):
return self._qtWidget().count()
def setCurrentIndex( self, index ):
self._qtWidget().setCurrentIndex( index )
def getItem( self, index ):
return str( self._qtWidget().itemText( index ) )
def removeIndex( self, index ):
self._qtWidget().removeItem( index )
def insertSeparator( self, index ):
self._qtWidget().insertSeparator( index )
| 0.033786 |
#!/usr/bin/env python
__all__ = ['kuwo_download']
from ..common import *
import re
def kuwo_download_by_rid(rid, output_dir='.', merge=True, info_only=False):
html = get_content("http://player.kuwo.cn/webmusic/st/getNewMuiseByRid?rid=MUSIC_%s" % rid)
title = match1(html, r"<name>(.*)</name>")  # extract the track title
# format=aac|mp3 -> get aac; format=mp3 -> get mp3
url = get_content("http://antiserver.kuwo.cn/anti.s?format=mp3&rid=MUSIC_%s&type=convert_url&response=url" % rid)
songtype, ext, size = url_info(url)
print_info(site_info, title, songtype, size)
if not info_only:
download_urls([url], title, ext, size, output_dir)
def kuwo_playlist_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_content(url)
matched = set(re.compile(r"yinyue/(\d+)").findall(html))  # de-duplicate track ids
for rid in matched:
kuwo_download_by_rid(rid, output_dir, merge, info_only)
def kuwo_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
if "www.kuwo.cn/yinyue" in url:
rid = match1(url, r'yinyue/(\d+)')
kuwo_download_by_rid(rid, output_dir, merge, info_only)
else:
kuwo_playlist_download(url, output_dir, merge, info_only)
site_info = "kuwo.cn"
download = kuwo_download
# download_playlist = playlist_not_supported("kugou")
# download_playlist=playlist_not_supported("kuwo")
download_playlist=kuwo_playlist_download
| 0.035441 |
from gettext import gettext as _
from typing import Optional
from uuid import UUID
# https://www.bluetooth.com/specifications/assigned-numbers/service-discovery
# http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/sdp.h
SDP_SERVER_SVCLASS_ID = 0x1000
BROWSE_GRP_DESC_SVCLASS_ID = 0x1001
PUBLIC_BROWSE_GROUP = 0x1002
SERIAL_PORT_SVCLASS_ID = 0x1101
LAN_ACCESS_SVCLASS_ID = 0x1102
DIALUP_NET_SVCLASS_ID = 0x1103
IRMC_SYNC_SVCLASS_ID = 0x1104
OBEX_OBJPUSH_SVCLASS_ID = 0x1105
OBEX_FILETRANS_SVCLASS_ID = 0x1106
IRMC_SYNC_CMD_SVCLASS_ID = 0x1107
HEADSET_SVCLASS_ID = 0x1108
CORDLESS_TELEPHONY_SVCLASS_ID = 0x1109
AUDIO_SOURCE_SVCLASS_ID = 0x110a
AUDIO_SINK_SVCLASS_ID = 0x110b
AV_REMOTE_TARGET_SVCLASS_ID = 0x110c
ADVANCED_AUDIO_SVCLASS_ID = 0x110d
AV_REMOTE_SVCLASS_ID = 0x110e
AV_REMOTE_CONTROLLER_SVCLASS_ID = 0x110f
INTERCOM_SVCLASS_ID = 0x1110
FAX_SVCLASS_ID = 0x1111
HEADSET_AGW_SVCLASS_ID = 0x1112
WAP_SVCLASS_ID = 0x1113
WAP_CLIENT_SVCLASS_ID = 0x1114
PANU_SVCLASS_ID = 0x1115
NAP_SVCLASS_ID = 0x1116
GN_SVCLASS_ID = 0x1117
DIRECT_PRINTING_SVCLASS_ID = 0x1118
REFERENCE_PRINTING_SVCLASS_ID = 0x1119
IMAGING_SVCLASS_ID = 0x111a
IMAGING_RESPONDER_SVCLASS_ID = 0x111b
IMAGING_ARCHIVE_SVCLASS_ID = 0x111c
IMAGING_REFOBJS_SVCLASS_ID = 0x111d
HANDSFREE_SVCLASS_ID = 0x111e
HANDSFREE_AGW_SVCLASS_ID = 0x111f
DIRECT_PRT_REFOBJS_SVCLASS_ID = 0x1120
REFLECTED_UI_SVCLASS_ID = 0x1121
BASIC_PRINTING_SVCLASS_ID = 0x1122
PRINTING_STATUS_SVCLASS_ID = 0x1123
HID_SVCLASS_ID = 0x1124
HCR_SVCLASS_ID = 0x1125
HCR_PRINT_SVCLASS_ID = 0x1126
HCR_SCAN_SVCLASS_ID = 0x1127
CIP_SVCLASS_ID = 0x1128
VIDEO_CONF_GW_SVCLASS_ID = 0x1129
UDI_MT_SVCLASS_ID = 0x112a
UDI_TA_SVCLASS_ID = 0x112b
AV_SVCLASS_ID = 0x112c
SAP_SVCLASS_ID = 0x112d
PBAP_PCE_SVCLASS_ID = 0x112e
PBAP_PSE_SVCLASS_ID = 0x112f
PBAP_SVCLASS_ID = 0x1130
MAP_MSE_SVCLASS_ID = 0x1132
MAP_MCE_SVCLASS_ID = 0x1133
MAP_SVCLASS_ID = 0x1134
GNSS_SVCLASS_ID = 0x1135
GNSS_SERVER_SVCLASS_ID = 0x1136
MPS_SC_SVCLASS_ID = 0x113A
MPS_SVCLASS_ID = 0x113B
PNP_INFO_SVCLASS_ID = 0x1200
GENERIC_NETWORKING_SVCLASS_ID = 0x1201
GENERIC_FILETRANS_SVCLASS_ID = 0x1202
GENERIC_AUDIO_SVCLASS_ID = 0x1203
GENERIC_TELEPHONY_SVCLASS_ID = 0x1204
UPNP_SVCLASS_ID = 0x1205
UPNP_IP_SVCLASS_ID = 0x1206
UPNP_PAN_SVCLASS_ID = 0x1300
UPNP_LAP_SVCLASS_ID = 0x1301
UPNP_L2CAP_SVCLASS_ID = 0x1302
VIDEO_SOURCE_SVCLASS_ID = 0x1303
VIDEO_SINK_SVCLASS_ID = 0x1304
VIDEO_DISTRIBUTION_SVCLASS_ID = 0x1305
HDP_SVCLASS_ID = 0x1400
HDP_SOURCE_SVCLASS_ID = 0x1401
HDP_SINK_SVCLASS_ID = 0x1402
GENERIC_ACCESS_SVCLASS_ID = 0x1800
GENERIC_ATTRIB_SVCLASS_ID = 0x1801
BATTERY_SERVICE_SVCLASS_ID = 0x180F
APPLE_AGENT_SVCLASS_ID = 0x2112
uuid_names = {
0x0001: _("SDP"),
0x0002: _("UDP"),
0x0003: _("RFCOMM"),
0x0004: _("TCP"),
0x0005: _("TCS-BIN"),
0x0006: _("TCS-AT"),
0x0007: _("ATT"),
0x0008: _("OBEX"),
0x0009: _("IP"),
0x000a: _("FTP"),
0x000c: _("HTTP"),
0x000e: _("WSP"),
0x000f: _("BNEP"),
0x0010: _("UPnP/ESDP"),
0x0011: _("HIDP"),
0x0012: _("Hardcopy Control Channel"),
0x0014: _("Hardcopy Data Channel"),
0x0016: _("Hardcopy Notification"),
0x0017: _("AVCTP"),
0x0019: _("AVDTP"),
0x001b: _("CMTP"),
0x001d: _("UDI_C-Plane"),
0x001e: _("Multi-Channel Adaptation Protocol (MCAP)"),
0x001f: _("Multi-Channel Adaptation Protocol (MCAP)"),
0x0100: _("L2CAP"),
0x1000: _("ServiceDiscoveryServerServiceClassID"),
0x1001: _("BrowseGroupDescriptorServiceClassID"),
0x1002: _("Public Browse Group"),
0x1101: _("Serial Port"),
0x1102: _("LAN Access Using PPP"),
0x1103: _("Dialup Networking (DUN)"),
0x1104: _("IrMC Sync"),
0x1105: _("OBEX Object Push"),
0x1106: _("OBEX File Transfer"),
0x1107: _("IrMC Sync Command"),
0x1108: _("Headset"),
0x1109: _("Cordless Telephony"),
0x110a: _("Audio Source"),
0x110b: _("Audio Sink"),
0x110c: _("Remote Control Target"),
0x110d: _("Advanced Audio"),
0x110e: _("Remote Control"),
0x110f: _("Video Conferencing"),
0x1110: _("Intercom"),
0x1111: _("Fax"),
0x1112: _("Headset Audio Gateway"),
0x1113: _("WAP"),
0x1114: _("WAP Client"),
0x1115: _("PANU"),
0x1116: _("Network Access Point"),
0x1117: _("Group Network"),
0x1118: _("DirectPrinting (BPP)"),
0x1119: _("ReferencePrinting (BPP)"),
0x111a: _("Imaging (BIP)"),
0x111b: _("ImagingResponder (BIP)"),
0x111c: _("ImagingAutomaticArchive (BIP)"),
0x111d: _("ImagingReferencedObjects (BIP)"),
0x111e: _("Handsfree"),
0x111f: _("Handsfree Audio Gateway"),
0x1120: _("DirectPrintingReferenceObjectsService (BPP)"),
0x1121: _("ReflectedUI (BPP)"),
0x1122: _("Basic Printing (BPP)"),
0x1123: _("Printing Status (BPP)"),
0x1124: _("Human Interface Device Service (HID)"),
0x1125: _("HardcopyCableReplacement (HCR)"),
0x1126: _("HCR_Print (HCR)"),
0x1127: _("HCR_Scan (HCR)"),
0x1128: _("Common ISDN Access (CIP)"),
0x1129: _("VideoConferencingGW (VCP)"),
0x112a: _("UDI-MT"),
0x112b: _("UDI-TA"),
0x112c: _("Audio/Video"),
0x112d: _("SIM Access (SAP)"),
0x112e: _("Phonebook Access (PBAP) - PCE"),
0x112f: _("Phonebook Access (PBAP) - PSE"),
0x1130: _("Phonebook Access (PBAP)"),
0x1131: _("Headset"),
0x1132: _("Message Access Server"),
0x1133: _("Message Notification Server"),
0x1134: _("Message Access Profile (MAP)"),
0x1135: _("GNSS"),
0x1136: _("GNSS Server"),
0x1137: _("3D Display"),
0x1138: _("3D Glasses"),
0x1139: _("3D Synchronization (3DSP)"),
0x113a: _("Multi-Profile Specification (MPS) Profile"),
0x113b: _("Multi-Profile Specification (MPS) Service"),
0x113c: _("Calendar, Task, and Notes (CTN) Access Service"),
0x113d: _("Calendar, Task, and Notes (CTN) Notification Service"),
0x113e: _("Calendar, Task, and Notes (CTN) Profile"),
0x1200: _("PnP Information"),
0x1201: _("Generic Networking"),
0x1202: _("Generic FileTransfer"),
0x1203: _("Generic Audio"),
0x1204: _("Generic Telephony"),
0x1303: _("Video Source"),
0x1304: _("Video Sink"),
0x1305: _("Video Distribution"),
0x1400: _("HDP"),
0x1401: _("HDP Source"),
0x1402: _("HDP Sink"),
0x1800: _("Generic Access"),
0x1801: _("Generic Attribute"),
0x1802: _("Immediate Alert"),
0x1803: _("Link Loss"),
0x1804: _("Tx Power"),
0x1805: _("Current Time Service"),
0x1806: _("Reference Time Update Service"),
0x1807: _("Next DST Change Service"),
0x1808: _("Glucose"),
0x1809: _("Health Thermometer"),
0x180A: _("Device Information"),
0x180D: _("Heart Rate"),
0x180E: _("Phone Alert Status Service"),
0x180F: _("Battery Service"),
0x1810: _("Blood Pressure"),
0x1811: _("Alert Notification Service"),
0x1812: _("Human Interface Device"),
0x1813: _("Scan Parameters"),
0x1814: _("Running Speed and Cadence"),
0x1815: _("Automation IO"),
0x1816: _("Cycling Speed and Cadence"),
0x1818: _("Cycling Power"),
0x1819: _("Location and Navigation"),
0x181A: _("Environmental Sensing"),
0x181B: _("Body Composition"),
0x181C: _("User Data"),
0x181D: _("Weight Scale"),
0x181E: _("Bond Management"),
0x181F: _("Continuous Glucose Monitoring"),
0x1820: _("Internet Protocol Support"),
0x1821: _("Indoor Positioning"),
0x1822: _("Pulse Oximeter"),
0x1823: _("HTTP Proxy"),
0x1824: _("Transport Discovery"),
0x1825: _("Object Transfer"),
0x2112: _("AppleAgent"),
0x2800: _("Primary Service"),
0x2801: _("Secondary Service"),
0x2802: _("Include"),
0x2803: _("Characteristic Declaration"),
0x2A00: _("Device Name"),
0x2A01: _("Appearance"),
0x2A02: _("Peripheral Privacy Flag"),
0x2A03: _("Reconnection Address"),
0x2A04: _("Peripheral Preferred Connection Parameters"),
0x2A05: _("Service Changed"),
0x2A23: _("System ID"),
0x2A24: _("Model Number String"),
0x2A25: _("Serial Number String"),
0x2A26: _("Firmware Revision String"),
0x2A27: _("Hardware Revision String"),
0x2A28: _("Software Revision String"),
0x2A29: _("Manufacturer Name String"),
0x2A50: _("PnP ID"),
0x2900: _("Characteristic Extended Properties"),
0x2901: _("Characteristic User Description"),
0x2902: _("Client Characteristic Configuration"),
0x2903: _("Server Characteristic Configuration"),
0x2904: _("Characteristic Presentation Format"),
0x2905: _("Characteristic Aggregate Format"),
0x2906: _("Valid Range"),
0x2907: _("External Report Reference"),
0x2908: _("Report Reference"),
}
SDP_ATTR_RECORD_HANDLE = 0x0000
SDP_ATTR_SVCLASS_ID_LIST = 0x0001
SDP_ATTR_RECORD_STATE = 0x0002
SDP_ATTR_SERVICE_ID = 0x0003
SDP_ATTR_PROTO_DESC_LIST = 0x0004
SDP_ATTR_BROWSE_GRP_LIST = 0x0005
SDP_ATTR_LANG_BASE_ATTR_ID_LIST = 0x0006
SDP_ATTR_SVCINFO_TTL = 0x0007
SDP_ATTR_SERVICE_AVAILABILITY = 0x0008
SDP_ATTR_PFILE_DESC_LIST = 0x0009
SDP_ATTR_DOC_URL = 0x000a
SDP_ATTR_CLNT_EXEC_URL = 0x000b
SDP_ATTR_ICON_URL = 0x000c
SDP_ATTR_ADD_PROTO_DESC_LIST = 0x000d
SDP_ATTR_SUPPORTED_REPOSITORIES = 0x0314
SDP_ATTR_MAS_INSTANCE_ID = 0x0315
SDP_ATTR_SUPPORTED_MESSAGE_TYPES = 0x0316
SDP_ATTR_PBAP_SUPPORTED_FEATURES = 0x0317
SDP_ATTR_MAP_SUPPORTED_FEATURES = 0x0317
SDP_ATTR_SPECIFICATION_ID = 0x0200
SDP_ATTR_VENDOR_ID = 0x0201
SDP_ATTR_PRODUCT_ID = 0x0202
SDP_ATTR_VERSION = 0x0203
SDP_ATTR_PRIMARY_RECORD = 0x0204
SDP_ATTR_VENDOR_ID_SOURCE = 0x0205
SDP_ATTR_HID_DEVICE_RELEASE_NUMBER = 0x0200
SDP_ATTR_HID_PARSER_VERSION = 0x0201
SDP_ATTR_HID_DEVICE_SUBCLASS = 0x0202
SDP_ATTR_HID_COUNTRY_CODE = 0x0203
SDP_ATTR_HID_VIRTUAL_CABLE = 0x0204
SDP_ATTR_HID_RECONNECT_INITIATE = 0x0205
SDP_ATTR_HID_DESCRIPTOR_LIST = 0x0206
SDP_ATTR_HID_LANG_ID_BASE_LIST = 0x0207
SDP_ATTR_HID_SDP_DISABLE = 0x0208
SDP_ATTR_HID_BATTERY_POWER = 0x0209
SDP_ATTR_HID_REMOTE_WAKEUP = 0x020a
SDP_ATTR_HID_PROFILE_VERSION = 0x020b
SDP_ATTR_HID_SUPERVISION_TIMEOUT = 0x020c
SDP_ATTR_HID_NORMALLY_CONNECTABLE = 0x020d
SDP_ATTR_HID_BOOT_DEVICE = 0x020e
SDP_PRIMARY_LANG_BASE = 0x0100
SDP_UUID = 0x0001
UDP_UUID = 0x0002
RFCOMM_UUID = 0x0003
TCP_UUID = 0x0004
TCS_BIN_UUID = 0x0005
TCS_AT_UUID = 0x0006
OBEX_UUID = 0x0008
IP_UUID = 0x0009
FTP_UUID = 0x000a
HTTP_UUID = 0x000c
WSP_UUID = 0x000e
BNEP_UUID = 0x000f
UPNP_UUID = 0x0010
HIDP_UUID = 0x0011
HCRP_CTRL_UUID = 0x0012
HCRP_DATA_UUID = 0x0014
HCRP_NOTE_UUID = 0x0016
AVCTP_UUID = 0x0017
AVDTP_UUID = 0x0019
CMTP_UUID = 0x001b
UDI_UUID = 0x001d
MCAP_CTRL_UUID = 0x001e
MCAP_DATA_UUID = 0x001f
L2CAP_UUID = 0x0100
# GATT UUIDs section
GATT_PRIM_SVC_UUID = 0x2800
GATT_SND_SVC_UUID = 0x2801
GATT_INCLUDE_UUID = 0x2802
GATT_CHARAC_UUID = 0x2803
# GATT Characteristic Types
GATT_CHARAC_DEVICE_NAME = 0x2A00
GATT_CHARAC_APPEARANCE = 0x2A01
GATT_CHARAC_PERIPHERAL_PRIV_FLAG = 0x2A02
GATT_CHARAC_RECONNECTION_ADDRESS = 0x2A03
GATT_CHARAC_PERIPHERAL_PREF_CONN = 0x2A04
GATT_CHARAC_SERVICE_CHANGED = 0x2A05
GATT_CHARAC_SYSTEM_ID = 0x2A23
GATT_CHARAC_MODEL_NUMBER_STRING = 0x2A24
GATT_CHARAC_SERIAL_NUMBER_STRING = 0x2A25
GATT_CHARAC_FIRMWARE_REVISION_STRING = 0x2A26
GATT_CHARAC_HARDWARE_REVISION_STRING = 0x2A27
GATT_CHARAC_SOFTWARE_REVISION_STRING = 0x2A28
GATT_CHARAC_MANUFACTURER_NAME_STRING = 0x2A29
GATT_CHARAC_PNP_ID = 0x2A50
# GATT Characteristic Descriptors
GATT_CHARAC_EXT_PROPER_UUID = 0x2900
GATT_CHARAC_USER_DESC_UUID = 0x2901
GATT_CLIENT_CHARAC_CFG_UUID = 0x2902
GATT_SERVER_CHARAC_CFG_UUID = 0x2903
GATT_CHARAC_FMT_UUID = 0x2904
GATT_CHARAC_AGREG_FMT_UUID = 0x2905
GATT_CHARAC_VALID_RANGE_UUID = 0x2906
GATT_EXTERNAL_REPORT_REFERENCE = 0x2907
GATT_REPORT_REFERENCE = 0x2908
class ServiceUUID(UUID):
def __init__(self, uuid: str):
super().__init__(uuid)
@property
def short_uuid(self) -> Optional[int]:
if self.reserved:
return self.int >> 96 & 0xFFFF
else:
return None
@property
def name(self) -> str:
if self.short_uuid:
try:
return uuid_names[self.short_uuid]
except KeyError:
return _("Unknown")
elif self.int == 0:
return _('Audio and input profiles')
else:
return _('Proprietary')
@property
def reserved(self) -> bool:
return self.int & UUID('FFFF0000-0000-FFFF-FFFF-FFFFFFFFFFFF').int == \
UUID('00000000-0000-1000-8000-00805F9B34FB').int
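# Illustrative example (standard Bluetooth base-UUID form of Serial Port, 0x1101):
#   u = ServiceUUID('00001101-0000-1000-8000-00805F9B34FB')
#   u.reserved    # True  -- matches the Bluetooth base UUID pattern
#   u.short_uuid  # 0x1101
#   u.name        # 'Serial Port'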
| 0 |
from display_exceptions import NotFound, PermissionDenied
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.messages import add_message
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import When, Case
from django.db.models.functions import Value, Concat
from django.http import HttpResponse
from django.shortcuts import redirect
from base.views import render_cms_special, notification
# @login_required
from member.models import Team
def team_info_all(request):
teams = Team.objects.filter(listed=True).order_by('name')
return render_cms_special(request, 'team_info_all.html', dict(
teams=teams,
))
def team_info(request, slug):
try:
team = Team.objects.get(slug=slug)
except Team.DoesNotExist:
raise NotFound(message='No team with {0:} found.'.format(slug), caption='Team not found', next=reverse('team_info_all'))
if team.listed is False:
if not request.user.is_authenticated():
raise PermissionDenied(message='Team {0:} is not visible if you\'re not logged in.'
.format(team), next=reverse('account_login'))
if request.user.pk not in set(member.pk for member in team.roles):
raise PermissionDenied(message='Team {0:} is not visible to you (only admins can view it).'
.format(team), next=reverse('team_info_all'))
return render_cms_special(request, 'team_info.html', dict(
team=team,
))
@login_required
def member_profile_me(request):
return redirect(to=request.user)
@login_required
def member_profile_all(request):
"""
Members are sorted by full name (first + last), with empty names after full ones.
"""
users = get_user_model().objects.filter(is_active=True).annotate(name_null=Case(
When(first_name='', last_name='', then=Value('1')),
default=Concat(Value('0'), 'first_name', Value(' '), 'last_name')
)).order_by('name_null')
return render_cms_special(request, 'member_profile_all.html', dict(
users=users,
))
def member_profile(request, pk=None, label=None):
#todo: cms update title
if pk is None:
if not request.user.is_authenticated():
return redirect('{0}?next={1}'.format(settings.LOGIN_URL, request.path))
return redirect(reverse('profile_info', kwargs=dict(pk=request.user.pk, label=request.user.slug)))
try:
user = get_user_model().objects.get(pk=pk)
except get_user_model().DoesNotExist:
raise NotFound(message='No user with key {0:} found.'.format(pk), caption='User not found', next=reverse('profile_info_all'))
if user.slug != label:
return redirect(user.get_absolute_url())
return render_cms_special(request, 'member_profile.html', dict(
user=user,
))
def member_setup_info(request):
try:
reverse('profile_info_all')
except NoReverseMatch:
if request.user.is_staff:
return notification(request,
title='Member app not set up',
message='The member app seems not to have been added yet. Go to the page where you want the member ' +
'structure, go to advanced settings and add the Member app. This will add the appropriate urls,' +
' which cannot be found now. You can check this page to see if it worked. (Needs staff status)',
)
return notification(request,
title='Not available yet',
message='Sorry, due to a small setup problem, this page is not available yet. Check back soon!',
)
else:
if request.user.is_staff:
return notification(request,
title='Why are you here?',
message='This page shows information for staff in case the user app is not set up yet. ' +
'However, it seems to be set up now (urls can be found), so there is no reason to be sent here...',
)
return redirect(reverse('home'))
| 0.025579 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Emercoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Emercoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | 0.03828 |
import json
from typing import Optional, Type
import pygments.lexer
import pygments.lexers
import pygments.style
import pygments.styles
import pygments.token
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexer import Lexer
from pygments.lexers.special import TextLexer
from pygments.lexers.text import HttpLexer as PygmentsHttpLexer
from pygments.util import ClassNotFound
from ...compat import is_windows
from ...context import Environment
from ...plugins import FormatterPlugin
AUTO_STYLE = 'auto' # Follows terminal ANSI color styles
DEFAULT_STYLE = AUTO_STYLE
SOLARIZED_STYLE = 'solarized' # Bundled here
if is_windows:
# Colors on Windows via colorama don't look that
# great and fruity seems to give the best result there.
DEFAULT_STYLE = 'fruity'
AVAILABLE_STYLES = set(pygments.styles.get_all_styles())
AVAILABLE_STYLES.add(SOLARIZED_STYLE)
AVAILABLE_STYLES.add(AUTO_STYLE)
class ColorFormatter(FormatterPlugin):
"""
Colorize using Pygments
This processor applies syntax highlighting to the headers,
and also to the body if its content type is recognized.
"""
group_name = 'colors'
def __init__(
self,
env: Environment,
explicit_json=False,
color_scheme=DEFAULT_STYLE,
**kwargs
):
super().__init__(**kwargs)
if not env.colors:
self.enabled = False
return
use_auto_style = color_scheme == AUTO_STYLE
has_256_colors = env.colors == 256
if use_auto_style or not has_256_colors:
http_lexer = PygmentsHttpLexer()
formatter = TerminalFormatter()
else:
http_lexer = SimplifiedHTTPLexer()
formatter = Terminal256Formatter(
style=self.get_style_class(color_scheme)
)
self.explicit_json = explicit_json # --json
self.formatter = formatter
self.http_lexer = http_lexer
def format_headers(self, headers: str) -> str:
return pygments.highlight(
code=headers,
lexer=self.http_lexer,
formatter=self.formatter,
).strip()
def format_body(self, body: str, mime: str) -> str:
lexer = self.get_lexer_for_body(mime, body)
if lexer:
body = pygments.highlight(
code=body,
lexer=lexer,
formatter=self.formatter,
)
return body
def get_lexer_for_body(
self, mime: str,
body: str
) -> Optional[Type[Lexer]]:
return get_lexer(
mime=mime,
explicit_json=self.explicit_json,
body=body,
)
@staticmethod
def get_style_class(color_scheme: str) -> Type[pygments.style.Style]:
try:
return pygments.styles.get_style_by_name(color_scheme)
except ClassNotFound:
return Solarized256Style
def get_lexer(
mime: str,
explicit_json=False,
body=''
) -> Optional[Type[Lexer]]:
# Build candidate mime type and lexer names.
mime_types, lexer_names = [mime], []
type_, subtype = mime.split('/', 1)
if '+' not in subtype:
lexer_names.append(subtype)
else:
subtype_name, subtype_suffix = subtype.split('+', 1)
lexer_names.extend([subtype_name, subtype_suffix])
mime_types.extend([
f'{type_}/{subtype_name}',
f'{type_}/{subtype_suffix}',
])
# As a last resort, if no lexer feels responsible, and
# the subtype contains 'json', take the JSON lexer
if 'json' in subtype:
lexer_names.append('json')
# Try to resolve the right lexer.
lexer = None
for mime_type in mime_types:
try:
lexer = pygments.lexers.get_lexer_for_mimetype(mime_type)
break
except ClassNotFound:
pass
else:
for name in lexer_names:
try:
lexer = pygments.lexers.get_lexer_by_name(name)
except ClassNotFound:
pass
if explicit_json and body and (not lexer or isinstance(lexer, TextLexer)):
# JSON response with an incorrect Content-Type?
try:
json.loads(body) # FIXME: the body also gets parsed in json.py
except ValueError:
pass # Nope
else:
lexer = pygments.lexers.get_lexer_by_name('json')
return lexer
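# Illustrative resolution behaviour (sketch based on the fallbacks above):
#   get_lexer('application/json')          # JSON lexer via the exact mime type
#   get_lexer('application/vnd.api+json')  # JSON lexer via the '+json' suffix
#   get_lexer('text/plain', explicit_json=True, body='{"a": 1}')
#                                          # JSON lexer despite the mime type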
class SimplifiedHTTPLexer(pygments.lexer.RegexLexer):
"""Simplified HTTP lexer for Pygments.
It only operates on headers and provides a stronger contrast between
their names and values than the original one bundled with Pygments
(:class:`pygments.lexers.text.HttpLexer`), especially when the
Solarized color scheme is used.
"""
name = 'HTTP'
aliases = ['http']
filenames = ['*.http']
tokens = {
'root': [
# Request-Line
(r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
pygments.lexer.bygroups(
pygments.token.Name.Function,
pygments.token.Text,
pygments.token.Name.Namespace,
pygments.token.Text,
pygments.token.Keyword.Reserved,
pygments.token.Operator,
pygments.token.Number
)),
# Response Status-Line
(r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
pygments.lexer.bygroups(
pygments.token.Keyword.Reserved, # 'HTTP'
pygments.token.Operator, # '/'
pygments.token.Number, # Version
pygments.token.Text,
pygments.token.Number, # Status code
pygments.token.Text,
pygments.token.Name.Exception, # Reason
)),
# Header
(r'(.*?)( *)(:)( *)(.+)', pygments.lexer.bygroups(
pygments.token.Name.Attribute, # Name
pygments.token.Text,
pygments.token.Operator, # Colon
pygments.token.Text,
pygments.token.String # Value
))
]
}
class Solarized256Style(pygments.style.Style):
"""
solarized256
------------
A Pygments style inspired by Solarized's 256 color mode.
:copyright: (c) 2011 by Hank Gay, (c) 2012 by John Mastro.
:license: BSD, see LICENSE for more details.
"""
BASE03 = "#1c1c1c"
BASE02 = "#262626"
BASE01 = "#4e4e4e"
BASE00 = "#585858"
BASE0 = "#808080"
BASE1 = "#8a8a8a"
BASE2 = "#d7d7af"
BASE3 = "#ffffd7"
YELLOW = "#af8700"
ORANGE = "#d75f00"
RED = "#af0000"
MAGENTA = "#af005f"
VIOLET = "#5f5faf"
BLUE = "#0087ff"
CYAN = "#00afaf"
GREEN = "#5f8700"
background_color = BASE03
styles = {
pygments.token.Keyword: GREEN,
pygments.token.Keyword.Constant: ORANGE,
pygments.token.Keyword.Declaration: BLUE,
pygments.token.Keyword.Namespace: ORANGE,
pygments.token.Keyword.Reserved: BLUE,
pygments.token.Keyword.Type: RED,
pygments.token.Name.Attribute: BASE1,
pygments.token.Name.Builtin: BLUE,
pygments.token.Name.Builtin.Pseudo: BLUE,
pygments.token.Name.Class: BLUE,
pygments.token.Name.Constant: ORANGE,
pygments.token.Name.Decorator: BLUE,
pygments.token.Name.Entity: ORANGE,
pygments.token.Name.Exception: YELLOW,
pygments.token.Name.Function: BLUE,
pygments.token.Name.Tag: BLUE,
pygments.token.Name.Variable: BLUE,
pygments.token.String: CYAN,
pygments.token.String.Backtick: BASE01,
pygments.token.String.Char: CYAN,
pygments.token.String.Doc: CYAN,
pygments.token.String.Escape: RED,
pygments.token.String.Heredoc: CYAN,
pygments.token.String.Regex: RED,
pygments.token.Number: CYAN,
pygments.token.Operator: BASE1,
pygments.token.Operator.Word: GREEN,
pygments.token.Comment: BASE01,
pygments.token.Comment.Preproc: GREEN,
pygments.token.Comment.Special: GREEN,
pygments.token.Generic.Deleted: CYAN,
pygments.token.Generic.Emph: 'italic',
pygments.token.Generic.Error: RED,
pygments.token.Generic.Heading: ORANGE,
pygments.token.Generic.Inserted: GREEN,
pygments.token.Generic.Strong: 'bold',
pygments.token.Generic.Subheading: ORANGE,
pygments.token.Token: BASE1,
pygments.token.Token.Other: ORANGE,
}
| 0 |
# -*- coding: utf-8 -*-
import re
from ..base.addon import BaseAddon
class LinkdecrypterComHook(BaseAddon):
__name__ = "LinkdecrypterComHook"
__type__ = "addon"
__version__ = "1.11"
__status__ = "broken"
__config__ = [
("enabled", "bool", "Activated", False),
("pluginmode", "all;listed;unlisted", "Use for plugins", "all"),
("pluginlist", "str", "Plugin list (comma separated)", ""),
("reload", "bool", "Reload plugin list", True),
("reloadinterval", "int", "Reload interval in hours", 12),
]
__description__ = """Linkdecrypter.com addon plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def get_hosters(self):
list = (
re.search(
r">Supported\(\d+\)</b>: <i>(.[\w\-., ]+)",
self.load("http://linkdecrypter.com/").replace("(g)", ""),
)
.group(1)
.split(", ")
)
try:
list.remove("download.serienjunkies.org")
except ValueError:
pass
return list
| 0 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
import webob
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron import quota
from neutron import wsgi
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
DB_QUOTA_DRIVER = 'neutron.db.quota_db.DbQuotaDriver'
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
def __init__(self, plugin):
self._resource_name = RESOURCE_NAME
self._plugin = plugin
self._driver = importutils.import_class(
cfg.CONF.QUOTAS.quota_driver
)
self._update_extended_attributes = True
def _update_attributes(self):
for quota_resource in QUOTAS.resources.iterkeys():
attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
attr_dict[quota_resource] = {
'allow_post': False,
'allow_put': True,
'convert_to': attributes.convert_to_int,
'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]},
'is_visible': True}
self._update_extended_attributes = False
def _get_quotas(self, request, tenant_id):
return self._driver.get_tenant_quotas(
request.context, QUOTAS.resources, tenant_id)
def create(self, request, body=None):
msg = _('POST requests are not supported on this resource.')
raise webob.exc.HTTPNotImplemented(msg)
def index(self, request):
context = request.context
self._check_admin(context)
return {self._resource_name + "s":
self._driver.get_all_quotas(context, QUOTAS.resources)}
def tenant(self, request):
"""Retrieve the tenant info in context."""
context = request.context
if not context.tenant_id:
raise n_exc.QuotaMissingTenant()
return {'tenant': {'tenant_id': context.tenant_id}}
def show(self, request, id):
if id != request.context.tenant_id:
self._check_admin(request.context,
reason=_("Only admin is authorized "
"to access quotas for another tenant"))
return {self._resource_name: self._get_quotas(request, id)}
def _check_admin(self, context,
reason=_("Only admin can view or configure quota")):
if not context.is_admin:
raise n_exc.AdminRequired(reason=reason)
def delete(self, request, id):
self._check_admin(request.context)
self._driver.delete_tenant_quota(request.context, id)
def update(self, request, id, body=None):
self._check_admin(request.context)
if self._update_extended_attributes:
self._update_attributes()
body = base.Controller.prepare_request_body(
request.context, body, False, self._resource_name,
EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
for key, value in body[self._resource_name].items():
self._driver.update_quota_limit(request.context, id, key, value)
return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
"""Quotas management support."""
@classmethod
def get_name(cls):
return "Quota management support"
@classmethod
def get_alias(cls):
return RESOURCE_COLLECTION
@classmethod
def get_description(cls):
description = 'Expose functions for quotas management'
if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
description += ' per tenant'
return description
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0"
@classmethod
def get_updated(cls):
return "2012-07-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
controller = resource.Resource(
QuotaSetsController(manager.NeutronManager.get_plugin()),
faults=base.FAULT_MAP)
return [extensions.ResourceExtension(
Quotasv2.get_alias(),
controller,
collection_actions={'tenant': 'GET'})]
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| 0 |
########################################################################
# $HeadURL$
# File : Optimizer.py
# Author : Stuart Paterson
########################################################################
"""
The Optimizer base class is an agent that polls for jobs with a specific
status and minor status pair. The checkJob method is overridden for all
optimizer instances and associated actions are performed there.
"""
__RCSID__ = "$Id$"
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.AccountingSystem.Client.Types.Job import Job as AccountingJob
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC import S_OK, S_ERROR
class OptimizerModule( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
#############################################################################
def initialize( self, jobDB = False, logDB = False ):
""" Initialization of the Optimizer Agent.
"""
if not jobDB:
self.jobDB = JobDB()
else:
self.jobDB = jobDB
if not logDB:
self.logDB = JobLoggingDB()
else:
self.logDB = logDB
trailing = "Agent"
optimizerName = self.am_getModuleParam( 'agentName' )
if optimizerName[ -len( trailing ):].find( trailing ) == 0:
optimizerName = optimizerName[ :-len( trailing ) ]
self.am_setModuleParam( 'optimizerName', optimizerName )
self.startingMinorStatus = self.am_getModuleParam( 'optimizerName' )
self.startingMajorStatus = "Checking"
self.failedStatus = self.am_getOption( "FailedJobStatus" , 'Failed' )
self.requiredJobInfo = 'jdl'
self.am_setOption( "PollingTime", 30 )
return self.initializeOptimizer()
def initializeOptimizer( self ):
""" To be overwritten by inheriting class
"""
return S_OK()
#############################################################################
def execute( self ):
""" The main agent execution method
"""
result = self.initializeOptimizer()
if not result[ 'OK' ]:
return result
self._initResult = result[ 'Value' ]
condition = { 'Status' : self.startingMajorStatus }
if self.startingMinorStatus:
condition[ 'MinorStatus' ] = self.startingMinorStatus
result = self.jobDB.selectJobs( condition )
if not result['OK']:
self.log.warn( 'Failed to get a job list from the JobDB' )
return S_ERROR( 'Failed to get a job list from the JobDB' )
if not len( result['Value'] ):
self.log.verbose( 'No pending jobs to process' )
return S_OK( 'No work to do' )
for job in result['Value']:
result = self.getJobDefinition( job )
if not result['OK']:
self.setFailedJob( job, result[ 'Message' ], '' )
continue
jobDef = result[ 'Value' ]
result = self.optimizeJob( job, jobDef[ 'classad' ] )
return S_OK()
#############################################################################
def optimizeJob( self, job, classAdJob ):
""" Call the corresponding Optimizer checkJob method
"""
self.log.info( 'Job %s will be processed by %sAgent' % ( job, self.am_getModuleParam( 'optimizerName' ) ) )
result = self.checkJob( job, classAdJob )
if not result['OK']:
self.setFailedJob( job, result['Message'], classAdJob )
return result
#############################################################################
def getJobDefinition( self, job, jobDef = False ):
""" Retrieve JDL of the Job and return jobDef dictionary
"""
if jobDef is False:
jobDef = {}
#If not jdl in jobinfo load it
if 'jdl' not in jobDef:
if 'jdlOriginal' == self.requiredJobInfo:
result = self.jobDB.getJobJDL( job, original = True )
if not result[ 'OK' ]:
self.log.error( "No JDL for job", "%s" % job )
return S_ERROR( "No JDL for job" )
jobDef[ 'jdl' ] = result[ 'Value' ]
if 'jdl' == self.requiredJobInfo:
result = self.jobDB.getJobJDL( job )
if not result[ 'OK' ]:
self.log.error( "No JDL for job", "%s" % job )
return S_ERROR( "No JDL for job" )
jobDef[ 'jdl' ] = result[ 'Value' ]
#Load the classad if needed
if 'jdl' in jobDef and not 'classad' in jobDef:
try:
classad = ClassAd( jobDef[ 'jdl' ] )
except:
self.log.debug( "Cannot load JDL" )
return S_ERROR( 'Illegal Job JDL' )
if not classad.isOK():
self.log.debug( "Warning: illegal JDL for job %s, will be marked problematic" % ( job ) )
return S_ERROR( 'Illegal Job JDL' )
jobDef[ 'classad' ] = classad
return S_OK( jobDef )
#############################################################################
def getOptimizerJobInfo( self, job, reportName ):
"""This method gets job optimizer information that will
be used for job scheduling and TURL queries on the WN.
"""
self.log.verbose( "self.jobDB.getJobOptParameter(%s,'%s')" % ( job, reportName ) )
result = self.jobDB.getJobOptParameter( job, reportName )
if result['OK']:
value = result['Value']
if not value:
self.log.warn( 'JobDB returned null value for %s %s' % ( job, reportName ) )
return S_ERROR( 'No optimizer info returned' )
else:
try:
return S_OK( eval( value ) )
except Exception, x:
return S_ERROR( 'Could not evaluate optimizer parameters' )
return result
#############################################################################
def setOptimizerJobInfo( self, job, reportName, value ):
"""This method sets the job optimizer information that will subsequently
be used for job scheduling and TURL queries on the WN.
"""
self.log.verbose( "self.jobDB.setJobOptParameter(%s,'%s','%s')" % ( job, reportName, value ) )
if not self.am_Enabled():
return S_OK()
return self.jobDB.setJobOptParameter( job, reportName, str( value ) )
#############################################################################
def setOptimizerChain( self, job, value ):
"""This method sets the job optimizer chain, in principle only needed by
one of the optimizers.
"""
self.log.verbose( "self.jobDB.setOptimizerChain(%s,%s)" % ( job, value ) )
if not self.am_Enabled():
return S_OK()
return self.jobDB.setOptimizerChain( job, value )
#############################################################################
def setNextOptimizer( self, job ):
"""This method is executed when the optimizer instance has successfully
processed the job. The next optimizer in the chain will subsequently
start to work on the job.
"""
result = self.logDB.addLoggingRecord( job, status = self.startingMajorStatus,
minor = self.startingMinorStatus,
source = self.am_getModuleParam( "optimizerName" ) )
if not result['OK']:
self.log.warn( result['Message'] )
self.log.verbose( "self.jobDB.setNextOptimizer(%s,'%s')" % ( job, self.am_getModuleParam( "optimizerName" ) ) )
return self.jobDB.setNextOptimizer( job, self.am_getModuleParam( "optimizerName" ) )
#############################################################################
def updateJobStatus( self, job, status, minorStatus = None, appStatus = None ):
"""This method updates the job status in the JobDB, this should only be
used to fail jobs due to the optimizer chain.
"""
self.log.verbose( "self.jobDB.setJobStatus(%s,'Status/Minor/Application','%s'/'%s'/'%s',update=True)" %
( job, status, str( minorStatus ), str( appStatus ) ) )
if not self.am_Enabled():
return S_OK()
result = self.jobDB.setJobStatus( job, status, minorStatus, appStatus )
if not result['OK']:
return result
result = self.logDB.addLoggingRecord( job, status = status, minor = minorStatus, application = appStatus,
source = self.am_getModuleParam( 'optimizerName' ) )
if not result['OK']:
self.log.warn ( result['Message'] )
return S_OK()
#############################################################################
def setJobParam( self, job, reportName, value ):
"""This method updates a job parameter in the JobDB.
"""
self.log.verbose( "self.jobDB.setJobParameter(%s,'%s','%s')" % ( job, reportName, value ) )
if not self.am_Enabled():
return S_OK()
return self.jobDB.setJobParameter( job, reportName, value )
#############################################################################
def setFailedJob( self, job, msg, classAdJob = None ):
"""This method moves the job to the failed status
"""
self.log.verbose( "self.updateJobStatus(%s,'%s','%s')" % ( job, self.failedStatus, msg ) )
if not self.am_Enabled():
return S_OK()
self.updateJobStatus( job, self.failedStatus, msg )
if classAdJob:
self.sendAccountingRecord( job, msg, classAdJob )
#############################################################################
def checkJob( self, job, classad ):
"""This method controls the checking of the job, should be overridden in a subclass
"""
self.log.warn( 'Optimizer: checkJob method should be implemented in a subclass' )
return S_ERROR( 'Optimizer: checkJob method should be implemented in a subclass' )
#############################################################################
def sendAccountingRecord( self, job, msg, classAdJob ):
"""
    Send an accounting record for the failed job
"""
accountingReport = AccountingJob()
accountingReport.setStartTime()
accountingReport.setEndTime()
owner = classAdJob.getAttributeString( 'Owner' )
userGroup = classAdJob.getAttributeString( 'OwnerGroup' )
jobGroup = classAdJob.getAttributeString( 'JobGroup' )
jobType = classAdJob.getAttributeString( 'JobType' )
jobClass = 'unknown'
if classAdJob.lookupAttribute( 'JobSplitType' ):
jobClass = classAdJob.getAttributeString( 'JobSplitType' )
inputData = []
processingType = 'unknown'
if classAdJob.lookupAttribute( 'ProcessingType' ):
processingType = classAdJob.getAttributeString( 'ProcessingType' )
if classAdJob.lookupAttribute( 'InputData' ):
inputData = classAdJob.getListFromExpression( 'InputData' )
inputDataFiles = len( inputData )
outputData = []
if classAdJob.lookupAttribute( 'OutputData' ):
outputData = classAdJob.getListFromExpression( 'OutputData' )
outputDataFiles = len( outputData )
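    # the job never ran, so all resource-usage figures below are reported as zero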
acData = {
'User' : owner,
'UserGroup' : userGroup,
'JobGroup' : jobGroup,
'JobType' : jobType,
'JobClass' : jobClass,
'ProcessingType' : processingType,
'FinalMajorStatus' : self.failedStatus,
'FinalMinorStatus' : msg,
'CPUTime' : 0.0,
'NormCPUTime' : 0.0,
'ExecTime' : 0.0,
'InputDataSize' : 0.0,
'OutputDataSize' : 0.0,
'InputDataFiles' : inputDataFiles,
'OutputDataFiles' : outputDataFiles,
'DiskSpace' : 0.0,
'InputSandBoxSize' : 0.0,
'OutputSandBoxSize' : 0.0,
'ProcessedEvents' : 0.0
}
self.log.verbose( 'Accounting Report is:' )
self.log.verbose( acData )
accountingReport.setValuesFromDict( acData )
return accountingReport.commit()
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| 0.033725 |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# plot_correlation_energy.py
#
# author: H. Hergert
# version: 1.0.0
# date: Dec 6, 2016
#
# tested with Python v2.7
#
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import glob
from pylab import *
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
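# 6x6 Hamiltonian of the pairing model (two pairs distributed over four
# doubly-degenerate levels) in the basis of pair configurations;
# delta is the single-particle level spacing and g the pairing strength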
def Hamiltonian(delta,g):
H = array(
[[2*delta-g, -0.5*g, -0.5*g, -0.5*g, -0.5*g, 0.],
[ -0.5*g, 4*delta-g, -0.5*g, -0.5*g, 0., -0.5*g ],
[ -0.5*g, -0.5*g, 6*delta-g, 0., -0.5*g, -0.5*g ],
[ -0.5*g, -0.5*g, 0., 6*delta-g, -0.5*g, -0.5*g ],
[ -0.5*g, 0., -0.5*g, -0.5*g, 8*delta-g, -0.5*g ],
[ 0., -0.5*g, -0.5*g, -0.5*g, -0.5*g, 10*delta-g ]]
)
return H
def myLabels(x, pos):
return '$%s$'%x
# pairing strengths
glist = [ -1.0, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3,
0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0 ]
# uncorrelated energy
uncorr = [ 3.0, 2.9, 2.8, 2.7, 2.6, 2.5, 2.4, 2.3, 2.2, 2.1, 2.0, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4,
1.3, 1.2, 1.1, 1.0 ]
# exact
eigenvals = [eigvalsh(Hamiltonian(1.0,g))[0] for g in glist]
exact = [ a - b for a, b in zip(eigenvals,uncorr)]
# CCD correlation energy
ccd = [ -0.21895200, -0.18230800, -0.14813200, -0.11667400, -0.08821240, -0.06305620, -0.04154780,
-0.02406310, -0.01101140, -0.00283397, 0.00000000, -0.00300047, -0.01233930, -0.02852180,
-0.05204090, -0.08336220, -0.12290800, -0.17104400, -0.22806500, -0.29418800, -0.36955000 ]
# MBPT4 correlation energy
mbpt4 = [ -0.44400000, -0.28809800, -0.19581600, -0.13686300, -0.09602420, -0.06570970, -0.04228900,
-0.02421400, -0.01102850, -0.00283444, 0.00000000, -0.00300012, -0.01232950, -0.02845710,
-0.05180360, -0.08273260, -0.12154800, -0.16849800, -0.22377500, -0.28752700, -0.35985600 ]
# IMSRG, MBPT2, MBPT3
mbpt2 = [ ]
mbpt3 = [ ]
white = [ ]
wegner = [ ]
imtime = [ ]
# White generator flows - the MBPT numbers are contained in all flows, regardless of generator
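# in each .flow file, data[:,1] is the flowing energy E(s), data[0,2] the MBPT2
# correction and data[0,3] the third-order correction; the IMSRG correlation
# energy is obtained as data[-1,1] - data[0,1]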
for g in glist:
filename = glob.glob("results/imsrg-white*g%+3.1f*.flow"%(g))[0]
data = np.loadtxt(filename, skiprows=2)
if g != 0.0:
mbpt2.append(data[0,2]) # correlation energy: just MBPT2
mbpt3.append(data[0,2] + data[0,3]) # correlation energy: MBPT2+3 corrections
white.append(data[-1,1] - data[0,1])
else:
mbpt2.append(0.0)
mbpt3.append(0.0)
white.append(0.0)
# Wegner generator flows
for g in glist:
filename = glob.glob("results/imsrg-wegner*g%+3.1f*.flow"%(g))[0]
data = np.loadtxt(filename, skiprows=2)
if g != 0.0:
wegner.append(data[-1,1] - data[0,1])
else:
wegner.append(0.0)
# imaginary time generator flows
for g in glist:
filename = glob.glob("results/imsrg-imtime*g%+3.1f*.flow"%(g))[0]
data = np.loadtxt(filename, skiprows=2)
if g != 0.0:
imtime.append(data[-1,1] - data[0,1])
else:
imtime.append(0.0)
#------------------------------------------------------------------------------
# Comparison of methods
#------------------------------------------------------------------------------
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig = figure(figsize=(8,6))
# pl, ax = plt.subplots()
ax = fig.gca()
ax.tick_params(axis='both',which='major',width=1.5,length=8)
ax.tick_params(axis='both',which='minor',width=1.5,length=5)
ax.tick_params(axis='both',width=2,length=10,labelsize=20)
ax.minorticks_on()
for s in ['left', 'right', 'top', 'bottom']:
ax.spines[s].set_linewidth(2)
ax.set_xlim([-1.05,1.05])
ax.set_ylim([-0.5,0.06])
ax.xaxis.set_major_formatter(FuncFormatter(myLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLabels))
plt.xlabel('$g\,\mathrm{[a.u.]}$', fontsize=20)
plt.ylabel('$E_\mathrm{corr}\, \mathrm{[a.u.]}$', fontsize=20)
pl_exact = plt.plot(glist, exact, color='black',linestyle='-', linewidth = 2.0, label = 'exact')
pl_mbpt2 = plt.plot(glist, mbpt2, marker='^', markersize=8, color='gold', linestyle='-', linewidth = 2.0, label = 'MBPT(2)')
pl_mbpt3 = plt.plot(glist, mbpt3, marker='v', markersize=8, color='orange', linestyle='-', linewidth = 2.0, label = 'MBPT(3)')
pl_mbpt4 = plt.plot(glist, mbpt4, marker='D', markersize=8, color='red', linestyle='--', linewidth = 2.0, label = 'MBPT(4)')
pl_ccd = plt.plot(glist, ccd, marker='s', markersize=8, color='green', dashes=[8,6], linewidth = 2.0, label = 'CCD')
pl_white = plt.plot(glist, white, marker='o', markersize=8, color='blue', linestyle='-', linewidth = 2.0, label = 'IMSRG(2)')
plt.legend(bbox_to_anchor=(0.35, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("correlation_energy.pdf", bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
#------------------------------------------------------------------------------
# Comparison of generators
#------------------------------------------------------------------------------
fig = figure(figsize=(8,6))
# pl, ax = plt.subplots()
ax = fig.gca()
ax.tick_params(axis='both',which='major',width=1.5,length=8)
ax.tick_params(axis='both',which='minor',width=1.5,length=5)
ax.tick_params(axis='both',width=2,length=10,labelsize=20)
ax.minorticks_on()
for s in ['left', 'right', 'top', 'bottom']:
ax.spines[s].set_linewidth(2)
ax.set_xlim([-1.05,1.05])
ax.set_ylim([-0.5,0.02])
ax.xaxis.set_major_formatter(FuncFormatter(myLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLabels))
plt.xlabel('$g\,\mathrm{[a.u.]}$', fontsize=20)
plt.ylabel('$E_\mathrm{corr}\, \mathrm{[a.u.]}$', fontsize=20)
pl_exact = plt.plot(glist, exact, color='black',linestyle='-', linewidth = 2.0, label = 'exact')
pl_imtime = plt.plot(glist, imtime, marker='D', markersize=8, color='green', linestyle='solid', linewidth = 2.0, label = 'imag. time')
pl_wegner = plt.plot(glist, wegner, marker='s', markersize=8, color='red', dashes=[8,6], linewidth = 2.0, label = 'Wegner')
pl_white = plt.plot(glist, white, marker='o', markersize=8, color='blue', dashes=[2,2,4],linewidth = 2.0, label = 'White')
plt.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("correlation_energy_generators.pdf", bbox_inches="tight", pad_inches=0.05)
# plt.show()
| 0.032214 |
from core.vectors import PhpCode, ShellCmd, ModuleExec, PhpFile, Os
from core.module import Module
from core import modules
from core import messages
from utils.strings import str2hex
from core.loggers import log
import os
from ast import literal_eval
import urllib.request, urllib.parse, urllib.error
class Curl(Module):
"""Perform a curl-like HTTP request."""
aliases = [ 'curl' ]
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_vectors(
[
PhpFile(
payload_path = os.path.join(self.folder, 'php_context.tpl'),
name = 'file_get_contents',
arguments = [ '-raw-response' ]
),
PhpFile(
payload_path = os.path.join(self.folder, 'php_context.tpl'),
name = 'fopen_stream_get_contents',
arguments = [ '-raw-response' ]
),
PhpFile(
payload_path = os.path.join(self.folder, 'php_context.tpl'),
name = 'fopen_fread',
arguments = [ '-raw-response' ]
),
PhpFile(
payload_path = os.path.join(self.folder, 'php_curl.tpl'),
name = 'php_curl',
arguments = [ '-raw-response' ]
),
PhpFile(
payload_path = os.path.join(self.folder, 'php_httprequest1.tpl'),
name = 'php_httprequest1',
arguments = [ '-raw-response' ]
),
# TODO: fix this, it fails the "POST request with binary string" test
# due to some bash limitation with null bytes.
# ShellCmd(
# payload = """curl -s -i ${ '-A "$(env echo -ne \"%s\")"' % user_agent if user_agent else "" } ${ '--connect-timeout %i' % connect_timeout } ${ '-X %s' % request if (not data and request) else '' } ${ " ".join([ '-H "$(env echo -ne \"%s\")"' % h for h in header ]) } ${ '-b "$(env echo -ne \"%s\")"' % cookie if cookie else '' } ${ '--data-binary $(env echo -ne "%s")' % ' '.join(data) if data else '' } ${ '$(env echo -ne "%s")' % url }""",
# name = 'sh_curl'
# )
]
)
self.register_arguments([
{ 'name' : 'url' },
{ 'name' : '--header', 'dest' : 'header', 'action' : 'append', 'default' : [] },
{ 'name' : '-H', 'dest' : 'header', 'action' : 'append', 'default' : [] },
{ 'name' : '--cookie', 'dest' : 'cookie' },
{ 'name' : '-b', 'dest' : 'cookie' },
{ 'name' : '--data', 'dest' : 'data', 'action' : 'append', 'default' : [] },
{ 'name' : '-d', 'dest' : 'data', 'action' : 'append', 'default' : [] },
{ 'name' : '--user-agent', 'dest' : 'user_agent' },
{ 'name' : '-A', 'dest' : 'user_agent' },
            { 'name' : '--connect-timeout', 'type' : int, 'default' : 5, 'help' : 'Default: 5' },
{ 'name' : '--request', 'dest' : 'request', 'choices' : ( 'GET', 'HEAD', 'POST', 'PUT', 'OPTIONS' ), 'default' : 'GET' },
{ 'name' : '-X', 'dest' : 'request', 'choices' : ( 'GET', 'HEAD', 'POST', 'PUT', 'OPTIONS' ), 'default' : 'GET' },
{ 'name' : '--output', 'dest' : 'output' },
{ 'name' : '-o', 'dest' : 'output' },
{ 'name' : '-i', 'dest' : 'include_headers', 'help' : 'Include response headers', 'action' : 'store_true', 'default' : False },
{ 'name' : '-local', 'action' : 'store_true', 'default' : False, 'help' : 'Save file locally with -o|--output' },
{ 'name' : '-vector', 'choices' : self.vectors.get_names(), 'default' : 'file_get_contents' }
])
def _encode(self):
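        # hex-encode all user-supplied values so they can be embedded in the
        # PHP payload templates without quoting or escaping issues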
self.args['url'] = str2hex(self.args['url'])
if self.args['data']:
self.args['data'] = [ str2hex(x) for x in self.args['data'] ]
if self.args['user_agent']:
self.args['user_agent'] = str2hex(self.args['user_agent'])
if self.args['cookie']:
self.args['cookie'] = str2hex(self.args['cookie'])
if self.args['header']:
self.args['header'] = [ str2hex(x) for x in self.args['header'] ]
def run(self):
headers = []
saved = None
self._encode()
vector_name, result = self.vectors.find_first_result(
names = [ self.args.get('vector') ],
format_args = self.args,
condition = lambda r: r if r and r.strip() else None
)
# Print error and exit with no response or no headers
if not (vector_name and result):
log.warning(messages.module_net_curl.unexpected_response)
return None, headers, saved
        elif b'\r\n'*2 not in result:
            # If something is returned but there is no double \r\n, we treat
            # the whole response as headers. This happens with 204 No Content
            # responses that end with \r\n\r.
headers = result
result = b''
else:
headers, result = result.split(b'\r\n'*2, 1)
headers = (
[
h.rstrip() for h
in headers.split(b'\r\n')
] if b'\r\n' in headers
else headers
)
output_path = self.args.get('output')
if output_path:
            # If the response must be saved on the remote host, it is safer to
            # do it with an additional file_upload request
if not self.args.get('local'):
saved = ModuleExec('file_upload', [ '-content', result, output_path ]).run()
else:
try:
with open(output_path, 'wb') as resultfile:
resultfile.write(result)
except Exception as e:
log.warning(
messages.generic.error_loading_file_s_s % (output_path, str(e)))
saved = False
else:
saved = True
return result, headers, saved
def print_result(self, result):
resultstring = result[0].decode("utf-8", "replace")
headers = [ r.decode("utf-8", "replace") for r in result[1] ]
saved = result[2]
        # If the response was saved to a file, do not print the body
if self.args.get('output'):
log.info(saved)
return
# Print headers if requested
if self.args.get('include_headers'):
log.info( '\r\n'.join(headers) + '\r\n')
if resultstring:
log.info(resultstring)
| 0.027894 |
from django.contrib import admin
from oscar.core.loading import get_model
from treebeard.admin import TreeAdmin
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
class AttributeInline(admin.TabularInline):
model = ProductAttributeValue
class ProductRecommendationInline(admin.TabularInline):
model = ProductRecommendation
fk_name = 'primary'
class CategoryInline(admin.TabularInline):
model = ProductCategory
extra = 1
class ProductAttributeInline(admin.TabularInline):
model = ProductAttribute
extra = 2
class ProductClassAdmin(admin.ModelAdmin):
list_display = ('name', 'requires_shipping', 'track_stock')
inlines = [ProductAttributeInline]
class ProductAdmin(admin.ModelAdmin):
list_display = ('get_title', 'upc', 'get_product_class', 'structure',
'attribute_summary', 'date_created')
prepopulated_fields = {"slug": ("title",)}
inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
class ProductAttributeAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'product_class', 'type')
prepopulated_fields = {"code": ("name", )}
class OptionAdmin(admin.ModelAdmin):
pass
class ProductAttributeValueAdmin(admin.ModelAdmin):
list_display = ('product', 'attribute', 'value')
class AttributeOptionInline(admin.TabularInline):
model = AttributeOption
class AttributeOptionGroupAdmin(admin.ModelAdmin):
list_display = ('name', 'option_summary')
inlines = [AttributeOptionInline, ]
class CategoryAdmin(TreeAdmin):
pass
admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
| 0 |
import datetime
from PIL import Image
import os
import pytz
import subprocess
import tempfile
from cStringIO import StringIO
from itertools import chain
from django.db import models
from django.conf import settings
from django.core.files.base import ContentFile
from arsoapi.util import Counter, fetch
from arsoapi.laplacian import laplacian
from arsoapi.formats import radar_detect_format, radar_get_format
from osgeo import gdal
import osgeo.gdalconst as gdalc
# set to 1 to print out stuff
LOG_LEVEL = getattr(settings, 'LOG_LEVEL', 0)
URL_VREME_RADAR = 'http://www.arso.gov.si/vreme/napovedi%20in%20podatki/radar.gif'
URL_VREME_TOCA = 'http://www.meteo.si/uploads/probase/www/warning/graphic/warning_%s_hp_si.jpg'
#URL_VREME_ALADIN = 'http://www.arso.gov.si/vreme/napovedi%%20in%%20podatki/aladin/AW00_oblpad_%.3d.png'
URL_VREME_ALADIN = 'http://meteo.arso.gov.si/uploads/probase/www/model/aladin/field/as_%s-%s_tcc-rr_si-neighbours_%.3d.png'
GDAL_TRANSLATE = '/usr/bin/gdal_translate'
GDAL_WARP = '/usr/bin/gdalwarp'
IMAGEMAGICK_CONVERT = '/usr/bin/convert'
for exe in [GDAL_TRANSLATE, GDAL_WARP, IMAGEMAGICK_CONVERT]:
if not os.path.exists(exe):
raise Exception("Invalid system setup, missing %s" % exe)
TOCA_MASK_FILE = os.path.join(os.path.dirname(__file__), 'toca_mask.png')
TOCA_MEJE_FILE = os.path.join(os.path.dirname(__file__), 'toca_meje.png')
ALADIN_MASK_FILE = os.path.join(os.path.dirname(__file__), 'mask.png')
ALADIN_MEJE_FILE = os.path.join(os.path.dirname(__file__), 'meje.png')
# init
gdal.AllRegister()
#print 'Gdal version', gdal.VersionInfo()
def popen(*args, **kwargs):
"a wrapper in order to suppress messages if debug is set to false"
params = {}
params.update(kwargs)
if not LOG_LEVEL:
params['stderr'] = subprocess.PIPE
params['stdout'] = subprocess.PIPE
return subprocess.Popen(*args, **params)
def check_popen_error(p):
if p.returncode is not None:
if p.returncode != 0:
print p.stdout.read()
print p.stderr.read()
class GeoDatasourceError(Exception): pass
##############################
## Models
##############################
class RadarPadavin(models.Model):
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
last_modified = models.DateTimeField(db_index=True, unique=True)
picdata = models.TextField()
format_id = models.IntegerField(null=True)
processed = models.FileField(upload_to='processed/radar', null=True, blank=True)
class Meta:
ordering = ('-timestamp',)
def __unicode__(self):
return u'%s' % (self.last_modified.strftime('%Y%m%d-%H%M%S'),)
def pic():
def fget(self):
if self.picdata:
return Image.open(StringIO(self.picdata.decode('base64')))
def fset(self, value):
s = StringIO()
value.save(s)
self.picdata = s.getvalue().encode('base64')
return fget, fset
pic = property(*pic())
def image_name(self):
return 'radar_%s.tif' % (self.last_modified.strftime('%Y%m%d-%H%M%S'),)
def process(self):
pic = self.pic
fmt = radar_detect_format(pic)
self.format_id = fmt.ID
if self.format_id > 0:
filtered = filter_radar(self.pic, fmt)
geotiff = annotate_geo_radar(filtered, fmt)
self.processed.save(name=self.image_name(), content=ContentFile(geotiff))
self.save()
class Toca(models.Model):
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
last_modified = models.DateTimeField(db_index=True, unique=True)
picdata = models.TextField()
processed = models.FileField(upload_to='processed/toca')
class Meta:
ordering = ('-timestamp',)
def __unicode__(self):
return u'%s' % self.last_modified.strftime('%Y%m%d-%H%M')
def pic():
def fget(self):
if self.picdata:
return Image.open(StringIO(self.picdata.decode('base64')))
def fset(self, value):
s = StringIO()
value.save(s)
self.picdata = s.getvalue().encode('base64')
return fget, fset
pic = property(*pic())
def image_name(self):
return 'toca_%s.tif' % (self.last_modified.strftime('%Y%m%d-%H%M%S'),)
def process(self):
filtered = filter_toca(self.pic)
geotiff = annotate_geo_toca(filtered)
self.processed.save(name=self.image_name(), content=ContentFile(geotiff))
self.save()
class Aladin(models.Model):
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
forecast_time = models.DateTimeField(db_index=True)
timedelta = models.IntegerField()
picdata = models.TextField()
processed = models.FileField(upload_to='processed/aladin', null=True, blank=True)
class Meta:
ordering = ('-forecast_time', '-timedelta')
unique_together = (('forecast_time', 'timedelta'),)
def pic():
def fget(self):
if self.picdata:
return Image.open(StringIO(self.picdata.decode('base64')))
def fset(self, value):
s = StringIO()
value.save(s)
self.picdata = s.getvalue().encode('base64')
return fget, fset
pic = property(*pic())
def image_name(self):
return 'aladin_%s_%s.tif' % (self.forecast_time.strftime('%Y%m%d-%H%M'), self.timedelta)
def process(self):
filtered = filter_aladin(self.pic)
geotiff = annotate_geo_aladin(filtered)
self.processed.save(name=self.image_name(), content=ContentFile(geotiff))
self.save()
###############################
## Functions
###############################
def fetch_radar():
return fetch(URL_VREME_RADAR)
def fetch_toca():
now = datetime.datetime.utcnow()
	# a new image is published about 8 minutes into each 10-minute interval,
	# so shifting back by 8 minutes maps onto the correct interval most of the time
now = now - datetime.timedelta(seconds=60*8)
now = now.replace(minute=now.minute - now.minute % 10)
url = URL_VREME_TOCA % now.strftime('%Y%m%d-%H%M')
return fetch(url)
def fetch_aladin(ft, n):
assert n % 3 == 0
return fetch(URL_VREME_ALADIN % (ft.strftime('%Y%m%d'), ft.strftime('%H%M'), n))
WHITE = (255,255,255)
BLACK = (0, 0, 0)
def mmph_to_level(mmph):
if mmph < 0.1:
return 0
if mmph <= 1.0:
return 25
elif mmph <= 5.0:
return 50
elif mmph <= 50.0:
return 75
else:
return 100
def filter_radar(src_img, fmt):
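	# replace map overlay pixels (the colours listed in fmt.COLOR_IGNORE) with the
	# most common neighbouring rain colour, or with the background colour when no
	# neighbour carries rain data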
im = src_img.convert('RGB')
pixels = im.load()
cc = Counter()
for i in range(im.size[0]):
for j in range(im.size[1]):
cc[pixels[i,j]] += 1
if pixels[i,j] in fmt.COLOR_IGNORE:
c = Counter()
for p in (pixels[i-1,j], pixels[i,j-1], pixels[i+1,j], pixels[i,j+1]):
if p in fmt.COLOR_TO_MMPH:
c[p] += 1
if c.most_common():
pixels[i,j] = c.most_common(1)[0][0]
else:
pixels[i,j] = fmt.COLOR_BG
return im
TOCA_LEVELS = {
(255, 238, 89): 33,
(255, 179, 75): 66,
(255, 97, 72): 100,
}
def _imageop_divide(im, mask):
"useful for removing terrain from pictures"
pixels = im.load()
mask_pix = mask.load()
for i in xrange(im.size[0]):
for j in xrange(im.size[1]):
p = pixels[i,j]
m = mask_pix[i,j]
pixels[i,j] = (
256*p[0] / (m[0]+1),
256*p[1] / (m[1]+1),
256*p[2] / (m[2]+1),
)
def _nearest_color(p, palette, threshold):
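	# return the palette colour closest to p (Manhattan distance in RGB space)
	# when that nearest colour is unique and closer than threshold; otherwise
	# return the pixel unchanged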
dists = []
if p == (255,255,255):
return p
for c in palette:
d = sum([abs(a - b) for a, b in zip(p, c)])
dists.append((d, c))
mindist = sorted(dists)[0][0]
all_mindist = [i for i in dists if i[0] == mindist]
if len(all_mindist) == 1 and all_mindist[0][0] < threshold:
return all_mindist[0][1]
else:
return p
def filter_toca(src_img):
im = src_img.convert('RGB')
pixels = im.load()
mask = Image.open(TOCA_MASK_FILE).convert('RGB')
_imageop_divide(im, mask)
levels = {
(255,255,255): 0,
}
levels.update(TOCA_LEVELS)
for i in xrange(im.size[0]):
for j in xrange(im.size[1]):
p = pixels[i,j]
p2 = _nearest_color(p, levels, 80) # 80 here is pure empirical magic
if p2 != p:
pixels[i,j] = p2
def _surroundings(i, j):
for a in xrange(i-1, i+2):
for b in xrange(j-1, j+2):
yield a, b
for i in range(1, im.size[0]-1):
for j in range(1, im.size[1]-1):
if pixels[i,j] not in levels: # pixel needs repairing
c = Counter()
for coord in (pt for pt in _surroundings(i, j) if pixels[pt] in levels):
c[pixels[coord]] += 1
elected = c.most_common()[0][0]
pixels[i,j] = elected
return im
ALADIN_CRTE = (123,123,123)
ALADIN_BACKGROUND = (241, 241, 241)
ALADIN_MORJE = (252, 252, 252)
ALADIN_OBLACNOST = {
(228, 228, 254): 40,
(206, 206, 246): 60,
(189, 189, 227): 80,
}
ALADIN_ZELENA = (51, 153, 76)
ALADIN_MARKER_OZADJE = (230, 255, 127)
ALADIN_PADAVINE = {
(255, 255, 189): 1,
(246, 255, 170): 2,
(228, 255, 151): 5,
(189, 245, 149): 10,
(171, 228, 150): 20,
(134, 207, 131): 30,
(114, 189, 126): 40,
# unverified colors below
(18, 115, 55): 50,
(18, 158, 104): 60,
(15, 183, 134): 70,
(15, 207, 157): 80,
(9, 227, 174): 90,
}
ALADIN_VOTABLE = tuple([WHITE] + ALADIN_OBLACNOST.keys() + ALADIN_PADAVINE.keys())
ALADIN_DISTANCE = ALADIN_VOTABLE + (ALADIN_BACKGROUND, ALADIN_MORJE)
def filter_aladin(src_img):
im = src_img.convert('RGB')
pixels = im.load()
cc = Counter()
mask = Image.open(ALADIN_MASK_FILE).convert('RGB')
mask_pix = mask.load()
_imageop_divide(im, mask)
def _surroundings(i, j):
for a in xrange(i-2, i+3):
for b in xrange(j-2, j+3):
yield a, b
# step 2: fix artefacts from previous step
meje = Image.open(ALADIN_MEJE_FILE)
meje_pix = meje.load()
for i in range(2, im.size[0]-2):
for j in range(2, im.size[1]-2):
if meje_pix[i,j] == 0: # pixel needs repairing
c = Counter()
for coord in (pt for pt in _surroundings(i, j) if meje_pix[pt] != 0):
c[pixels[coord]] += 1
elected = c.most_common()[0][0]
pixels[i,j] = elected
edges_mask = laplacian(im.copy())
edges_mask = edges_mask.convert('1')
edges_pix = edges_mask.load()
for i in range(2, im.size[0]-2):
for j in range(2, im.size[1]-2):
if edges_pix[i,j] == 0: # pixel needs repairing
c = Counter()
for coord in (pt for pt in _surroundings(i, j) if edges_pix[pt] != 0):
c[pixels[coord]] += 1
res = c.most_common()
if res:
elected = c.most_common()[0][0]
pixels[i,j] = elected
return im
def filter_aladin_old(src_img):
im = src_img.convert('RGB')
pixels = im.load()
cc = Counter()
# remove country background
for i in range(im.size[0]):
for j in range(im.size[1]):
cc[pixels[i,j]] += 1
if pixels[i,j] == ALADIN_BACKGROUND:
c = Counter()
try:
neighbors = (pixels[i-1,j], pixels[i,j-1], pixels[i+1,j], pixels[i,j+1])
except IndexError:
continue
pixels[i,j] = WHITE
# fix crosshair in LJ
for i,j in chain(((230, i) for i in xrange(279, 291)), ((i, 284) for i in xrange(225, 236))):
c = Counter()
neighbors = (
pixels[i-1,j-1],
pixels[i-1,j],
pixels[i-1,j+1],
pixels[i,j-1],
#pixels[i-1,j], # self
pixels[i,j+1],
pixels[i+1,j-1],
pixels[i+1,j],
pixels[i+1,j+1],
)
for p in neighbors:
if p in ALADIN_VOTABLE:
c[p] += 1
if c.most_common():
pixels[i,j] = c.most_common(1)[0][0]
else:
pixels[i,j] = WHITE
# remove borders and coastlines
for i in range(im.size[0]):
for j in range(im.size[1]):
if pixels[i,j] == ALADIN_CRTE:
c = Counter()
try:
neighbors = (pixels[i-1,j], pixels[i,j-1], pixels[i+1,j], pixels[i,j+1])
except IndexError:
continue
for p in neighbors:
if p in ALADIN_VOTABLE:
c[p] += 1
if c.most_common():
pixels[i,j] = c.most_common(1)[0][0]
else:
pixels[i,j] = WHITE
# remove green edges
for i in range(im.size[0]):
for j in range(im.size[1]):
if pixels[i,j] == ALADIN_ZELENA:
c = Counter()
try:
neighbors = (
pixels[i-1,j-1],
pixels[i-1,j],
pixels[i-1,j+1],
pixels[i,j-1],
#pixels[i-1,j], # self
pixels[i,j+1],
pixels[i+1,j-1],
pixels[i+1,j],
pixels[i+1,j+1],
)
except IndexError:
continue
for p in neighbors:
if p in ALADIN_VOTABLE:
c[p] += 1
if c.most_common():
					# if it does not border the 30mm or 50mm colour, green cannot be a valid choice
if not (76, 179, 76) in c and \
not (0, 127, 51) in c and \
ALADIN_ZELENA in c and \
len(c.most_common()) > 1:
del c[ALADIN_ZELENA]
pixels[i,j] = c.most_common()[0][0]
else:
pixels[i,j] = WHITE
# remove number boxes
print 'removing'
# step 1: detect
pending_removal = {}
for i in range(1, im.size[0]):
for j in range(1, im.size[1]-1):
if pixels[i,j] == ALADIN_MARKER_OZADJE and \
pixels[i-1,j-1] == BLACK and \
pixels[i-1,j] == BLACK and \
pixels[i-1, j+1] == BLACK and\
pixels[i, j-1] == BLACK and \
pixels[i+1, j-1] == BLACK:
pending_removal[(i,j)] = 1
# step 2: find bounds
def _getneighbors(i, j):
yield i-1, j-1
yield i, j-1
yield i+1, j-1
yield i-1, j
#yield i, j # NO
yield i+1, j
yield i-1, j+1
yield i, j+1
yield i+1, j+1
markers = []
all_checked = {}
for pix in pending_removal.keys():
if pix in all_checked:
continue
marker = {}
checked = {}
this_pending = {}
this_pending[pix] = 1
while this_pending:
p = this_pending.keys()[0]
checked[p] = 1
all_checked[p] = 1
if pixels[p] in (BLACK, ALADIN_MARKER_OZADJE):
marker[p] = 1
for nxt in _getneighbors(*p):
if nxt not in checked:
this_pending[nxt] = 1
del this_pending[p]
markers.append(marker)
print 'n markers', len(markers)
# step 3: find marker bounding box
bboxes = []
for m in markers:
min_x = min_y = 200000
max_x = max_y = 0
for p in m:
min_x = min(min_x, p[0])
max_x = max(max_x, p[0])
min_y = min(min_y, p[1])
max_y = max(max_y, p[1])
bboxes.append((min_x, max_x, min_y, max_y))
# step 4: try to fill the boxes
pending_bboxes = []
for min_x, max_x, min_y, max_y in bboxes:
c = Counter()
p_top = ((i, max_y+1) for i in xrange(min_x, max_x+1))
p_bottom = ((i, min_y-1) for i in xrange(min_x, max_x+1))
p_left = ((min_x-1, i) for i in xrange(min_y, max_y+1))
p_right = ((max_x+1, i) for i in xrange(min_y, max_y+1))
for x in chain(p_top, p_bottom, p_left, p_right):
c[pixels[x]] += 1
if len(c.most_common()) == 1:
the_color = c.most_common()[0][0]
for i in xrange(min_x, max_x+1):
for j in xrange(min_y, max_y+1):
pixels[i,j] = the_color
else:
print 'simple fail'
pending_bboxes.append((min_x, max_x, min_y, max_y))
return im
def annotate_geo_radar(img, fmt, scale=1):
if LOG_LEVEL:
print 'ANN radar: Annotating'
src = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='radar1_', suffix='.tif')
tmp = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='radar2_', suffix='.tif')
dst = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='radar3_', suffix='.tif')
if scale != 1:
img = img.resize((img.size[0]*scale, img.size[1]*scale))
img.save(src.name, 'tiff')
src.flush()
if LOG_LEVEL:
print 'ANN radar: gdal translate'
cmd = []
# magic numbers, geocoded pixels
# syntax: -gcp x y east north
for x, y, east, north in fmt.GCP:
cmd += ["-gcp"] + map(str, [x*scale, y*scale, east, north])
cmd += ["-a_srs", "EPSG:3787", src.name, tmp.name]
p = popen([GDAL_TRANSLATE] + cmd)
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN radar: gdal warp'
p = popen([GDAL_WARP] + '-s_srs EPSG:3787 -t_srs EPSG:4326'.split(' ') + [tmp.name, dst.name])
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN radar: done'
dst.seek(0)
processed = dst.read()
return processed
def annotate_geo_toca(img):
if LOG_LEVEL:
print 'ANN toca: Annotating'
src = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='toca1_', suffix='.tif')
tmp = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='toca3_', suffix='.tif')
dst = tempfile.NamedTemporaryFile(mode='w+b', dir=settings.TEMPORARY_DIR, prefix='toca3_', suffix='.tif')
img.save(src.name, 'tiff')
src.flush()
if LOG_LEVEL:
print 'ANN toca: gdal translate'
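	# ground control points: pixel x, pixel y -> easting, northing in the projected
	# system EPSG:3787; gdalwarp below reprojects the result to WGS84 (EPSG:4326)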
cmd = '-gcp 94 131 401712 154018 -gcp 542 97 589532 168167 -gcp 398 408 530526 38229 -a_srs EPSG:3787'.split(' ')
p = popen([GDAL_TRANSLATE] + cmd + [src.name, tmp.name])
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN toca: gdal warp'
p = popen([GDAL_WARP] + '-s_srs EPSG:3787 -t_srs EPSG:4326'.split(' ') + [tmp.name, dst.name])
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN toca: done'
dst.seek(0)
processed = dst.read()
return processed
def annotate_geo_aladin(img):
if LOG_LEVEL:
print 'ANN aladin: Annotating'
src = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='aladin1_', suffix='.tif')
tmp = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='aladin2_', suffix='.tif')
dst = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='aladin3_', suffix='.tif')
img.save(src.name, 'tiff')
src.flush()
if LOG_LEVEL:
print 'ANN aladin: gdal translate'
# old aladin
#cmd = '-gcp 530 194 622883 149136 -gcp 360 408 530526 38229 -gcp 116 187 401712 154018 -a_srs EPSG:3787'.split(' ')
# magic numbers - geocoded pixels
cmd = '-gcp 573 144 622883 149136 -gcp 424 323 530526 38229 -gcp 218 136 401712 154018 -a_srs EPSG:3787'.split(' ')
p = popen([GDAL_TRANSLATE] + cmd + [src.name, tmp.name])
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN aladin: gdal warp'
p = popen([GDAL_WARP] + '-s_srs EPSG:3787 -t_srs EPSG:4326'.split(' ') + [tmp.name, dst.name])
p.wait()
check_popen_error(p)
if LOG_LEVEL:
print 'ANN aladin: done'
dst.seek(0)
processed = dst.read()
return processed
def convert_geotiff_to_png(tiffdata):
"PIL does not support GeoTIFF, therefore imagemagick convert is needed"
src = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='convert_src_', suffix='.tif')
dst = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='convert_dst_', suffix='.png')
src.write(tiffdata)
src.flush()
p = popen([IMAGEMAGICK_CONVERT, src.name, dst.name])
p.wait()
check_popen_error(p)
dst.seek(0)
return dst.read()
class GeocodedRadar:
def __init__(self):
self.bands = {}
self.last_modified = None
self.fmt = radar_get_format(0)
self.clean()
def refresh(self):
r = RadarPadavin.objects.exclude(processed=None)[0]
if self.last_modified != r.last_modified:
self.load_from_model(r)
def load_from_model(self, instance):
if instance.format_id > 0:
self.fmt = radar_get_format(instance.format_id)
self.load_from_string(instance.processed.read())
self.last_modified = instance.last_modified
def load_from_string(self, data):
self.tmpfile = None # clear reference
self.tmpfile = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='radar_served_', suffix='.tif')
self.tmpfile.write(data)
self.tmpfile.flush()
self.load(self.tmpfile.name)
def __del__(self):
self.clean()
def clean(self):
for b in self.bands.keys():
del self.bands[b]
self.transform = None
self.rows = self.cols = None
self.ds = None
def load(self, filename):
self.clean()
self.ds = gdal.Open(filename, gdalc.GA_ReadOnly)
if self.ds is None:
raise GeoDatasourceError('No datasource file found')
self.rows = self.ds.RasterYSize
self.cols = self.ds.RasterXSize
self.transform = self.ds.GetGeoTransform()
for i in range(self.ds.RasterCount):
band = self.ds.GetRasterBand(i+1)
self.bands[i+1] = band.ReadAsArray(0, 0, self.cols, self.rows)
def get_pixel_at_coords(self, lat, lng):
if self.transform is None:
return (0, 0), None
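		# GDAL GetGeoTransform() returns
		# (x_origin, pixel_width, x_rotation, y_origin, y_rotation, pixel_height),
		# so transform[0] and transform[3] are the x and y origins; the variable
		# names below are swapped relative to that convention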
yOrigin = self.transform[0]
xOrigin = self.transform[3]
pixelWidth = self.transform[1]
pixelHeight = self.transform[5]
xOffset = abs(int((lat-xOrigin) / pixelWidth)) # XXX remove abs
yOffset = abs(int((lng-yOrigin) / pixelHeight))
try:
pixel = tuple((int(b[xOffset,yOffset]) for b in self.bands.itervalues()))
except IndexError:
pixel = None
# x and y in these coordinates are switched for some reason?
return (yOffset, xOffset), pixel
def get_rain_at_coords(self, lat, lng):
position, pixel = self.get_pixel_at_coords(lat, lng)
return position, self.fmt.COLOR_TO_MMPH.get(pixel)
class GeocodedToca:
TOCA_LEVELS = TOCA_LEVELS
def __init__(self):
self.bands = {}
self.last_modified = None
def refresh(self):
t = Toca.objects.all()[0]
if self.last_modified != t.last_modified:
self.load_from_model(t)
def load_from_model(self, instance):
self.load_from_string(instance.processed.read())
self.last_modified = instance.last_modified
def load_from_string(self, data):
self.tmpfile = None # clear reference
self.tmpfile = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='toca_served_', suffix='.tif')
self.tmpfile.write(data)
self.tmpfile.flush()
self.load(self.tmpfile.name)
def __del__(self):
self.clean()
def clean(self):
for b in self.bands.keys():
del self.bands[b]
self.transform = None
self.rows = self.cols = None
self.ds = None
def load(self, filename):
self.clean()
self.ds = gdal.Open(filename, gdalc.GA_ReadOnly)
if self.ds is None:
raise GeoDatasourceError('No datasource file found')
self.rows = self.ds.RasterYSize
self.cols = self.ds.RasterXSize
self.transform = self.ds.GetGeoTransform()
for i in range(self.ds.RasterCount):
band = self.ds.GetRasterBand(i+1)
self.bands[i+1] = band.ReadAsArray(0, 0, self.cols, self.rows)
def get_pixel_at_coords(self, lat, lng):
yOrigin = self.transform[0]
xOrigin = self.transform[3]
pixelWidth = self.transform[1]
pixelHeight = self.transform[5]
xOffset = abs(int((lat-xOrigin) / pixelWidth)) # XXX remove abs
yOffset = abs(int((lng-yOrigin) / pixelHeight))
return (xOffset, yOffset), tuple((int(b[xOffset,yOffset]) for b in self.bands.itervalues()))
def get_toca_at_coords(self, lat, lng):
position, pixel = self.get_pixel_at_coords(lat, lng)
return position, self.TOCA_LEVELS.get(pixel, 0)
class GeocodedAladin:
CLOUDS = ALADIN_OBLACNOST
RAIN = ALADIN_PADAVINE
def __init__(self):
self.images = {}
self.bands = {}
self.tmpfiles = {}
self.forecast_time = {}
def refresh(self):
ft = self.forecast_time.get(6, datetime.datetime.now() - datetime.timedelta(1))
a = Aladin.objects.all()[0]
if a.forecast_time > ft:
self.load_from_models(Aladin.objects.filter(forecast_time=a.forecast_time))
def load_from_models(self, instances):
self.tmpfiles = {}
self.clean()
for i in instances:
#print 'ALADIN Loading...', i.timedelta
if not i.processed:
continue
i.processed.open()
self.load_from_string(i.processed.read(), i.timedelta, i.forecast_time)
self.forecast_time[i.timedelta] = i.forecast_time
import gc
gc.collect() # remove old images from mem, actually works on 2.6
def load_from_string(self, data, n, ft):
self.tmpfiles[n] = None # clear reference
self.tmpfiles[n] = tempfile.NamedTemporaryFile(dir=settings.TEMPORARY_DIR, prefix='aladin_served%s_' % n, suffix='.tif')
self.tmpfiles[n].write(data)
self.tmpfiles[n].flush()
self.load(self.tmpfiles[n].name, n, ft)
def clean(self):
for n in self.bands.keys():
for b in self.bands[n].keys():
self.bands[n][b] = None
self.bands[n] = {}
self.bands = {}
self.transform = None
self.rows = None
self.cols = None
for k in self.forecast_time.keys():
self.forecast_time[k] = None
for k in self.images.keys():
self.images[k] = None
self.forecast_time = {}
self.images = {}
def load(self, filename, n, ft):
self.images[n] = gdal.Open(filename, gdalc.GA_ReadOnly)
if self.images[n] is None:
raise GeoDatasourceError('No datasource file found')
self.rows = self.images[n].RasterYSize
self.cols = self.images[n].RasterXSize
self.transform = self.images[n].GetGeoTransform()
self.bands[n] = {}
for i in range(self.images[n].RasterCount):
band = self.images[n].GetRasterBand(i+1)
self.bands[n][i+1] = band.ReadAsArray(0, 0, self.cols, self.rows)
self.forecast_time[n] = ft
def _get_candidates(self, i, j):
for a in xrange(i-1, i+2):
for b in xrange(j-1, j+2):
yield a,b
def get_pixel_at_coords(self, lat, lng):
yOrigin = self.transform[0]
xOrigin = self.transform[3]
pixelWidth = self.transform[1]
pixelHeight = self.transform[5]
xOffset = abs(int((lat-xOrigin) / pixelWidth)) # XXX remove abs
yOffset = abs(int((lng-yOrigin) / pixelHeight))
resp = []
for n in self.bands:
candidates = self._get_candidates(xOffset, yOffset)
c = Counter()
for p in candidates:
k = tuple((int(b[p]) for b in self.bands[n].itervalues()))
c[k] += 1
if p == (xOffset, yOffset):
c[k] += 5
resp.append((n, c.most_common()[0][0]))
#resp.append((n, tuple((int(b[xOffset,yOffset]) for b in self.bands[n].itervalues()))))
return (xOffset, yOffset), list(sorted(resp))
def _nearest_color(self, p, palette):
dists = []
for c in palette:
d = sum([abs(a - b) for a, b in zip(p, c)])
dists.append((d, c))
mindist = sorted(dists)[0][0]
all_mindist = [i for i in dists if i[0] == mindist]
if len(all_mindist) == 1 and all_mindist[0][0] < 20:
return all_mindist[0][1]
else:
return p
def _get_value(self, pixel):
clouds = self.CLOUDS.get(self._nearest_color(pixel, self.CLOUDS), 0)
rain = self.RAIN.get(self._nearest_color(pixel, self.RAIN), 0)
return {
'clouds': clouds,
'rain': rain,
}
def get_forecast_at_coords(self, lat, lng):
position, pixel = self.get_pixel_at_coords(lat, lng)
forecast = []
for n, p in pixel:
ft = self.forecast_time[n] + datetime.timedelta(hours=n)
ft = ft.replace(tzinfo=pytz.UTC)
ft = ft.astimezone(pytz.timezone('Europe/Ljubljana'))
d = {'offset': n, 'forecast_time': ft.strftime('%Y-%m-%d %H:%M') }
d.update(self._get_value(p))
forecast.append(d)
return position, tuple(forecast)
| 0.041259 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'../../core',
'-I',
'../../vm',
'-I',
'../../misc',
'-I',
'../../special',
'-I',
'../../tools/multiply',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/include/c++/4.8',
'-isystem',
'/usr/include/c++/4.8/bits',
'-isystem',
'/usr/include/x86_64-linux-gnu/',
'-isystem',
'/usr/include/c++/v1/',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 0.024641 |
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from designate.openstack.common import log as logging
from designate import exceptions
from designate import schema
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
blueprint = flask.Blueprint('records', __name__)
record_schema = schema.Schema('v1', 'record')
records_schema = schema.Schema('v1', 'records')
def _find_recordset(context, domain_id, name, type):
return central_api.find_recordset(context, {
'domain_id': domain_id,
'name': name,
'type': type,
})
def _find_or_create_recordset(context, domain_id, name, type, ttl):
try:
recordset = _find_recordset(context, domain_id, name, type)
except exceptions.RecordSetNotFound:
recordset = central_api.create_recordset(context, domain_id, {
'name': name,
'type': type,
'ttl': ttl,
})
return recordset
def _extract_record_values(values):
record_values = ('data', 'priority', 'comment',)
return dict((k, values[k]) for k in record_values if k in values)
def _extract_recordset_values(values):
recordset_values = ('name', 'type', 'ttl',)
return dict((k, values[k]) for k in recordset_values if k in values)
def _format_record_v1(record, recordset):
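    # the v1 API exposes name/type/ttl on each record while internally they live
    # on the recordset, so copy them onto the record for the response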
record.update({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
})
return record
def _fetch_domain_recordsets(context, domain_id):
criterion = {'domain_id': domain_id}
recordsets = central_api.find_recordsets(context, criterion)
return dict((r['id'], r) for r in recordsets)
@blueprint.route('/schemas/record', methods=['GET'])
def get_record_schema():
return flask.jsonify(record_schema.raw)
@blueprint.route('/schemas/records', methods=['GET'])
def get_records_schema():
return flask.jsonify(records_schema.raw)
@blueprint.route('/domains/<uuid:domain_id>/records', methods=['POST'])
def create_record(domain_id):
context = flask.request.environ.get('context')
values = flask.request.json
record_schema.validate(values)
recordset = _find_or_create_recordset(context,
domain_id,
values['name'],
values['type'],
values.get('ttl', None))
record = central_api.create_record(context, domain_id, recordset['id'],
_extract_record_values(values))
record = _format_record_v1(record, recordset)
response = flask.jsonify(record_schema.filter(record))
response.status_int = 201
response.location = flask.url_for('.get_record', domain_id=domain_id,
record_id=record['id'])
return response
@blueprint.route('/domains/<uuid:domain_id>/records', methods=['GET'])
def get_records(domain_id):
context = flask.request.environ.get('context')
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an empty records array instead of a domain not found
central_api.get_domain(context, domain_id)
records = central_api.find_records(context, {'domain_id': domain_id})
recordsets = _fetch_domain_recordsets(context, domain_id)
def _inner(record):
recordset = recordsets[record['recordset_id']]
return _format_record_v1(record, recordset)
records = [_inner(r) for r in records]
return flask.jsonify(records_schema.filter({'records': records}))
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['GET'])
def get_record(domain_id, record_id):
context = flask.request.environ.get('context')
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an record not found instead of a domain not found
central_api.get_domain(context, domain_id)
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
recordset = central_api.get_recordset(
context, domain_id, record['recordset_id'])
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['PUT'])
def update_record(domain_id, record_id):
context = flask.request.environ.get('context')
values = flask.request.json
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an record not found instead of a domain not found
central_api.get_domain(context, domain_id)
# Find the record
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
# Find the associated recordset
recordset = central_api.get_recordset(
context, domain_id, record['recordset_id'])
# Filter out any extra fields from the fetched record
record = record_schema.filter(record)
# Ensure all the API V1 fields are in place
record = _format_record_v1(record, recordset)
# Name and Type can't be updated on existing records
if 'name' in values and record['name'] != values['name']:
raise exceptions.InvalidOperation('The name field is immutable')
if 'type' in values and record['type'] != values['type']:
raise exceptions.InvalidOperation('The type field is immutable')
# TTL Updates should be applied to the RecordSet
update_recordset = False
if 'ttl' in values and record['ttl'] != values['ttl']:
update_recordset = True
# Apply the updated fields to the record
record.update(values)
# Validate the record
record_schema.validate(record)
# Update the record
record = central_api.update_record(
context, domain_id, recordset['id'], record_id,
_extract_record_values(values))
# Update the recordset (if necessary)
if update_recordset:
recordset = central_api.update_recordset(
context, domain_id, recordset['id'],
_extract_recordset_values(values))
# Format and return the response
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['DELETE'])
def delete_record(domain_id, record_id):
context = flask.request.environ.get('context')
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_domain(context, domain_id)
# Find the record
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
central_api.delete_record(
context, domain_id, record['recordset_id'], record_id)
return flask.Response(status=200)
| 0 |
from __future__ import unicode_literals
import logging
from django.db.models import Manager
from django.utils import six
from djblets.db.managers import ConcurrencyManager
from reviewboard.accounts.trophies import get_registered_trophy_types
class ProfileManager(Manager):
"""Manager for user profiles."""
def get_or_create(self, user, *args, **kwargs):
"""Return the profile for the user.
This will create the profile if one does not exist.
"""
if hasattr(user, '_profile'):
return user._profile, False
profile, is_new = \
super(ProfileManager, self).get_or_create(user=user, *args,
**kwargs)
user._profile = profile
return profile, is_new
class ReviewRequestVisitManager(ConcurrencyManager):
"""Manager for review request visits.
Unarchives a specified review request for all users that have archived it.
"""
def unarchive_all(self, review_request):
""" Unarchives review request for all users.
Unarchives the given review request for all users by changing all
review request visit database entries for this review request from
archived to visible.
"""
queryset = self.filter(review_request=review_request,
visibility=self.model.ARCHIVED)
queryset.update(visibility=self.model.VISIBLE)
class TrophyManager(Manager):
"""Manager for trophies.
Creates new trophies, updates the database and fetches trophies from the
database.
"""
def compute_trophies(self, review_request):
"""Compute and return trophies for a review request.
Computes trophies for a given review request by looping through all
registered trophy types and seeing if any apply to the review request.
If trophies are to be awarded, they are saved in the database and
returned. If no trophies are to be awarded, an empty list is returned.
"""
if 'calculated_trophies' in review_request.extra_data:
return list(self.filter(review_request=review_request))
calculated_trophy_types = []
registered_trophy_types = get_registered_trophy_types()
for registered_trophy_type in six.itervalues(registered_trophy_types):
try:
instance = registered_trophy_type()
except Exception as e:
logging.error('Error instantiating trophy type %r: %s',
registered_trophy_type, e, exc_info=1)
continue
try:
if instance.qualifies(review_request):
calculated_trophy_types.append(instance)
except Exception as e:
logging.error('Error when running %r.instance_qualifies: %s',
registered_trophy_type, e, exc_info=1)
trophies = [
self.model.objects.create(category=trophy_type.category,
review_request=review_request,
local_site=review_request.local_site,
user=review_request.submitter)
for trophy_type in calculated_trophy_types
]
review_request.extra_data['calculated_trophies'] = True
review_request.save(update_fields=['extra_data'])
return trophies
def get_trophies(self, review_request):
"""Get all the trophies for a given review request."""
return self.compute_trophies(review_request)
| 0 |
# -*- coding: utf-8 -*-
############################################################
#
# loguru guide, using logging.config
# https://loguru.readthedocs.io/en/stable/api/logger.html#loguru._logger.Logger.configure
############################################################
import sys
from loguru import logger
logger.configure(
handlers=[
dict(sink=sys.stderr, backtrace=False,
filter=lambda record: "default" in record["extra"]),
dict(sink="log/default.log", backtrace=False,
filter=lambda record: "default" in record["extra"]),
dict(sink=sys.stdout, backtrace=False,
format="{message}", level="INFO",
filter=lambda record: "emitter" in record["extra"]),
dict(sink="log/{time:YYYY-MM-DD}.log",
filter=lambda record: "default" in record["extra"],
backtrace=False, enqueue=True, rotation="10 MB"),
]
)
statis_logger = logger.bind(emitter=True)
default_logger = logger.bind(default=True)
def init_logger(name, level):
"""Initialize logger in subprocess
"""
logger.add(sink=f"log/{name}-{level}.log",
level=level,
backtrace=False,
rotation="1 day",
retention="7 days",
enqueue=True,
)
loggers = {}
def get_a_single_logger(name, level):
"""Create a wrapper logger with extra info
    Use the filter mechanism so that this logger's messages are only written to the sink added here.
"""
logger_key = f"{name}-{level}"
if logger_key in loggers:
return loggers.get(logger_key)
logger.add(sink=f"log/{logger_key}.log",
level=level,
filter=lambda record: record["extra"].get("name") == name,
backtrace=False,
rotation="1 day",
retention="7 days",
enqueue=True,
)
# https://loguru.readthedocs.io/en/stable/api/logger.html#loguru._logger.Logger.bind
wrapper_log = logger.bind(name=name)
loggers[logger_key] = wrapper_log
return wrapper_log
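# A minimal, hedged usage sketch (not part of the original file): exercises the
# bound loggers and the per-name sink defined above. The worker name and level
# below are illustrative assumptions, not values used elsewhere in this module.
if __name__ == "__main__":
    default_logger.info("written to stderr, log/default.log and the dated rotating log")
    statis_logger.info("written to stdout only, via the 'emitter' filter")
    worker_log = get_a_single_logger("worker-1", "DEBUG")
    worker_log.debug("written only to log/worker-1-DEBUG.log, via the per-name filter")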
| 0 |
# Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# roderick.mackenzie@nottingham.ac.uk
# www.opvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pygtk
pygtk.require('2.0')
import gtk
import sys
import os
import shutil
from cal_path import get_image_file_path
from about import about_dialog_show
from used_files_menu import used_files_menu
from server import server
from scan_tab import scan_vbox
from gui_util import dlg_get_text
import threading
import gobject
import multiprocessing
import time
import glob
from window_list import windows
from util import opvdm_delete_file
from util import delete_second_level_link_tree
from util import copy_scan_dir
from search import return_file_list
from win_lin import running_on_linux
import webbrowser
from search import find_fit_log
from scan_io import get_scan_dirs
from hpc import hpc_class
from debug import debug_mode
from inp import inp_update_token_value
from inp import inp_get_token_value
import i18n
_ = i18n.language.gettext
class scan_class(gtk.Window):
def callback_cluster(self, widget, data=None):
if self.cluster_window==None:
self.cluster_window=hpc_class()
self.cluster_window.init(self.hpc_root_dir,self.myserver.terminal)
print self.cluster_window.get_property("visible")
if self.cluster_window.get_property("visible")==True:
self.cluster_window.hide()
else:
self.cluster_window.show()
def get_main_menu(self, window):
accel_group = gtk.AccelGroup()
item_factory = gtk.ItemFactory(gtk.MenuBar, "<main>", accel_group)
item_factory.create_items(self.menu_items)
if debug_mode()==False:
item_factory.delete_item(_("/Advanced"))
window.add_accel_group(accel_group)
self.item_factory = item_factory
return item_factory.get_widget("<main>")
def callback_close(self, widget, data=None):
self.win_list.update(self,"scan_window")
self.hide()
return True
def callback_change_dir(self, widget, data=None):
dialog = gtk.FileChooserDialog(_("Change directory"),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)
filter = gtk.FileFilter()
filter.set_name(_("All files"))
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.sim_dir=dialog.get_filename()
a = open("scan_window.inp", "w")
a.write(self.sim_dir)
a.close()
self.clear_pages()
self.load_tabs()
dialog.destroy()
return True
def callback_help(self, widget, data=None):
webbrowser.open('http://www.opvdm.com/man/index.html')
def callback_add_page(self, widget, data=None):
new_sim_name=dlg_get_text( _("New simulation name:"), _("Simulation ")+str(self.number_of_tabs+1))
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
name=os.path.join(os.getcwd(),new_sim_name)
self.add_page(name)
def callback_remove_page(self,widget,name):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
self.toggle_tab_visible(tab.tab_name)
def callback_cluster_sleep(self,widget,data):
self.myserver.sleep()
def callback_cluster_poweroff(self,widget,data):
self.myserver.poweroff()
def callback_cluster_get_data(self,widget):
self.myserver.get_data()
def callback_cluster_print_jobs(self,widget):
self.myserver.print_jobs()
def callback_cluster_fit_log(self,widget):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
path=os.path.join(self.sim_dir,name)
find_fit_log("./fit.dat",path)
os.system("gnuplot -persist ./fit.dat &\n")
def callback_copy_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
old_dir=os.path.join(self.sim_dir,name)
new_sim_name=dlg_get_text( _("Clone the current simulation to a new simulation called:"), name)
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
new_dir=os.path.join(self.sim_dir,new_sim_name)
copy_scan_dir(new_dir,old_dir)
print _("I want to copy"),new_dir,old_dir
self.add_page(new_sim_name)
def callback_run_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(True,True)
def callback_build_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(False,True)
def callback_run_simulation_no_build(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(True,False)
def callback_nested_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.nested_simulation()
def callback_clean_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.clean_scan_dir()
def callback_clean_unconverged_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.scan_clean_unconverged()
def callback_clean_simulation_output(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.scan_clean_simulation_output()
def callback_import_from_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.import_from_hpc()
def callback_push_to_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.push_to_hpc()
def callback_push_unconverged_to_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.push_unconverged_to_hpc()
def callback_set_hpc_dir(self,widget,data):
config_file=os.path.join(self.sim_dir,"server.inp")
hpc_path=inp_get_token_value(config_file, "#hpc_dir")
dialog = gtk.FileChooserDialog(_("Select HPC dir"),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
if os.path.isdir(hpc_path):
dialog.set_current_folder(hpc_path)
filter = gtk.FileFilter()
filter.set_name(_("All files"))
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
inp_update_token_value(config_file, "#hpc_dir", dialog.get_filename(),1)
dialog.destroy()
def remove_invalid(self,input_name):
return input_name.replace (" ", "_")
def callback_rename_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
old_dir=os.path.join(self.sim_dir,name)
new_sim_name=dlg_get_text( _("Rename the simulation to be called:"), name)
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
new_dir=os.path.join(self.sim_dir,new_sim_name)
shutil.move(old_dir, new_dir)
tab.rename(new_dir)
def callback_delete_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
dir_to_del=os.path.join(self.sim_dir,name)
md = gtk.MessageDialog(None, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, _("Should I remove the simulation directory ")+dir_to_del)
#gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION,
# gtk.BUTTONS_CLOSE, "Should I remove the simulation directory "+dir_to_del)
response = md.run()
if response == gtk.RESPONSE_YES:
self.notebook.remove_page(pageNum)
for items in self.tab_menu.get_children():
if items.get_label()==name:
self.tab_menu.remove(items)
print _("I am going to delete file"),dir_to_del
delete_second_level_link_tree(dir_to_del)
self.number_of_tabs=self.number_of_tabs-1
elif response == gtk.RESPONSE_NO:
print _("Not deleting")
md.destroy()
def toggle_tab_visible(self,name):
tabs_open=0
print name
for i in range(0, self.number_of_tabs):
if self.rod[i].visible==True:
tabs_open=tabs_open+1
#print "tabs open",tabs_open,self.number_of_tabs
for i in range(0, self.number_of_tabs):
print self.rod[i].tab_name, name, self.rod[i].visible
if self.rod[i].tab_name==name:
if self.rod[i].visible==False:
self.rod[i].set_visible(True)
self.rod[i].visible=True
else:
if tabs_open>1:
print self.rod[i].tab_label
self.rod[i].set_visible(False)
self.rod[i].visible=False
def callback_view_toggle(self, widget, data):
#print "one",widget.get_label()
self.toggle_tab_visible(widget.get_label())
def callback_view_toggle_tab(self, widget, data):
self.toggle_tab_visible(data)
def callback_run_all_simulations(self,widget):
for i in range(0,self.notebook.get_n_pages()):
tab = self.notebook.get_nth_page(i)
tab.simulate(True,True)
def callback_stop_simulation(self,widget):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.stop_simulation()
def load_tabs(self):
sim_dirs=[]
get_scan_dirs(sim_dirs,self.sim_dir)
print sim_dirs,self.sim_dir
if len(sim_dirs)==0:
sim_dirs.append("scan1")
else:
for i in range(0,len(sim_dirs)):
sim_dirs[i]=sim_dirs[i]
for i in range(0,len(sim_dirs)):
self.add_page(sim_dirs[i])
def clear_pages(self):
for items in self.tab_menu.get_children():
self.tab_menu.remove(items)
for child in self.notebook.get_children():
self.notebook.remove(child)
self.rod=[]
def add_page(self,name):
hbox=gtk.HBox()
hbox.set_size_request(-1, 25)
label=gtk.Label("")
sim_name=os.path.basename(os.path.normpath(name))
print "Looking for",sim_name,name
self.rod.append(scan_vbox())
self.rod[len(self.rod)-1].init(self.myserver,self.tooltips,self.status_bar,self.context_id,label,self.sim_dir,sim_name)
label.set_justify(gtk.JUSTIFY_LEFT)
hbox.pack_start(label, False, True, 0)
button = gtk.Button()
close_image = gtk.Image()
close_image.set_from_file(os.path.join(get_image_file_path(),"close.png"))
close_image.show()
button.add(close_image)
button.props.relief = gtk.RELIEF_NONE
button.connect("clicked", self.callback_view_toggle_tab,self.rod[len(self.rod)-1].tab_name)
button.set_size_request(25, 25)
button.show()
hbox.pack_end(button, False, False, 0)
hbox.show_all()
self.notebook.append_page(self.rod[len(self.rod)-1],hbox)
self.notebook.set_tab_reorderable(self.rod[len(self.rod)-1],True)
menu_item = gtk.CheckMenuItem(sim_name)
menu_item.set_active(True)
self.tab_menu.append(menu_item)
menu_item.show()
menu_item.set_active(self.rod[len(self.rod)-1].visible)
#print "Rod",name,self.rod[len(self.rod)-1].visible
menu_item.connect("activate", self.callback_view_toggle,menu_item)
self.number_of_tabs=self.number_of_tabs+1
def callback_last_menu_click(self, widget, data):
print [data]
def switch_page(self,page, page_num, user_param1):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
self.status_bar.push(self.context_id, tab.sim_dir)
def callback_remove_all_results(self, widget, data):
results=[]
return_file_list(results,self.sim_dir,"scan.inp")
for i in range(0,len(results)):
dir_name=os.path.dirname(results[i])
if os.path.isdir(dir_name):
print "delete:",dir_name
#opvdm_delete_file(dir_name)
def callback_wol(self, widget, data):
self.myserver.wake_nodes()
def init(self,my_server):
self.cluster_window=None
self.win_list=windows()
self.win_list.load()
self.win_list.set_window(self,"scan_window")
print "constructur"
self.rod=[]
if os.path.isfile("scan_window.inp"):
f = open("scan_window.inp")
lines = f.readlines()
f.close()
path=lines[0].strip()
if path.startswith(os.getcwd()):
self.sim_dir=path
else:
self.sim_dir=os.getcwd()
else:
self.sim_dir=os.getcwd()
self.tooltips = gtk.Tooltips()
self.set_border_width(2)
self.set_title(_("Parameter scan - opvdm"))
n=0
self.hpc_root_dir= os.path.abspath(os.getcwd()+'/../')
self.number_of_tabs=0
items=0
self.status_bar = gtk.Statusbar()
self.status_bar.show()
self.context_id = self.status_bar.get_context_id("Statusbar example")
box=gtk.HBox()
box.add(self.status_bar)
box.set_child_packing(self.status_bar, True, True, 0, 0)
box.show()
self.menu_items = (
( _("/_File"), None, None, 0, "<Branch>" ),
( _("/File/Change dir"), None, self.callback_change_dir, 0, None ),
( _("/File/Close"), None, self.callback_close, 0, None ),
( _("/Simulations/_New"), None, self.callback_add_page, 0, "<StockItem>", "gtk-new" ),
( _("/Simulations/_Delete simulaton"), None, self.callback_delete_page, 0, "<StockItem>", "gtk-delete" ),
( _("/Simulations/_Rename simulation"), None, self.callback_rename_page, 0, "<StockItem>", "gtk-edit" ),
( _("/Simulations/_Clone simulation"), None, self.callback_copy_page, 0, "<StockItem>", "gtk-copy" ),
( _("/Simulations/sep1"), None, None, 0, "<Separator>" ),
( _("/Simulations/_Run simulation"), None, self.callback_run_simulation, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Build simulation"), None, self.callback_build_simulation, 0, "<StockItem>", "gtk-cdrom" ),
( _("/Advanced/_Run (no build)"), None, self.callback_run_simulation_no_build, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Run nested simulation"), None, self.callback_nested_simulation, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Clean simulation"), None, self.callback_clean_simulation, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/_Clean unconverged simulation"), None, self.callback_clean_unconverged_simulation, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/_Clean simulation output"), None, self.callback_clean_simulation_output, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/sep2"), None, None, 0, "<Separator>" ),
( _("/Advanced/_Import from hpc"), None, self.callback_import_from_hpc, 0, "<StockItem>", "gtk-open" ),
( _("/Advanced/_Push to hpc"), None, self.callback_push_to_hpc, 0, "<StockItem>", "gtk-save" ),
( _("/Advanced/_Push unconverged to hpc"), None, self.callback_push_unconverged_to_hpc, 0, "<StockItem>", "gtk-save" ),
( _("/Advanced/_Set hpc dir"), None, self.callback_set_hpc_dir, 0, "<StockItem>", "gtk-open" ),
( _("/Advanced/_Cluster sleep"), None, self.callback_cluster_sleep, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Cluster poweroff"), None, self.callback_cluster_poweroff, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Cluster wake"), None, self.callback_wol, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Remove all results"), None, self.callback_remove_all_results, 0, "<StockItem>", "gtk-copy" ),
( _("/_Help"), None, None, 0, "<LastBranch>" ),
( _("/_Help/Help"), None, self.callback_help, 0, None ),
( _("/_Help/About"), None, about_dialog_show, 0, "<StockItem>", "gtk-about" ),
)
main_vbox = gtk.VBox(False, 3)
menubar = self.get_main_menu(self)
main_vbox.pack_start(menubar, False, False, 0)
menubar.show()
toolbar = gtk.Toolbar()
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_size_request(-1, 50)
pos=0
#image = gtk.Image()
#image.set_from_file(os.path.join(get_image_file_path(),"new-tab.png"))
tb_new_scan = gtk.MenuToolButton(gtk.STOCK_NEW)
tb_new_scan.connect("clicked", self.callback_add_page)
self.tooltips.set_tip(tb_new_scan, _("New simulation"))
self.tab_menu=gtk.Menu()
tb_new_scan.set_menu(self.tab_menu)
toolbar.insert(tb_new_scan, pos)
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
delete = gtk.ToolButton(gtk.STOCK_DELETE)
delete.connect("clicked", self.callback_delete_page,None)
self.tooltips.set_tip(delete, _("Delete simulation"))
toolbar.insert(delete, pos)
pos=pos+1
copy = gtk.ToolButton(gtk.STOCK_COPY)
copy.connect("clicked", self.callback_copy_page,None)
self.tooltips.set_tip(copy, _("Clone simulation"))
toolbar.insert(copy, pos)
pos=pos+1
rename = gtk.ToolButton(gtk.STOCK_EDIT)
rename.connect("clicked", self.callback_rename_page,None)
self.tooltips.set_tip(rename, _("Rename simulation"))
toolbar.insert(rename, pos)
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
image = gtk.Image()
image.set_from_file(os.path.join(get_image_file_path(),"forward2.png"))
tb_simulate = gtk.ToolButton(image)
tb_simulate.connect("clicked", self.callback_run_all_simulations)
self.tooltips.set_tip(tb_simulate, _("Run all simulation"))
toolbar.insert(tb_simulate, pos)
pos=pos+1
if debug_mode()==True:
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
image = gtk.Image()
image.set_from_file(os.path.join(get_image_file_path(),"server.png"))
cluster = gtk.ToolButton(image)
cluster.connect("clicked", self.callback_cluster)
self.tooltips.set_tip(cluster, _("Configure cluster"))
toolbar.insert(cluster, pos)
cluster.show()
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(False)
sep.set_expand(True)
toolbar.insert(sep, pos)
pos=pos+1
tb_help = gtk.ToolButton(gtk.STOCK_HELP)
tb_help.connect("clicked", self.callback_help)
self.tooltips.set_tip(tb_help, _("Help"))
toolbar.insert(tb_help, pos)
pos=pos+1
toolbar.show_all()
main_vbox.pack_start(toolbar, False, False, 0)
#main_vbox.add(toolbar)
main_vbox.set_border_width(1)
self.add(main_vbox)
main_vbox.show()
self.myserver=my_server
self.notebook = gtk.Notebook()
self.notebook.show()
self.notebook.set_tab_pos(gtk.POS_LEFT)
self.load_tabs()
main_vbox.pack_start(self.notebook, True, True, 0)
main_vbox.pack_start(box, False, False, 0)
self.connect("delete-event", self.callback_close)
self.notebook.connect("switch-page",self.switch_page)
self.set_icon_from_file(os.path.join(get_image_file_path(),"image.jpg"))
self.hide()
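# Hedged launch sketch (not part of the original file): how this window is
# typically created from the main opvdm GUI. The server() constructor call and
# the gtk main-loop wiring are assumptions; only scan_class.init(my_server) is
# defined above.
#
#   myserver = server()
#   scan_window = scan_class()
#   scan_window.init(myserver)
#   scan_window.show()
#   gtk.main()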
| 0.039547 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
"""build query for doclistview and return results"""
import frappe, json, copy, re
import frappe.defaults
import frappe.share
import frappe.permissions
from frappe.utils import flt, cint, getdate, get_datetime, get_time, make_filter_tuple, get_filter, add_to_date
from frappe import _
from frappe.model import optional_fields
from frappe.client import check_parent_permission
from frappe.model.utils.user_settings import get_user_settings, update_user_settings
from datetime import datetime
class DatabaseQuery(object):
def __init__(self, doctype, user=None):
self.doctype = doctype
self.tables = []
self.conditions = []
self.or_conditions = []
self.fields = None
self.user = user or frappe.session.user
self.ignore_ifnull = False
self.flags = frappe._dict()
self.reference_doctype = None
def execute(self, query=None, fields=None, filters=None, or_filters=None,
docstatus=None, group_by=None, order_by=None, limit_start=False,
limit_page_length=None, as_list=False, with_childnames=False, debug=False,
ignore_permissions=False, user=None, with_comment_count=False,
join='left join', distinct=False, start=None, page_length=None, limit=None,
ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False,
update=None, add_total_row=None, user_settings=None, reference_doctype=None):
if not ignore_permissions and not frappe.has_permission(self.doctype, "read", user=user):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype))
raise frappe.PermissionError(self.doctype)
# filters and fields swappable
		# it's hard to remember what comes first
if (isinstance(fields, dict)
or (isinstance(fields, list) and fields and isinstance(fields[0], list))):
			# if fields is given as a dict / list of lists, it's probably filters
filters, fields = fields, filters
elif fields and isinstance(filters, list) \
and len(filters) > 1 and isinstance(filters[0], string_types):
			# if `filters` is a list of strings, it's probably fields
filters, fields = fields, filters
if fields:
self.fields = fields
else:
self.fields = ["`tab{0}`.`name`".format(self.doctype)]
if start: limit_start = start
if page_length: limit_page_length = page_length
if limit: limit_page_length = limit
self.filters = filters or []
self.or_filters = or_filters or []
self.docstatus = docstatus or []
self.group_by = group_by
self.order_by = order_by
self.limit_start = 0 if (limit_start is False) else cint(limit_start)
self.limit_page_length = cint(limit_page_length) if limit_page_length else None
self.with_childnames = with_childnames
self.debug = debug
self.join = join
self.distinct = distinct
self.as_list = as_list
self.ignore_ifnull = ignore_ifnull
self.flags.ignore_permissions = ignore_permissions
self.user = user or frappe.session.user
self.update = update
self.user_settings_fields = copy.deepcopy(self.fields)
# for contextual user permission check
# to determine which user permission is applicable on link field of specific doctype
self.reference_doctype = reference_doctype or self.doctype
if user_settings:
self.user_settings = json.loads(user_settings)
if query:
result = self.run_custom_query(query)
else:
result = self.build_and_run()
if with_comment_count and not as_list and self.doctype:
self.add_comment_count(result)
if save_user_settings:
self.save_user_settings_fields = save_user_settings_fields
self.update_user_settings()
return result
def build_and_run(self):
args = self.prepare_args()
args.limit = self.add_limit()
if args.conditions:
args.conditions = "where " + args.conditions
if self.distinct:
args.fields = 'distinct ' + args.fields
query = """select %(fields)s from %(tables)s %(conditions)s
%(group_by)s %(order_by)s %(limit)s""" % args
return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug, update=self.update)
def prepare_args(self):
self.parse_args()
self.sanitize_fields()
self.extract_tables()
self.set_optional_columns()
self.build_conditions()
args = frappe._dict()
if self.with_childnames:
for t in self.tables:
if t != "`tab" + self.doctype + "`":
self.fields.append(t + ".name as '%s:name'" % t[4:-1])
# query dict
args.tables = self.tables[0]
# left join parent, child tables
for child in self.tables[1:]:
args.tables += " {join} {child} on ({child}.parent = {main}.name)".format(join=self.join,
child=child, main=self.tables[0])
if self.grouped_or_conditions:
self.conditions.append("({0})".format(" or ".join(self.grouped_or_conditions)))
args.conditions = ' and '.join(self.conditions)
if self.or_conditions:
args.conditions += (' or ' if args.conditions else "") + \
' or '.join(self.or_conditions)
self.set_field_tables()
args.fields = ', '.join(self.fields)
self.set_order_by(args)
self.validate_order_by_and_group_by(args.order_by)
args.order_by = args.order_by and (" order by " + args.order_by) or ""
self.validate_order_by_and_group_by(self.group_by)
args.group_by = self.group_by and (" group by " + self.group_by) or ""
return args
def parse_args(self):
"""Convert fields and filters from strings to list, dicts"""
if isinstance(self.fields, string_types):
if self.fields == "*":
self.fields = ["*"]
else:
try:
self.fields = json.loads(self.fields)
except ValueError:
self.fields = [f.strip() for f in self.fields.split(",")]
# remove empty strings / nulls in fields
self.fields = [f for f in self.fields if f]
for filter_name in ["filters", "or_filters"]:
filters = getattr(self, filter_name)
if isinstance(filters, string_types):
filters = json.loads(filters)
if isinstance(filters, dict):
fdict = filters
filters = []
for key, value in iteritems(fdict):
filters.append(make_filter_tuple(self.doctype, key, value))
setattr(self, filter_name, filters)
def sanitize_fields(self):
'''
regex : ^.*[,();].*
		purpose : The regex looks for malicious patterns like `,`, '(', ')' and ';' in each
			field, which may lead to SQL injection.
		example :
			field = "`DocType`.`issingle`, version()"
			Because the field contains `,` and the MySQL function `version()`, it is
			rejected by this check.
'''
sub_query_regex = re.compile("^.*[,();].*")
blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case']
blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce',
'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user',
'system_user', 'user', 'version']
def _raise_exception():
frappe.throw(_('Use of sub-query or function is restricted'), frappe.DataError)
def _is_query(field):
if re.compile(r"^(select|delete|update|drop|create)\s").match(field):
_raise_exception()
elif re.compile(r"\s*[0-9a-zA-z]*\s*( from | group by | order by | where | join )").match(field):
_raise_exception()
for field in self.fields:
if sub_query_regex.match(field):
if any(keyword in field.lower().split() for keyword in blacklisted_keywords):
_raise_exception()
if any("({0}".format(keyword) in field.lower() for keyword in blacklisted_keywords):
_raise_exception()
if any("{0}(".format(keyword) in field.lower() for keyword in blacklisted_functions):
_raise_exception()
if re.compile(r"[0-9a-zA-Z]+\s*'").match(field):
_raise_exception()
if re.compile(r"[0-9a-zA-Z]+\s*,").match(field):
_raise_exception()
_is_query(field)
def extract_tables(self):
"""extract tables from fields"""
self.tables = ['`tab' + self.doctype + '`']
# add tables from fields
if self.fields:
for f in self.fields:
if ( not ("tab" in f and "." in f) ) or ("locate(" in f) or ("count(" in f):
continue
table_name = f.split('.')[0]
if table_name.lower().startswith('group_concat('):
table_name = table_name[13:]
if table_name.lower().startswith('ifnull('):
table_name = table_name[7:]
if not table_name[0]=='`':
table_name = '`' + table_name + '`'
if not table_name in self.tables:
self.append_table(table_name)
def append_table(self, table_name):
self.tables.append(table_name)
doctype = table_name[4:-1]
if (not self.flags.ignore_permissions) and (not frappe.has_permission(doctype)):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype))
raise frappe.PermissionError(doctype)
def set_field_tables(self):
		'''If there is more than one table, the fieldname must not be ambiguous.
		If the fieldname is not explicitly qualified, prefix it with the default (first) table.'''
if len(self.tables) > 1:
for i, f in enumerate(self.fields):
if '.' not in f:
self.fields[i] = '{0}.{1}'.format(self.tables[0], f)
def set_optional_columns(self):
"""Removes optional columns like `_user_tags`, `_comments` etc. if not in table"""
columns = frappe.db.get_table_columns(self.doctype)
# remove from fields
to_remove = []
for fld in self.fields:
for f in optional_fields:
if f in fld and not f in columns:
to_remove.append(fld)
for fld in to_remove:
del self.fields[self.fields.index(fld)]
# remove from filters
to_remove = []
for each in self.filters:
if isinstance(each, string_types):
each = [each]
for element in each:
if element in optional_fields and element not in columns:
to_remove.append(each)
for each in to_remove:
if isinstance(self.filters, dict):
del self.filters[each]
else:
self.filters.remove(each)
def build_conditions(self):
self.conditions = []
self.grouped_or_conditions = []
self.build_filter_conditions(self.filters, self.conditions)
self.build_filter_conditions(self.or_filters, self.grouped_or_conditions)
# match conditions
if not self.flags.ignore_permissions:
match_conditions = self.build_match_conditions()
if match_conditions:
self.conditions.append("(" + match_conditions + ")")
def build_filter_conditions(self, filters, conditions, ignore_permissions=None):
"""build conditions from user filters"""
if ignore_permissions is not None:
self.flags.ignore_permissions = ignore_permissions
if isinstance(filters, dict):
filters = [filters]
for f in filters:
if isinstance(f, string_types):
conditions.append(f)
else:
conditions.append(self.prepare_filter_condition(f))
def prepare_filter_condition(self, f):
"""Returns a filter condition in the format:
ifnull(`tabDocType`.`fieldname`, fallback) operator "value"
"""
f = get_filter(self.doctype, f)
tname = ('`tab' + f.doctype + '`')
if not tname in self.tables:
self.append_table(tname)
if 'ifnull(' in f.fieldname:
column_name = f.fieldname
else:
column_name = '{tname}.{fname}'.format(tname=tname,
fname=f.fieldname)
can_be_null = True
# prepare in condition
if f.operator.lower() in ('ancestors of', 'descendants of', 'not ancestors of', 'not descendants of'):
values = f.value or ''
# TODO: handle list and tuple
# if not isinstance(values, (list, tuple)):
# values = values.split(",")
ref_doctype = f.doctype
if frappe.get_meta(f.doctype).get_field(f.fieldname) is not None :
ref_doctype = frappe.get_meta(f.doctype).get_field(f.fieldname).options
result=[]
lft, rgt = frappe.db.get_value(ref_doctype, f.value, ["lft", "rgt"])
# Get descendants elements of a DocType with a tree structure
if f.operator.lower() in ('descendants of', 'not descendants of') :
result = frappe.db.sql_list("""select name from `tab{0}`
where lft>%s and rgt<%s order by lft asc""".format(ref_doctype), (lft, rgt))
else :
# Get ancestor elements of a DocType with a tree structure
result = frappe.db.sql_list("""select name from `tab{0}`
where lft<%s and rgt>%s order by lft desc""".format(ref_doctype), (lft, rgt))
fallback = "''"
value = (frappe.db.escape((v or '').strip(), percent=False) for v in result)
value = '("{0}")'.format('", "'.join(value))
			# change the operator to IN, as the code above fetches all the parent / child values and
			# converts them into a tuple that can be used directly with the IN operator in the query
f.operator = 'not in' if f.operator.lower() in ('not ancestors of', 'not descendants of') else 'in'
elif f.operator.lower() in ('in', 'not in'):
values = f.value or ''
if not isinstance(values, (list, tuple)):
values = values.split(",")
fallback = "''"
value = (frappe.db.escape((v or '').strip(), percent=False) for v in values)
value = '("{0}")'.format('", "'.join(value))
else:
df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname})
df = df[0] if df else None
if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"):
can_be_null = False
if f.operator.lower() == 'between' and \
(f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))):
value = get_between_date_filter(f.value, df)
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Date":
value = getdate(f.value).strftime("%Y-%m-%d")
fallback = "'0000-00-00'"
elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime):
value = get_datetime(f.value).strftime("%Y-%m-%d %H:%M:%S.%f")
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Time":
value = get_time(f.value).strftime("%H:%M:%S.%f")
fallback = "'00:00:00'"
elif f.operator.lower() == "is":
if f.value == 'set':
f.operator = '!='
elif f.value == 'not set':
f.operator = '='
value = ""
fallback = '""'
can_be_null = True
if 'ifnull' not in column_name:
column_name = 'ifnull({}, {})'.format(column_name, fallback)
elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, string_types) and
(not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])):
value = "" if f.value==None else f.value
fallback = '""'
if f.operator.lower() in ("like", "not like") and isinstance(value, string_types):
# because "like" uses backslash (\) for escaping
value = value.replace("\\", "\\\\").replace("%", "%%")
else:
value = flt(f.value)
fallback = 0
# put it inside double quotes
if isinstance(value, string_types) and not f.operator.lower() == 'between':
value = '"{0}"'.format(frappe.db.escape(value, percent=False))
if (self.ignore_ifnull
or not can_be_null
or (f.value and f.operator.lower() in ('=', 'like'))
or 'ifnull(' in column_name.lower()):
condition = '{column_name} {operator} {value}'.format(
column_name=column_name, operator=f.operator,
value=value)
else:
condition = 'ifnull({column_name}, {fallback}) {operator} {value}'.format(
column_name=column_name, fallback=fallback, operator=f.operator,
value=value)
return condition
def build_match_conditions(self, as_condition=True):
"""add match conditions if applicable"""
self.match_filters = []
self.match_conditions = []
only_if_shared = False
if not self.user:
self.user = frappe.session.user
if not self.tables: self.extract_tables()
meta = frappe.get_meta(self.doctype)
role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user)
self.shared = frappe.share.get_shared(self.doctype, self.user)
if (not meta.istable and
not role_permissions.get("read") and
not self.flags.ignore_permissions and
not has_any_user_permission_for_doctype(self.doctype, self.user, self.reference_doctype)):
only_if_shared = True
if not self.shared:
frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError)
else:
self.conditions.append(self.get_share_condition())
else:
if role_permissions.get("if_owner", {}).get("read"): #if has if_owner permission skip user perm check
self.match_conditions.append("`tab{0}`.owner = '{1}'".format(self.doctype,
frappe.db.escape(self.user, percent=False)))
elif role_permissions.get("read"): # add user permission only if role has read perm
# get user permissions
user_permissions = frappe.permissions.get_user_permissions(self.user)
self.add_user_permissions(user_permissions)
if as_condition:
conditions = ""
if self.match_conditions:
# will turn out like ((blog_post in (..) and blogger in (...)) or (blog_category in (...)))
conditions = "((" + ") or (".join(self.match_conditions) + "))"
doctype_conditions = self.get_permission_query_conditions()
if doctype_conditions:
conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions
# share is an OR condition, if there is a role permission
if not only_if_shared and self.shared and conditions:
conditions = "({conditions}) or ({shared_condition})".format(
conditions=conditions, shared_condition=self.get_share_condition())
return conditions
else:
return self.match_filters
def get_share_condition(self):
return """`tab{0}`.name in ({1})""".format(self.doctype, ", ".join(["'%s'"] * len(self.shared))) % \
tuple([frappe.db.escape(s, percent=False) for s in self.shared])
def add_user_permissions(self, user_permissions):
meta = frappe.get_meta(self.doctype)
doctype_link_fields = []
doctype_link_fields = meta.get_link_fields()
doctype_link_fields.append(dict(
options=self.doctype,
fieldname='name',
))
		# the current doctype was appended above with fieldname 'name' so that a
		# condition on the doc name is added if a user permission exists for the current doctype
match_filters = {}
match_conditions = []
for df in doctype_link_fields:
user_permission_values = user_permissions.get(df.get('options'), {})
if df.get('ignore_user_permissions'): continue
empty_value_condition = 'ifnull(`tab{doctype}`.`{fieldname}`, "")=""'.format(
doctype=self.doctype, fieldname=df.get('fieldname')
)
if user_permission_values:
docs = []
if frappe.get_system_settings("apply_strict_user_permissions"):
condition = ""
else:
condition = empty_value_condition + " or "
for permission in user_permission_values:
if not permission.get('applicable_for'):
docs.append(permission.get('doc'))
# append docs based on user permission applicable on reference doctype
					# This is useful when getting a list of docs from a link field;
					# in this case the parent doctype of the link field is the
					# reference doctype
elif df.get('fieldname') == 'name' and self.reference_doctype:
if permission.get('applicable_for') == self.reference_doctype:
docs.append(permission.get('doc'))
elif permission.get('applicable_for') == self.doctype:
docs.append(permission.get('doc'))
if docs:
condition += "`tab{doctype}`.`{fieldname}` in ({values})".format(
doctype=self.doctype,
fieldname=df.get('fieldname'),
values=", ".join(
[('"' + frappe.db.escape(doc, percent=False) + '"') for doc in docs])
)
match_conditions.append("({condition})".format(condition=condition))
match_filters[df.get('options')] = docs
if match_conditions:
self.match_conditions.append(" and ".join(match_conditions))
if match_filters:
self.match_filters.append(match_filters)
def get_permission_query_conditions(self):
condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, [])
if condition_methods:
conditions = []
for method in condition_methods:
c = frappe.call(frappe.get_attr(method), self.user)
if c:
conditions.append(c)
return " and ".join(conditions) if conditions else None
def run_custom_query(self, query):
if '%(key)s' in query:
query = query.replace('%(key)s', 'name')
return frappe.db.sql(query, as_dict = (not self.as_list))
def set_order_by(self, args):
meta = frappe.get_meta(self.doctype)
if self.order_by:
args.order_by = self.order_by
else:
args.order_by = ""
# don't add order by from meta if a mysql group function is used without group by clause
group_function_without_group_by = (len(self.fields)==1 and
( self.fields[0].lower().startswith("count(")
or self.fields[0].lower().startswith("min(")
or self.fields[0].lower().startswith("max(")
) and not self.group_by)
if not group_function_without_group_by:
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
					# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
args.order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(self.doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
args.order_by = "`tab{0}`.`{1}` {2}".format(self.doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
args.order_by = "`tab{0}`.docstatus asc, {1}".format(self.doctype, args.order_by)
def validate_order_by_and_group_by(self, parameters):
"""Check order by, group by so that atleast one column is selected and does not have subquery"""
if not parameters:
return
_lower = parameters.lower()
if 'select' in _lower and ' from ' in _lower:
frappe.throw(_('Cannot use sub-query in order by'))
for field in parameters.split(","):
if "." in field and field.strip().startswith("`tab"):
tbl = field.strip().split('.')[0]
if tbl not in self.tables:
if tbl.startswith('`'):
tbl = tbl[4:-1]
frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl))
def add_limit(self):
if self.limit_page_length:
return 'limit %s, %s' % (self.limit_start, self.limit_page_length)
else:
return ''
def add_comment_count(self, result):
for r in result:
if not r.name:
continue
r._comment_count = 0
if "_comments" in r:
r._comment_count = len(json.loads(r._comments or "[]"))
def update_user_settings(self):
# update user settings if new search
user_settings = json.loads(get_user_settings(self.doctype))
if hasattr(self, 'user_settings'):
user_settings.update(self.user_settings)
if self.save_user_settings_fields:
user_settings['fields'] = self.user_settings_fields
update_user_settings(self.doctype, user_settings)
def get_order_by(doctype, meta):
order_by = ""
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
		# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
order_by = "`tab{0}`.`{1}` {2}".format(doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
order_by = "`tab{0}`.docstatus asc, {1}".format(doctype, order_by)
return order_by
@frappe.whitelist()
def get_list(doctype, *args, **kwargs):
'''wrapper for DatabaseQuery'''
kwargs.pop('cmd', None)
kwargs.pop('ignore_permissions', None)
# If doctype is child table
if frappe.is_table(doctype):
# Example frappe.db.get_list('Purchase Receipt Item', {'parent': 'Purchase Receipt'})
# Here purchase receipt is the parent doctype of the child doctype Purchase Receipt Item
if not kwargs.get('parent'):
frappe.flags.error_message = _('Parent is required to get child table data')
raise frappe.PermissionError(doctype)
check_parent_permission(kwargs.get('parent'), doctype)
del kwargs['parent']
return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def is_parent_only_filter(doctype, filters):
#check if filters contains only parent doctype
only_parent_doctype = True
if isinstance(filters, list):
for flt in filters:
if doctype not in flt:
only_parent_doctype = False
if 'Between' in flt:
flt[3] = get_between_date_filter(flt[3])
return only_parent_doctype
def has_any_user_permission_for_doctype(doctype, user, applicable_for):
user_permissions = frappe.permissions.get_user_permissions(user=user)
doctype_user_permissions = user_permissions.get(doctype, [])
for permission in doctype_user_permissions:
if not permission.applicable_for or permission.applicable_for == applicable_for:
return True
return False
def get_between_date_filter(value, df=None):
'''
	Return the formatted date range as per the given example:
[u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000'
'''
from_date = None
to_date = None
date_format = "%Y-%m-%d %H:%M:%S.%f"
if df:
date_format = "%Y-%m-%d %H:%M:%S.%f" if df.fieldtype == 'Datetime' else "%Y-%m-%d"
if value and isinstance(value, (list, tuple)):
if len(value) >= 1: from_date = value[0]
if len(value) >= 2: to_date = value[1]
if not df or (df and df.fieldtype == 'Datetime'):
to_date = add_to_date(to_date,days=1)
data = "'%s' AND '%s'" % (
get_datetime(from_date).strftime(date_format),
get_datetime(to_date).strftime(date_format))
return data
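# Hedged usage sketch (not part of the original module): the public entry points
# above are DatabaseQuery.execute() and the whitelisted get_list() wrapper. The
# doctype name, fields and filter values below are illustrative assumptions and
# require a running Frappe site.
#
#   results = DatabaseQuery("ToDo").execute(
#       fields=["name", "description"],
#       filters={"status": "Open"},
#       order_by="modified desc",
#       limit_page_length=20,
#   )
#   # or, through the whitelisted wrapper:
#   results = get_list("ToDo", fields=["name"], filters={"status": "Open"})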
| 0.027226 |