# -*- coding: utf-8 -*-
from __future__ import absolute_import
from email.utils import parseaddr
import functools
import htmlentitydefs
import itertools
import logging
import operator
import re
from ast import literal_eval
from openerp.tools import mute_logger
# Validation Library https://pypi.python.org/pypi/validate_email/1.1
from .validate_email import validate_email
import openerp
from openerp.osv import orm
from openerp.osv import fields
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
pattern = re.compile(r"&(\w+?);")
_logger = logging.getLogger('base.partner.merge')
# http://www.php2python.com/wiki/function.html-entity-decode/
def html_entity_decode_char(m, defs=None):
if defs is None:
defs = htmlentitydefs.entitydefs
try:
return defs[m.group(1)]
except KeyError:
return m.group(0)
def html_entity_decode(string):
return pattern.sub(html_entity_decode_char, string)
def sanitize_email(partner_email):
assert isinstance(partner_email, basestring) and partner_email
result = re.subn(r';|/|:', ',',
html_entity_decode(partner_email or ''))[0].split(',')
emails = [parseaddr(email)[1]
for item in result
for email in item.split()]
return [email.lower()
for email in emails
if validate_email(email)]
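# Illustrative sketch (not part of the original module, sample value is made
# up): sanitize_email splits on ';', '/', ':', commas and whitespace, decodes
# HTML entities, and keeps only addresses accepted by validate_email.
def _sanitize_email_example():
    # "a@x.com; John <b@y.com> not-an-email" -> ['a@x.com', 'b@y.com']
    return sanitize_email('a@x.com; John <b@y.com> not-an-email')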
def is_integer_list(ids):
return all(isinstance(i, (int, long)) for i in ids)
class MergePartnerLine(orm.TransientModel):
_name = 'base.partner.merge.line'
_columns = {
'wizard_id': fields.many2one('base.partner.merge.automatic.wizard',
'Wizard'),
'min_id': fields.integer('MinID'),
'aggr_ids': fields.char('Ids', required=True),
}
_order = 'min_id asc'
class MergePartnerAutomatic(orm.TransientModel):
"""
This wizard builds a list of potential partners to merge. Two objects are
used: the first one is the wizard presented to the end user, and the second
one holds the lists of partners to merge.
"""
_name = 'base.partner.merge.automatic.wizard'
_columns = {
# Group by
'group_by_email': fields.boolean('Email'),
'group_by_name': fields.boolean('Name'),
'group_by_is_company': fields.boolean('Is Company'),
'group_by_vat': fields.boolean('VAT'),
'group_by_parent_id': fields.boolean('Parent Company'),
'state': fields.selection([('option', 'Option'),
('selection', 'Selection'),
('finished', 'Finished')],
'State',
readonly=True,
required=True),
'number_group': fields.integer("Group of Contacts", readonly=True),
'current_line_id': fields.many2one('base.partner.merge.line',
'Current Line'),
'line_ids': fields.one2many('base.partner.merge.line',
'wizard_id', 'Lines'),
'partner_ids': fields.many2many('res.partner', string='Contacts'),
'dst_partner_id': fields.many2one('res.partner',
string='Destination Contact'),
'exclude_contact': fields.boolean('A user associated to the contact'),
'exclude_journal_item': fields.boolean('Journal Items associated'
' to the contact'),
'maximum_group': fields.integer("Maximum of Group of Contacts"),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(MergePartnerAutomatic, self
).default_get(cr, uid, fields, context)
if (context.get('active_model') == 'res.partner' and
context.get('active_ids')):
partner_ids = context['active_ids']
res['state'] = 'selection'
res['partner_ids'] = partner_ids
res['dst_partner_id'] = self._get_ordered_partner(cr, uid,
partner_ids,
context=context
)[-1].id
return res
_defaults = {
'state': 'option'
}
def get_fk_on(self, cr, table):
q = """ SELECT cl1.relname as table,
att1.attname as column
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND cl2.relname = %s
AND att2.attname = 'id'
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND con.contype = 'f'
"""
return cr.execute(q, (table,))
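# get_fk_on leaves its result on the cursor: a following cr.fetchall() yields
# (table, column) pairs for every foreign key referencing res_partner.id,
# e.g. ('res_company', 'partner_id'); the pair shown is only an illustration,
# the actual set depends on the installed modules.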
def _update_foreign_keys(self, cr, uid, src_partners,
dst_partner, context=None):
_logger.debug('_update_foreign_keys for dst_partner: %s for '
'src_partners: %r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
# find the many2one relation to a partner
proxy = self.pool.get('res.partner')
self.get_fk_on(cr, 'res_partner')
# skip the merge wizard's own tables
for table, column in cr.fetchall():
if 'base_partner_merge_' in table:
continue
partner_ids = tuple(map(int, src_partners))
query = ("SELECT column_name FROM information_schema.columns"
" WHERE table_name LIKE '%s'") % (table)
cr.execute(query, ())
columns = []
for data in cr.fetchall():
if data[0] != column:
columns.append(data[0])
query_dic = {
'table': table,
'column': column,
'value': columns[0],
}
if len(columns) <= 1:
# the FK column has at most one other column (typically a many2many
# relation table): only repoint rows that would not duplicate an
# existing link to the destination partner
query = """
UPDATE "%(table)s" as ___tu
SET %(column)s = %%s
WHERE
%(column)s = %%s AND
NOT EXISTS (
SELECT 1
FROM "%(table)s" as ___tw
WHERE
%(column)s = %%s AND
___tu.%(value)s = ___tw.%(value)s
)""" % query_dic
for partner_id in partner_ids:
cr.execute(query, (dst_partner.id, partner_id,
dst_partner.id))
else:
cr.execute("SAVEPOINT recursive_partner_savepoint")
try:
query = ('UPDATE "%(table)s" SET %(column)s = %%s WHERE '
'%(column)s IN %%s') % query_dic
cr.execute(query, (dst_partner.id, partner_ids,))
if (column == proxy._parent_name and
table == 'res_partner'):
query = """
WITH RECURSIVE cycle(id, parent_id) AS (
SELECT id, parent_id FROM res_partner
UNION
SELECT cycle.id, res_partner.parent_id
FROM res_partner, cycle
WHERE res_partner.id = cycle.parent_id
AND cycle.id != cycle.parent_id
)
SELECT id FROM cycle
WHERE id = parent_id AND id = %s
"""
cr.execute(query, (dst_partner.id,))
if cr.fetchall():
cr.execute("ROLLBACK TO SAVEPOINT "
"recursive_partner_savepoint")
finally:
cr.execute("RELEASE SAVEPOINT "
"recursive_partner_savepoint")
def _update_reference_fields(self, cr, uid, src_partners, dst_partner,
context=None):
_logger.debug('_update_reference_fields for dst_partner: %s for '
'src_partners: %r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
def update_records(model, src, field_model='model', field_id='res_id',
context=None):
proxy = self.pool.get(model)
if proxy is None:
return
domain = [(field_model, '=', 'res.partner'),
(field_id, '=', src.id)]
ids = proxy.search(cr, openerp.SUPERUSER_ID,
domain, context=context)
return proxy.write(cr, openerp.SUPERUSER_ID, ids,
{field_id: dst_partner.id}, context=context)
update_records = functools.partial(update_records, context=context)
for partner in src_partners:
update_records('base.calendar', src=partner,
field_model='model_id.model')
update_records('ir.attachment', src=partner,
field_model='res_model')
update_records('mail.followers', src=partner,
field_model='res_model')
update_records('mail.message', src=partner)
update_records('marketing.campaign.workitem', src=partner,
field_model='object_id.model')
update_records('ir.model.data', src=partner)
proxy = self.pool['ir.model.fields']
domain = [('ttype', '=', 'reference')]
record_ids = proxy.search(cr, openerp.SUPERUSER_ID, domain,
context=context)
for record in proxy.browse(cr, openerp.SUPERUSER_ID, record_ids,
context=context):
try:
proxy_model = self.pool[record.model]
except KeyError:
# ignore fields whose model no longer exists in the registry
continue
if record.model == 'ir.property':
continue
field_type = proxy_model._columns.get(record.name).__class__._type
if field_type == 'function':
continue
for partner in src_partners:
domain = [
(record.name, '=', 'res.partner,%d' % partner.id)
]
model_ids = proxy_model.search(cr, openerp.SUPERUSER_ID,
domain, context=context)
values = {
record.name: 'res.partner,%d' % dst_partner.id,
}
proxy_model.write(cr, openerp.SUPERUSER_ID, model_ids, values,
context=context)
def _update_values(self, cr, uid, src_partners, dst_partner, context=None):
_logger.debug('_update_values for dst_partner: %s for src_partners: '
'%r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
columns = dst_partner._columns
def write_serializer(column, item):
if isinstance(item, browse_record):
return item.id
else:
return item
values = dict()
for column, field in columns.iteritems():
if (field._type not in ('many2many', 'one2many') and
not isinstance(field, fields.function)):
for item in itertools.chain(src_partners, [dst_partner]):
if item[column]:
values[column] = write_serializer(column,
item[column])
values.pop('id', None)
parent_id = values.pop('parent_id', None)
dst_partner.write(values)
if parent_id and parent_id != dst_partner.id:
try:
dst_partner.write({'parent_id': parent_id})
except orm.except_orm:
_logger.info('Skip recursive partner hierarchies for '
'parent_id %s of partner: %s',
parent_id, dst_partner.id)
@mute_logger('openerp.osv.expression', 'openerp.osv.orm')
def _merge(self, cr, uid, partner_ids, dst_partner=None, context=None):
proxy = self.pool.get('res.partner')
partner_ids = proxy.exists(cr, uid, list(partner_ids),
context=context)
if len(partner_ids) < 2:
return
if len(partner_ids) > 3:
raise orm.except_orm(
_('Error'),
_("For safety reasons, you cannot merge more than 3 contacts "
"together. You can re-open the wizard several times if "
"needed."))
if (openerp.SUPERUSER_ID != uid and
len(set(partner.email for partner
in proxy.browse(cr, uid, partner_ids,
context=context))) > 1):
raise orm.except_orm(
_('Error'),
_("All contacts must have the same email. Only the "
"Administrator can merge contacts with different emails."))
if dst_partner and dst_partner.id in partner_ids:
src_partners = proxy.browse(cr, uid,
[id for id in partner_ids
if id != dst_partner.id],
context=context)
else:
ordered_partners = self._get_ordered_partner(cr, uid, partner_ids,
context)
dst_partner = ordered_partners[-1]
src_partners = ordered_partners[:-1]
_logger.info("dst_partner: %s", dst_partner.id)
if (openerp.SUPERUSER_ID != uid and
self._model_is_installed(
cr, uid, 'account.move.line', context=context) and
self.pool['account.move.line'].search(
cr, openerp.SUPERUSER_ID,
[('partner_id', 'in', [partner.id for partner
in src_partners])],
context=context)):
raise orm.except_orm(
_('Error'),
_("Only the destination contact may be linked to existing "
"Journal Items. Please ask the Administrator if you need to"
" merge several contacts linked to existing Journal "
"Items."))
self._update_foreign_keys(
cr, uid, src_partners, dst_partner, context=context)
self._update_reference_fields(
cr, uid, src_partners, dst_partner, context=context)
self._update_values(
cr, uid, src_partners, dst_partner, context=context)
_logger.info('(uid = %s) merged the partners %r with %s',
uid,
list(map(operator.attrgetter('id'), src_partners)),
dst_partner.id)
dst_partner.message_post(
body='%s %s' % (
_("Merged with the following partners:"),
", ".join(
'%s<%s>(ID %s)' % (p.name, p.email or 'n/a', p.id)
for p in src_partners
)
)
)
for partner in src_partners:
partner.unlink()
def clean_emails(self, cr, uid, context=None):
"""
Clean the partners' email addresses. If an email field contains two or
more addresses, the first cleaned address is kept on the partner and a
copy of the partner is created for each remaining address, with that
address written into its email field.
"""
if context is None:
context = {}
proxy_model = self.pool['ir.model.fields']
field_ids = proxy_model.search(cr, uid,
[('model', '=', 'res.partner'),
('ttype', 'like', '%2many')],
context=context)
fields = proxy_model.read(cr, uid, field_ids, context=context)
reset_fields = dict((field['name'], []) for field in fields)
proxy_partner = self.pool['res.partner']
context['active_test'] = False
ids = proxy_partner.search(cr, uid, [], context=context)
fields = ['name', 'var', 'partner_id', 'is_company', 'email']
partners = proxy_partner.read(cr, uid, ids, fields, context=context)
partners.sort(key=operator.itemgetter('id'))
partners_len = len(partners)
_logger.info('partner_len: %r', partners_len)
for idx, partner in enumerate(partners):
if not partner['email']:
continue
percent = (idx / float(partners_len)) * 100.0
_logger.info('idx: %r', idx)
_logger.info('percent: %r', percent)
try:
emails = sanitize_email(partner['email'])
head, tail = emails[:1], emails[1:]
email = head[0] if head else False
proxy_partner.write(cr, uid, [partner['id']],
{'email': email}, context=context)
for email in tail:
values = dict(reset_fields, email=email)
proxy_partner.copy(cr, uid, partner['id'], values,
context=context)
except Exception:
_logger.exception("There is a problem with this partner: %r",
partner)
raise
return True
def close_cb(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
def _generate_query(self, fields, maximum_group=100):
group_fields = ', '.join(fields)
filters = []
for field in fields:
if field in ['email', 'name']:
filters.append((field, 'IS NOT', 'NULL'))
criteria = ' AND '.join('%s %s %s' % (field, operator, value)
for field, operator, value in filters)
text = [
"SELECT min(id), array_agg(id)",
"FROM res_partner",
]
if criteria:
text.append('WHERE %s' % criteria)
text.extend([
"GROUP BY %s" % group_fields,
"HAVING COUNT(*) >= 2",
"ORDER BY min(id)",
])
if maximum_group:
text.extend([
"LIMIT %s" % maximum_group,
])
return ' '.join(text)
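# For instance, _generate_query(['email', 'name'], 100) produces roughly:
#   SELECT min(id), array_agg(id) FROM res_partner
#   WHERE email IS NOT NULL AND name IS NOT NULL
#   GROUP BY email, name HAVING COUNT(*) >= 2
#   ORDER BY min(id) LIMIT 100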
def _compute_selected_groupby(self, this):
group_by_str = 'group_by_'
group_by_len = len(group_by_str)
fields = [
key[group_by_len:]
for key in self._columns.keys()
if key.startswith(group_by_str)
]
groups = [
field
for field in fields
if getattr(this, '%s%s' % (group_by_str, field), False)
]
if not groups:
raise orm.except_orm(_('Error'),
_("You have to specify a filter for your "
"selection"))
return groups
def next_cb(self, cr, uid, ids, context=None):
"""
Skip the current group without merging it and move on to the next screen.
"""
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def _get_ordered_partner(self, cr, uid, partner_ids, context=None):
partners = self.pool.get('res.partner'
).browse(cr, uid,
list(partner_ids),
context=context)
ordered_partners = sorted(
sorted(
partners,
key=operator.attrgetter('create_date'),
reverse=True
),
key=operator.attrgetter('active'),
reverse=True
)
return ordered_partners
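# Active partners sort before inactive ones, newest first within each group;
# callers use ordered_partners[-1] as the default destination contact.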
def _next_screen(self, cr, uid, this, context=None):
this.refresh()
values = {}
if this.line_ids:
# in this case, we try to find the next record.
current_line = this.line_ids[0]
current_partner_ids = literal_eval(current_line.aggr_ids)
values.update({
'current_line_id': current_line.id,
'partner_ids': [(6, 0, current_partner_ids)],
'dst_partner_id': self._get_ordered_partner(
cr, uid,
current_partner_ids,
context
)[-1].id,
'state': 'selection',
})
else:
values.update({
'current_line_id': False,
'partner_ids': [],
'state': 'finished',
})
this.write(values)
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def _model_is_installed(self, cr, uid, model, context=None):
proxy = self.pool.get('ir.model')
domain = [('model', '=', model)]
return proxy.search_count(cr, uid, domain, context=context) > 0
def _partner_use_in(self, cr, uid, aggr_ids, models, context=None):
"""
Return True if at least one partner of this group is referenced in one of
the selected models.
"""
for model, field in models.iteritems():
proxy = self.pool.get(model)
domain = [(field, 'in', aggr_ids)]
if proxy.search_count(cr, uid, domain, context=context):
return True
return False
def compute_models(self, cr, uid, ids, context=None):
"""
Compute the models (and the partner field to check on each one) used to
exclude partners, according to the selected 'exclude_*' options.
"""
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
models = {}
if this.exclude_contact:
models['res.users'] = 'partner_id'
if (self._model_is_installed(
cr, uid, 'account.move.line', context=context) and
this.exclude_journal_item):
models['account.move.line'] = 'partner_id'
return models
def _process_query(self, cr, uid, ids, query, context=None):
"""
Execute the SELECT query and store each resulting group as a line of this wizard.
"""
proxy = self.pool.get('base.partner.merge.line')
this = self.browse(cr, uid, ids[0], context=context)
models = self.compute_models(cr, uid, ids, context=context)
cr.execute(query)
counter = 0
for min_id, aggr_ids in cr.fetchall():
if models and self._partner_use_in(cr, uid, aggr_ids, models,
context=context):
continue
values = {
'wizard_id': this.id,
'min_id': min_id,
'aggr_ids': aggr_ids,
}
proxy.create(cr, uid, values, context=context)
counter += 1
values = {
'state': 'selection',
'number_group': counter,
}
this.write(values)
_logger.info("counter: %s", counter)
def start_process_cb(self, cr, uid, ids, context=None):
"""
Start the process.
* Compute the selected groups (groups of duplicate contacts).
* If the user has selected the 'exclude_*' options, skip the matching
partners.
"""
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
groups = self._compute_selected_groupby(this)
query = self._generate_query(groups, this.maximum_group)
self._process_query(cr, uid, ids, query, context=context)
return self._next_screen(cr, uid, this, context)
def automatic_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
this.start_process_cb()
this.refresh()
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def parent_migration_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
query = """
SELECT
min(p1.id),
array_agg(DISTINCT p1.id)
FROM
res_partner as p1
INNER join
res_partner as p2
ON
p1.email = p2.email AND
p1.name = p2.name AND
(p1.parent_id = p2.id OR p1.id = p2.parent_id)
WHERE
p2.id IS NOT NULL
GROUP BY
p1.email,
p1.name,
CASE WHEN p1.parent_id = p2.id THEN p2.id
ELSE p1.id
END
HAVING COUNT(*) >= 2
ORDER BY
min(p1.id)
"""
self._process_query(cr, uid, ids, query, context=context)
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL,
parent_id = NULL
WHERE
parent_id = id
""")
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def update_all_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
# WITH RECURSIVE cycle(id, parent_id) AS (
# SELECT id, parent_id FROM res_partner
# UNION
# SELECT cycle.id, res_partner.parent_id
# FROM res_partner, cycle
# WHERE res_partner.id = cycle.parent_id AND
# cycle.id != cycle.parent_id
# )
# UPDATE res_partner
# SET parent_id = NULL
# WHERE id in (SELECT id FROM cycle WHERE id = parent_id);
this = self.browse(cr, uid, ids[0], context=context)
self.parent_migration_process_cb(cr, uid, ids, context=None)
list_merge = [
{'group_by_vat': True,
'group_by_email': True,
'group_by_name': True},
# {'group_by_name': True,
# 'group_by_is_company': True,
# 'group_by_parent_id': True},
# {'group_by_email': True,
# 'group_by_is_company': True,
# 'group_by_parent_id': True},
# {'group_by_name': True,
# 'group_by_vat': True,
# 'group_by_is_company': True,
# 'exclude_journal_item': True},
# {'group_by_email': True,
# 'group_by_vat': True,
# 'group_by_is_company': True,
# 'exclude_journal_item': True},
# {'group_by_email': True,
# 'group_by_is_company': True,
# 'exclude_contact': True,
# 'exclude_journal_item': True},
# {'group_by_name': True,
# 'group_by_is_company': True,
# 'exclude_contact': True,
# 'exclude_journal_item': True}
]
for merge_value in list_merge:
id = self.create(cr, uid, merge_value, context=context)
self.automatic_process_cb(cr, uid, [id], context=context)
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL
WHERE
parent_id IS NOT NULL AND
is_company IS NOT NULL
""")
# cr.execute("""
# UPDATE
# res_partner as p1
# SET
# is_company = NULL,
# parent_id = (
# SELECT p2.id
# FROM res_partner as p2
# WHERE p2.email = p1.email AND
# p2.parent_id != p2.id
# LIMIT 1
# )
# WHERE
# p1.parent_id = p1.id
# """)
return self._next_screen(cr, uid, this, context)
def merge_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
partner_ids = set(map(int, this.partner_ids))
if not partner_ids:
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
self._merge(cr, uid, partner_ids, this.dst_partner_id,
context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def auto_set_parent_id(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
# select partners ordered by their number of open/paid invoices (most first)
partner_treated = ['@gmail.com']
cr.execute(""" SELECT p.id, p.email
FROM res_partner as p
LEFT JOIN account_invoice as a
ON p.id = a.partner_id AND a.state in ('open','paid')
WHERE p.grade_id is NOT NULL
GROUP BY p.id
ORDER BY COUNT(a.id) DESC
""")
re_email = re.compile(r".*@")
for id, email in cr.fetchall():
# check email domain
email = re_email.sub("@", email or "")
if not email or email in partner_treated:
continue
partner_treated.append(email)
# don't update the partners if more than one partner with this email
# domain already has invoices
cr.execute("""
SELECT *
FROM res_partner as p
WHERE p.id != %s AND p.email LIKE '%%%s' AND
EXISTS (SELECT * FROM account_invoice as a
WHERE p.id = a.partner_id
AND a.state in ('open','paid'))
""" % (id, email))
if len(cr.fetchall()) > 1:
_logger.info("%s MORE OF ONE COMPANY", email)
continue
# to display changed values
cr.execute(""" SELECT id,email
FROM res_partner
WHERE parent_id != %s
AND id != %s AND email LIKE '%%%s'
""" % (id, id, email))
_logger.info("%r", cr.fetchall())
# update the parent company of the matching partners
cr.execute(""" UPDATE res_partner
SET parent_id = %s
WHERE id != %s AND email LIKE '%%%s'
""" % (id, id, email))
return False
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import aggregates as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
class AggregatesClient(base_compute_client.BaseComputeClient):
def list_aggregates(self):
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
body = json.loads(body)
self.validate_response(schema.list_aggregates, resp, body)
return rest_client.ResponseBody(resp, body)
def show_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % aggregate_id)
body = json.loads(body)
self.validate_response(schema.get_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def create_aggregate(self, **kwargs):
"""Create a new aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createAggregate
"""
post_body = json.dumps({'aggregate': kwargs})
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
self.validate_response(schema.create_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def update_aggregate(self, aggregate_id, **kwargs):
"""Update an aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateAggregate
"""
put_body = json.dumps({'aggregate': kwargs})
resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
body = json.loads(body)
self.validate_response(schema.update_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_aggregate(self, aggregate_id):
"""Delete the given aggregate."""
resp, body = self.delete("os-aggregates/%s" % aggregate_id)
self.validate_response(schema.delete_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_aggregate(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Return the primary type of resource this client works with."""
return 'aggregate'
def add_host(self, aggregate_id, **kwargs):
"""Add a host to the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addHost
"""
post_body = json.dumps({'add_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def remove_host(self, aggregate_id, **kwargs):
"""Remove a host from the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#removeAggregateHost
"""
post_body = json.dumps({'remove_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def set_metadata(self, aggregate_id, **kwargs):
"""Replace the aggregate's existing metadata with new metadata.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addAggregateMetadata
"""
post_body = json.dumps({'set_metadata': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_set_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
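# Minimal usage sketch (names are illustrative; assumes an authenticated
# aggregates client obtained from a tempest clients manager):
#   body = aggregates_client.create_aggregate(name='agg1',
#                                             availability_zone='nova')
#   aggregates_client.add_host(body['aggregate']['id'], host='compute-1')
#   aggregates_client.delete_aggregate(body['aggregate']['id'])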
"""Create coordinate transforms."""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from ...transforms import combine_transforms, invert_transform, Transform
from ...utils import logger
from ..constants import FIFF
from .constants import CTF
def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa):
"""Make a transform from cardinal landmarks."""
# XXX de-duplicate this with code from Montage somewhere?
diff_1 = r_nasion - r_lpa
ex = r_rpa - r_lpa
alpha = np.dot(diff_1, ex) / np.dot(ex, ex)
ex /= np.sqrt(np.sum(ex * ex))
trans = np.eye(4)
move = (1. - alpha) * r_lpa + alpha * r_rpa
trans[:3, 3] = move
trans[:3, 0] = ex
ey = r_nasion - move
ey /= np.sqrt(np.sum(ey * ey))
trans[:3, 1] = ey
trans[:3, 2] = np.cross(ex, ey) # ez
return Transform(fro, to, trans)
def _quaternion_align(from_frame, to_frame, from_pts, to_pts):
"""Perform an alignment using the unit quaternions (modifies points)."""
assert from_pts.shape[1] == to_pts.shape[1] == 3
# Calculate the centroids and subtract
from_c, to_c = from_pts.mean(axis=0), to_pts.mean(axis=0)
from_ = from_pts - from_c
to_ = to_pts - to_c
# Compute the dot products
S = np.dot(from_.T, to_)
# Compute the magical N matrix
N = np.array([[S[0, 0] + S[1, 1] + S[2, 2], 0., 0., 0.],
[S[1, 2] - S[2, 1], S[0, 0] - S[1, 1] - S[2, 2], 0., 0.],
[S[2, 0] - S[0, 2], S[0, 1] + S[1, 0],
-S[0, 0] + S[1, 1] - S[2, 2], 0.],
[S[0, 1] - S[1, 0], S[2, 0] + S[0, 2],
S[1, 2] + S[2, 1], -S[0, 0] - S[1, 1] + S[2, 2]]])
# Compute the eigenvalues and eigenvectors
# Use the eigenvector corresponding to the largest eigenvalue as the
# unit quaternion defining the rotation
eig_vals, eig_vecs = linalg.eigh(N, overwrite_a=True)
which = np.argmax(eig_vals)
if eig_vals[which] < 0:
raise RuntimeError('No positive eigenvalues. Cannot do the alignment.')
q = eig_vecs[:, which]
# Write out the rotation
trans = np.eye(4)
trans[0, 0] = q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3]
trans[0, 1] = 2.0 * (q[1] * q[2] - q[0] * q[3])
trans[0, 2] = 2.0 * (q[1] * q[3] + q[0] * q[2])
trans[1, 0] = 2.0 * (q[2] * q[1] + q[0] * q[3])
trans[1, 1] = q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3]
trans[1, 2] = 2.0 * (q[2] * q[3] - q[0] * q[1])
trans[2, 0] = 2.0 * (q[3] * q[1] - q[0] * q[2])
trans[2, 1] = 2.0 * (q[3] * q[2] + q[0] * q[1])
trans[2, 2] = q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]
# Now we need to generate a transformed translation vector
trans[:3, 3] = to_c - np.dot(trans[:3, :3], from_c)
del to_c, from_c
# Test the transformation and print the results
logger.info(' Quaternion matching (desired vs. transformed):')
for fro, to in zip(from_pts, to_pts):
rr = np.dot(trans[:3, :3], fro) + trans[:3, 3]
diff = np.sqrt(np.sum((to - rr) ** 2))
logger.info(' %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm '
'(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm'
% (tuple(1000 * to) + tuple(1000 * rr) +
tuple(1000 * fro) + (1000 * diff,)))
if diff > 1e-4:
raise RuntimeError('Something is wrong: quaternion matching did '
'not work (see above)')
return Transform(from_frame, to_frame, trans)
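# Rough usage sketch (synthetic, made-up fiducial-like points): a pure
# translation between the two frames is recovered exactly, so the residual
# check inside _quaternion_align passes.
def _quaternion_align_example():
    from_pts = np.array([[0.08, 0.0, 0.0],
                         [-0.08, 0.0, 0.0],
                         [0.0, 0.1, 0.0]])
    to_pts = from_pts + np.array([0.0, 0.0, 0.01])  # shift 1 cm along z
    return _quaternion_align(FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
                             FIFF.FIFFV_MNE_COORD_CTF_HEAD, from_pts, to_pts)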
def _make_ctf_coord_trans_set(res4, coils):
"""Figure out the necessary coordinate transforms."""
# CTF head -> Neuromag head
lpa = rpa = nas = T1 = T2 = T3 = T5 = None
if coils is not None:
for p in coils:
if p['valid'] and (p['coord_frame'] ==
FIFF.FIFFV_MNE_COORD_CTF_HEAD):
if lpa is None and p['kind'] == CTF.CTFV_COIL_LPA:
lpa = p
elif rpa is None and p['kind'] == CTF.CTFV_COIL_RPA:
rpa = p
elif nas is None and p['kind'] == CTF.CTFV_COIL_NAS:
nas = p
if lpa is None or rpa is None or nas is None:
raise RuntimeError('Some of the mandatory HPI device-coordinate '
'info was not there.')
t = _make_transform_card(FIFF.FIFFV_COORD_HEAD,
FIFF.FIFFV_MNE_COORD_CTF_HEAD,
lpa['r'], nas['r'], rpa['r'])
T3 = invert_transform(t)
# CTF device -> Neuromag device
#
# Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm
# in z direction to get a coordinate system comparable to the Neuromag one
#
R = np.eye(4)
R[:3, 3] = [0., 0., 0.19]
val = 0.5 * np.sqrt(2.)
R[0, 0] = val
R[0, 1] = -val
R[1, 0] = val
R[1, 1] = val
T4 = Transform(FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_COORD_DEVICE, R)
# CTF device -> CTF head
# We need to make the implicit transform explicit!
h_pts = dict()
d_pts = dict()
kinds = (CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS,
CTF.CTFV_COIL_SPARE)
if coils is not None:
for p in coils:
if p['valid']:
if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
for kind in kinds:
if kind not in h_pts and p['kind'] == kind:
h_pts[kind] = p['r']
elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
for kind in kinds:
if kind not in d_pts and p['kind'] == kind:
d_pts[kind] = p['r']
if any(kind not in h_pts for kind in kinds[:-1]):
raise RuntimeError('Some of the mandatory HPI head-coordinate '
'info was not there.')
if any(kind not in d_pts for kind in kinds[:-1]):
raise RuntimeError('Some of the mandatory HPI device-coordinate '
'info was not there.')
use_kinds = [kind for kind in kinds
if (kind in h_pts and kind in d_pts)]
r_head = np.array([h_pts[kind] for kind in use_kinds])
r_dev = np.array([d_pts[kind] for kind in use_kinds])
T2 = _quaternion_align(FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_MNE_COORD_CTF_HEAD, r_dev, r_head)
# The final missing transform
if T3 is not None and T2 is not None:
T5 = combine_transforms(T2, T3, FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_COORD_HEAD)
T1 = combine_transforms(invert_transform(T4), T5,
FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD)
s = dict(t_dev_head=T1, t_ctf_dev_ctf_head=T2, t_ctf_head_head=T3,
t_ctf_dev_dev=T4, t_ctf_dev_head=T5)
logger.info(' Coordinate transformations established.')
return s
#!/usr/bin/python
#
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
#
# Extract Json Value
#
# A very simple tool for extracting JSON values from the command line.
import os
import sys
import json
def print_usage():
print """
Usage: extract_json_value.py [json file path] [key of value to extract]
For nested keys, use . separator
"""
def help_and_exit(msg=None):
print msg
print_usage()
sys.exit(1)
def parse_and_validate_args():
if len(sys.argv) < 3:
help_and_exit(msg="Error: Invalid Args")
json_path = sys.argv[1]
json_key = sys.argv[2]
if not os.path.isfile(json_path):
help_and_exit("Error: Invalid json file path")
return json_path, json_key
def extract_key(json_path, json_key):
json_data = None
with open(json_path, 'r') as json_file:
json_data = json.load(json_file)
nested_keys = json_key.split('.')
json_context = json_data
for key in nested_keys:
json_context = json_context.get(key, None)
if json_context is None:
help_and_exit("Error: Invalid json key")
return str(json_context)
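# Example (hypothetical file): given config.json containing
#   {"runtime": {"version": "1.0.0"}}
# running `extract_json_value.py config.json runtime.version` prints 1.0.0.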
def execute():
json_path, json_key = parse_and_validate_args()
value = extract_key(json_path, json_key)
return value
if __name__ == "__main__":
print execute()
#################################################################
# Program: Toolib
"""
Known issues:
! GetTable call after Destroy leads to a crash
Careful with delegating to the table
Example (how to avoid the crash):
def __getattr__(self, name):
if name != '__del__':
return getattr(self.GetTable(), name)
else:
raise AttributeError, name
"""
__author__ = "Oleg Noga"
__date__ = "$Date: 2007/09/17 13:19:12 $"
__version__ = "$Revision: 1.3 $"
# $Source: D:/HOME/cvs/toolib/wx/grid/wxGrid.py,v $
#
#################################################################
import wx.grid
FIX_SETTABLE = 1
if FIX_SETTABLE:
from table.MTableMessaging import MTableMessaging
class NoneTable(wx.grid.PyGridTableBase):
def GetNumberRows(self):
return 0
def GetNumberCols(self):
return 0
def __nonzero__(self):
return False
USE_SETWXTABLE = 0
class DelegatingTable(wx.grid.PyGridTableBase, MTableMessaging):
"""
Since wxGrid's SetTable is buggy, a single delegating table is installed
once and pythonic tables are swapped behind it
"""
def __init__(self, table=None):
wx.grid.PyGridTableBase.__init__(self)
MTableMessaging.__init__(self)
self.__table = table or NoneTable()
if USE_SETWXTABLE:
self.__table._setWxTable(self)
def _setTable(self, table):
self.fireTableStructureChanging()
if USE_SETWXTABLE:
if self.__table is not None:
self.__table._setWxTable(None)
self.__table = table
if USE_SETWXTABLE:
self.__table._setWxTable(self)
self.fireTableStructureChanged()
def _getTable(self):
return self.__table
#######################################################
# BEGIN_AUTO_GENERATED_CODE
def AppendCols(self, *args):
return self.__table.AppendCols(*args)
def AppendRows(self, *args):
return self.__table.AppendRows(*args)
def AttrProvider(self, *args):
return self.__table.AttrProvider(*args)
def CanGetValueAs(self, *args):
return self.__table.CanGetValueAs(*args)
def CanHaveAttributes(self, *args):
return self.__table.CanHaveAttributes(*args)
def CanSetValueAs(self, *args):
return self.__table.CanSetValueAs(*args)
def ClassName(self, *args):
return self.__table.ClassName(*args)
def Clear(self, *args):
return self.__table.Clear(*args)
def DeleteCols(self, *args):
return self.__table.DeleteCols(*args)
def DeleteRows(self, *args):
return self.__table.DeleteRows(*args)
def Destroy(self, *args):
return self.__table.Destroy(*args)
def GetAttr(self, *args):
return self.__table.GetAttr(*args)
def GetAttrProvider(self, *args):
return self.__table.GetAttrProvider(*args)
def GetClassName(self, *args):
return self.__table.GetClassName(*args)
def GetColLabelValue(self, *args):
return self.__table.GetColLabelValue(*args)
def GetNumberCols(self, *args):
return self.__table.GetNumberCols(*args)
def GetNumberRows(self, *args):
return self.__table.GetNumberRows(*args)
def GetRowLabelValue(self, *args):
return self.__table.GetRowLabelValue(*args)
def GetTypeName(self, *args):
return self.__table.GetTypeName(*args)
def GetValue(self, *args):
return self.__table.GetValue(*args)
def GetValueAsBool(self, *args):
return self.__table.GetValueAsBool(*args)
def GetValueAsDouble(self, *args):
return self.__table.GetValueAsDouble(*args)
def GetValueAsLong(self, *args):
return self.__table.GetValueAsLong(*args)
def GetView(self, *args):
return self.__table.GetView(*args)
def InsertCols(self, *args):
return self.__table.InsertCols(*args)
def InsertRows(self, *args):
return self.__table.InsertRows(*args)
def IsEmptyCell(self, *args):
return self.__table.IsEmptyCell(*args)
def IsSameAs(self, *args):
return self.__table.IsSameAs(*args)
def NumberCols(self, *args):
return self.__table.NumberCols(*args)
def NumberRows(self, *args):
return self.__table.NumberRows(*args)
def SetAttr(self, *args):
return self.__table.SetAttr(*args)
def SetAttrProvider(self, *args):
return self.__table.SetAttrProvider(*args)
def SetColAttr(self, *args):
return self.__table.SetColAttr(*args)
def SetColLabelValue(self, *args):
return self.__table.SetColLabelValue(*args)
def SetRowAttr(self, *args):
return self.__table.SetRowAttr(*args)
def SetRowLabelValue(self, *args):
return self.__table.SetRowLabelValue(*args)
def SetValue(self, *args):
return self.__table.SetValue(*args)
def SetValueAsBool(self, *args):
return self.__table.SetValueAsBool(*args)
def SetValueAsDouble(self, *args):
return self.__table.SetValueAsDouble(*args)
def SetValueAsLong(self, *args):
return self.__table.SetValueAsLong(*args)
def SetView(self, *args):
return self.__table.SetView(*args)
def View(self, *args):
return self.__table.View(*args)
# END_AUTO_GENERATED_CODE
#######################################################
__super__ = wx.grid.Grid
class wxGrid(__super__):
def __init__(self, *args, **kwargs):
__super__.__init__(self, *args, **kwargs)
table = DelegatingTable()
__super__.SetTable(self, table, True)
table.addGridTableListener(self)
def GetTable(self):
return __super__.GetTable(self)._getTable()
def SetTable(self, table, ignored_takeOwnership=False):
__super__.GetTable(self)._setTable(table)
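# Usage sketch (hypothetical table class): the grid installs one
# DelegatingTable once and only swaps the pythonic table behind it, so the
# buggy native SetTable is never called again:
#   grid = wxGrid(parent, -1)
#   grid.SetTable(MyPyTable())   # delegated via DelegatingTable._setTable
#   table = grid.GetTable()      # returns the MyPyTable instance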
else:
wxGrid = wx.grid.Grid
if __name__ == '__main__':
def inject(f):
d = filter(lambda name: name[0].isupper(), dir(wx.grid.PyGridTableBase))
d.sort()
for i in d:
#if VERBOSE: print "%s", args
print >>f, """\
def %s(self, *args, **kwargs):
return self.__table.%s(*args, **kwargs)
""".replace("%s", i)
f = open(__file__, 'rt')
code = f.readlines()
f.close()
state = "begin"
f = open(__file__, 'wt')
for i in code:
if state == 'begin':
f.write(i)
if i.find('BEGIN_AUTO_GENERATED_CODE') != -1:
inject(f)
state = 'injected'
elif state == 'injected':
if i.find('END_AUTO_GENERATED_CODE') != -1:
f.write(i)
state = 'end'
elif state == 'end':
f.write(i)
f.close()
#
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base class for project."""
import logging
from . import util
@util.export
class Base(object):
"""Base class for all objects."""
_LOG_PREFIX = 'ovirt.engine.'
@property
def logger(self):
"""Logger."""
return self._logger
def __init__(self):
"""Contructor."""
prefix = ''
if not self.__module__.startswith(self._LOG_PREFIX):
prefix = self._LOG_PREFIX
self._logger = logging.getLogger(prefix + self.__module__)
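# For example (illustrative module name): a subclass defined in module
# 'myplugin.core' gets a logger named 'ovirt.engine.myplugin.core', while a
# class whose module already starts with 'ovirt.engine.' keeps its module
# name as the logger name.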
# vim: expandtab tabstop=4 shiftwidth=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
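# For example, a server whose "OS-EXT-STS:task_state" is "deleting" is treated
# as being deleted; the row actions below (terminate, reboot, snapshot, ...)
# use this check to hide themselves for such instances.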
class TerminateInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "terminate"
classes = ("btn-danger",)
icon = "remove"
policy_rules = (("compute", "compute:delete"),)
help_text = _("Terminated instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate Instance",
u"Terminate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled termination of Instance",
u"Scheduled termination of Instances",
count
)
def allowed(self, request, instance=None):
"""Allow terminate action if instance not currently being deleted."""
return not is_deleting(instance)
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "reboot"
classes = ('btn-danger', 'btn-reboot')
policy_rules = (("compute", "compute:reboot"),)
help_text = _("Restarted instances will lose any data"
" not saved in persistent storage.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Hard Reboot Instance",
u"Hard Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Hard Rebooted Instance",
u"Hard Rebooted Instances",
count
)
def allowed(self, request, instance=None):
if instance is not None:
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
else:
return True
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Soft Reboot Instance",
u"Soft Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Soft Rebooted Instance",
u"Soft Rebooted Instances",
count
)
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
name = "pause"
icon = "pause"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Pause Instance",
u"Pause Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Paused Instance",
u"Paused Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
policy = (("compute", "compute_extension:admin_actions:unpause"),)
else:
self.current_present_action = PAUSE
policy = (("compute", "compute_extension:admin_actions:pause"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.paused)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
classes = ("btn-suspend",)
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Suspend Instance",
u"Suspend Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Suspended Instance",
u"Suspended Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
policy = (("compute", "compute_extension:admin_actions:resume"),)
else:
self.current_present_action = SUSPEND
policy = (("compute", "compute_extension:admin_actions:suspend"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.suspended)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
name = "shelve"
icon = "shelve"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Shelve Instance",
u"Shelve Instances",
count
),
ungettext_lazy(
u"Unshelve Instance",
u"Unshelve Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Shelved Instance",
u"Shelved Instances",
count
),
ungettext_lazy(
u"Unshelved Instance",
u"Unshelved Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('Shelve', request):
return False
if not instance:
return False
self.shelved = instance.status == "SHELVED_OFFLOADED"
if self.shelved:
self.current_present_action = UNSHELVE
policy = (("compute", "compute_extension:unshelve"),)
else:
self.current_present_action = SHELVE
policy = (("compute", "compute_extension:shelve"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.shelved)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.shelved:
api.nova.server_unshelve(request, obj_id)
self.current_past_action = UNSHELVE
else:
api.nova.server_shelve(request, obj_id)
self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(LaunchLink, self).__init__(attrs, **kwargs)
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except Exception:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
class LaunchLinkNG(LaunchLink):
name = "launch-ng"
url = "horizon:project:instances:index"
ajax = False
classes = ("btn-launch", )
def get_default_attrs(self):
url = urlresolvers.reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{ successUrl: '%s' })" % url
self.attrs.update({
'ng-controller': 'LaunchInstanceModalController as modal',
'ng-click': ngclick
})
return super(LaunchLinkNG, self).get_default_attrs()
def get_link_url(self, datum=None):
return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("compute", "compute:update"),)
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
update_instance.UpdateInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images:snapshots:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("compute", "compute:snapshot"),)
def allowed(self, request, instance=None):
return instance.status in SNAPSHOT_READY_STATES \
and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
policy_rules = (("compute", "compute_extension:consoles"),)
def allowed(self, request, instance=None):
# We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
# not set at all, or if it's set to any value other than None or False.
return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(ConsoleLink, self).get_link_url(datum)
tab_query_string = tabs.ConsoleTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
policy_rules = (("compute", "compute_extension:console_output"),)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(LogLink, self).get_link_url(datum)
tab_query_string = tabs.LogTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "resize"
verbose_name = _("Resize Instance")
url = "horizon:project:instances:resize"
classes = ("ajax-modal", "btn-resize")
policy_rules = (("compute", "compute:resize"),)
def get_link_url(self, project):
return self._get_link_url(project, 'flavor_choice')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
resize_instance.ResizeInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
policy_rules = (("compute", "compute:confirm_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
policy_rules = (("compute", "compute:revert_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rebuild"
verbose_name = _("Rebuild Instance")
classes = ("btn-rebuild", "ajax-modal")
url = "horizon:project:instances:rebuild"
policy_rules = (("compute", "compute:rebuild"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
name = "decryptpassword"
verbose_name = _("Retrieve Password")
classes = ("btn-decrypt", "ajax-modal")
url = "horizon:project:instances:decryptpassword"
def allowed(self, request, instance):
enable = getattr(settings,
'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
False)
return (enable
and (instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and get_keyname(instance) is not None)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
keypair_name = get_keyname(datum)
return urlresolvers.reverse(self.url, args=[instance_id,
keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urlresolvers.reverse(self.url)
next_url = self.table.get_full_url()
params = {
"instance_id": self.table.get_object_id(datum),
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
params = urlencode(params)
return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
name = "associate-simple"
verbose_name = _("Associate Floating IP")
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fip = api.network.tenant_floating_ip_allocate(request)
api.network.floating_ip_associate(request, fip.id, target_id)
messages.success(request,
_("Successfully associated floating IP: %s")
% fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to associate floating IP."))
return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
policy_rules = (("compute", "network:disassociate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return not is_deleting(instance)
return False
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
targets = api.network.floating_ip_target_list_by_instance(
request, instance_id)
target_ids = [t.split('_')[0] for t in targets]
fips = [fip for fip in api.network.tenant_floating_ip_list(request)
if fip.port_id in target_ids]
# Removing multiple floating IPs at once doesn't work, so this pops
# off the first one.
if fips:
fip = fips.pop()
api.network.floating_ip_disassociate(request, fip.id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
else:
messages.info(request, _("No floating IPs to disassociate."))
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
def instance_fault_to_friendly_message(instance):
fault = getattr(instance, 'fault', {})
message = fault.get('message', _("Unknown"))
default_message = _("Please try again later [Error: %s].") % message
fault_map = {
'NoValidHost': _("There is not enough capacity for this "
"flavor in the selected availability zone. "
"Try again later or select a different availability "
"zone.")
}
return fault_map.get(message, default_message)
def get_instance_error(instance):
if instance.status.lower() != 'error':
return None
message = instance_fault_to_friendly_message(instance)
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or instance.id)
message = string_concat(preamble, ': ', message)
return message
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
try:
instance.full_flavor = api.nova.flavor_get(request,
instance.flavor["id"])
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor information '
'for instance "%s".') % instance_id,
ignore=True)
error = get_instance_error(instance)
if error:
messages.error(request, error)
return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "start"
classes = ('btn-confirm',)
policy_rules = (("compute", "compute:start"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Start Instance",
u"Start Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Started Instance",
u"Started Instances",
count
)
def allowed(self, request, instance):
return ((instance is None) or
(instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
def action(self, request, obj_id):
api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "stop"
classes = ('btn-danger',)
policy_rules = (("compute", "compute:stop"),)
help_text = _("The instance(s) will be shut off.")
@staticmethod
def action_present(count):
return npgettext_lazy(
"Action to perform (the instance is currently running)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
@staticmethod
def action_past(count):
return npgettext_lazy(
"Past action (the instance is currently already Shut Off)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
def allowed(self, request, instance):
return ((instance is None)
or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
and not is_deleting(instance)))
def action(self, request, obj_id):
api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "lock"
policy_rules = (("compute", "compute_extension:admin_actions:lock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Lock Instance",
u"Lock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Locked Instance",
u"Locked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow unlocked instances to be locked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "unlock"
policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Unlock Instance",
u"Unlock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Unlocked Instance",
u"Unlocked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow locked instances to be unlocked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_unlock(request, obj_id)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "attach_interface"
verbose_name = _("Attach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:attach_interface"
policy_rules = (("compute", "compute_extension:attach_interfaces"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
# TODO(lyj): the policy for detach interface does not exist in nova.json;
# once it is added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "detach_interface"
verbose_name = _("Detach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:detach_interface"
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
ip_groups = {}
for ip_group, addresses in six.iteritems(instance.addresses):
ip_groups[ip_group] = {}
ip_groups[ip_group]["floating"] = []
ip_groups[ip_group]["non_floating"] = []
for address in addresses:
if ('OS-EXT-IPS:type' in address and
address['OS-EXT-IPS:type'] == "floating"):
ip_groups[ip_group]["floating"].append(address)
else:
ip_groups[ip_group]["non_floating"].append(address)
context = {
"ip_groups": ip_groups,
}
return template.loader.render_to_string(template_name, context)
def get_size(instance):
if hasattr(instance, "full_flavor"):
template_name = 'project/instances/_instance_flavor.html'
size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
if instance.full_flavor.disk > 0:
size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
else:
size_disk = _("%s GB") % "0"
context = {
"name": instance.full_flavor.name,
"id": instance.id,
"size_disk": size_disk,
"size_ram": size_ram,
"vcpus": instance.full_flavor.vcpus,
"flavor_id": instance.full_flavor.id
}
return template.loader.render_to_string(template_name, context)
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("active", pgettext_lazy("Current status of an Instance", u"Active")),
("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
("suspended", pgettext_lazy("Current status of an Instance",
u"Suspended")),
("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
("error", pgettext_lazy("Current status of an Instance", u"Error")),
("resize", pgettext_lazy("Current status of an Instance",
u"Resize/Migrate")),
("verify_resize", pgettext_lazy("Current status of an Instance",
u"Confirm or Revert Resize/Migrate")),
("revert_resize", pgettext_lazy(
"Current status of an Instance", u"Revert Resize/Migrate")),
("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
("hard_reboot", pgettext_lazy("Current status of an Instance",
u"Hard Reboot")),
("password", pgettext_lazy("Current status of an Instance", u"Password")),
("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
("migrating", pgettext_lazy("Current status of an Instance",
u"Migrating")),
("build", pgettext_lazy("Current status of an Instance", u"Build")),
("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("soft_deleted", pgettext_lazy("Current status of an Instance",
u"Soft Deleted")),
("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
("shelved_offloaded", pgettext_lazy("Current status of an Instance",
u"Shelved Offloaded")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
u"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
u"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
u"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
u"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
u"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
u"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
u"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
u"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
u"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
u"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
u"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
u"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
u"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
u"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
u"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
u"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
u"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
u"Rebooting Hard")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Pending Hard")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Started Hard")),
("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
u"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
u"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
u"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
u"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
u"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", u"Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
u"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
u"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", u"Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
u"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
u"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
u"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
class InstancesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = (('name', _("Instance Name"), True),
('status', _("Status ="), True),
('image', _("Image ID ="), True),
('flavor', _("Flavor ID ="), True))
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.Column("name",
link="horizon:project:instances:detail",
verbose_name=_("Instance Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
size = tables.Column(get_size, sortable=False, verbose_name=_("Size"))
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
launch_actions = (LaunchLink,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (TerminateInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, AttachInterface,
DetachInterface, EditInstance,
DecryptInstancePassword, EditInstanceSecurityGroups,
ConsoleLink, LogLink, TogglePause, ToggleSuspend,
ToggleShelve, ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, TerminateInstance)
| 0 |
#!/usr/bin/env python
try:
import pacman
except ImportError:
import alpm
pacman = alpm
import os, tempfile, shutil, sys
def pacman_pkg_getgroups(pkg):
i = pacman.void_to_PM_LIST(pacman.pkg_getinfo(pkg, pacman.PKG_GROUPS))
ret = []
while i:
ret.append(pacman.void_to_char(pacman.list_getdata(i)))
i = pacman.list_next(i)
return ret
def any_in(needle, haystack):
    """Return True if any element of the needle list is found in haystack."""
    for i in needle:
        if i in haystack:
            return True
    return False
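# Quick illustration (hedged, not part of the original script):
#   any_in(['core'], ['apps', 'core'])  -> True
#   any_in(['core'], ['apps'])          -> False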
if len(sys.argv) > 1 and sys.argv[1] == "--help":
    print "COREPKGS (core, chroot-core, devel-core) packages which depend on packages outside COREPKGS (-> COREPKGS is needed)"
    sys.exit(0)
basecats = ['core', 'chroot-core', 'devel-core']
root = tempfile.mkdtemp()
pacman.initialize(root)
if os.getcwd().split('/')[-2] == "frugalware-current":
treename = "frugalware-current"
else:
treename = "frugalware"
db = pacman.db_register(treename)
pacman.db_setserver(db, "file://" + os.getcwd() + "/../frugalware-%s" % sys.argv[1])
pacman.db_update(1, db)
i = pacman.db_getpkgcache(db)
while i:
pkg = pacman.void_to_PM_PKG(pacman.list_getdata(i))
pkgname = pacman.void_to_char(pacman.pkg_getinfo(pkg, pacman.PKG_NAME))
groups = pacman_pkg_getgroups(pkg)
if not any_in(basecats, groups):
i = pacman.list_next(i)
continue
j = pacman.void_to_PM_LIST(pacman.pkg_getinfo(pkg, pacman.PKG_DEPENDS))
while j:
found = False
dep = pacman.void_to_char(pacman.list_getdata(j)).split("<")[0].split(">")[0].split("=")[0]
k = pacman.db_getpkgcache(db)
while not found and k:
p = pacman.void_to_PM_PKG(pacman.list_getdata(k))
if pacman.void_to_char(pacman.pkg_getinfo(p, pacman.PKG_NAME)) == dep:
if any_in(basecats, pacman_pkg_getgroups(p)):
found = True
else:
l = pacman.void_to_PM_LIST(pacman.pkg_getinfo(p, pacman.PKG_PROVIDES))
while not found and l:
pr = pacman.void_to_PM_PKG(pacman.list_getdata(l))
if pacman.void_to_char(pacman.pkg_getinfo(pr, pacman.PKG_NAME)) == dep:
found = True
l = pacman.list_next(l)
k = pacman.list_next(k)
if not found:
try:
socket = open("../source/%s/%s/FrugalBuild" % (groups[0], pkgname))
while True:
line = socket.readline()
if not line:
break
if line[:14] != "# Maintainer: ":
continue
                    # FIXME: we hardcode the encoding of the FBs here
maintainer = line[14:].strip().decode('latin1')
break
socket.close()
except IOError:
maintainer = "Unknown"
print "%s should be moved out from COREPKGS (%s is not in COREPKGS; %s)" % (pkgname, pacman.void_to_char(pacman.list_getdata(j)), maintainer)
j = pacman.list_next(j)
i = pacman.list_next(i)
shutil.rmtree(root)
| 0.025707 |
########
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cloudstack_plugin.cloudstack_common import (
get_cloud_driver,
CLOUDSTACK_ID_PROPERTY,
CLOUDSTACK_NAME_PROPERTY,
CLOUDSTACK_TYPE_PROPERTY,
COMMON_RUNTIME_PROPERTIES_KEYS,
USE_EXTERNAL_RESOURCE_PROPERTY,
)
VOLUME_CLOUDSTACK_TYPE = 'volume'
RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
@operation
def create(**kwargs):
""" Create a volume
"""
cloud_driver = get_cloud_driver(ctx)
volume = {}
if ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY] is False:
ctx.logger.debug('reading volume attributes.')
volume.update(copy.deepcopy(ctx.node.properties['volume']))
if 'name' in volume:
volume_name = volume['name']
else:
raise NonRecoverableError("To create a volume, the name of the "
"volume is needed")
if 'size' in volume:
volume_size = volume['size']
else:
raise NonRecoverableError("To create a volume, the size of the "
"volume is needed")
volume = cloud_driver.create_volume(name=volume_name,
size=volume_size)
if volume_exists(cloud_driver, volume.id):
ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY] = volume.id
ctx.instance.runtime_properties[CLOUDSTACK_TYPE_PROPERTY] = \
VOLUME_CLOUDSTACK_TYPE
else:
raise NonRecoverableError("Volume not created")
elif ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY] is True:
if ctx.node.properties['resource_id']:
resource_id = ctx.node.properties['resource_id']
volume = get_volume_by_id(cloud_driver, resource_id)
if volume is not None:
ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY] = \
volume.id
ctx.instance.runtime_properties[CLOUDSTACK_NAME_PROPERTY] = \
volume.name
ctx.instance.runtime_properties[CLOUDSTACK_TYPE_PROPERTY] = \
VOLUME_CLOUDSTACK_TYPE
else:
raise NonRecoverableError("Could not find volume with id {0}".
format(resource_id))
else:
raise NonRecoverableError("Resource_id for volume is not supplied")
return
@operation
def delete(**kwargs):
""" Delete a volume
"""
cloud_driver = get_cloud_driver(ctx)
volume_id = ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY]
volume = get_volume_by_id(cloud_driver, volume_id)
if volume is None:
raise NonRecoverableError('Volume with id {0} not found'
.format(volume_id))
if not ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY]:
ctx.logger.info('Trying to destroy volume {0}'.format(volume))
cloud_driver.destroy_volume(volume=volume)
else:
ctx.logger.info('Volume {0} does not need to be destroyed'.format(
volume))
def volume_exists(cloud_driver, volume_id):
exists = get_volume_by_id(cloud_driver, volume_id)
if not exists:
return False
return True
def get_volume_by_id(cloud_driver, volume_id):
volumes = [volume for volume in cloud_driver.list_volumes()
if volume_id == volume.id]
if not volumes:
ctx.logger.info('Could not find volume with ID {0}'.
format(volume_id))
return None
return volumes[0]
| 0 |
"""Support for Juicenet cloud."""
import logging
import pyjuicenet
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "juicenet"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
extra=vol.ALLOW_EXTRA,
)
JUICENET_COMPONENTS = ["sensor", "switch"]
def setup(hass, config):
"""Set up the Juicenet component."""
hass.data[DOMAIN] = {}
access_token = config[DOMAIN].get(CONF_ACCESS_TOKEN)
hass.data[DOMAIN]["api"] = pyjuicenet.Api(access_token)
for component in JUICENET_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class JuicenetDevice(Entity):
"""Represent a base Juicenet device."""
def __init__(self, device, sensor_type, hass):
"""Initialise the sensor."""
self.hass = hass
self.device = device
self.type = sensor_type
@property
def name(self):
"""Return the name of the device."""
return self.device.name()
def update(self):
"""Update state of the device."""
self.device.update_state()
@property
def _manufacturer_device_id(self):
"""Return the manufacturer device id."""
return self.device.id()
@property
def _token(self):
"""Return the device API token."""
return self.device.token()
@property
def unique_id(self):
"""Return a unique ID."""
return "{}-{}".format(self.device.id(), self.type)
| 0 |
import datetime, os, random, uuid, zipfile
# GLOBAL:
DATES = []
PST = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
YEAR = PST.year - 1
MONTH = PST.month
DAY = PST.day
date = datetime.datetime(YEAR,MONTH,DAY)
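# Each letter below maps to 35 flags covering 5 consecutive weeks of 7 days;
# gen_pattern() walks them day by day and records a commit date for every 1,
# so the marked cells draw the glyph (presumably in GitHub's 7-row, one column
# per week contribution calendar).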
patterns = {
"A" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"B" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"C" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
"D" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0],
"E" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1],
"F" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
"G" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
"H" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"I" : [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
"J" : [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
"K" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
"L" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
"M" : [1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"N" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"O" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"P" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
"Q" : [1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"R" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1],
"S" : [1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
"T" : [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
"U" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"V" : [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0],
"W" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"X" : [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
"Y" : [1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"Z" : [1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1]
}
def gen_pattern(char):
global date
for i in range(35):
check = patterns[char][i]
if check:
DATES.append(date)
date += datetime.timedelta(days=1)
def gen_empty_pattern():
global date
for i in range(7):
date += datetime.timedelta(days=1)
def init(message="ANINDYA", author_name="", author_email=""):
class author:
name = author_name
email = author_email
_author = author()
message = message.upper()
string = message[0:8]
for char in string:
gen_pattern(char)
gen_empty_pattern()
random_dir = str(uuid.uuid4())
dir_path = os.getcwd() + "/gits/" + random_dir
import github
REPO = github.init(dir_path)
for date in DATES:
date = date.strftime("%Y-%m-%d %H:%M:%S")
github.commit(REPO, date, dir_path, _author)
return "./gits/" + random_dir
| 0.029058 |
# -*- coding: utf-8 -*-
import collections
import functools
from requests.compat import urlparse, urlencode
from . import exceptions
from . import models
class GitHubIterator(models.GitHubCore, collections.Iterator):
"""The :class:`GitHubIterator` class powers all of the iter_* methods."""
def __init__(self, count, url, cls, session, params=None, etag=None,
headers=None):
models.GitHubCore.__init__(self, {}, session)
#: Original number of items requested
self.original = count
#: Number of items left in the iterator
self.count = count
        #: URL the class used to make its first GET
self.url = url
#: Last URL that was requested
self.last_url = None
self._api = self.url
#: Class for constructing an item to return
self.cls = cls
#: Parameters of the query string
self.params = params or {}
self._remove_none(self.params)
# We do not set this from the parameter sent. We want this to
# represent the ETag header returned by GitHub no matter what.
# If this is not None, then it won't be set from the response and
# that's not what we want.
#: The ETag Header value returned by GitHub
self.etag = None
#: Headers generated for the GET request
self.headers = headers or {}
#: The last response seen
self.last_response = None
#: Last status code received
self.last_status = 0
if etag:
self.headers.update({'If-None-Match': etag})
self.path = urlparse(self.url).path
def _repr(self):
return '<GitHubIterator [{0}, {1}]>'.format(self.count, self.path)
def __iter__(self):
self.last_url, params = self.url, self.params
headers = self.headers
if 0 < self.count <= 100 and self.count != -1:
params['per_page'] = self.count
if 'per_page' not in params and self.count == -1:
params['per_page'] = 100
cls = self.cls
if issubclass(self.cls, models.GitHubCore):
cls = functools.partial(self.cls, session=self)
while (self.count == -1 or self.count > 0) and self.last_url:
response = self._get(self.last_url, params=params,
headers=headers)
self.last_response = response
self.last_status = response.status_code
if params:
params = None # rel_next already has the params
if not self.etag and response.headers.get('ETag'):
self.etag = response.headers.get('ETag')
json = self._get_json(response)
if json is None:
break
# languages returns a single dict. We want the items.
if isinstance(json, dict):
if issubclass(self.cls, models.GitHubObject):
raise exceptions.UnprocessableResponseBody(
"GitHub's API returned a body that could not be"
" handled", json
)
if json.get('ETag'):
del json['ETag']
if json.get('Last-Modified'):
del json['Last-Modified']
json = json.items()
for i in json:
yield cls(i)
self.count -= 1 if self.count > 0 else 0
if self.count == 0:
break
rel_next = response.links.get('next', {})
self.last_url = rel_next.get('url', '')
def __next__(self):
if not hasattr(self, '__i__'):
self.__i__ = self.__iter__()
return next(self.__i__)
def _get_json(self, response):
return self._json(response, 200)
def refresh(self, conditional=False):
self.count = self.original
if conditional:
self.headers['If-None-Match'] = self.etag
self.etag = None
self.__i__ = self.__iter__()
return self
def next(self):
return self.__next__()
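# Illustrative use (a hedged sketch, not part of this module): the iter_* helpers
# construct one of these iterators and callers simply loop over it, e.g.
#
#     for issue in some_iter_method(number=50):  # hypothetical helper
#         print(issue)
#
# Iteration ends once `count` items have been yielded or the response carries no
# 'next' link.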
class SearchIterator(GitHubIterator):
"""This is a special-cased class for returning iterable search results.
It inherits from :class:`GitHubIterator <github3.structs.GitHubIterator>`.
All members and methods documented here are unique to instances of this
class. For other members and methods, check its parent class.
"""
def __init__(self, count, url, cls, session, params=None, etag=None,
headers=None):
super(SearchIterator, self).__init__(count, url, cls, session, params,
etag, headers)
#: Total count returned by GitHub
self.total_count = 0
#: Items array returned in the last request
self.items = []
def _repr(self):
return '<SearchIterator [{0}, {1}?{2}]>'.format(self.count, self.path,
urlencode(self.params))
def _get_json(self, response):
json = self._json(response, 200)
# I'm not sure if another page will retain the total_count attribute,
# so if it's not in the response, just set it back to what it used to
# be
self.total_count = json.get('total_count', self.total_count)
self.items = json.get('items', [])
# If we return None then it will short-circuit the while loop.
return json.get('items')
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that attaches a correlation id to WSGI request"""
from ceilometer.openstack.common.middleware import base
from ceilometer.openstack.common import uuidutils
class CorrelationIdMiddleware(base.Middleware):
def process_request(self, req):
correlation_id = (req.headers.get("X_CORRELATION_ID") or
uuidutils.generate_uuid())
req.headers['X_CORRELATION_ID'] = correlation_id
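# This filter guarantees that every request passing through the WSGI pipeline
# carries an X_CORRELATION_ID header: a client-supplied value is preserved,
# otherwise a fresh UUID is generated, letting downstream services correlate logs.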
| 0 |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v2 import endpoints_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestEndpointsClient(base.BaseServiceTest):
FAKE_CREATE_ENDPOINT = {
"endpoint": {
"id": 1,
"tenantId": 1,
"region": "North",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
}
}
FAKE_LIST_ENDPOINTS = {
"endpoints": [
{
"id": 1,
"tenantId": "1",
"region": "North",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
},
{
"id": 2,
"tenantId": "1",
"region": "South",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
}
]
}
def setUp(self):
super(TestEndpointsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = endpoints_client.EndpointsClient(fake_auth,
'identity', 'regionOne')
def _test_create_endpoint(self, bytes_body=False):
self.check_service_client_function(
self.client.create_endpoint,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_ENDPOINT,
bytes_body,
service_id="b344506af7644f6794d9cb316600b020",
region="region-demo",
publicurl="https://compute.north.public.com/v1",
adminurl="https://compute.north.internal.com/v1",
internalurl="https://compute.north.internal.com/v1")
def _test_list_endpoints(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoints,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINTS,
bytes_body)
def test_create_endpoint_with_str_body(self):
self._test_create_endpoint()
def test_create_endpoint_with_bytes_body(self):
self._test_create_endpoint(bytes_body=True)
def test_list_endpoints_with_str_body(self):
self._test_list_endpoints()
def test_list_endpoints_with_bytes_body(self):
self._test_list_endpoints(bytes_body=True)
def test_delete_endpoint(self):
self.check_service_client_function(
self.client.delete_endpoint,
'tempest.lib.common.rest_client.RestClient.delete',
{},
endpoint_id="b344506af7644f6794d9cb316600b020",
status=204)
| 0 |
#
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.cElementTree as etree
import ethtool
import socket
import logging
import utils
logger = logging.getLogger('glustercli')
if hasattr(etree, 'ParseError'):
_etreeExceptions = (etree.ParseError, AttributeError, ValueError)
else:
_etreeExceptions = (SyntaxError, AttributeError, ValueError)
_glusterCommandPath = utils.CommandPath("gluster",
"/usr/sbin/gluster",
)
_TRANS_IN_PROGRESS = "another transaction is in progress"
_peerUUID = ''
_peer = ''
def _getLocalPeer():
global _peer
if _peer:
return _peer
fqdn = socket.getfqdn()
ip = socket.gethostbyname(fqdn)
if not ip.startswith('127.'):
_peer = ip
return ip
for dev in ethtool.get_active_devices():
try:
ip = ethtool.get_ipaddr(dev)
if not ip.startswith('127.'):
_peer = ip
return ip
except IOError as e:
logger.warn('failed to get ipaddr for device %s: %s' % (dev, e))
return fqdn
def _getGlusterVolCmd():
return [_glusterCommandPath.cmd, "--mode=script", "volume"]
def _getGlusterPeerCmd():
return [_glusterCommandPath.cmd, "--mode=script", "peer"]
def _getGlusterSystemCmd():
return [_glusterCommandPath.cmd, "system::"]
def _getGlusterVolGeoRepCmd():
return _getGlusterVolCmd() + ["geo-replication"]
def _getGlusterSnapshotCmd():
return [_glusterCommandPath.cmd, "--mode=script", "snapshot"]
class BrickStatus:
PAUSED = 'PAUSED'
COMPLETED = 'COMPLETED'
RUNNING = 'RUNNING'
UNKNOWN = 'UNKNOWN'
NA = 'NA'
class HostStatus:
CONNECTED = 'CONNECTED'
DISCONNECTED = 'DISCONNECTED'
UNKNOWN = 'UNKNOWN'
class VolumeStatus:
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
class TransportType:
TCP = 'TCP'
RDMA = 'RDMA'
class TaskType:
REBALANCE = 'REBALANCE'
REPLACE_BRICK = 'REPLACE_BRICK'
REMOVE_BRICK = 'REMOVE_BRICK'
class GlusterXMLError(Exception):
message = "XML error"
def __init__(self, cmd, xml):
self.cmd = cmd
self.xml = xml
def __str__(self):
return "%s\ncommand: %s\nXML: %s" % (self.message, self.cmd, self.xml)
class GlusterCmdFailed(utils.CmdExecFailed):
message = "gluster command failed"
class GlusterBusy(utils.CmdExecFailed):
message = "gluster busy"
def _throwIfBusy(cmd, rc, out, err):
o = out + err
if _TRANS_IN_PROGRESS in o.lower():
raise GlusterBusy(cmd, rc, out, err)
def _execGluster(cmd):
rc, out, err = utils.execCmd(cmd)
_throwIfBusy(cmd, rc, out, err)
return rc, out, err
def _execGlusterXml(cmd):
cmd.append('--xml')
rc, out, err = utils.execCmd(cmd)
_throwIfBusy(cmd, rc, out, err)
try:
tree = etree.fromstring(out)
rv = int(tree.find('opRet').text)
msg = tree.find('opErrstr').text
errNo = int(tree.find('opErrno').text)
except _etreeExceptions:
raise GlusterXMLError(cmd, out)
if rv == 0:
return tree
if errNo != 0:
rv = errNo
raise GlusterCmdFailed(cmd, rv, err=msg)
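# Contract of _execGlusterXml: on success (opRet == 0) the parsed ElementTree is
# returned; otherwise GlusterCmdFailed is raised, using opErrno (when non-zero)
# as the return code and opErrstr as the error message.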
def _getLocalPeerUUID():
global _peerUUID
if _peerUUID:
return _peerUUID
command = _getGlusterSystemCmd() + ["uuid", "get"]
rc, out, err = _execGluster(command)
o = out.strip()
if o.startswith('UUID: '):
_peerUUID = o[6:]
return _peerUUID
def _parseVolumeStatus(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': [],
'nfs': [],
'shd': []}
hostname = _getLocalPeer()
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
if value['path'] == 'localhost':
value['path'] = hostname
if value['status'] == '1':
value['status'] = 'ONLINE'
else:
value['status'] = 'OFFLINE'
if value['hostname'] == 'NFS Server':
status['nfs'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'port': value['port'],
'status': value['status'],
'pid': value['pid']})
elif value['hostname'] == 'Self-heal Daemon':
status['shd'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'status': value['status'],
'pid': value['pid']})
else:
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'port': value['port'],
'status': value['status'],
'pid': value['pid']})
return status
def _parseVolumeStatusDetail(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
sizeTotal = int(value['sizeTotal'])
value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
sizeFree = int(value['sizeFree'])
value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'sizeTotal': '%.3f' % (value['sizeTotal'],),
'sizeFree': '%.3f' % (value['sizeFree'],),
'device': value['device'],
'blockSize': value['blockSize'],
'mntOptions': value['mntOptions'],
'fsName': value['fsName']})
return status
def _parseVolumeStatusClients(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
hostname = el.find('hostname').text
path = el.find('path').text
hostuuid = el.find('peerid').text
clientsStatus = []
for c in el.findall('clientsStatus/client'):
clientValue = {}
for ch in c.getchildren():
clientValue[ch.tag] = ch.text or ''
clientsStatus.append({'hostname': clientValue['hostname'],
'bytesRead': clientValue['bytesRead'],
'bytesWrite': clientValue['bytesWrite']})
status['bricks'].append({'brick': '%s:%s' % (hostname, path),
'hostuuid': hostuuid,
'clientsStatus': clientsStatus})
return status
def _parseVolumeStatusMem(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
brick = {'brick': '%s:%s' % (el.find('hostname').text,
el.find('path').text),
'hostuuid': el.find('peerid').text,
'mallinfo': {},
'mempool': []}
for ch in el.find('memStatus/mallinfo').getchildren():
brick['mallinfo'][ch.tag] = ch.text or ''
for c in el.findall('memStatus/mempool/pool'):
mempool = {}
for ch in c.getchildren():
mempool[ch.tag] = ch.text or ''
brick['mempool'].append(mempool)
status['bricks'].append(brick)
return status
def volumeStatus(volumeName, brick=None, option=None):
command = _getGlusterVolCmd() + ["status", volumeName]
if brick:
command.append(brick)
if option:
command.append(option)
xmltree = _execGlusterXml(command)
try:
if option == 'detail':
return _parseVolumeStatusDetail(xmltree)
elif option == 'clients':
return _parseVolumeStatusClients(xmltree)
elif option == 'mem':
return _parseVolumeStatusMem(xmltree)
else:
return _parseVolumeStatus(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeInfo(tree):
volumes = {}
for el in tree.findall('volInfo/volumes/volume'):
value = {}
value['volumeName'] = el.find('name').text
value['uuid'] = el.find('id').text
value['volumeType'] = el.find('typeStr').text.upper().replace('-', '_')
status = el.find('statusStr').text.upper()
if status == 'STARTED':
value["volumeStatus"] = VolumeStatus.ONLINE
else:
value["volumeStatus"] = VolumeStatus.OFFLINE
value['brickCount'] = el.find('brickCount').text
value['distCount'] = el.find('distCount').text
value['stripeCount'] = el.find('stripeCount').text
value['replicaCount'] = el.find('replicaCount').text
transportType = el.find('transport').text
if transportType == '0':
value['transportType'] = [TransportType.TCP]
elif transportType == '1':
value['transportType'] = [TransportType.RDMA]
else:
value['transportType'] = [TransportType.TCP, TransportType.RDMA]
value['bricks'] = []
value['options'] = {}
value['bricksInfo'] = []
for b in el.findall('bricks/brick'):
value['bricks'].append(b.text)
for o in el.findall('options/option'):
value['options'][o.find('name').text] = o.find('value').text
for d in el.findall('bricks/brick'):
brickDetail = {}
            # this try block is to maintain backward compatibility:
            # it returns an empty list when gluster does not return the uuid
try:
brickDetail['name'] = d.find('name').text
brickDetail['hostUuid'] = d.find('hostUuid').text
value['bricksInfo'].append(brickDetail)
except AttributeError:
break
volumes[value['volumeName']] = value
return volumes
def _parseVolumeProfileInfo(tree, nfs):
bricks = []
if nfs:
brickKey = 'nfs'
bricksKey = 'nfsServers'
else:
brickKey = 'brick'
bricksKey = 'bricks'
for brick in tree.findall('volProfile/brick'):
fopCumulative = []
blkCumulative = []
fopInterval = []
blkInterval = []
brickName = brick.find('brickName').text
if brickName == 'localhost':
brickName = _getLocalPeer()
for block in brick.findall('cumulativeStats/blockStats/block'):
blkCumulative.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('cumulativeStats/fopStats/fop'):
fopCumulative.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
for block in brick.findall('intervalStats/blockStats/block'):
blkInterval.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('intervalStats/fopStats/fop'):
fopInterval.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
bricks.append(
{brickKey: brickName,
'cumulativeStats': {
'blockStats': blkCumulative,
'fopStats': fopCumulative,
'duration': brick.find('cumulativeStats/duration').text,
'totalRead': brick.find('cumulativeStats/totalRead').text,
'totalWrite': brick.find('cumulativeStats/totalWrite').text},
'intervalStats': {
'blockStats': blkInterval,
'fopStats': fopInterval,
'duration': brick.find('intervalStats/duration').text,
'totalRead': brick.find('intervalStats/totalRead').text,
'totalWrite': brick.find('intervalStats/totalWrite').text}})
status = {'volumeName': tree.find("volProfile/volname").text,
bricksKey: bricks}
return status
def volumeInfo(volumeName=None, remoteServer=None):
command = _getGlusterVolCmd() + ["info"]
if remoteServer:
command += ['--remote-host=%s' % remoteServer]
if volumeName:
command.append(volumeName)
xmltree = _execGlusterXml(command)
try:
return _parseVolumeInfo(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeCreate(volumeName, brickList, replicaCount=0, stripeCount=0,
transportList=[], force=False):
command = _getGlusterVolCmd() + ["create", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
if transportList:
command += ["transport", ','.join(transportList)]
command += brickList
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return {'uuid': xmltree.find('volCreate/volume/id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
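# Illustrative call (hedged sketch, volume and brick names are made up):
#   volumeCreate('gv0', ['host1:/bricks/b1', 'host2:/bricks/b2'], replicaCount=2)
# runs "gluster --mode=script volume create gv0 replica 2 host1:/bricks/b1
# host2:/bricks/b2 --xml" and returns {'uuid': <new volume id>} parsed from the XML.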
def volumeStart(volumeName, force=False):
command = _getGlusterVolCmd() + ["start", volumeName]
if force:
command.append('force')
_execGluster(command)
return True
def volumeStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["stop", volumeName]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeDelete(volumeName):
command = _getGlusterVolCmd() + ["delete", volumeName]
_execGlusterXml(command)
return True
def volumeSet(volumeName, option, value):
command = _getGlusterVolCmd() + ["set", volumeName, option, value]
_execGlusterXml(command)
return True
def _parseVolumeSetHelpXml(out):
optionList = []
tree = etree.fromstring(out)
for el in tree.findall('option'):
option = {}
for ch in el.getchildren():
option[ch.tag] = ch.text or ''
optionList.append(option)
return optionList
def volumeSetHelpXml():
rc, out, err = _execGluster(_getGlusterVolCmd() + ["set", 'help-xml'])
return _parseVolumeSetHelpXml(out)
def volumeReset(volumeName, option='', force=False):
command = _getGlusterVolCmd() + ['reset', volumeName]
if option:
command.append(option)
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeBrickAdd(volumeName, brickList,
replicaCount=0, stripeCount=0, force=False):
command = _getGlusterVolCmd() + ["add-brick", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeRebalanceStart(volumeName, rebalanceType="", force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName]
if rebalanceType:
command.append(rebalanceType)
command.append("start")
if force:
command.append("force")
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volRebalance/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeRebalanceStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
if mode == 'rebalance':
tree = xmltree.find('volRebalance')
elif mode == 'remove-brick':
tree = xmltree.find('volRemoveBrick')
else:
return
st = tree.find('aggregate/statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status = {
'summary': {
'runtime': tree.find('aggregate/runtime').text,
'filesScanned': tree.find('aggregate/lookups').text,
'filesMoved': tree.find('aggregate/files').text,
'filesFailed': tree.find('aggregate/failures').text,
'filesSkipped': tree.find('aggregate/skipped').text,
'totalSizeMoved': tree.find('aggregate/size').text,
'status': statusStr.upper()},
'hosts': []}
for el in tree.findall('node'):
st = el.find('statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status['hosts'].append({'name': el.find('nodeName').text,
'id': el.find('id').text,
'runtime': el.find('runtime').text,
'filesScanned': el.find('lookups').text,
'filesMoved': el.find('files').text,
'filesFailed': el.find('failures').text,
'filesSkipped': el.find('skipped').text,
'totalSizeMoved': el.find('size').text,
'status': statusStr.upper()})
return status
def volumeRebalanceStatus(volumeName):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "start"]
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volReplaceBrick/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "abort"]
_execGlusterXml(command)
return True
def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "pause"]
_execGlusterXml(command)
return True
def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
volumeName,
existingBrick, newBrick,
"status"])
message = "\n".join(out)
statLine = out[0].strip().upper()
if BrickStatus.PAUSED in statLine:
return BrickStatus.PAUSED, message
elif statLine.endswith('MIGRATION COMPLETE'):
return BrickStatus.COMPLETED, message
elif statLine.startswith('NUMBER OF FILES MIGRATED'):
return BrickStatus.RUNNING, message
elif statLine.endswith("UNKNOWN"):
return BrickStatus.UNKNOWN, message
else:
return BrickStatus.NA, message
def volumeReplaceBrickCommit(volumeName, existingBrick, newBrick,
force=False):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "commit"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeBrickRemoveStart(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volRemoveBrick/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveStop(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveStatus(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveCommit(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
_execGlusterXml(command)
return True
def volumeBrickRemoveForce(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
_execGlusterXml(command)
return True
def peerProbe(hostName):
command = _getGlusterPeerCmd() + ["probe", hostName]
_execGlusterXml(command)
return True
def peerDetach(hostName, force=False):
command = _getGlusterPeerCmd() + ["detach", hostName]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except GlusterCmdFailed as e:
if e.rc == 2:
raise GlusterPeerNotFound(hostName)
raise
def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
hostList = [{'hostname': gHostName,
'uuid': gUuid,
'status': gStatus}]
for el in tree.findall('peerStatus/peer'):
if el.find('state').text != '3':
status = HostStatus.UNKNOWN
elif el.find('connected').text == '1':
status = HostStatus.CONNECTED
else:
status = HostStatus.DISCONNECTED
hostList.append({'hostname': el.find('hostname').text,
'uuid': el.find('uuid').text,
'status': status})
return hostList
def peerStatus():
command = _getGlusterPeerCmd() + ["status"]
xmltree = _execGlusterXml(command)
try:
return _parsePeerStatus(xmltree,
_getLocalPeer(),
_getLocalPeerUUID(), HostStatus.CONNECTED)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeProfileStart(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "start"]
_execGlusterXml(command)
return True
def volumeProfileStop(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "stop"]
_execGlusterXml(command)
return True
def volumeProfileInfo(volumeName, nfs=False):
command = _getGlusterVolCmd() + ["profile", volumeName, "info"]
if nfs:
command += ["nfs"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeProfileInfo(xmltree, nfs)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeTasks(tree):
tasks = {}
for el in tree.findall('volStatus/volumes/volume'):
volumeName = el.find('volName').text
for c in el.findall('tasks/task'):
taskType = c.find('type').text
taskType = taskType.upper().replace('-', '_').replace(' ', '_')
taskId = c.find('id').text
bricks = []
if taskType == TaskType.REPLACE_BRICK:
bricks.append(c.find('params/srcBrick').text)
bricks.append(c.find('params/dstBrick').text)
elif taskType == TaskType.REMOVE_BRICK:
for b in c.findall('params/brick'):
bricks.append(b.text)
elif taskType == TaskType.REBALANCE:
pass
statusStr = c.find('statusStr').text.upper() \
.replace('-', '_') \
.replace(' ', '_')
tasks[taskId] = {'volumeName': volumeName,
'taskType': taskType,
'status': statusStr,
'bricks': bricks}
return tasks
def volumeTasks(volumeName="all"):
command = _getGlusterVolCmd() + ["status", volumeName, "tasks"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeTasks(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeGeoRepSessionStart(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "start"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeGeoRepSessionStop(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "stop"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def _parseGeoRepStatus(tree, detail=False):
status = {}
for volume in tree.findall('geoRep/volume'):
sessions = []
volumeDetail = {}
for session in volume.findall('sessions/session'):
pairs = []
sessionDetail = {}
sessionDetail['sessionKey'] = session.find('session_slave').text
sessionDetail['remoteVolumeName'] = sessionDetail[
'sessionKey'].split("::")[-1]
for pair in session.findall('pair'):
pairDetail = {}
pairDetail['host'] = pair.find('master_node').text
pairDetail['hostUuid'] = pair.find(
'master_node_uuid').text
pairDetail['brickName'] = pair.find('master_brick').text
pairDetail['remoteHost'] = pair.find(
'slave').text.split("::")[0]
pairDetail['status'] = pair.find('status').text
pairDetail['checkpointStatus'] = pair.find(
'checkpoint_status').text
pairDetail['crawlStatus'] = pair.find('crawl_status').text
if detail:
pairDetail['filesSynced'] = pair.find('files_syncd').text
pairDetail['filesPending'] = pair.find(
'files_pending').text
pairDetail['bytesPending'] = pair.find(
'bytes_pending').text
pairDetail['deletesPending'] = pair.find(
'deletes_pending').text
pairDetail['filesSkipped'] = pair.find(
'files_skipped').text
pairs.append(pairDetail)
sessionDetail['bricks'] = pairs
sessions.append(sessionDetail)
volumeDetail['sessions'] = sessions
status[volume.find('name').text] = volumeDetail
return status
def volumeGeoRepStatus(volumeName=None, remoteHost=None,
remoteVolumeName=None, detail=False):
command = _getGlusterVolGeoRepCmd()
if volumeName:
command.append(volumeName)
if remoteHost and remoteVolumeName:
command.append("%s::%s" % (remoteHost, remoteVolumeName))
command.append("status")
if detail:
command.append("detail")
xmltree = _execGlusterXml(command)
try:
return _parseGeoRepStatus(xmltree, detail)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeGeoRepSessionPause(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "pause"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeGeoRepSessionResume(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "resume"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def _parseVolumeGeoRepConfig(tree):
conf = tree.find('geoRep/config')
config = {}
for child in conf.getchildren():
config[child.tag] = child.text
return {'geoRepConfig': config}
def volumeGeoRepConfig(volumeName, remoteHost,
remoteVolumeName, optionName=None,
optionValue=None):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "config"]
if optionName and optionValue:
command += [optionName, optionValue]
elif optionName:
command += ["!%s" % optionName]
xmltree = _execGlusterXml(command)
if optionName:
return True
try:
return _parseVolumeGeoRepConfig(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def snapshotCreate(volumeName, snapName,
snapDescription=None,
force=False):
command = _getGlusterSnapshotCmd() + ["create", snapName, volumeName]
if snapDescription:
command += ['description', snapDescription]
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return {'uuid': xmltree.find('snapCreate/snapshot/uuid').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def snapshotDelete(volumeName=None, snapName=None):
command = _getGlusterSnapshotCmd() + ["delete"]
if snapName:
command.append(snapName)
elif volumeName:
command += ["volume", volumeName]
# xml output not used because of BZ:1161416 in gluster cli
rc, out, err = _execGluster(command)
return True
def snapshotActivate(snapName, force=False):
command = _getGlusterSnapshotCmd() + ["activate", snapName]
if force:
command.append('force')
_execGlusterXml(command)
return True
def snapshotDeactivate(snapName):
command = _getGlusterSnapshotCmd() + ["deactivate", snapName]
_execGlusterXml(command)
return True
def _parseRestoredSnapshot(tree):
snapshotRestore = {}
snapshotRestore['volumeName'] = tree.find('snapRestore/volume/name').text
snapshotRestore['volumeUuid'] = tree.find('snapRestore/volume/uuid').text
snapshotRestore['snapshotName'] = tree.find(
'snapRestore/snapshot/name').text
snapshotRestore['snapshotUuid'] = tree.find(
'snapRestore/snapshot/uuid').text
return snapshotRestore
def snapshotRestore(snapName):
command = _getGlusterSnapshotCmd() + ["restore", snapName]
xmltree = _execGlusterXml(command)
try:
return _parseRestoredSnapshot(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
| 0 |
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine.resources.openstack.neutron import neutron
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
class PoolMember(neutron.NeutronResource):
"""A resource for managing LBaaS v2 Pool Members.
A pool member represents a single backend node.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
PROPERTIES = (
POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
SUBNET,
) = (
'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
'subnet'
)
ATTRIBUTES = (
ADDRESS_ATTR, POOL_ID_ATTR
) = (
'address', 'pool_id'
)
properties_schema = {
POOL: properties.Schema(
properties.Schema.STRING,
_('Name or ID of the load balancing pool.'),
required=True
),
ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the pool member on the pool network.'),
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('Port on which the pool member listens for requests or '
'connections.'),
required=True,
constraints=[
constraints.Range(1, 65535),
]
),
WEIGHT: properties.Schema(
properties.Schema.INTEGER,
_('Weight of pool member in the pool (default to 1).'),
default=1,
constraints=[
constraints.Range(0, 256),
],
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the pool member.'),
default=True,
update_allowed=True,
constraints=[constraints.AllowedValues(['True'])]
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet name or ID of this member.'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
}
attributes_schema = {
ADDRESS_ATTR: attributes.Schema(
_('The IP address of the pool member.'),
type=attributes.Schema.STRING
),
POOL_ID_ATTR: attributes.Schema(
_('The ID of the pool to which the pool member belongs.'),
type=attributes.Schema.STRING
)
}
def __init__(self, name, definition, stack):
super(PoolMember, self).__init__(name, definition, stack)
self._pool_id = None
self._lb_id = None
@property
def pool_id(self):
if self._pool_id is None:
self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
self.POOL,
self.properties[self.POOL],
cmd_resource='lbaas_pool')
return self._pool_id
@property
def lb_id(self):
if self._lb_id is None:
pool = self.client().show_lbaas_pool(self.pool_id)['pool']
listener_id = pool['listeners'][0]['id']
listener = self.client().show_listener(listener_id)['listener']
self._lb_id = listener['loadbalancers'][0]['id']
return self._lb_id
def _check_lb_status(self):
return self.client_plugin().check_lb_status(self.lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_pool(
properties, self.POOL, 'pool_id')
properties.pop('pool_id')
if self.SUBNET in properties:
self.client_plugin().resolve_subnet(
properties, self.SUBNET, 'subnet_id')
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
member = self.client().create_lbaas_member(
self.pool_id, {'member': properties})['member']
self.resource_id_set(member['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
member = self.client().show_lbaas_member(self.resource_id,
self.pool_id)
return member['member']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_lbaas_member(self.resource_id,
self.pool_id,
{'member': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_lbaas_member(self.resource_id,
self.pool_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::PoolMember': PoolMember,
}
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Citrix Systems, Inc.
# Copyright 2013 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt import driver
class FakeDriver(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class FakeDriver2(FakeDriver):
pass
class ToDriverRegistryTestCase(test.TestCase):
def assertDriverInstance(self, inst, class_, *args, **kwargs):
self.assertEquals(class_, inst.__class__)
self.assertEquals(args, inst.args)
self.assertEquals(kwargs, inst.kwargs)
def test_driver_dict_from_config(self):
drvs = driver.driver_dict_from_config(
[
'key1=nova.tests.test_driver.FakeDriver',
'key2=nova.tests.test_driver.FakeDriver2',
], 'arg1', 'arg2', param1='value1', param2='value2'
)
self.assertEquals(
sorted(['key1', 'key2']),
sorted(drvs.keys())
)
self.assertDriverInstance(
drvs['key1'],
FakeDriver, 'arg1', 'arg2', param1='value1',
param2='value2')
self.assertDriverInstance(
drvs['key2'],
FakeDriver2, 'arg1', 'arg2', param1='value1',
param2='value2')
| 0 |
#!/usr/bin/python3
import argparse
import sys
from collections import defaultdict
from random import choice
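# A first-order, character-level Markov chain: build_dictionary() maps each
# character to the list of characters observed to follow it (duplicates are
# kept, so random.choice() samples successors proportionally to frequency).
# The special key 'first' collects word-initial characters, and randnewword()
# walks the chain from a starting character until it emits a newline.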
def randnewword(letter_dict, char=None):
if not char:
char = choice(letter_dict['first'])
result = char
while char != '\n':
char = choice(letter_dict[char])
result += char
return result
def build_dictionary(infile):
letter_dict = defaultdict(list)
for line in infile:
first = True
for char1, char2 in zip(line, line[1:]):
if first:
first = False
letter_dict['first'].append(char1)
letter_dict[char1].append(char2)
return letter_dict
def main(args):
letter_dict = build_dictionary(args.infile)
while args.numwords > 0:
args.numwords -= 1
new_word = randnewword(letter_dict)
args.outfile.write(new_word)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Create new words using a stochastic Markov chain.')
parser.add_argument('infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument('outfile', nargs='?',
type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-n', '--numwords',
type=int,
default=1)
args = parser.parse_args()
main(args)
| 0 |
#!/usr/bin/env python
'''
File: Communicator.py
Author: Samuel Barrett
Description: some classes for socket communication
Created: 2010-11-07
Modified: 2010-11-07
'''
import socket, sys, time
import cPickle as pickle
defaultPort = 5557
class TimeoutError(Exception):
def __init__(self, *args):
self.value = args
def __str__(self):
return repr(self.value)
class Communicator(object):
def __init__(self,host='localhost',port=defaultPort,sock=None):
self._sock = sock
self._storedMsg = ''
self._addr = (host,port)
self.initialize()
def initialize(self):
if self._sock is None:
raise ValueError
def close(self):
if self._sock is not None:
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except:
pass
finally:
self._sock = None
def sendMsg(self,msg):
#print 'sending',msg
self._sock.sendto(msg + '\0',self._addr)
def recvMsg(self,event=None,retryCount=None):
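        # Messages are framed with a trailing NUL ('\0'): keep reading
        # datagrams and appending to the buffer until a full frame arrives,
        # then stash anything after the delimiter in self._storedMsg so the
        # next call can pick it up.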
msg = self._storedMsg
while ('\0' not in msg):
if (event is not None) and event.isSet():
return None
newMsg = ''
try:
newMsg,self._addr = self._sock.recvfrom(8192)
msg += newMsg
except socket.error:
#time.sleep(0.1)
pass
if len(newMsg) == 0:
if (retryCount is None) or (retryCount <= 0):
raise TimeoutError
else:
retryCount -= 1
print '[Trainer] waiting for message, retry =', retryCount
time.sleep(0.3)
#raise ValueError('Error while receiving message')
(msg,sep,rest) = msg.partition('\0')
self._storedMsg = rest
#print 'received',msg
return msg
def send(self,obj):
self.sendMsg(pickle.dumps(obj))
def recv(self,event=None):
msg = self.recvMsg(event)
if msg is None:
return None
return self.convertMsg(msg)
def convertMsg(self,msg):
return pickle.loads(msg)
class ClientCommunicator(Communicator):
def initialize(self):
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.settimeout(5)
except:
print >>sys.stderr,'Error creating socket'
raise
| 0.025571 |
#!/usr/bin/python
import sys
import argparse
import os
import re
'''
A simple script to create lower-resolution Android drawables from higher-resolution ones.
For example, given a batch of -xhdpi images, you can generate -hdpi and -mdpi images.
This makes it possible to only export highest-resolution artwork from image authoring tools, and
automate the rest.
Usage:
drawable_convert.py -d res/drawable-mdpi -d res/drawable-hdpi res/drawable-xhdpi-v14/select*.png
... will take select*.png from xhdpi and place versions into mdpi and hdpi folders.
Correct resize ratios are computed based on resource directory names.
Actual scaling is done by ImageMagick's convert command.
'''
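# Worked example (illustrative paths): converting an image from
# res/drawable-xhdpi (320 dpi, see Converter.getDpi below) into
# res/drawable-hdpi (240 dpi) gives factor = 240 * 100 / 320 = 75, so the
# generated command is roughly:
#   convert -verbose "res/drawable-xhdpi/icon.png" -resize "75%x75%" "res/drawable-hdpi/icon.png"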
class Converter:
def __init__(self, dstList, image_magic_path):
# print u'Dst list: {0}'.format(dstList)
self.mDstList = dstList
self.image_magic_path = image_magic_path;
def convert(self, src):
for dstpath in self.mDstList:
(srcpath, srcname) = os.path.split(src)
dst = os.path.join(dstpath, srcname)
self.convertOne(src, dst)
def convertOne(self, src, dst):
# print u'\n*****\n{0} to {1}\n*****\n'.format(src, dst)
'''
Determine relative density
'''
srcDpi = self.getDpi(src)
dstDpi = self.getDpi(dst)
if srcDpi < dstDpi:
print u'NOT converting from {0}dpi to {1}dpi'.format(srcDpi, dstDpi)
else:
factor = dstDpi*100/srcDpi
print u'Converting from {0}dpi to {1}dpi, {2}%'.format(srcDpi, dstDpi, factor)
image_magic_path = self.image_magic_path + '\\\\';
cmd = u'{0}convert -verbose "{1}" -resize "{3}%x{3}%" "{2}"'.format(image_magic_path, src, dst, factor)
os.system(cmd)
def getDpi(self, f):
p = os.path.dirname(f)
if re.match('.*drawable.*\\-mdpi.*', p):
return 160
elif re.match('.*drawable.*\\-hdpi.*', p):
return 240
elif re.match('.*drawable.*\\-xhdpi.*', p):
return 320
elif re.match('.*drawable.*\\-xxhdpi.*', p):
return 480
        elif re.match('.*drawable.*\\-xxxhdpi.*', p):
return 640
else:
            raise ValueError(u'Cannot determine density for {0}'.format(p))
if __name__ == "__main__":
'''
Parse command line arguments
'''
parser = argparse.ArgumentParser(description='Converts drawable resources in Android applications')
parser.add_argument('-i', dest='image_magic_path', action='append', required=True, help='ImageMagick root directory')
parser.add_argument('-d', dest='DST', action='append', required=True, help='destination directory')
parser.add_argument('src', nargs='+', help='files to convert (one or more)')
args = parser.parse_args()
cv = Converter(args.DST, args.image_magic_path[0])
for src in args.src:
cv.convert(src)
'''
if [ $# -lt 1 ] ; then
echo "Usage: $0 file_list"
exit 1
fi
for f in $*
do
echo "File: ${f}"
convert -verbose "${f}" -resize "75%x75%" "../drawable-hdpi/${f}"
convert -verbose "${f}" -resize "50%x50%" "../drawable-mdpi/${f}"
done
''' | 0.036606 |
"""Viessmann ViCare sensor device."""
import logging
import requests
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_POWER,
BinarySensorDevice,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
CONF_GETTER = "getter"
SENSOR_CIRCULATION_PUMP_ACTIVE = "circulationpump_active"
SENSOR_BURNER_ACTIVE = "burner_active"
SENSOR_COMPRESSOR_ACTIVE = "compressor_active"
SENSOR_TYPES = {
SENSOR_CIRCULATION_PUMP_ACTIVE: {
CONF_NAME: "Circulation pump active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getCirculationPumpActive(),
},
# gas sensors
SENSOR_BURNER_ACTIVE: {
CONF_NAME: "Burner active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getBurnerActive(),
},
# heatpump sensors
SENSOR_COMPRESSOR_ACTIVE: {
CONF_NAME: "Compressor active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getCompressorActive(),
},
}
SENSORS_GENERIC = [SENSOR_CIRCULATION_PUMP_ACTIVE]
SENSORS_BY_HEATINGTYPE = {
HeatingType.gas: [SENSOR_BURNER_ACTIVE],
HeatingType.heatpump: [SENSOR_COMPRESSOR_ACTIVE],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare sensor devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
sensors = SENSORS_GENERIC.copy()
if heating_type != HeatingType.generic:
sensors.extend(SENSORS_BY_HEATINGTYPE[heating_type])
add_entities(
[
ViCareBinarySensor(
hass.data[VICARE_DOMAIN][VICARE_NAME], vicare_api, sensor
)
for sensor in sensors
]
)
class ViCareBinarySensor(BinarySensorDevice):
"""Representation of a ViCare sensor."""
def __init__(self, name, api, sensor_type):
"""Initialize the sensor."""
self._sensor = SENSOR_TYPES[sensor_type]
self._name = f"{name} {self._sensor[CONF_NAME]}"
self._api = api
self._sensor_type = sensor_type
self._state = None
@property
def available(self):
"""Return True if entity is available."""
return self._state is not None and self._state != PYVICARE_ERROR
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.service.id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor[CONF_DEVICE_CLASS]
def update(self):
"""Update state of sensor."""
try:
self._state = self._sensor[CONF_GETTER](self._api)
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
| 0 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import mail
import handle_incoming_email
def test_handle_bounced_email(testbed):
handler = handle_incoming_email.LogSenderHandler()
handler.request = 'request'
message = mail.EmailMessage(
sender='support@example.com',
subject='Your account has been approved')
message.to = 'Albert Johnson <Albert.Johnson@example.com>'
message.body = 'Dear Albert.'
handler.receive(message)
| 0 |
"""
open-dobot driver.
Implements driver to open firmware that controls Dobot FPGA.
Abstracts communication protocol, CCITT CRC and commands sent to FPGA.
Find firmware and SDK at https://github.com/maxosprojects/open-dobot
Author: maxosprojects (March 18 2016)
Additional Authors: <put your name here>
Version: 1.2.2
License: MIT
"""
import serial
import threading
import time
from serial import SerialException
import math
import sys
# Workaround to support Python 2/3
if sys.version_info > (3,):
long = int
_max_trys = 1
CMD_READY = 0
CMD_STEPS = 1
CMD_EXEC_QUEUE = 2
CMD_GET_ACCELS = 3
CMD_SWITCH_TO_ACCEL_REPORT_MODE = 4
CMD_CALIBRATE_JOINT = 5
CMD_EMERGENCY_STOP = 6
CMD_SET_COUNTERS = 7
CMD_GET_COUNTERS = 8
CMD_LASER_ON = 9
CMD_PUMP_ON = 10
CMD_VALVE_ON = 11
CMD_BOARD_VERSION = 12
piToDegrees = 180.0 / math.pi
halfPi = math.pi / 2.0
class DobotDriver:
def __init__(self, comport, rate=115200):
self._lock = threading.Lock()
self._comport = comport
self._rate = rate
self._port = None
self._crc = 0xffff
self.FPGA = 0
self.RAMPS = 1
self._toolRotation = 0
self._gripper = 480
def Open(self, timeout=0.025):
try:
self._port = serial_aggregator(serial.Serial(self._comport, baudrate=self._rate, timeout=timeout, interCharTimeout=0.1))
# self._port = serial.Serial(self._comport, baudrate=self._rate, timeout=timeout, interCharTimeout=0.1)
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2)
# s.connect(("localhost", 5555))
# self._port = serial2socket(s)
# Have to wait for Arduino initialization to finish, or else it doesn't boot.
time.sleep(2)
except SerialException as e:
print(e)
exit(1)
ret = (0, 0)
i = 200
while not ret[0] and i > 0:
ret = self.BoardVersion()
i -= 1
if i == 0:
print("Cannot get board version. Giving up")
exit(1)
self._ramps = bool(ret[1])
if self._ramps:
print("Board: RAMPS")
else:
print("Board: FPGA")
if self._ramps:
self._stepCoeff = 20000
self._stopSeq = self.reverseBits32(0)
else:
self._stepCoeff = 500000
self._stopSeq = 0x0242f000
self._stepCoeffOver2 = self._stepCoeff / 2
self._freqCoeff = self._stepCoeff * 25
def Close(self):
self._port.close()
def _crc_clear(self):
self._crc = 0xffff
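    # The two methods below implement the CRC-16-CCITT checksum mentioned in
    # the module docstring (polynomial 0x1021, seeded with 0xffff in
    # _crc_clear). Every byte written to or read from the port is folded into
    # self._crc, so _writechecksum()/_readchecksumword() can append and verify
    # a 16-bit checksum for each packet.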
def _crc_update(self, data):
self._crc = self._crc ^ (data << 8)
for bit in range(0, 8):
if (self._crc&0x8000) == 0x8000:
self._crc = ((self._crc << 1) ^ 0x1021)
else:
self._crc = self._crc << 1
def _readchecksumword(self):
data = self._port.read(2)
if len(data)==2:
arr = bytearray(data)
crc = (arr[0]<<8) | arr[1]
return (1,crc)
return (0,0)
def _readbyte(self):
data = self._port.read(1)
if len(data):
val = bytearray(data)[0]
self._crc_update(val)
return (1,val)
return (0,0)
def _readword(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
return (1,val1[1]<<8|val2[1])
return (0,0)
def _readsword(self):
val = self._readword()
if val[0]:
if val[1]&0x8000:
return (val[0],val[1]-0x10000)
return (val[0],val[1])
return (0,0)
def _readlong(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
val3 = self._readbyte()
if val3[0]:
val4 = self._readbyte()
if val4[0]:
return (1,val1[1]<<24|val2[1]<<16|val3[1]<<8|val4[1])
return (0,0)
def _readslong(self):
val = self._readlong()
if val[0]:
if val[1]&0x80000000:
return (val[0],val[1]-0x100000000)
return (val[0],val[1])
return (0,0)
def _read1(self, cmd):
return self._read(cmd, [self._readbyte])
def _read22(self, cmd):
return self._read(cmd, [self._readword,
self._readword])
def _reads22(self, cmd):
return self._read(cmd, [self._readsword,
self._readsword])
def _reads222222(self, cmd):
return self._read(cmd, [self._readsword,
self._readsword,
self._readsword,
self._readsword,
self._readsword,
self._readsword])
def _read4(self, cmd):
return self._read(cmd, [self._readlong])
def _read41(self, cmd):
return self._read(cmd, [self._readslong,
self._readbyte])
def _reads444(self, cmd):
return self._read(cmd, [self._readslong,
self._readslong,
self._readslong])
def _read(self, cmd, read_commands=list()):
trys = _max_trys
while trys:
self._sendcommand(cmd)
self._writechecksum()
self._port.send()
ret = [1]
for c in read_commands:
val = c()
if not val[0]:
return tuple([0] * (len(read_commands) + 1))
ret.append(val[1])
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF == crc[1]&0xFFFF:
return tuple(ret)
trys -= 1
return tuple([0] * (len(read_commands) + 1))
def _writebyte(self, val):
self._crc_update(val&0xFF)
self._port.write(bytearray([val&0xFF]))
def _writeword(self, val):
self._writebyte((val>>8)&0xFF)
self._writebyte(val&0xFF)
def _writelong(self, val):
self._writebyte((val>>24)&0xFF)
self._writebyte((val>>16)&0xFF)
self._writebyte((val>>8)&0xFF)
self._writebyte(val&0xFF)
def _writechecksum(self):
self._port.write(bytearray([(self._crc>>8)&0xFF]))
self._port.write(bytearray([self._crc&0xFF]))
def _sendcommand(self, command):
self._crc_clear()
self._writebyte(command)
def _write(self, cmd, write_commands=list()):
trys = _max_trys
while trys:
self._sendcommand(cmd)
for c in write_commands:
c[0](c[1])
self._writechecksum()
self._port.send()
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF == crc[1]&0xFFFF:
return True
trys -= 1
return False
def _write0(self, cmd):
return self._write(cmd)
def _write1(self, cmd, val):
return self._write(cmd, [(self._writebyte, val)])
def _write2(self, cmd, val):
return self._write(cmd, [(self._writeword, val)])
def _write4(self, cmd, val):
return self._write(cmd, [(self._writelong, val)])
def _write14(self, cmd, val1, val2):
return self._write(cmd, [(self._writebyte, val1), (self._writelong, val2)])
def _write14411(self, cmd, val1, val2, val3, val4):
return self._write(cmd, [(self._writelong, val1),
(self._writelong, val2),
(self._writebyte, val3),
(self._writebyte, val4)])
def _write444(self, cmd, val1, val2, val3):
return self._write(cmd, [(self._writelong, val1),
(self._writelong, val2),
(self._writelong, val3)])
def _write_read(self, cmd, write_commands):
tries = _max_trys
while tries:
self._sendcommand(cmd)
for c in write_commands:
c[0](c[1])
self._writechecksum()
self._port.send()
ret = self._readbyte()
if ret[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
# raise Exception('crc differs', self._crc, crc)
return (0, 0)
return (1, ret[1])
tries -= 1
return (0, 0)
def _write1read1(self, cmd, val1):
return self._write_read(cmd, [(self._writebyte, val1)])
def _write11121read1(self, cmd, val1, val2, val3, val4, val5):
return self._write_read(cmd, [(self._writebyte, val1),
(self._writebyte, val2),
(self._writebyte, val3),
(self._writeword, val4),
(self._writebyte, val5)])
def _write14441read1(self, cmd, val1, val2, val3, val4):
return self._write_read(cmd, [(self._writelong, val1),
(self._writelong, val2),
(self._writelong, val3),
(self._writebyte, val4)])
def _write1444122read1(self, cmd, val1, val2, val3, val4, val5, val6):
return self._write_read(cmd, [(self._writelong, val1),
(self._writelong, val2),
(self._writelong, val3),
(self._writebyte, val4),
(self._writeword, val5),
(self._writeword, val6)])
def reverseBits32(self, val):
### return long(bin(val&0xFFFFFFFF)[:1:-1], 2)
# return int('{0:032b}'.format(val)[::-1], 2)
# Not reversing bits in bytes any more as SPI switched to LSB first.
# But still need to reverse bytes places.
return ((val & 0x000000FF) << 24) | ((val & 0x0000FF00) << 8) | ((val & 0x00FF0000) >> 8) | ((val & 0xFF000000) >> 24)
def reverseBits16(self, val):
# Not reversing bits in bytes any more as SPI switched to LSB first.
# But still need to reverse bytes places.
return ((val & 0x00FF) << 8) | ((val & 0xFF00) >> 8)
def freqToCmdVal(self, freq):
'''
Converts stepping frequency into a command value that dobot takes.
'''
if freq == 0:
return self._stopSeq
return self.reverseBits32(long((self._freqCoeff) / freq))
def stepsToCmdVal(self, steps):
'''
Converts number of steps for dobot to do in 20ms into a command value that dobot
takes to set the stepping frequency.
'''
if steps == 0:
return self._stopSeq
return self.reverseBits32(long(self._stepCoeff / steps))
def stepsToCmdValFloat(self, steps):
'''
Converts number of steps for dobot to do in 20ms into a command value that dobot
takes to set the stepping frequency.
@param steps - float number of steps; float to minimize error and have finer control
        @return tuple (command_value, leftover), where leftover is the fractional steps that don't fit
into 20ms interval a command runs for
'''
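        # Worked example (assuming the FPGA board, i.e. _stepCoeff == 500000):
        # stepsToCmdValFloat(3.7) truncates to 3 steps, val = 500000 / 3 = 166666,
        # and returns (reverseBits32(166666), 3, ~0.7); the ~0.7 leftover can be
        # carried over into the next 20ms command.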
if abs(steps) < 0.01:
return (self._stopSeq, 0, 0.0)
# "round" makes leftover negative in certain cases and causes backlash compensation to oscillate.
# actualSteps = long(round(steps))
actualSteps = long(steps)
if actualSteps == 0:
return (self._stopSeq, 0, steps)
val = long(self._stepCoeff / actualSteps)
actualSteps = long(self._stepCoeff / val)
if val == 0:
return (self._stopSeq, 0, steps)
return (self.reverseBits32(val), actualSteps, steps - actualSteps)
def accelToAngle(self, val, offset):
return self.accelToRadians(val, offset) * piToDegrees
def accel3DXToAngle(self, x, y, z):
return self.accel3DXToRadians(x, y, z) * piToDegrees
def accelToRadians(self, val, offset):
try:
return math.asin(float(val - offset) / 493.56)
except ValueError:
return halfPi
def accel3DXToRadians(self, x, y, z):
try:
xf = float(x)
yf = float(y)
zf = float(z)
return math.atan2(xf, math.sqrt(yf * yf + zf * zf))
except ValueError:
return halfPi
def CalibrateJoint(self, joint, forwardCommand, backwardCommand, direction, pin, pinMode, pullup):
'''
Initiates joint calibration procedure using a limit switch/photointerrupter. Effective immediately.
Current command buffer is cleared.
        Cancel the procedure by issuing EmergencyStop() if necessary.
@param joint - which joint to calibrate: 1-3
@param forwardCommand - command to send to the joint when moving forward (towards limit switch);
use freqToCmdVal()
        @param backwardCommand - command to send to the joint after hitting the limit switch;
use freqToCmdVal()
@param direction - direction to move joint towards limit switch/photointerrupter: 0-1
@param pin - firmware internal pin reference number that limit switch is connected to;
refer to dobot.h -> calibrationPins
@param pinMode - limit switch/photointerrupter normal LOW = 0, normal HIGH = 1
@param pullup - enable pullup on the pin = 1, disable = 0
        @return True if command successfully received, False otherwise.
'''
        if joint < 1 or joint > 3:
            return False
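        # The control byte packs the parameters as: bits 0-1 = joint - 1,
        # bit 2 = direction, bit 3 = pullup, bit 4 = pinMode. For example
        # joint=2, direction=1, pullup=1, pinMode=0 gives 0b01101 (0x0D).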
control = ((pinMode & 0x01) << 4) | ((pullup & 0x01) << 3) | ((direction & 0x01) << 2) | ((joint - 1) & 0x03)
self._lock.acquire()
result = self._write14411(CMD_CALIBRATE_JOINT, forwardCommand, backwardCommand, pin, control)
self._lock.release()
return result
def EmergencyStop(self):
'''
Stops the arm in case of emergency. Clears command buffer and cancels calibration procedure.
        @return True if command successfully received, False otherwise.
'''
self._lock.acquire()
result = self._write0(CMD_EMERGENCY_STOP)
self._lock.release()
return result
def Steps(self, j1, j2, j3, j1dir, j2dir, j3dir, servoGrab, servoRot, deferred=False):
'''
Adds a command to the controller's queue to execute on FPGA.
@param j1 - joint1 subcommand
@param j2 - joint2 subcommand
@param j3 - joint3 subcommand
@param j1dir - direction for joint1: 0-1
@param j2dir - direction for joint2: 0-1
@param j3dir - direction for joint3: 0-1
@param servoGrab - servoGrab position (gripper): 0x00d0-0x01e0 (or 208-480 decimal)
@param servoRot - servoRot position (tool rotation): 0x0000-0x0400 (or 0-1024 decimal)
@param deferred - defer execution of this command and all commands issued after this until
the "ExecQueue" command is issued. Currently ignored.
@return Returns a tuple where the first element tells whether the command has been successfully
received (1 - received, 0 - timed out), and the second element tells whether the command was added
to the controller's command queue (1 - added, 0 - not added, as the queue was full).
'''
control = (j1dir & 0x01) | ((j2dir & 0x01) << 1) | ((j3dir & 0x01) << 2);
# if deferred:
# control |= 0x01
self._lock.acquire()
if servoGrab > 480:
servoGrab = 480
elif servoGrab < 208:
servoGrab = 208
if servoRot > 1024:
servoRot = 1024
elif servoRot < 0:
servoRot = 0
if self._ramps:
servoRot *= 2;
servoRot += 2000;
servoGrab *= 2;
servoGrab += 2000;
self._toolRotation = servoRot
self._gripper = servoGrab
result = self._write1444122read1(CMD_STEPS, j1, j2, j3, control, self.reverseBits16(servoGrab), self.reverseBits16(servoRot))
self._lock.release()
return result
def ExecQueue(self):
'''
Executes deferred commands.
'''
raise NotImplementedError()
self._lock.acquire()
result = self._write0(CMD_EXEC_QUEUE)
self._lock.release()
return result
def GetAccelerometers(self):
'''
        Returns data acquired from the accelerometers at power on.
        The firmware performs 17 reads (FPGA version) or 20 reads (RAMPS version)
        of each accelerometer and averages the result before returning it here.
'''
self._lock.acquire()
result = self._reads222222(CMD_GET_ACCELS)
self._lock.release()
return result
def GetCounters(self):
'''
'''
self._lock.acquire()
result = self._reads444(CMD_GET_COUNTERS)
self._lock.release()
return result
def SetCounters(self, base, rear, fore):
'''
'''
self._lock.acquire()
result = self._write444(CMD_SET_COUNTERS, base, rear, fore)
self._lock.release()
return result
def SwitchToAccelerometerReportMode(self):
'''
        Apparently the following won't work because of the way dobot was designed
and limitations of AVR - cannot switch SPI from Slave to Master back.
So, as a workaround, just hold the "Sensor Calibration" button and start your
app. Arduino is reset on serial port connection and it takes about 2 seconds
for it to start. After that you can release the button. That switches dobot to
accelerometer reporting mode. To move the arm turn off the power switch.
This function is left just in case a proper fix comes up.
Switches dobot to accelerometer report mode.
Dobot must be reset to enter normal mode after issuing this command.
'''
raise NotImplementedError('Read function description for more info')
self._lock.acquire()
result = self._write_read(CMD_SWITCH_TO_ACCEL_REPORT_MODE, [])
self._lock.release()
return result
def LaserOn(self, on):
'''
@return Returns a tuple where the first element tells whether the command has been successfully
        received (1 - received, 0 - timed out), and the second element tells whether the command was added
to the controller's command queue (1 - added, 0 - not added, as the queue was full).
'''
self._lock.acquire()
if on:
result = self._write1read1(CMD_LASER_ON, 1)
else:
result = self._write1read1(CMD_LASER_ON, 0)
self._lock.release()
return result
def PumpOn(self, on):
'''
        Turn the pump motor on/off. If you want to actually grip something you also need
        to turn the valve on; otherwise the air just flows through it.
@return Returns a tuple where the first element tells whether the command has been successfully
        received (1 - received, 0 - timed out), and the second element tells whether the command was added
to the controller's command queue (1 - added, 0 - not added, as the queue was full).
'''
self._lock.acquire()
if on:
result = self._write1read1(CMD_PUMP_ON, 1)
else:
result = self._write1read1(CMD_PUMP_ON, 0)
self._lock.release()
return result
def ValveOn(self, on):
'''
        Does not do much by itself but works in tandem with the pump: when the pump is on,
        turning the valve on will allow you to grab things; turn it off to release the air pressure.
@return Returns a tuple where the first element tells whether the command has been successfully
        received (1 - received, 0 - timed out), and the second element tells whether the command was added
to the controller's command queue (1 - added, 0 - not added, as the queue was full).
'''
self._lock.acquire()
if on:
result = self._write1read1(CMD_VALVE_ON, 1)
else:
result = self._write1read1(CMD_VALVE_ON, 0)
self._lock.release()
return result
def Wait(self, waitTime):
'''
Makes the arm wait in current position for the specified period of time. The wait period is specified
in seconds and can be fractions of seconds.
The resolution of this command is up to 20ms.
In order to make the arm wait a number of commands is issued to do nothing. Each command takes 20ms
to execute by the arm.
'''
iterations = int(waitTime * 50)
for i in range(iterations):
ret = (0, 0)
# Keep sending until buffered
while not ret[0] or not ret[1]:
ret = self.Steps(0, 0, 0, 0, 0, 0, self._gripper, self._toolRotation)
def BoardVersion(self):
'''
Checks board version.
Returns (success, version), where version=0 is FPGA and version=1 is RAMPS.
'''
self._lock.acquire()
result = self._read1(CMD_BOARD_VERSION)
self._lock.release()
return result
def isFpga(self):
return self._ramps == self.FPGA
def isRamps(self):
return self._ramps == self.RAMPS
def isReady(self):
'''
Checks whether the controller is up and running.
'''
self._lock.acquire()
result = self._read1(CMD_READY)
self._lock.release()
# Check for magic number.
# return [result[0], result[1] == 0x40]
return result
def reset(self):
# self._lock.acquire()
i = 0
while i < 5:
self._port.read(1)
i += 1
self._crc_clear()
# self._lock.release()
"""
open-dobot serial aggregator.
This is a workaround to send data in bursts on systems that have slow API
used by pyserial (e.g. Windows).
"""
class serial_aggregator:
def __init__(self, ser):
self._ser = ser
self._buf = bytearray()
def write(self, data):
self._buf.extend(data)
def read(self, size):
return self._ser.read(size)
def flushInput(self):
pass
def flush(self):
pass
def send(self):
self._ser.write(self._buf)
self._buf = bytearray()
def close(self):
self._ser.close()
| 0.033866 |
"""Defines the Lamina class for use with mechpy.
TODO:
-----
* [ ] add error checking for attribute types
* [ ] add unyt units to input (may allow for removal of `ftype`)
* [ ] add a property for the compliance matrix Sk
* [ ] add failure indices
"""
from numpy import array, zeros
from numpy.linalg import inv
from math import isnan, cos, sin, radians
from mechpy._cogs import ms
class Lamina:
"""
An individual lamina.
The ``Lamina`` class exists for material property assignment. To consider
loading, thermal effects, or deflection, see the ``Ply`` and ``Laminate``
classes.
Attributes
----------
t : float
lamina thickness
E1, E2, G12 : float
elastic moduli in the lamina 1-, 2-, and 12-directions
nu12 : float
Poisson's ratio in the 12-plane
a11, a22 : float
coefficients of thermal expansion (CTE) in the lamina 1- and 2-
directions
    b11, b22 : float
        coefficients of hygroscopic expansion in the lamina 1- and 2-
        directions
F1, F2, F12 : float
lamina strengths in each direction
ftype : {'strain', 'stress'}
lamina strength type
"""
__name__ = 'Lamina'
__slots__ = ['t', 'E1', 'E2', 'nu12', 'G12', 'a11', 'a22', 'b11', 'b22',
'F1', 'F2', 'F12', 'ftype']
def __init__(self, t, E1, E2, nu12, G12, a11, a22, b11, b22, F1=1, F2=1,
F12=1, ftype='strain'):
self.t = t
self.E1 = E1
self.E2 = E2
self.nu12 = nu12
self.G12 = G12
self.a11 = a11
self.a22 = a22
self.b11 = b11
self.b22 = b22
self.F1 = F1
self.F2 = F2
self.F12 = F12
self.ftype = ftype
@property
def is_fully_defined(self):
"""Check if lamina is fully defined with proper attr data types."""
if self.t == 0 or isnan(self.t):
raise TypeError("lamina.tk must be a non-zero number")
elif self.E1 == 0 or isnan(self.E1):
raise TypeError("lamina.E1 must be a non-zero number")
elif self.E2 == 0 or isnan(self.E2):
raise TypeError("lamina.E2 must be a non-zero number")
elif (self.nu12 >= 1) or isnan(self.nu12):
raise TypeError("""lamina.nu12 must be less than or equal to 1""")
elif self.G12 == 0 or isnan(self.G12):
raise TypeError("lamina.G12 must be a non-zero number")
else:
return True
class Ply(Lamina):
"""
A Ply for use in a Laminate.
Extended ``Lamina``. While the ``Lamina`` class exists for defining
material properties, the ``Ply`` class is intended to extend its
functionality further for considering loading and thermal effects. ``Ply``
instances may exist on their own, but they are intended to function as
constituent items of a ``Laminate``.
| **Attribute Types**
| Not all attributes of are able to be directly modified. Attributes are
divided into 'base' and 'calculated' values categories and are
prescribed by the class attributes ``__baseattr__`` and ``__calcattr__``,
respectively. Base attributes may be set freely, while calculated
attributes are 'locked' and updated based on the values of base
attributes.
| **Assumptions**
| The following assumptions apply to all Ply objects:
* Ply z, zk, and zk1 are all measured assuming that positive is upward,
TOWARD the top surface of the laminate.
* Theta is in degrees, measured from the laminate x-axis to the lamina
1- axis.
Attributes
----------
laminate : Laminate
the Laminate object the Ply belongs to
t, E1, E2, nu12, G12, a11, a22, b11, F1, F2, F12, ftype
See ``Lamina`` attribute definitions
theta : float
the angle the Ply is oriented in w.r.t. the Laminate coordinate system
Q : 3x1 numpy.ndarray
Ply stiffness matrix in the Ply coordinate system
Qbar : 3x1 numpy.ndarray
Ply stiffness matrix in the Laminate coordinate system
T : 3x1 numpy.ndarray
Ply transformation matrix
Tinv : 3x1 numpy.ndarray
Inverse of the Ply transformation matrix
e_m, e_t, e_h : 3x1 numpy.ndarray
Ply strains due to mechanical, thermal, and hygroscopic loading
s_m, s_t, s_h : 3x1 numpy.ndarray
Ply stresses due to mechanical, thermal, and hygroscopic loading
z : float
Vertical location of the ply midplane in the laminate
failure_theory : {'strain', 'stress', 'Tsai-Hill'}
The failure theory for calculating the failure index
failure_index : float
the failure index
"""
__name__ = 'Ply'
__baseattr__ = Lamina.__slots__ + ['theta', 'z', 'e_m', 's_m']
__calcattr__ = ['Q', 'Qbar', 'T', 'Tinv', 'e_t', 'e_h', 's_t', 's_h',
'laminate', 'failure_theory', 'failure_index']
__slots__ = __baseattr__ + __calcattr__ + ['__locked']
def __unlock(func):
"""Decorate methods to unlock attributes.
Parameters
----------
func : bool
The function to unlock.
Returns
-------
function
An unlocked function.
"""
def wrapper(self, *args, **kwargs):
super().__setattr__('_Ply__locked', False)
func(self, *args, **kwargs)
super().__setattr__('_Ply__locked', True)
return wrapper
@__unlock
def __init__(self, laminate, t, theta, E1, E2, nu12, G12, a11, a22, b11,
b22, F1=1, F2=1, F12=1, ftype='strain',
failure_theory='strain'):
"""Extend ``__init__`` to account for Ply-only attributes."""
self.laminate = laminate
self.z = 0
self.theta = theta
self.e_m = zeros((3, 1))
self.e_t = zeros((3, 1))
self.e_h = zeros((3, 1))
self.s_m = zeros((3, 1))
self.s_t = zeros((3, 1))
self.s_h = zeros((3, 1))
self.failure_theory = failure_theory
self.failure_index = 0
        super().__init__(t, E1, E2, nu12, G12, a11, a22, b11, b22, F1, F2, F12,
                         ftype)
self.__update()
def __setattr__(self, attr, val):
"""Extend ``__setattr__`` to protect calculated attributes."""
if self.__locked:
            # update laminate after updated properties are set
if attr in self.__baseattr__:
super().__setattr__(attr, val)
self.__update()
# don't set protected values
elif attr in self.__calcattr__:
raise AttributeError(self.__name__ + ".%s" % attr
+ " is a derived value and cannot be set")
# update the laminate
if self.laminate:
self.laminate._Laminate__update()
else:
super().__setattr__(attr, val)
@__unlock
def __update(self):
"""Update calculated attributes."""
# on-axis reduced stiffness matrix, Q
# NASA-RP-1351, Eq (15)
nu21 = self.nu12 * self.E2 / self.E1 # Jones, Eq (2.67)
q11 = self.E1 / (1 - self.nu12 * nu21)
q12 = self.nu12 * self.E2 / (1 - self.nu12 * nu21)
q22 = self.E2 / (1 - self.nu12 * nu21)
q66 = self.G12
self.Q = array([[q11, q12, 0], [q12, q22, 0], [0, 0, q66]])
# the transformation matrix and its inverse
# create intermediate trig terms
m = cos(radians(self.theta))
n = sin(radians(self.theta))
# create transformation matrix and inverse
self.T = array([[m**2, n**2, 2 * m * n],
[n**2, m**2, -2 * m * n],
[-m * n, m * n, m**2 - n**2]])
self.Tinv = inv(self.T)
# the transformed reduced stiffness matrix (laminate coordinate system)
# Jones, Eq (2.84)
self.Qbar = self.Tinv @ self.Q @ self.Tinv.T
# thermal and hygroscopic strains in laminate and lamina c-systems
# NASA-RP-1351 Eq (90), (91), and (95)
self.e_t = array([[self.a11], [self.a22], [0]]) * self.laminate.dT
self.e_h = array([[self.b11], [self.b22], [0]]) * self.laminate.dM
# thermal and hygroscopic stresses in laminate and lamina c-systems
# NASA-RP-1351 Eq (90), (91), and (95)
self.s_t = self.Q @ self.e_t
self.s_h = self.Q @ self.e_h
# calculate failure index
self.failure_index = self.calc_failure_index(self.failure_theory,
self.F1,
self.F2,
self.F12)
@classmethod
def from_lamina(cls, lamina, laminate, theta):
"""Create a new Ply object from a Lamina object.
Parameters
----------
lamina : Lamina
``Lamina`` from which to create ``Ply``
laminate : Laminate
``Laminate`` object the ``Ply`` belongs to
theta : float
``Ply`` orientation w.r.t. the ``Laminate`` coordinate system
Returns
-------
``Ply`` object
"""
return cls(laminate=laminate,
theta=theta,
t=lamina.t,
E1=lamina.E1,
E2=lamina.E2,
nu12=lamina.nu12,
G12=lamina.G12,
a11=lamina.a11,
a22=lamina.a22,
b11=lamina.b11,
b22=lamina.b22,
F1=lamina.F1,
F2=lamina.F2,
F12=lamina.F12,
ftype=lamina.ftype)
@property
def zk(self):
"""The vertical location of the lamina's top plane."""
return self.z + self.t / 2
@property
def zk1(self):
"""The vertical location of the lamina's bottom plane."""
return self.z - self.t / 2
@zk1.setter
def zk1(self, new_zk1):
self.z = new_zk1 + self.t / 2
@property
def e(self):
"""Total strain."""
return self.e_m + self.e_t + self.e_h
@property
def s(self):
"""Total stress."""
return self.s_m + self.s_t + self.s_h
@staticmethod
def calc_failure_index(theory, F1, F2, F12):
r"""Calculate the failure index for a given failure theory.
Parameters
----------
theory : {'strain', 'stress', 'Tsai-Hill'}
failure theory for which to calculate a failure index
F1, F2, F12 : float
strengths of the material
Returns
-------
float
The failure index
Notes
-----
A ply is considered to fail if the failure index for an applied load
        is equal to or greater than one. Failure indices for each failure
theory are calculated according to the following equations per the
BJSFM User Manual [#3]_.
Max strain (``theory='strain'``):
.. math::
\mathrm{FI} = \mathrm{min}
\left( \frac{\varepsilon_1}{F_1},\quad
\frac{\varepsilon_2}{F_2},\quad
\frac{\gamma_{12}}{F_{12}} \right)
Max stress (``theory='stress'``):
.. math::
\mathrm{FI} = \mathrm{min}
            \left( \frac{\sigma_1}{F_1},\quad
            \frac{\sigma_2}{F_2},\quad
\frac{\tau_{12}}{F_{12}} \right)
Tsai-Hill (``theory='Tsai-Hill'``):
.. math::
\mathrm{FI} = \left( \frac{\sigma_1}{F_1} \right)^{2}
+ \left( \frac{\sigma_2}{F_2} \right)^{2}
+ \left( \frac{\tau_{12}}{F_{12}} \right)^{2}
- \frac{\sigma_1 \sigma_2}{F_1^2}
.. note:: Modified Tsai-Wu and Hoffman criteria are not supported
as they require separate strength values for tension and compression
References
----------
.. [#3] Ogonowski, J.M, *Effect of Variances and Manufacturing
Tolerances on the Design Strength and Life of Mechanically Fastened
Composite Joints, Volume 3 - Bolted Joint Stress Field Model
(BJSFM) Computer Program User's Manual*, McDonnell Aircraft
Company, AFWAL-TR-81-3041 VOLUME 3, pp. 8-9, 15 April 1981
"""
@property
def margin(self):
"""The margin of safety."""
return ms(1, self.failure_index)
| 0 |
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.i18n import _, _LI
from cinder import objects
LOG = logging.getLogger(__name__)
def authorize(context, action_name):
action = 'snapshot_actions:%s' % action_name
extensions.extension_authorizer('snapshot', action)(context)
class SnapshotActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotActionsController, self).__init__(*args, **kwargs)
LOG.debug("SnapshotActionsController initialized")
@wsgi.action('os-update_snapshot_status')
def _update_snapshot_status(self, req, id, body):
"""Update database fields related to status of a snapshot.
Intended for creation of snapshots, so snapshot state
must start as 'creating' and be changed to 'available',
'creating', or 'error'.
"""
context = req.environ['cinder.context']
authorize(context, 'update_snapshot_status')
LOG.debug("body: %s", body)
try:
status = body['os-update_snapshot_status']['status']
except KeyError:
msg = _("'status' must be specified.")
raise webob.exc.HTTPBadRequest(explanation=msg)
# Allowed state transitions
status_map = {'creating': ['creating', 'available', 'error'],
'deleting': ['deleting', 'error_deleting']}
current_snapshot = objects.Snapshot.get_by_id(context, id)
if current_snapshot.status not in status_map:
msg = _("Snapshot status %(cur)s not allowed for "
"update_snapshot_status") % {
'cur': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
if status not in status_map[current_snapshot.status]:
msg = _("Provided snapshot status %(provided)s not allowed for "
"snapshot with status %(current)s.") % \
{'provided': status,
'current': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict = {'id': id,
'status': status}
progress = body['os-update_snapshot_status'].get('progress', None)
if progress:
# This is expected to be a string like '73%'
msg = _('progress must be an integer percentage')
try:
integer = int(progress[:-1])
except ValueError:
raise webob.exc.HTTPBadRequest(explanation=msg)
if integer < 0 or integer > 100 or progress[-1] != '%':
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict.update({'progress': progress})
LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
{'id': id, 'dict': update_dict})
current_snapshot.update(update_dict)
current_snapshot.save()
return webob.Response(status_int=202)
class Snapshot_actions(extensions.ExtensionDescriptor):
"""Enable snapshot manager actions."""
name = "SnapshotActions"
alias = "os-snapshot-actions"
namespace = \
"http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1"
updated = "2013-07-16T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotActionsController()
extension = extensions.ControllerExtension(self,
'snapshots',
controller)
return [extension]
| 0 |
class TagsSet:
def __init__(self, src=[], exclude=[]):
if isinstance(src, TagsSet):
self._include = set(src._include)
self._exclude = set(src._exclude)
else:
include = set(src)
exclude = set(exclude)
inter = include & exclude
self._include = include - inter
self._exclude = exclude
@classmethod
def from_string(cls, s, sep='|', neg='~'):
values = s.split(sep)
inc_values = (v for v in values if not v.startswith(neg))
exc_values = (v[len(neg):] for v in values if v.startswith(neg))
return cls(inc_values, exc_values)
def __str__(self):
values = [str(v) for v in self._include]
values += ['~{}'.format(v) for v in self._exclude]
return '|'.join(values)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__qualname__,
repr(self._include),
repr(self._exclude))
def add(self, value):
self._include.add(value)
if value in self._exclude:
self._exclude.remove(value)
def exclude(self, value):
self._exclude.add(value)
if value in self._include:
self._include.remove(value)
def remove(self, value):
if value in self._include:
self._include.remove(value)
if value in self._exclude:
self._exclude.remove(value)
def clear(self):
self._include.clear()
self._exclude.clear()
def __iter__(self):
return iter(self._include)
def iter_exclude(self):
return iter(self._exclude)
def __bool__(self):
return bool(self._include) or bool(self._exclude)
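# Usage sketch (illustrative tag names; set ordering may vary):
#     tags = TagsSet.from_string('python|cli|~draft')
#     sorted(tags)                  # ['cli', 'python']
#     sorted(tags.iter_exclude())   # ['draft']
#     str(tags)                     # e.g. 'python|cli|~draft'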
| 0 |
'''
Created on Jul 3, 2014
@author: jonaswallin
'''
from __future__ import division
import scipy.special as sps
import numpy as np
import time
class ln_gamma_d(object):
"""
lookup table for ln( \Gamma_d)
"""
def __init__(self, d = None):
self.res = {}
self.d = d
def __call__(self, x):
if x not in self.res:
res = sps.multigammaln(x, self.d)
self.res[x] = res
else:
res = self.res[x]
return res
def test_speed(d, sim):
xs = np.ceil(np.random.rand(sim)*100) + d
lg = ln_gamma_d(d=d)
t0 = time.time()
y = [lg(x) for x in xs] # @UnusedVariable
t1 = time.time()
string = "lookup = %.4f msec/sim (sim ,d ) = (%d %d) "%(1000*np.double(t1-t0)/sim, sim, d )
print(string)
t0 = time.time()
yd = [sps.multigammaln(x,d) for x in xs] # @UnusedVariable
t1 = time.time()
string = "sps.multigammaln = %.4f msec/sim (sim ,d ) = (%d %d) "%(1000*np.double(t1-t0)/sim, sim, d )
print(string)
if __name__ == '__main__':
test_speed(2, 10**3)
test_speed(2, 10**4)
test_speed(5, 10**3)
test_speed(5, 10**4)
| 0.054614 |
from __future__ import print_function,division
"""
# Table: tables of data
"""
from lib import *
from col import *
@setting
def TBL(): return o(
bad = r'(["\' \t\r\n]|#.*)',
sep = ",",
skip = "?",
num = '$',
less = '<',
more = '>',
norm = True
)
def readcsv(file, t = None):
for cells in lines(file):
if t:
Row(cells,t)
else:
t = table0(cells)
return t
def lines(file) :
def atom(x):
try : return int(x)
except ValueError:
try : return float(x)
except ValueError : return x
kept = ""
for line in open(file):
now = re.sub(the.TBL.bad,"",line)
kept += now
if kept:
      if now and now[-1] != the.TBL.sep:
yield map(atom, kept.split(the.TBL.sep))
kept = ""
def table0(cells):
t = o(num={}, sym={}, rows=[], all =[],indep={},
less={}, more={}, goal={}, fields=cells)
my= the.TBL
def nump(cell):
for char in [my.num, my.less, my.more]:
if char in cell:
return True
for i,cell in enumerate(cells):
if nump(cell):
hdr = t.num[i] = Num()
else:
hdr = t.sym[i] = Sym()
hdr.txt = cell
hdr.pos = i
t.all += [hdr]
if my.less in cell: t.goal[i] = t.less[i] = hdr
if my.more in cell: t.goal[i] = t.more[i] = hdr
if not i in t.goal: t.indep[i]= hdr
return t
def clone(t):
return table0(t.fields)
def rows2Table(t,rows):
return cells2Table(t,[row.cells for row in rows])
def cells2Table(t,lstOfCells):
t1 = clone(t)
for cells in lstOfCells: Row(cells,t1)
return t1
class Row:
id=0
def __init__(i,cells=[],t=None):
Row.id = i.id = Row.id + 1
i.cells = cells
i._cache = None
if t:
i.table = t
t.rows += [i]
i += cells
def __iadd__(i,cells):
i._cache = None
for hdr in i.table.all:
tmp = cells[hdr.pos]
if tmp != the.TBL.skip:
hdr += tmp
return i
def __getitem__(i,k): return i.cells[k]
def __sub__(i,j) : return dist(i,j,i.table)
def __hash__(i) : return i.id
def __repr__(i) : return '<'+str(i.cells)+'>'
def xy(i,e,w,c,score=False):
a = i - e
b = i - w
x = (a**2 + c**2 - b**2) / (2*c+0.00001)
h = a if a**2 >= x**2 else b
y = (h**2 - x**2)**0.5
s = 0
if score:
if 0 <= x <= c:
s = (b/a)*(e.fromHell() - w.fromHell())/c/y
return o(it=i, a=a, b=b, c=c, x=x, y=y, s=s)
@cache
def fromHell(i) :
n = inc = 0
for hdr in i.table.more.values():
n += 1
x = i[hdr.pos]
inc += hdr.fromHell(x,the.TBL.norm,True)
for hdr in i.table.less.values():
n += 1
x = i[hdr.pos]
inc += hdr.fromHell(x,the.TBL.norm,False)
return inc**0.5 / n**0.5
def furthest(i,rows=None,t=None):
return closest(i,rows,t, last=-10**32, better=gt)
def closest(i,rows=None,t=None, last=10**32, better=lt):
t = t or i.table
rows = rows or t.rows
out = None
for row in rows:
if row.id != i.id:
tmp = dist(i,row,t)
if better(tmp,last):
last,out = tmp,row
return out
def dist(i,j,t):
n = inc = 0
skip = the.TBL.skip
for hdr in t.indep.values():
k = hdr.pos
x, y = i[k], j[k]
if x == y == skip:
continue
n += 1
if k in t.sym:
inc += 0 if x==y else 1
else:
lo, hi = hdr.lo, hdr.hi
mid = (hi - lo)/2
if the.TBL.norm:
if x != skip: x = hdr.norm(x)
if y != skip: y = hdr.norm(y)
lo, hi, mid = 0, 1, 0.5
if x == skip: x = hi if y < mid else lo
if y == skip: y = hi if x < mid else lo
inc += (x-y)**2
return inc**0.5 / (n + 0.000001)**0.5
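# Illustrative usage (an assumption; 'o', 'the', 'Num', 'Sym', 'cache', 'lt' and 'gt'
# are expected to come from the lib/col modules imported above, which are not shown):
#
#   t  = readcsv('data.csv')      # columns tagged with '<' or '>' become goals
#   t2 = clone(t)                 # empty table sharing the same header structure
#   d  = t.rows[0] - t.rows[1]    # Row.__sub__ -> normalized distance between rows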
| 0.048285 |
class Solution(object):
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
_matrix = []
for i in range(n):
m = []
for j in range(n - 1, -1, -1):
m.append(matrix[j][i])
_matrix.append(m)
for i in range(n):
matrix[i] = _matrix[i]
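# Alternative sketch (an addition, not the solution above): the same clockwise
# rotation can be done with O(1) extra space by transposing in place and then
# reversing each row:
#
#   for i in range(n):
#       for j in range(i + 1, n):
#           matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
#   for row in matrix:
#       row.reverse()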
if __name__ == '__main__':
image = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[2, 3, 4, 5, 6, 7, 8, 9, 0, 1],
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2],
[4, 5, 6, 7, 8, 9, 0, 1, 2, 3],
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
[7, 8, 9, 0, 1, 2, 3, 4, 5, 6],
[8, 9, 0, 1, 2, 3, 4, 5, 6, 7],
[9, 0, 1, 2, 3, 4, 5, 6, 7, 8],
]
rimage = [
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[0, 9, 8, 7, 6, 5, 4, 3, 2, 1],
[1, 0, 9, 8, 7, 6, 5, 4, 3, 2],
[2, 1, 0, 9, 8, 7, 6, 5, 4, 3],
[3, 2, 1, 0, 9, 8, 7, 6, 5, 4],
[4, 3, 2, 1, 0, 9, 8, 7, 6, 5],
[5, 4, 3, 2, 1, 0, 9, 8, 7, 6],
[6, 5, 4, 3, 2, 1, 0, 9, 8, 7],
[7, 6, 5, 4, 3, 2, 1, 0, 9, 8],
[8, 7, 6, 5, 4, 3, 2, 1, 0, 9],
]
Solution().rotate(image)
assert image == rimage
| 0 |
# Copyright (C) 2014 The Debsources developers <info@sources.debian.net>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=AUTHORS;hb=HEAD
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=COPYING;hb=HEAD
from __future__ import absolute_import
import os
def read_html(fname):
"""try to read an HTML file and return the contained markup.
Return None if the file doesn't exist or is empty
"""
markup = None
if os.path.isfile(fname):
with open(fname) as f:
markup = f.read().strip()
if not markup:
markup = None
return markup
def read_update_ts(fname):
last_update = None
try:
with open(fname) as f:
last_update = f.readline().strip()
except IOError:
last_update = "unknown"
return last_update
| 0 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ, char, tag
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.GraphicString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
)
class GeneralizedTime(char.VisibleString):
    __doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
)
class UTCTime(char.VisibleString):
    __doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
)
| 0 |
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
def drawellipse(myscreen, ellcenter, a_axis, b_axis):
resolution=50
for n in xrange(0,resolution):
angle1= (float(n)/float(resolution))*2*math.pi
angle2= (float(n+1)/float(resolution))*2*math.pi
x=ellcenter.x + a_axis*math.cos(angle1)
y=ellcenter.y + b_axis*math.sin(angle1)
z=ellcenter.z
x2=ellcenter.x + a_axis*math.cos(angle2)
y2=ellcenter.y + b_axis*math.sin(angle2)
myscreen.addActor( camvtk.Line(p1=(x,y,z),p2=(x2,y2,z), color=camvtk.grey) )
def main(ycoord=0.970, filename="test"):
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(2, 5, 5)
myscreen.camera.SetFocalPoint(1.38,1, 0)
#ycoord = 1.1
a=cam.Point(3,ycoord,-2)
b=cam.Point(-1,ycoord,3)
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)));
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)));
#c=cam.Point(0,0,0.3)
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
#t = cam.Triangle(a,b,c)
cutter = cam.BullCutter(1,0.2)
print cutter
xar = camvtk.Arrow(color=camvtk.red, rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=camvtk.green, rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=camvtk.blue, rotXYZ=(0,-90,0))
myscreen.addActor(zar)
cl = cam.Point(2.1748, 1, 0)
radius1=1
radius2=0.25
tor = camvtk.Toroid(r1=radius1, r2=radius2, center=(cl.x, cl.y, cl.z),rotXYZ=(0,0,0))
#tor.SetWireframe()
#myscreen.addActor(tor)
cyl = camvtk.Cylinder(center=(cl.x,cl.y,cl.z) , radius=radius1, height=2, color=(0,1,1),
rotXYZ=(90,0,0), resolution=50 )
#myscreen.addActor(cyl)
cl_line = camvtk.Line( p1=(cl.x,cl.y,-100),p2=(cl.x,cl.y,+100), color=camvtk.red )
myscreen.addActor(cl_line)
tube = camvtk.Tube(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z),color=(1,1,0))
tube.SetOpacity(0.2)
myscreen.addActor(tube)
# cylindrical-cutter circle at z=0 plane
#cir= camvtk.Circle(radius=radius1, center=(cl.x,cl.y,cl.z), color=camvtk.yellow)
#myscreen.addActor(cir)
#clp = camvtk.Point(center=(cl.x,cl.y,cl.z))
#myscreen.addActor(clp)
# short axis of ellipse = radius2
# long axis of ellipse = radius2/sin(theta)
# where theta is the slope of the line
dx = b.x - a.x
dz = b.z - a.z
#print "dx=", dx
#print "dz=", dz
theta = math.atan(dz/dx) ## dx==0 is special case!! (i.e. vertical lines)
print "theta=",theta
a_axis = abs( radius2/math.sin(theta) )
print "a=", a_axis
# ellipse
#a=2
b_axis=radius2
print "b= ", b_axis
# slice the tube with a plane at z=0 and find the ellipse center
# line is from Point a to b:
# a + t*(b-a)
# find t so that z-component is zero:
# a.z + t( b.z -a.z) = 0
    # t = -a.z / (b.z - a.z)
# so point
tparam = -a.z / (b.z - a.z) # NOTE horizontal lines are a special case!!
ellcenter = a + tparam*(b-a)
print "ellcenter (z=0?) =", ellcenter
# center of the
# ecen_tmp=cam.Point(ellcenter,a.y,0)
#drawellipse(myscreen, ellcenter, a_axis, b_axis)
oe = cam.Ellipse(ellcenter, a_axis, b_axis, radius1)
#oe2 = cam.Ellipse(ellcenter, a_axis, b_axis, 0.05) # to locate text on the outside of the ellipse
nmax=20
#delta=0.05
#td = 1
t = camvtk.Text()
t.SetPos( (myscreen.width-450, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
ytext = "Y: %3.3f" % (ycoord)
t2.SetText(ytext)
t2.SetPos( (50, myscreen.height-150) )
myscreen.addActor( t2)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
epos = cam.Epos()
epos.setS(0,1)
#epos1.setS(0,1)
t.SetText("OpenCAMLib 10.03-beta, " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#p5 = oe.ePoint(epos5)
#pt = oe2.oePoint(epos5)
#print "before= ", epos5.s, " , ", epos5.t
nsteps = cam.Ellipse.solver(oe, cl)
epos = oe.epos1
cce = oe.ePoint(epos)
cle = oe.oePoint(epos)
#epos2 = cam.Epos()
#epos.s = epos.s
#epos.t = epos.t
#print nsteps
print "solution1 at: ", epos.s , " , ", epos.t
#print "solution2 at: ", epos2.s , " , ", epos2.t
print " cl =", cl
print " cle=", cle
xoffset = cl.x - cle.x
print "xoffset= ", xoffset
# we slide xoffset along the x-axis from ellcenter
# to find the correct z-plane
# line is: a + t*(b-a)
# find t so that x-component is ellcenter.x + xoffset
# a.x + t(b.x-a.x) = ellcenter.x + xoffset
# t= (ellcenter.x + xoffset - a.x) / (b.x - a.x)
tparam2 = (ellcenter.x + xoffset - a.x) / (b.x - a.x)
slide = tparam2*(b-a)
print "sliding z-delta: ", slide.z
elc2 = a + tparam2*(b-a)
print "ellcenter2=", elc2
#convlist.append(nsteps)
fe = cam.Ellipse(elc2, a_axis, b_axis, radius1)
fecen = camvtk.Sphere(center=(elc2.x,elc2.y,elc2.z), radius=0.01, color=camvtk.pink)
myscreen.addActor(fecen)
fccp = fe.ePoint(epos)
fclp = fe.oePoint(epos)
print "solver cl=", fclp, " == ", cl, " ??"
fcir= camvtk.Circle(radius=radius1, center=(cl.x,cl.y,elc2.z), color=camvtk.yellow)
myscreen.addActor(fcir)
fccpoint = camvtk.Sphere(center=(fccp.x,fccp.y,fccp.z), radius=0.01, color=camvtk.green)
myscreen.addActor(fccpoint)
fclpoint = camvtk.Sphere(center=(fclp.x,fclp.y,fclp.z), radius=0.01, color=camvtk.blue)
myscreen.addActor(fclpoint)
# line from ellipse center to fcc
myscreen.addActor(camvtk.Line( p1=(elc2.x,elc2.y,elc2.z),p2=(fccp.x,fccp.y,fccp.z), color=camvtk.cyan ))
# the offset normal
myscreen.addActor(camvtk.Line( p1=(fclp.x,fclp.y,fclp.z),p2=(fccp.x,fccp.y,fccp.z), color=camvtk.yellow ))
drawellipse(myscreen, elc2, a_axis, b_axis)
#convtext = "%i" % (nsteps)
#print (pt.x, pt.y, pt.z)
#center=(pt.x, pt.y, pt.z)
#tst = camvtk.Text3D( color=(1,1,1), center=(pt.x, pt.y, 0) ,
#text=convtext, scale=0.02)
#tst.SetCamera(myscreen.camera)
#myscreen.addActor(tst)
colmax=11
colmin=4
nsteps = nsteps - colmin
colmax = colmax - colmin
convcolor=( float(nsteps*nsteps)/(colmax), float((colmax-nsteps))/colmax, 0 )
#esphere = camvtk.Sphere(center=(p5.x,p5.y,0), radius=0.01, color=convcolor)
end_sphere = camvtk.Sphere(center=(cce.x,cce.y,0), radius=0.01, color=camvtk.green)
cl_sphere = camvtk.Sphere(center=(cle.x,cle.y,0), radius=0.01, color=camvtk.pink)
cl_sphere.SetOpacity(0.4)
clcir= camvtk.Circle(radius=radius1, center=(cle.x,cle.y,cle.z), color=camvtk.pink)
myscreen.addActor(clcir)
#myscreen.addActor(esphere)
myscreen.addActor(end_sphere)
myscreen.addActor(cl_sphere)
#myscreen.render()
print "done."
myscreen.render()
lwr.SetFileName(filename)
#lwr.Write()
#raw_input("Press Enter to terminate")
#time.sleep(0.5)
myscreen.iren.Start()
if __name__ == "__main__":
main()
#myscreen.iren.Start()
#raw_input("Press Enter to terminate")
| 0.032749 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import inspect
import itertools
import json
import xmlrpclib
def to_primitive(value, convert_instances=False, level=0):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
for test in nasty:
if test(value):
return unicode(value)
# value of itertools.count doesn't get caught by inspects
# above and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return unicode(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > 3:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if isinstance(value, (list, tuple)):
o = []
for v in value:
o.append(to_primitive(v, convert_instances=convert_instances,
level=level))
return o
elif isinstance(value, dict):
o = {}
for k, v in value.iteritems():
o[k] = to_primitive(v, convert_instances=convert_instances,
level=level)
return o
elif isinstance(value, datetime.datetime):
return str(value)
elif hasattr(value, 'iteritems'):
return to_primitive(dict(value.iteritems()),
convert_instances=convert_instances,
level=level)
elif hasattr(value, '__iter__'):
            return to_primitive(list(value),
                                convert_instances=convert_instances,
                                level=level)
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return to_primitive(value.__dict__,
convert_instances=convert_instances,
level=level + 1)
else:
return value
except TypeError, e:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return unicode(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
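# Illustrative usage (an addition, not part of the original module): dumps() falls
# back to to_primitive() for values the stock json encoder cannot handle, e.g.:
#
#   >>> import datetime
#   >>> dumps({'when': datetime.datetime(2013, 1, 1)})
#   '{"when": "2013-01-01 00:00:00"}'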
| 0 |
import math
import scipy.interpolate as intrp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
font = {'family' : 'normal',
'size' : 24}
rc('font', **font)
### The function
def f(t):
return 1/(1+t**2)
# Spline
def spline(xpts, ypts):
n = len(xpts)
mat = np.zeros(( n, n))
rhs = np.zeros(( n,1 ))
for i in range(1,n-1):
rhs[i] = 6 * ( (ypts[i+1]-ypts[i]) / (xpts[i+1]-xpts[i]) \
-(ypts[i]-ypts[i-1]) / (xpts[i]-xpts[i-1]) )
for j in range(0,n-1):
# Set triagonal elements
if(j==i-1): mat[i][j] += xpts[i] - xpts[i-1]
elif(j==i): mat[i][j] += 2*(xpts[i+1]-xpts[i-1])
elif(j==i+1): mat[i][j] += xpts[i+1]-xpts[i]
# BCs
mat[0][0] = 1
mat[-1][-1] = 1
rhs[0] = 0
rhs[-1] = 0
# Solve it
x_vec = np.linalg.solve(mat, rhs)
return x_vec
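# Sketch (an addition, not part of the original script): the three nearly identical
# evaluation blocks below could be folded into a single helper, e.g.:
#
#   def eval_spline(xpts, ypts, sol, t=1000):
#       x, fx = [], []
#       for i in range(len(xpts) - 1):
#           dx = xpts[i + 1] - xpts[i]
#           for j in range(t):
#               bb = j / t
#               aa = 1 - bb
#               cc = dx**2 * aa * (aa**2 - 1) / 6
#               dd = dx**2 * bb * (bb**2 - 1) / 6
#               x.append(xpts[i] + bb * dx)
#               fx.append(aa * ypts[i] + bb * ypts[i + 1] + cc * sol[i] + dd * sol[i + 1])
#       return x, fx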
#######
# The function
x = [ i/100 for i in range(-500,500) ]
fx = [ f(i) for i in x ]
plt.plot(x,fx, 'k--',label='f(t)', linewidth=5)
### 5 points
xpts = np.linspace(-5, 5, 5)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'r', label='5 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 5 Points:', rmse)
### 10 points
xpts = np.linspace(-5, 5, 10)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'b', label='10 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 10 Points:', rmse)
### 15 points
xpts = np.linspace(-5, 5, 15)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'g', label='15 Points',linewidth=3)
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 15 Points:', rmse)
plt.legend(fontsize=16)
plt.ylim( [-0.2, 1.1] )
plt.title('Natural Cubic Splines for $f(t)$')
plt.savefig('Problem5ii.png')
plt.show()
| 0.032591 |
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Disk File Interface for the Swift Object Server
The `DiskFile`, `DiskFileWriter` and `DiskFileReader` classes combined define
the on-disk abstraction layer for supporting the object server REST API
interfaces (excluding `REPLICATE`). Other implementations wishing to provide
an alternative backend for the object server must implement the three
classes. An example alternative implementation can be found in the
`mem_server.py` and `mem_diskfile.py` modules alongside this one.
The `DiskFileManager` is a reference implementation specific class and is not
part of the backend API.
The remaining methods in this module are implementation specific and are also
not considered part of the backend API.
"""
import six.moves.cPickle as pickle
import copy
import errno
import fcntl
import json
import os
import re
import time
import uuid
import hashlib
import logging
import traceback
import xattr
from os.path import basename, dirname, exists, join, splitext
from random import shuffle
from tempfile import mkstemp
from contextlib import contextmanager
from collections import defaultdict
from datetime import timedelta
from eventlet import Timeout
from eventlet.hubs import trampoline
import six
from pyeclib.ec_iface import ECDriverError, ECInvalidFragmentMetadata, \
ECBadFragmentChecksum, ECInvalidParameter
from swift import gettext_ as _
from swift.common.constraints import check_drive
from swift.common.request_helpers import is_sys_meta
from swift.common.utils import mkdirs, Timestamp, \
storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
fsync_dir, drop_buffer_cache, lock_path, write_pickle, \
config_true_value, listdir, split_path, remove_file, \
get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
tpool_reraise, MD5_OF_EMPTY_STRING, link_fd_to_path, o_tmpfile_supported, \
O_TMPFILE, makedirs_count, replace_partition_in_path
from swift.common.splice import splice, tee
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
ReplicationLockTimeout, DiskFileExpired, DiskFileXattrNotSupported, \
DiskFileBadMetadataChecksum
from swift.common.swob import multi_range_iterator
from swift.common.storage_policy import (
get_policy_string, split_policy_string, PolicyError, POLICIES,
REPL_POLICY, EC_POLICY)
from functools import partial
PICKLE_PROTOCOL = 2
DEFAULT_RECLAIM_AGE = timedelta(weeks=1).total_seconds()
HASH_FILE = 'hashes.pkl'
HASH_INVALIDATIONS_FILE = 'hashes.invalid'
METADATA_KEY = b'user.swift.metadata'
METADATA_CHECKSUM_KEY = b'user.swift.metadata_checksum'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
RESERVED_DATAFILE_META = {'content-length', 'deleted', 'etag'}
DATAFILE_SYSTEM_META = {'x-static-large-object'}
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
get_data_dir = partial(get_policy_string, DATADIR_BASE)
get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
get_tmp_dir = partial(get_policy_string, TMP_BASE)
MIN_TIME_UPDATE_AUDITOR_STATUS = 60
# This matches rsync tempfiles, like ".<timestamp>.data.Xy095a"
RE_RSYNC_TEMPFILE = re.compile(r'^\..*\.([a-zA-Z0-9_]){6}$')
def _get_filename(fd):
"""
Helper function to get to file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename.
"""
if hasattr(fd, 'name'):
# fd object
return fd.name
# fd is a filename
return fd
def _encode_metadata(metadata):
"""
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict
"""
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
def _decode_metadata(metadata):
"""
Given a metadata dict from disk, convert keys and values to native strings.
:param metadata: a dict
"""
if six.PY2:
def to_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
def to_str(item):
if isinstance(item, six.binary_type):
return item.decode('utf8', 'surrogateescape')
return item
return dict(((to_str(k), to_str(v)) for k, v in metadata.items()))
def read_metadata(fd, add_missing_checksum=False):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:param add_missing_checksum: if set and checksum is missing, add it
:returns: dictionary of metadata
"""
metadata = b''
key = 0
try:
while True:
metadata += xattr.getxattr(
fd, METADATA_KEY + str(key or '').encode('ascii'))
key += 1
except (IOError, OSError) as e:
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
# TODO: we might want to re-raise errors that don't denote a missing
# xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
metadata_checksum = None
try:
metadata_checksum = xattr.getxattr(fd, METADATA_CHECKSUM_KEY)
except (IOError, OSError):
# All the interesting errors were handled above; the only thing left
# here is ENODATA / ENOATTR to indicate that this attribute doesn't
# exist. This is fine; it just means that this object predates the
# introduction of metadata checksums.
if add_missing_checksum:
new_checksum = hashlib.md5(metadata).hexdigest()
try:
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, new_checksum)
except (IOError, OSError) as e:
logging.error("Error adding metadata: %s" % e)
if metadata_checksum:
computed_checksum = hashlib.md5(metadata).hexdigest().encode('ascii')
if metadata_checksum != computed_checksum:
raise DiskFileBadMetadataChecksum(
"Metadata checksum mismatch for %s: "
"stored checksum='%s', computed='%s'" % (
fd, metadata_checksum, computed_checksum))
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
if six.PY2:
metadata = pickle.loads(metadata)
else:
metadata = pickle.loads(metadata, encoding='bytes')
return _decode_metadata(metadata)
def write_metadata(fd, metadata, xattr_size=65536):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(_encode_metadata(metadata), PICKLE_PROTOCOL)
metastr_md5 = hashlib.md5(metastr).hexdigest().encode('ascii')
key = 0
try:
while metastr:
xattr.setxattr(fd, METADATA_KEY + str(key or '').encode('ascii'),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, metastr_md5)
except IOError as e:
# errno module doesn't always have both of these, hence the ugly
# check
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
elif e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise
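# Illustrative note (an addition, not from the upstream source): pickled metadata
# larger than xattr_size is split across the keys user.swift.metadata,
# user.swift.metadata1, user.swift.metadata2, ... and re-assembled by
# read_metadata() above; the checksum always covers the complete pickled blob.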
def extract_policy(obj_path):
"""
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/30/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/30/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None
"""
try:
obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):]
obj_dirname = obj_portion[:obj_portion.index('/')]
except Exception:
return None
try:
base, policy = split_policy_string(obj_dirname)
except PolicyError:
return None
return policy
def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:params device_path: The path to the device the corrupted file is on.
:params corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename
"""
policy = extract_policy(corrupted_file_path)
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined',
get_data_dir(policy),
basename(from_dir))
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir, fsync=False)
return to_dir
def read_hashes(partition_dir):
"""
Read the existing hashes.pkl
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
hashes_file = join(partition_dir, HASH_FILE)
hashes = {'valid': False}
try:
with open(hashes_file, 'rb') as hashes_fp:
pickled_hashes = hashes_fp.read()
except (IOError, OSError):
pass
else:
try:
hashes = pickle.loads(pickled_hashes)
except Exception:
# pickle.loads() can raise a wide variety of exceptions when
# given invalid input depending on the way in which the
# input is invalid.
pass
# hashes.pkl w/o valid updated key is "valid" but "forever old"
hashes.setdefault('valid', True)
hashes.setdefault('updated', -1)
return hashes
def write_hashes(partition_dir, hashes):
"""
Write hashes to hashes.pkl
The updated key is added to hashes before it is written.
"""
hashes_file = join(partition_dir, HASH_FILE)
# 'valid' key should always be set by the caller; however, if there's a bug
# setting invalid is most safe
hashes.setdefault('valid', False)
hashes['updated'] = time.time()
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
def consolidate_hashes(partition_dir):
"""
Take what's in hashes.pkl and hashes.invalid, combine them, write the
result back to hashes.pkl, and clear out hashes.invalid.
:param partition_dir: absolute path to partition dir containing hashes.pkl
and hashes.invalid
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
hashes = read_hashes(partition_dir)
found_invalidation_entry = False
try:
with open(invalidations_file, 'rb') as inv_fh:
for line in inv_fh:
found_invalidation_entry = True
suffix = line.strip()
hashes[suffix] = None
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
if found_invalidation_entry:
write_hashes(partition_dir, hashes)
# Now that all the invalidations are reflected in hashes.pkl, it's
# safe to clear out the invalidations file.
with open(invalidations_file, 'wb') as inv_fh:
pass
return hashes
def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating
"""
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
if not isinstance(suffix, bytes):
suffix = suffix.encode('utf-8')
with lock_path(partition_dir), open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + b"\n")
def relink_paths(target_path, new_target_path, check_existing=False):
"""
Hard-links a file located in target_path using the second path
new_target_path. Creates intermediate directories if required.
:param target_path: current absolute filename
:param new_target_path: new absolute filename for the hardlink
:param check_existing: if True, check whether the link is already present
before attempting to create a new one
"""
if target_path != new_target_path:
logging.debug('Relinking %s to %s due to next_part_power set',
target_path, new_target_path)
new_target_dir = os.path.dirname(new_target_path)
if not os.path.isdir(new_target_dir):
os.makedirs(new_target_dir)
link_exists = False
if check_existing:
try:
new_stat = os.stat(new_target_path)
orig_stat = os.stat(target_path)
link_exists = (new_stat.st_ino == orig_stat.st_ino)
except OSError:
pass # if anything goes wrong, try anyway
if not link_exists:
os.link(target_path, new_target_path)
def get_part_path(dev_path, policy, partition):
"""
Given the device path, policy, and partition, returns the full
path to the partition
"""
return os.path.join(dev_path, get_data_dir(policy), str(partition))
class AuditLocation(object):
"""
Represents an object location to be audited.
Other than being a bucket of data, the only useful thing this does is
stringify to a filesystem path so the auditor's logs look okay.
"""
def __init__(self, path, device, partition, policy):
self.path, self.device, self.partition, self.policy = (
path, device, partition, policy)
def __str__(self):
return str(self.path)
def object_audit_location_generator(devices, datadir, mount_check=True,
logger=None, device_dirs=None,
auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory for the given datadir (policy),
if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
for the objects under the entries in device_dirs. The AuditLocation only
knows the path to the hash directory, not to the .data file therein
(if any). This is to avoid a double listdir(hash_dir); the DiskFile object
will always do one, so we don't.
:param devices: parent directory of the devices to be audited
:param datadir: objects directory
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF
"""
if not device_dirs:
device_dirs = listdir(devices)
else:
# remove bogus devices and duplicates from device_dirs
device_dirs = list(
set(listdir(devices)).intersection(set(device_dirs)))
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
base, policy = split_policy_string(datadir)
for device in device_dirs:
if not check_drive(devices, device, mount_check):
if logger:
logger.debug(
'Skipping %s as it is not %s', device,
'mounted' if mount_check else 'a dir')
continue
datadir_path = os.path.join(devices, device, datadir)
if not os.path.exists(datadir_path):
continue
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type)
def get_auditor_status(datadir_path, logger, auditor_type):
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
status = {}
try:
if six.PY3:
statusfile = open(auditor_status, encoding='utf8')
else:
statusfile = open(auditor_status, 'rb')
with statusfile:
status = statusfile.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT and logger:
logger.warning(_('Cannot read %(auditor_status)s (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
try:
status = json.loads(status)
except ValueError as e:
logger.warning(_('Loading JSON from %(auditor_status)s failed'
' (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
return status['partitions']
def update_auditor_status(datadir_path, logger, partitions, auditor_type):
status = json.dumps({'partitions': partitions})
if six.PY3:
status = status.encode('utf8')
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
try:
mtime = os.stat(auditor_status).st_mtime
except OSError:
mtime = 0
recently_updated = (mtime + MIN_TIME_UPDATE_AUDITOR_STATUS) > time.time()
if recently_updated and len(partitions) > 0:
if logger:
logger.debug(
'Skipping the update of recently changed %s' % auditor_status)
return
try:
with open(auditor_status, "wb") as statusfile:
statusfile.write(status)
except (OSError, IOError) as e:
if logger:
logger.warning(_('Cannot write %(auditor_status)s (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
def clear_auditor_status(devices, datadir, auditor_type="ALL"):
device_dirs = listdir(devices)
for device in device_dirs:
datadir_path = os.path.join(devices, device, datadir)
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
remove_file(auditor_status)
def strip_self(f):
"""
Wrapper to attach module level functions to base class.
"""
def wrapper(self, *args, **kwargs):
return f(*args, **kwargs)
return wrapper
class DiskFileRouter(object):
policy_type_to_manager_cls = {}
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their DiskFile implementation.
"""
def register_wrapper(diskfile_cls):
if policy_type in cls.policy_type_to_manager_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_manager_cls[policy_type],
policy_type))
cls.policy_type_to_manager_cls[policy_type] = diskfile_cls
return diskfile_cls
return register_wrapper
def __init__(self, *args, **kwargs):
self.policy_to_manager = {}
for policy in POLICIES:
manager_cls = self.policy_type_to_manager_cls[policy.policy_type]
self.policy_to_manager[int(policy)] = manager_cls(*args, **kwargs)
def __getitem__(self, policy):
return self.policy_to_manager[int(policy)]
class BaseDiskFileManager(object):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
the object server REST API layer).
The `get_diskfile()` method is how this implementation creates a `DiskFile`
object.
.. note::
This class is reference implementation specific and not part of the
pluggable on-disk backend API.
.. note::
TODO(portante): Not sure what the right name to recommend here, as
"manager" seemed generic enough, though suggestions are welcome.
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
diskfile_cls = None # must be set by subclasses
invalidate_hash = strip_self(invalidate_hash)
consolidate_hashes = strip_self(consolidate_hashes)
quarantine_renamer = strip_self(quarantine_renamer)
def __init__(self, conf, logger):
self.logger = logger
self.devices = conf.get('devices', '/srv/node')
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.reclaim_age = int(conf.get('reclaim_age', DEFAULT_RECLAIM_AGE))
replication_concurrency_per_device = conf.get(
'replication_concurrency_per_device')
replication_one_per_device = conf.get('replication_one_per_device')
if replication_concurrency_per_device is None \
and replication_one_per_device is not None:
self.logger.warning('Option replication_one_per_device is '
'deprecated and will be removed in a future '
'version. Update your configuration to use '
'option replication_concurrency_per_device.')
if config_true_value(replication_one_per_device):
replication_concurrency_per_device = 1
else:
replication_concurrency_per_device = 0
elif replication_one_per_device is not None:
self.logger.warning('Option replication_one_per_device ignored as '
'replication_concurrency_per_device is '
'defined.')
if replication_concurrency_per_device is None:
self.replication_concurrency_per_device = 1
else:
self.replication_concurrency_per_device = int(
replication_concurrency_per_device)
self.replication_lock_timeout = int(conf.get(
'replication_lock_timeout', 15))
self.use_splice = False
self.pipe_size = None
conf_wants_splice = config_true_value(conf.get('splice', 'no'))
# If the operator wants zero-copy with splice() but we don't have the
# requisite kernel support, complain so they can go fix it.
if conf_wants_splice and not splice.available:
self.logger.warning(
"Use of splice() requested (config says \"splice = %s\"), "
"but the system does not support it. "
"splice() will not be used." % conf.get('splice'))
elif conf_wants_splice and splice.available:
try:
sockfd = get_md5_socket()
os.close(sockfd)
except IOError as err:
# AF_ALG socket support was introduced in kernel 2.6.38; on
# systems with older kernels (or custom-built kernels lacking
# AF_ALG support), we can't use zero-copy.
if err.errno != errno.EAFNOSUPPORT:
raise
self.logger.warning("MD5 sockets not supported. "
"splice() will not be used.")
else:
self.use_splice = True
with open('/proc/sys/fs/pipe-max-size') as f:
max_pipe_size = int(f.read())
self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
self.use_linkat = o_tmpfile_supported()
def make_on_disk_filename(self, timestamp, ext=None,
ctype_timestamp=None, *a, **kw):
"""
Returns filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:returns: a file name
"""
rv = timestamp.internal
if ext == '.meta' and ctype_timestamp:
# If ctype_timestamp is None then the filename is simply the
# internal form of the timestamp. If ctype_timestamp is not None
# then the difference between the raw values of the two timestamps
# is appended as a hex number, with its sign.
#
# There are two reasons for encoding the content-type timestamp
# in the filename in this way. First, it means that two .meta files
# having the same timestamp but different content-type timestamps
# (and potentially different content-type values) will be distinct
# and therefore will be independently replicated when rsync
# replication is used. That ensures that all nodes end up having
# all content-type values after replication (with the most recent
# value being selected when the diskfile is opened). Second, having
# the content-type encoded in timestamp in the filename makes it
# possible for the on disk file search code to determine that
# timestamp by inspecting only the filename, and not needing to
# open the file and read its xattrs.
rv = encode_timestamps(timestamp, ctype_timestamp, explicit=True)
if ext:
rv = '%s%s' % (rv, ext)
return rv
def parse_on_disk_filename(self, filename):
"""
Parse an on disk file name.
:param filename: the file name including extension
:returns: a dict, with keys for timestamp, ext and ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
* ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
None for .meta files, otherwise None
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension.
Subclasses may override this method to add further keys to the
returned dict.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
ts_ctype = None
fname, ext = splitext(filename)
try:
if ext == '.meta':
timestamp, ts_ctype = decode_timestamps(
fname, explicit=True)[:2]
else:
timestamp = Timestamp(fname)
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
return {
'timestamp': timestamp,
'ext': ext,
'ctype_timestamp': ts_ctype
}
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Called by get_ondisk_files(). Should be over-ridden to implement
subclass specific handling of files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
raise NotImplementedError
def _verify_ondisk_files(self, results, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param results: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
data_file, meta_file, ts_file = tuple(
[results[key]
for key in ('data_file', 'meta_file', 'ts_file')])
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
def _split_list(self, original_list, condition):
"""
Split a list into two lists. The first list contains the first N items
of the original list, in their original order, where 0 < N <=
len(original list). The second list contains the remaining items of the
original list, in their original order.
The index, N, at which the original list is split is the index of the
first item in the list that does not satisfy the given condition. Note
that the original list should be appropriately sorted if the second
list is to contain no items that satisfy the given condition.
:param original_list: the list to be split.
:param condition: a single argument function that will be used to test
for the list item to split on.
:return: a tuple of two lists.
"""
for i, item in enumerate(original_list):
if not condition(item):
return original_list[:i], original_list[i:]
return original_list, []
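    # Illustrative example (an added note, not from the upstream source):
    #   _split_list([4, 3, 1, 2], lambda x: x > 2)  ->  ([4, 3], [1, 2])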
def _split_gt_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than timestamp, and items at same time or
older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] > timestamp)
def _split_gte_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than or at same time as the timestamp, and
items older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] >= timestamp)
def get_ondisk_files(self, files, datadir, verify=True, **kwargs):
"""
Given a simple list of files names, determine the files that constitute
a valid fileset i.e. a set of files that defines the state of an
object, and determine the files that are obsolete and could be deleted.
Note that some files may fall into neither category.
If a file is considered part of a valid fileset then its info dict will
be added to the results dict, keyed by <extension>_info. Any files that
are no longer required will have their info dicts added to a list
stored under the key 'obsolete'.
The results dict will always contain entries with keys 'ts_file',
'data_file' and 'meta_file'. Their values will be the fully qualified
path to a file of the corresponding type if there is such a file in the
valid fileset, or None.
:param files: a list of file names.
:param datadir: directory name files are from.
:param verify: if True verify that the ondisk file contract has not
been violated, otherwise do not verify.
:returns: a dict that will contain keys:
ts_file -> path to a .ts file or None
data_file -> path to a .data file or None
meta_file -> path to a .meta file or None
ctype_file -> path to a .meta file or None
and may contain keys:
ts_info -> a file info dict for a .ts file
data_info -> a file info dict for a .data file
meta_info -> a file info dict for a .meta file
ctype_info -> a file info dict for a .meta file which
contains the content-type value
unexpected -> a list of file paths for unexpected
files
possible_reclaim -> a list of file info dicts for possible
reclaimable files
obsolete -> a list of file info dicts for obsolete files
"""
# Build the exts data structure:
# exts is a dict that maps file extensions to a list of file_info
# dicts for the files having that extension. The file_info dicts are of
# the form returned by parse_on_disk_filename, with the filename added.
# Each list is sorted in reverse timestamp order.
# the results dict is used to collect results of file filtering
results = {}
# The exts dict will be modified during subsequent processing as files
# are removed to be discarded or ignored.
exts = defaultdict(list)
for afile in files:
# Categorize files by extension
try:
file_info = self.parse_on_disk_filename(afile)
file_info['filename'] = afile
exts[file_info['ext']].append(file_info)
except DiskFileError as e:
file_path = os.path.join(datadir or '', afile)
results.setdefault('unexpected', []).append(file_path)
# log warnings if it's not a rsync temp file
if RE_RSYNC_TEMPFILE.match(afile):
self.logger.debug('Rsync tempfile: %s', file_path)
else:
self.logger.warning('Unexpected file %s: %s',
file_path, e)
for ext in exts:
# For each extension sort files into reverse chronological order.
exts[ext] = sorted(
exts[ext], key=lambda info: info['timestamp'], reverse=True)
if exts.get('.ts'):
# non-tombstones older than or equal to latest tombstone are
# obsolete
for ext in filter(lambda ext: ext != '.ts', exts.keys()):
exts[ext], older = self._split_gt_timestamp(
exts[ext], exts['.ts'][0]['timestamp'])
results.setdefault('obsolete', []).extend(older)
# all but most recent .ts are obsolete
results.setdefault('obsolete', []).extend(exts['.ts'][1:])
exts['.ts'] = exts['.ts'][:1]
if exts.get('.meta'):
# retain the newest meta file
retain = 1
if exts['.meta'][1:]:
# there are other meta files so find the one with newest
# ctype_timestamp...
exts['.meta'][1:] = sorted(
exts['.meta'][1:],
key=lambda info: info['ctype_timestamp'],
reverse=True)
# ...and retain this IFF its ctype_timestamp is greater than
# newest meta file
if (exts['.meta'][1]['ctype_timestamp'] >
exts['.meta'][0]['ctype_timestamp']):
if (exts['.meta'][1]['timestamp'] ==
exts['.meta'][0]['timestamp']):
# both at same timestamp so retain only the one with
# newest ctype
exts['.meta'][:2] = [exts['.meta'][1],
exts['.meta'][0]]
retain = 1
else:
# retain both - first has newest metadata, second has
# newest ctype
retain = 2
# discard all meta files not being retained...
results.setdefault('obsolete', []).extend(exts['.meta'][retain:])
exts['.meta'] = exts['.meta'][:retain]
# delegate to subclass handler
self._process_ondisk_files(exts, results, **kwargs)
# set final choice of files
if 'data_info' in results:
if exts.get('.meta'):
# only report a meta file if a data file has been chosen
results['meta_info'] = exts['.meta'][0]
ctype_info = exts['.meta'].pop()
if (ctype_info['ctype_timestamp']
> results['data_info']['timestamp']):
results['ctype_info'] = ctype_info
elif exts.get('.ts'):
# only report a ts file if a data file has not been chosen
# (ts files will commonly already have been removed from exts if
# a data file was chosen, but that may not be the case if
# non-durable EC fragment(s) were chosen, hence the elif here)
results['ts_info'] = exts['.ts'][0]
# set ts_file, data_file, meta_file and ctype_file with path to
# chosen file or None
for info_key in ('data_info', 'meta_info', 'ts_info', 'ctype_info'):
info = results.get(info_key)
key = info_key[:-5] + '_file'
results[key] = join(datadir, info['filename']) if info else None
if verify:
assert self._verify_ondisk_files(
results, **kwargs), \
"On-disk file search algorithm contract is broken: %s" \
% str(results)
return results
def cleanup_ondisk_files(self, hsh_path, **kwargs):
"""
Clean up on-disk files that are obsolete and gather the set of valid
on-disk files for an object.
:param hsh_path: object hash path
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'; a list of files remaining in the directory,
reverse sorted, stored under the key 'files'.
"""
def is_reclaimable(timestamp):
return (time.time() - float(timestamp)) > self.reclaim_age
files = listdir(hsh_path)
files.sort(reverse=True)
results = self.get_ondisk_files(
files, hsh_path, verify=False, **kwargs)
if 'ts_info' in results and is_reclaimable(
results['ts_info']['timestamp']):
remove_file(join(hsh_path, results['ts_info']['filename']))
files.remove(results.pop('ts_info')['filename'])
for file_info in results.get('possible_reclaim', []):
# stray files are not deleted until reclaim-age
if is_reclaimable(file_info['timestamp']):
results.setdefault('obsolete', []).append(file_info)
for file_info in results.get('obsolete', []):
remove_file(join(hsh_path, file_info['filename']))
files.remove(file_info['filename'])
results['files'] = files
return results
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
raise NotImplementedError
def _hash_suffix_dir(self, path):
"""
:param path: full path to directory
"""
hashes = defaultdict(hashlib.md5)
try:
path_contents = sorted(os.listdir(path))
except OSError as err:
if err.errno in (errno.ENOTDIR, errno.ENOENT):
raise PathNotDir()
raise
for hsh in path_contents:
hsh_path = join(path, hsh)
try:
ondisk_info = self.cleanup_ondisk_files(hsh_path)
except OSError as err:
if err.errno == errno.ENOTDIR:
partition_path = dirname(path)
objects_path = dirname(partition_path)
device_path = dirname(objects_path)
quar_path = quarantine_renamer(device_path, hsh_path)
logging.exception(
_('Quarantined %(hsh_path)s to %(quar_path)s because '
'it is not a directory'), {'hsh_path': hsh_path,
'quar_path': quar_path})
continue
raise
if not ondisk_info['files']:
try:
os.rmdir(hsh_path)
except OSError:
pass
continue
# ondisk_info has info dicts containing timestamps for those
# files that could determine the state of the diskfile if it were
# to be opened. We update the suffix hash with the concatenation of
# each file's timestamp and extension. The extension is added to
# guarantee distinct hash values from two object dirs that have
# different file types at the same timestamp(s).
#
# Files that may be in the object dir but would have no effect on
# the state of the diskfile are not used to update the hash.
for key in (k for k in ('meta_info', 'ts_info')
if k in ondisk_info):
info = ondisk_info[key]
hashes[None].update(info['timestamp'].internal + info['ext'])
# delegate to subclass for data file related updates...
self._update_suffix_hashes(hashes, ondisk_info)
if 'ctype_info' in ondisk_info:
# We have a distinct content-type timestamp so update the
# hash. As a precaution, append '_ctype' to differentiate this
# value from any other timestamp value that might included in
# the hash in future. There is no .ctype file so use _ctype to
# avoid any confusion.
info = ondisk_info['ctype_info']
hashes[None].update(info['ctype_timestamp'].internal
+ '_ctype')
try:
os.rmdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
raise PathNotDir()
else:
# if we remove it, pretend like it wasn't there to begin with so
# that the suffix key gets removed
raise PathNotDir()
return hashes
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
"""
raise NotImplementedError
def _get_hashes(self, *args, **kwargs):
hashed, hashes = self.__get_hashes(*args, **kwargs)
hashes.pop('updated', None)
hashes.pop('valid', None)
return hashed, hashes
def __get_hashes(self, device, partition, policy, recalculate=None,
do_listdir=False):
"""
Get hashes for each suffix dir in a partition. do_listdir causes it to
mistrust the hash cache for suffix existence at the (unexpectedly high)
cost of a listdir.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param policy: the StoragePolicy instance
:param recalculate: list of suffixes which should be recalculated when
got
:param do_listdir: force existence check for all hashes in the
partition
:returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
"""
hashed = 0
dev_path = self.get_dev_path(device)
partition_path = get_part_path(dev_path, policy, partition)
hashes_file = join(partition_path, HASH_FILE)
modified = False
orig_hashes = {'valid': False}
if recalculate is None:
recalculate = []
try:
orig_hashes = self.consolidate_hashes(partition_path)
except Exception:
self.logger.warning('Unable to read %r', hashes_file,
exc_info=True)
if not orig_hashes['valid']:
            # This is the only path to valid hashes from an invalid read
            # (e.g. the file does not exist, is corrupt, etc.). Moreover, in
            # order to write these valid hashes we must read *the exact same*
            # invalid state, or we'll trigger race detection.
do_listdir = True
hashes = {'valid': True}
# If the exception handling around consolidate_hashes fired we're
# going to do a full rehash regardless; but we need to avoid
# needless recursion if the on-disk hashes.pkl is actually readable
# (worst case is consolidate_hashes keeps raising exceptions and we
# eventually run out of stack).
            # N.B. orig_hashes invalid only affects new parts and error/edge
# conditions - so try not to get overly caught up trying to
# optimize it out unless you manage to convince yourself there's a
# bad behavior.
orig_hashes = read_hashes(partition_path)
else:
hashes = copy.deepcopy(orig_hashes)
if do_listdir:
for suff in os.listdir(partition_path):
if len(suff) == 3:
hashes.setdefault(suff, None)
modified = True
self.logger.debug('Run listdir on %s', partition_path)
hashes.update((suffix, None) for suffix in recalculate)
for suffix, hash_ in hashes.items():
if not hash_:
suffix_dir = join(partition_path, suffix)
try:
hashes[suffix] = self._hash_suffix(suffix_dir)
hashed += 1
except PathNotDir:
del hashes[suffix]
except OSError:
logging.exception(_('Error hashing suffix'))
modified = True
if modified:
with lock_path(partition_path):
if read_hashes(partition_path) == orig_hashes:
write_hashes(partition_path, hashes)
return hashed, hashes
return self.__get_hashes(device, partition, policy,
recalculate=recalculate,
do_listdir=do_listdir)
else:
return hashed, hashes
def construct_dev_path(self, device):
"""
Construct the path to a device without checking if it is mounted.
:param device: name of target device
:returns: full path to the device
"""
return os.path.join(self.devices, device)
def get_dev_path(self, device, mount_check=None):
"""
Return the path to a device, first checking to see if either it
is a proper mount point, or at least a directory depending on
the mount_check configuration option.
:param device: name of target device
:param mount_check: whether or not to check mountedness of device.
Defaults to bool(self.mount_check).
:returns: full path to the device, None if the path to the device is
not a proper mount point or directory.
"""
if mount_check is False:
# explicitly forbidden from syscall, just return path
return join(self.devices, device)
# we'll do some kind of check if not explicitly forbidden
if mount_check or self.mount_check:
mount_check = True
else:
mount_check = False
return check_drive(self.devices, device, mount_check)
@contextmanager
def replication_lock(self, device):
"""
A context manager that will lock on the device given, if
configured to do so.
:param device: name of target device
:raises ReplicationLockTimeout: If the lock on the device
cannot be granted within the configured timeout.
"""
if self.replication_concurrency_per_device:
dev_path = self.get_dev_path(device)
with lock_path(
dev_path,
timeout=self.replication_lock_timeout,
timeout_class=ReplicationLockTimeout,
limit=self.replication_concurrency_per_device):
yield True
else:
yield True
def pickle_async_update(self, device, account, container, obj, data,
timestamp, policy):
"""
Write data describing a container update notification to a pickle file
in the async_pending directory.
:param device: name of target device
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param data: update data to be written to pickle file
:param timestamp: a Timestamp
:param policy: the StoragePolicy instance
"""
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, get_async_dir(policy))
tmp_dir = os.path.join(device_path, get_tmp_dir(policy))
mkdirs(tmp_dir)
ohash = hash_path(account, container, obj)
write_pickle(
data,
os.path.join(async_dir, ohash[-3:], ohash + '-' +
Timestamp(timestamp).internal),
tmp_dir)
self.logger.increment('async_pendings')
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Returns a BaseDiskFile instance for an object based on the object's
partition, path parts and policy.
:param device: name of target device
:param partition: partition on device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return self.diskfile_cls(self, dev_path,
partition, account, container, obj,
policy=policy, use_splice=self.use_splice,
pipe_size=self.pipe_size,
use_linkat=self.use_linkat, **kwargs)
def clear_auditor_status(self, policy, auditor_type="ALL"):
datadir = get_data_dir(policy)
clear_auditor_status(self.devices, datadir, auditor_type)
def object_audit_location_generator(self, policy, device_dirs=None,
auditor_type="ALL"):
"""
Yield an AuditLocation for all objects stored under device_dirs.
:param policy: the StoragePolicy instance
:param device_dirs: directory of target device
:param auditor_type: either ALL or ZBF
"""
datadir = get_data_dir(policy)
return object_audit_location_generator(self.devices, datadir,
self.mount_check,
self.logger, device_dirs,
auditor_type)
def get_diskfile_from_audit_location(self, audit_location):
"""
Returns a BaseDiskFile instance for an object at the given
AuditLocation.
:param audit_location: object location to be audited
"""
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
return self.diskfile_cls.from_hash_dir(
self, audit_location.path, dev_path,
audit_location.partition, policy=audit_location.policy)
def get_diskfile_from_hash(self, device, partition, object_hash,
policy, **kwargs):
"""
Returns a DiskFile instance for an object at the given
object_hash. Just in case someone thinks of refactoring, be
sure DiskFileDeleted is *not* raised, but the DiskFile
instance representing the tombstoned object is returned
instead.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param object_hash: the hash of an object path
:param policy: the StoragePolicy instance
:raises DiskFileNotExist: if the object does not exist
:returns: an instance of BaseDiskFile
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
object_path = os.path.join(
dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
object_hash)
try:
filenames = self.cleanup_ondisk_files(object_path)['files']
except OSError as err:
if err.errno == errno.ENOTDIR:
quar_path = self.quarantine_renamer(dev_path, object_path)
logging.exception(
_('Quarantined %(object_path)s to %(quar_path)s because '
'it is not a directory'), {'object_path': object_path,
'quar_path': quar_path})
raise DiskFileNotExist()
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist()
if not filenames:
raise DiskFileNotExist()
try:
metadata = read_metadata(os.path.join(object_path, filenames[-1]))
except EOFError:
raise DiskFileNotExist()
try:
account, container, obj = split_path(
metadata.get('name', ''), 3, 3, True)
except ValueError:
raise DiskFileNotExist()
return self.diskfile_cls(self, dev_path,
partition, account, container, obj,
policy=policy, **kwargs)
def get_hashes(self, device, partition, suffixes, policy):
"""
:param device: name of target device
:param partition: partition name
:param suffixes: a list of suffix directories to be recalculated
:param policy: the StoragePolicy instance
        :returns: a dictionary that maps suffix directories to their hashes
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
if not os.path.exists(partition_path):
mkdirs(partition_path)
_junk, hashes = tpool_reraise(
self._get_hashes, device, partition, policy, recalculate=suffixes)
return hashes
def _listdir(self, path):
"""
:param path: full path to directory
"""
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
self.logger.error(
'ERROR: Skipping %r due to error with listdir attempt: %s',
path, err)
return []
def yield_suffixes(self, device, partition, policy):
"""
Yields tuples of (full_path, suffix_only) for suffixes stored
on the given device and partition.
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
for suffix in self._listdir(partition_path):
if len(suffix) != 3:
continue
try:
int(suffix, 16)
except ValueError:
continue
yield (os.path.join(partition_path, suffix), suffix)
def yield_hashes(self, device, partition, policy,
suffixes=None, **kwargs):
"""
Yields tuples of (hash_only, timestamps) for object
information stored for the given device, partition, and
(optionally) suffixes. If suffixes is None, all stored
suffixes will be searched for object hashes. Note that if
suffixes is not None but empty, such as [], then nothing will
be yielded.
timestamps is a dict which may contain items mapping:
- ts_data -> timestamp of data or tombstone file,
- ts_meta -> timestamp of meta file, if one exists
- ts_ctype -> timestamp of meta file containing most recent
content-type value, if one exists
where timestamps are instances of
:class:`~swift.common.utils.Timestamp`
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
:param suffixes: optional list of suffix directories to be searched
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
if suffixes is None:
suffixes = self.yield_suffixes(device, partition, policy)
else:
partition_path = get_part_path(dev_path, policy, partition)
suffixes = (
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
key_preference = (
('ts_meta', 'meta_info', 'timestamp'),
('ts_data', 'data_info', 'timestamp'),
('ts_data', 'ts_info', 'timestamp'),
('ts_ctype', 'ctype_info', 'ctype_timestamp'),
)
for suffix_path, suffix in suffixes:
for object_hash in self._listdir(suffix_path):
object_path = os.path.join(suffix_path, object_hash)
try:
results = self.cleanup_ondisk_files(
object_path, **kwargs)
timestamps = {}
for ts_key, info_key, info_ts_key in key_preference:
if info_key not in results:
continue
timestamps[ts_key] = results[info_key][info_ts_key]
if 'ts_data' not in timestamps:
# file sets that do not include a .data or .ts
# file cannot be opened and therefore cannot
# be ssync'd
continue
yield (object_hash, timestamps)
except AssertionError as err:
self.logger.debug('Invalid file set in %s (%s)' % (
object_path, err))
except DiskFileError as err:
self.logger.debug(
'Invalid diskfile filename in %r (%s)' % (
object_path, err))
class BaseDiskFileWriter(object):
"""
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.create` method.
.. note::
It is the responsibility of the
:func:`swift.obj.diskfile.DiskFile.create` method context manager to
close the open file descriptor.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param name: name of object from REST API
:param datadir: on-disk directory object will end up in on
:func:`swift.obj.diskfile.DiskFileWriter.put`
:param fd: open file descriptor of temporary file to receive data
:param tmppath: full path name of the opened file descriptor
:param bytes_per_sync: number bytes written between sync calls
:param diskfile: the diskfile creating this DiskFileWriter instance
:param next_part_power: the next partition power to be used
"""
def __init__(self, name, datadir, fd, tmppath, bytes_per_sync, diskfile,
next_part_power):
# Parameter tracking
self._name = name
self._datadir = datadir
self._fd = fd
self._tmppath = tmppath
self._bytes_per_sync = bytes_per_sync
self._diskfile = diskfile
self.next_part_power = next_part_power
# Internal attributes
self._upload_size = 0
self._last_sync = 0
self._extension = '.data'
self._put_succeeded = False
@property
def manager(self):
return self._diskfile.manager
@property
def put_succeeded(self):
return self._put_succeeded
def write(self, chunk):
"""
Write a chunk of data to disk. All invocations of this method must
        come before invoking :func:`swift.obj.diskfile.DiskFileWriter.put`.
For this implementation, the data is written into a temporary file.
:param chunk: the chunk of data to write as a string object
:returns: the total number of bytes written to an object
"""
while chunk:
written = os.write(self._fd, chunk)
self._upload_size += written
chunk = chunk[written:]
# For large files sync every 512MB (by default) written
diff = self._upload_size - self._last_sync
if diff >= self._bytes_per_sync:
tpool_reraise(fdatasync, self._fd)
drop_buffer_cache(self._fd, self._last_sync, diff)
self._last_sync = self._upload_size
return self._upload_size
def _finalize_put(self, metadata, target_path, cleanup):
# Write the metadata before calling fsync() so that both data and
# metadata are flushed to disk.
write_metadata(self._fd, metadata)
# We call fsync() before calling drop_cache() to lower the amount of
# redundant work the drop cache code will perform on the pages (now
# that after fsync the pages will be all clean).
fsync(self._fd)
# From the Department of the Redundancy Department, make sure we call
# drop_cache() after fsync() to avoid redundant work (pages all
# clean).
drop_buffer_cache(self._fd, 0, self._upload_size)
self.manager.invalidate_hash(dirname(self._datadir))
# After the rename/linkat completes, this object will be available for
# requests to reference.
if self._tmppath:
# It was a named temp file created by mkstemp()
renamer(self._tmppath, target_path)
else:
# It was an unnamed temp file created by open() with O_TMPFILE
link_fd_to_path(self._fd, target_path,
self._diskfile._dirs_created)
# Check if the partition power will/has been increased
new_target_path = None
if self.next_part_power:
new_target_path = replace_partition_in_path(
target_path, self.next_part_power)
if target_path != new_target_path:
try:
fsync_dir(os.path.dirname(target_path))
relink_paths(target_path, new_target_path)
except OSError as exc:
self.manager.logger.exception(
'Relinking %s to %s failed: %s',
target_path, new_target_path, exc)
# If rename is successful, flag put as succeeded. This is done to avoid
# unnecessary os.unlink() of tempfile later. As renamer() has
# succeeded, the tempfile would no longer exist at its original path.
self._put_succeeded = True
if cleanup:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError:
logging.exception(_('Problem cleaning up %s'), self._datadir)
self._part_power_cleanup(target_path, new_target_path)
def _put(self, metadata, cleanup=True, *a, **kw):
"""
Helper method for subclasses.
For this implementation, this method is responsible for renaming the
temporary file to the final name and directory location. This method
should be called after the final call to
:func:`swift.obj.diskfile.DiskFileWriter.write`.
:param metadata: dictionary of metadata to be associated with the
object
:param cleanup: a Boolean. If True then obsolete files will be removed
from the object dir after the put completes, otherwise
obsolete files are left in place.
"""
timestamp = Timestamp(metadata['X-Timestamp'])
ctype_timestamp = metadata.get('Content-Type-Timestamp')
if ctype_timestamp:
ctype_timestamp = Timestamp(ctype_timestamp)
filename = self.manager.make_on_disk_filename(
timestamp, self._extension, ctype_timestamp=ctype_timestamp,
*a, **kw)
metadata['name'] = self._name
target_path = join(self._datadir, filename)
tpool_reraise(self._finalize_put, metadata, target_path, cleanup)
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
raise NotImplementedError
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
replication policy type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
def _part_power_cleanup(self, cur_path, new_path):
"""
        Clean up related DiskFile directories.
        If the partition power is about to be increased, or has just been
        increased but the relinker has not yet cleaned up the old files, an
        additional cleanup of the related dirs has to be done. Otherwise some
        unused files might be left behind if a PUT or DELETE happens in the
        meantime.
:param cur_path: current full path to an object file
:param new_path: recomputed path to an object file, based on the
next_part_power set in the ring
"""
if new_path is None:
return
# Partition power will be increased soon
if new_path != cur_path:
new_target_dir = os.path.dirname(new_path)
try:
self.manager.cleanup_ondisk_files(new_target_dir)
except OSError:
logging.exception(
_('Problem cleaning up %s'), new_target_dir)
# Partition power has been increased, cleanup not yet finished
else:
prev_part_power = int(self.next_part_power) - 1
old_target_path = replace_partition_in_path(
cur_path, prev_part_power)
old_target_dir = os.path.dirname(old_target_path)
try:
self.manager.cleanup_ondisk_files(old_target_dir)
except OSError:
logging.exception(
_('Problem cleaning up %s'), old_target_dir)
class BaseDiskFileReader(object):
"""
Encapsulation of the WSGI read context for servicing GET REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.reader` method.
.. note::
The quarantining behavior of this method is considered implementation
specific, and is not required of the API.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param fp: open file object pointer reference
:param data_file: on-disk data file name for the object
:param obj_size: verified on-disk size of the object
:param etag: expected metadata etag value for entire file
:param disk_chunk_size: size of reads from disk in bytes
:param keep_cache_size: maximum object size that will be kept in cache
:param device_path: on-disk device path, used when quarantining an obj
:param logger: logger caller wants this object to use
:param quarantine_hook: 1-arg callable called w/reason when quarantined
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param diskfile: the diskfile creating this DiskFileReader instance
:param keep_cache: should resulting reads be kept in the buffer cache
"""
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
# Parameter tracking
self._fp = fp
self._data_file = data_file
self._obj_size = obj_size
self._etag = etag
self._diskfile = diskfile
self._disk_chunk_size = disk_chunk_size
self._device_path = device_path
self._logger = logger
self._quarantine_hook = quarantine_hook
self._use_splice = use_splice
self._pipe_size = pipe_size
if keep_cache:
# Caller suggests we keep this in cache, only do it if the
# object's size is less than the maximum.
self._keep_cache = obj_size < keep_cache_size
else:
self._keep_cache = False
# Internal Attributes
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._md5_of_sent_bytes = None
self._suppress_file_closing = False
self._quarantined_dir = None
@property
def manager(self):
return self._diskfile.manager
def _init_checks(self):
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = hashlib.md5()
def _update_checks(self, chunk):
if self._iter_etag:
self._iter_etag.update(chunk)
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._init_checks()
while True:
chunk = self._fp.read(self._disk_chunk_size)
if chunk:
self._update_checks(chunk)
self._bytes_read += len(chunk)
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
yield chunk
else:
self._read_to_eof = True
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
break
finally:
if not self._suppress_file_closing:
self.close()
def can_zero_copy_send(self):
return self._use_splice
def zero_copy_send(self, wsockfd):
"""
Does some magic with splice() and tee() to move stuff from disk to
network without ever touching userspace.
:param wsockfd: file descriptor (integer) of the socket out which to
send data
"""
# Note: if we ever add support for zero-copy ranged GET responses,
# we'll have to make this conditional.
self._started_at_0 = True
rfd = self._fp.fileno()
client_rpipe, client_wpipe = os.pipe()
hash_rpipe, hash_wpipe = os.pipe()
md5_sockfd = get_md5_socket()
# The actual amount allocated to the pipe may be rounded up to the
# nearest multiple of the page size. If we have the memory allocated,
# we may as well use it.
#
# Note: this will raise IOError on failure, so we don't bother
# checking the return value.
pipe_size = fcntl.fcntl(client_rpipe, F_SETPIPE_SZ, self._pipe_size)
fcntl.fcntl(hash_rpipe, F_SETPIPE_SZ, pipe_size)
dropped_cache = 0
self._bytes_read = 0
try:
while True:
# Read data from disk to pipe
(bytes_in_pipe, _1, _2) = splice(
rfd, None, client_wpipe, None, pipe_size, 0)
if bytes_in_pipe == 0:
self._read_to_eof = True
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
break
self._bytes_read += bytes_in_pipe
# "Copy" data from pipe A to pipe B (really just some pointer
# manipulation in the kernel, not actual copying).
bytes_copied = tee(client_rpipe, hash_wpipe, bytes_in_pipe, 0)
if bytes_copied != bytes_in_pipe:
# We teed data between two pipes of equal size, and the
# destination pipe was empty. If, somehow, the destination
# pipe was full before all the data was teed, we should
# fail here. If we don't raise an exception, then we will
# have the incorrect MD5 hash once the object has been
# sent out, causing a false-positive quarantine.
raise Exception("tee() failed: tried to move %d bytes, "
"but only moved %d" %
(bytes_in_pipe, bytes_copied))
# Take the data and feed it into an in-kernel MD5 socket. The
# MD5 socket hashes data that is written to it. Reading from
# it yields the MD5 checksum of the written data.
#
# Note that we don't have to worry about splice() returning
# None here (which happens on EWOULDBLOCK); we're splicing
# $bytes_in_pipe bytes from a pipe with exactly that many
# bytes in it, so read won't block, and we're splicing it into
# an MD5 socket, which synchronously hashes any data sent to
# it, so writing won't block either.
(hashed, _1, _2) = splice(hash_rpipe, None, md5_sockfd, None,
bytes_in_pipe, splice.SPLICE_F_MORE)
if hashed != bytes_in_pipe:
raise Exception("md5 socket didn't take all the data? "
"(tried to write %d, but wrote %d)" %
(bytes_in_pipe, hashed))
while bytes_in_pipe > 0:
try:
res = splice(client_rpipe, None, wsockfd, None,
bytes_in_pipe, 0)
bytes_in_pipe -= res[0]
except IOError as exc:
if exc.errno == errno.EWOULDBLOCK:
trampoline(wsockfd, write=True)
else:
raise
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
finally:
# Linux MD5 sockets return '00000000000000000000000000000000' for
# the checksum if you didn't write any bytes to them, instead of
# returning the correct value.
if self._bytes_read > 0:
bin_checksum = os.read(md5_sockfd, 16)
hex_checksum = ''.join("%02x" % ord(c) for c in bin_checksum)
else:
hex_checksum = MD5_OF_EMPTY_STRING
self._md5_of_sent_bytes = hex_checksum
os.close(client_rpipe)
os.close(client_wpipe)
os.close(hash_rpipe)
os.close(hash_wpipe)
os.close(md5_sockfd)
self.close()
def app_iter_range(self, start, stop):
"""
Returns an iterator over the data file for range (start, stop)
"""
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
"""
Returns an iterator over the data file for a set of ranges
"""
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
self.close()
def _drop_cache(self, fd, offset, length):
"""
        Drop the buffer cache for the given byte range of the file, unless
        this reader was told to keep the data cached.
        :param fd: file descriptor of the data file
        :param offset: start of the byte range to drop
        :param length: number of bytes to drop
"""
if not self._keep_cache:
drop_buffer_cache(fd, offset, length)
def _quarantine(self, msg):
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, self._data_file)
self._logger.warning("Quarantined object %s: %s" % (
self._data_file, msg))
self._logger.increment('quarantines')
self._quarantine_hook(msg)
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
if self._iter_etag and not self._md5_of_sent_bytes:
self._md5_of_sent_bytes = self._iter_etag.hexdigest()
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
self._bytes_read, self._obj_size))
elif self._md5_of_sent_bytes and \
self._etag != self._md5_of_sent_bytes:
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._md5_of_sent_bytes))
def close(self):
"""
Close the open file handle if present.
For this specific implementation, this method will handle quarantining
the file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except DiskFileQuarantined:
raise
except (Exception, Timeout) as e:
self._logger.error(_(
'ERROR DiskFile %(data_file)s'
' close failure: %(exc)s : %(stack)s'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self._data_file})
finally:
fp, self._fp = self._fp, None
fp.close()
class BaseDiskFile(object):
"""
Manage object files.
This specific implementation manages object files on a disk formatted with
a POSIX-compliant file system that supports extended attributes as
metadata on a file or directory.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
The following path format is used for data file locations:
<devices_path/<device_dir>/<datadir>/<partdir>/<suffixdir>/<hashdir>/
<datafile>.<ext>
:param mgr: associated DiskFileManager instance
:param device_path: path to the target device or drive
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param _datadir: override the full datadir otherwise constructed here
:param policy: the StoragePolicy instance
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param use_linkat: if True, use open() with linkat() to create obj file
:param open_expired: if True, open() will not raise a DiskFileExpired if
object is expired
:param next_part_power: the next partition power to be used
"""
reader_cls = None # must be set by subclasses
writer_cls = None # must be set by subclasses
def __init__(self, mgr, device_path, partition,
account=None, container=None, obj=None, _datadir=None,
policy=None, use_splice=False, pipe_size=None,
use_linkat=False, open_expired=False, next_part_power=None,
**kwargs):
self._manager = mgr
self._device_path = device_path
self._logger = mgr.logger
self._disk_chunk_size = mgr.disk_chunk_size
self._bytes_per_sync = mgr.bytes_per_sync
self._use_splice = use_splice
self._pipe_size = pipe_size
self._use_linkat = use_linkat
self._open_expired = open_expired
        # This might look a little hacky, i.e. tracking the number of newly
        # created dirs so that only those are fsynced later. If there is a
        # better way, please suggest.
# Or one could consider getting rid of doing fsyncs on dirs altogether
# and mounting XFS with the 'dirsync' mount option which should result
# in all entry fops being carried out synchronously.
self._dirs_created = 0
self.policy = policy
self.next_part_power = next_part_power
if account and container and obj:
self._name = '/' + '/'.join((account, container, obj))
self._account = account
self._container = container
self._obj = obj
else:
# gets populated when we read the metadata
self._name = None
self._account = None
self._container = None
self._obj = None
self._tmpdir = join(device_path, get_tmp_dir(policy))
self._ondisk_info = None
self._metadata = None
self._datafile_metadata = None
self._metafile_metadata = None
self._data_file = None
self._fp = None
self._quarantined_dir = None
self._content_length = None
if _datadir:
self._datadir = _datadir
else:
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
@property
def manager(self):
return self._manager
@property
def account(self):
return self._account
@property
def container(self):
return self._container
@property
def obj(self):
return self._obj
@property
def content_length(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._content_length
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
@property
def data_timestamp(self):
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest data file found in the object
directory.
:return: A Timestamp instance, or None if no data file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._datafile_metadata:
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
return None
@property
def fragments(self):
return None
@property
def content_type(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata.get('Content-Type')
@property
def content_type_timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
t = self._metadata.get('Content-Type-Timestamp',
self._datafile_metadata.get('X-Timestamp'))
return Timestamp(t)
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
        return cls(mgr, device_path, partition, _datadir=hash_dir_path,
                   policy=policy)
def open(self, modernize=False):
"""
Open the object.
This implementation opens the data file representing the object, reads
the associated metadata in the extended attributes, additionally
combining metadata from fast-POST `.meta` files.
:param modernize: if set, update this diskfile to the latest format.
Currently, this means adding metadata checksums if none are
present.
.. note::
An implementation is allowed to raise any of the following
exceptions, but is only required to raise `DiskFileNotExist` when
the object representation does not exist.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileNotExist: if the object does not exist
:raises DiskFileDeleted: if the object was previously deleted
        :raises DiskFileQuarantined: if while reading metadata of the file
                                     some data did not pass cross checks
:returns: itself for use as a context manager
"""
# First figure out if the data directory exists
try:
files = os.listdir(self._datadir)
except OSError as err:
if err.errno == errno.ENOTDIR:
# If there's a file here instead of a directory, quarantine
# it; something's gone wrong somewhere.
raise self._quarantine(
# hack: quarantine_renamer actually renames the directory
# enclosing the filename you give it, but here we just
# want this one file and not its parent.
os.path.join(self._datadir, "made-up-filename"),
"Expected directory, found file at %s" % self._datadir)
elif err.errno != errno.ENOENT:
raise DiskFileError(
"Error listing directory %s: %s" % (self._datadir, err))
# The data directory does not exist, so the object cannot exist.
files = []
# gather info about the valid files to use to open the DiskFile
file_info = self._get_ondisk_files(files)
self._data_file = file_info.get('data_file')
if not self._data_file:
raise self._construct_exception_from_ts_file(**file_info)
self._fp = self._construct_from_data_file(
modernize=modernize, **file_info)
# This method must populate the internal _metadata attribute.
self._metadata = self._metadata or {}
return self
def __enter__(self):
"""
Context enter.
.. note::
An implementation shall raise `DiskFileNotOpen` when has not
previously invoked the :func:`swift.obj.diskfile.DiskFile.open`
method.
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
"""
Context exit.
.. note::
This method will be invoked by the object server while servicing
the REST API *before* the object has actually been read. It is the
responsibility of the implementation to properly handle that.
"""
if self._fp is not None:
fp, self._fp = self._fp, None
fp.close()
def _quarantine(self, data_file, msg):
"""
Quarantine a file; responsible for incrementing the associated logger's
count of quarantines.
:param data_file: full path of data file to quarantine
:param msg: reason for quarantining to be included in the exception
:returns: DiskFileQuarantined exception object
"""
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, data_file)
self._logger.warning("Quarantined object %s: %s" % (
data_file, msg))
self._logger.increment('quarantines')
return DiskFileQuarantined(msg)
def _get_ondisk_files(self, files):
"""
Determine the on-disk files to use.
:param files: a list of files in the object's dir
:returns: dict of files to use having keys 'data_file', 'ts_file',
'meta_file'
"""
raise NotImplementedError
def _construct_exception_from_ts_file(self, ts_file, **kwargs):
"""
If a tombstone is present it means the object is considered
deleted. We just need to pull the metadata from the tombstone file
which has the timestamp to construct the deleted exception. If there
was no tombstone, just report it does not exist.
:param ts_file: the tombstone file name found on disk
:returns: DiskFileDeleted if the ts_file was provided, else
DiskFileNotExist
"""
if not ts_file:
exc = DiskFileNotExist()
else:
try:
metadata = self._failsafe_read_metadata(ts_file, ts_file)
except DiskFileQuarantined:
# If the tombstone's corrupted, quarantine it and pretend it
# wasn't there
exc = DiskFileNotExist()
else:
# All well and good that we have found a tombstone file, but
# we don't have a data file so we are just going to raise an
# exception that we could not find the object, providing the
# tombstone's timestamp.
exc = DiskFileDeleted(metadata=metadata)
return exc
def _verify_name_matches_hash(self, data_file):
"""
:param data_file: data file name, used when quarantines occur
"""
hash_from_fs = os.path.basename(self._datadir)
hash_from_name = hash_path(self._name.lstrip('/'))
if hash_from_fs != hash_from_name:
raise self._quarantine(
data_file,
"Hash of name in metadata does not match directory name")
def _verify_data_file(self, data_file, fp):
"""
Verify the metadata's name value matches what we think the object is
named.
:param data_file: data file name being consider, used when quarantines
occur
:param fp: open file pointer so that we can `fstat()` the file to
verify the on-disk size with Content-Length metadata value
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileExpired: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(data_file, "missing name metadata")
else:
if mname != self._name:
self._logger.error(
_('Client path %(client)s does not match '
'path stored in object metadata %(meta)s'),
{'client': self._name, 'meta': mname})
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if x_delete_at <= time.time() and not self._open_expired:
raise DiskFileExpired(metadata=self._metadata)
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
data_file, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
fd = fp.fileno()
try:
statbuf = os.fstat(fd)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(data_file, "not stat-able: %s" % err)
else:
obj_size = statbuf.st_size
if obj_size != metadata_size:
raise self._quarantine(
data_file, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, statbuf.st_size))
self._content_length = obj_size
return obj_size
def _failsafe_read_metadata(self, source, quarantine_filename=None,
add_missing_checksum=False):
"""
Read metadata from source object file. In case of failure, quarantine
the file.
Takes source and filename separately so we can read from an open
file if we have one.
:param source: file descriptor or filename to load the metadata from
:param quarantine_filename: full path of file to load the metadata from
:param add_missing_checksum: if True and no metadata checksum is
present, generate one and write it down
"""
try:
return read_metadata(source, add_missing_checksum)
except (DiskFileXattrNotSupported, DiskFileNotExist):
raise
except DiskFileBadMetadataChecksum as err:
raise self._quarantine(quarantine_filename, str(err))
except Exception as err:
raise self._quarantine(
quarantine_filename,
"Exception reading metadata: %s" % err)
def _merge_content_type_metadata(self, ctype_file):
"""
        When a second .meta file provides the most recent Content-Type
        metadata, merge it into the metafile_metadata.
:param ctype_file: An on-disk .meta file
"""
ctypefile_metadata = self._failsafe_read_metadata(
ctype_file, ctype_file)
if ('Content-Type' in ctypefile_metadata
and (ctypefile_metadata.get('Content-Type-Timestamp') >
self._metafile_metadata.get('Content-Type-Timestamp'))
and (ctypefile_metadata.get('Content-Type-Timestamp') >
self.data_timestamp)):
self._metafile_metadata['Content-Type'] = \
ctypefile_metadata['Content-Type']
self._metafile_metadata['Content-Type-Timestamp'] = \
ctypefile_metadata.get('Content-Type-Timestamp')
def _construct_from_data_file(self, data_file, meta_file, ctype_file,
modernize=False,
**kwargs):
"""
Open the `.data` file to fetch its metadata, and fetch the metadata
from fast-POST `.meta` files as well if any exist, merging them
properly.
:param data_file: on-disk `.data` file being considered
:param meta_file: on-disk fast-POST `.meta` file being considered
:param ctype_file: on-disk fast-POST `.meta` file being considered that
contains content-type and content-type timestamp
:param modernize: whether to update the on-disk files to the newest
format
:returns: an opened data file pointer
:raises DiskFileError: various exceptions from
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
"""
try:
fp = open(data_file, 'rb')
except IOError as e:
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
raise
self._datafile_metadata = self._failsafe_read_metadata(
fp, data_file,
add_missing_checksum=modernize)
self._metadata = {}
if meta_file:
self._metafile_metadata = self._failsafe_read_metadata(
meta_file, meta_file,
add_missing_checksum=modernize)
if ctype_file and ctype_file != meta_file:
self._merge_content_type_metadata(ctype_file)
sys_metadata = dict(
[(key, val) for key, val in self._datafile_metadata.items()
if key.lower() in (RESERVED_DATAFILE_META |
DATAFILE_SYSTEM_META)
or is_sys_meta('object', key)])
self._metadata.update(self._metafile_metadata)
self._metadata.update(sys_metadata)
# diskfile writer added 'name' to metafile, so remove it here
self._metafile_metadata.pop('name', None)
# TODO: the check for Content-Type is only here for tests that
# create .data files without Content-Type
if ('Content-Type' in self._datafile_metadata and
(self.data_timestamp >
self._metafile_metadata.get('Content-Type-Timestamp'))):
self._metadata['Content-Type'] = \
self._datafile_metadata['Content-Type']
self._metadata.pop('Content-Type-Timestamp', None)
else:
self._metadata.update(self._datafile_metadata)
if self._name is None:
# If we don't know our name, we were just given a hash dir at
# instantiation, so we'd better validate that the name hashes back
# to us
self._name = self._metadata['name']
self._verify_name_matches_hash(data_file)
self._verify_data_file(data_file, fp)
return fp
def get_metafile_metadata(self):
"""
Provide the metafile metadata for a previously opened object as a
dictionary. This is metadata that was written by a POST and does not
include any persistent metadata that was set by the original PUT.
:returns: object's .meta file metadata dictionary, or None if there is
no .meta file
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metafile_metadata
def get_datafile_metadata(self):
"""
Provide the datafile metadata for a previously opened object as a
dictionary. This is metadata that was included when the object was
first PUT, and does not include metadata set by any subsequent POST.
:returns: object's datafile metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return self._datafile_metadata
def get_metadata(self):
"""
Provide the metadata for a previously opened object as a dictionary.
:returns: object's metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object without requiring the caller to open
the object first.
:returns: metadata dictionary for an object
:raises DiskFileError: this implementation will raise the same
errors as the `open()` method.
"""
with self.open():
return self.get_metadata()
def reader(self, keep_cache=False,
_quarantine_hook=lambda m: None):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:param _quarantine_hook: 1-arg callable called when obj quarantined;
the arg is the reason for quarantine.
Default is to ignore it.
Not needed by the REST layer.
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
dr = self.reader_cls(
self._fp, self._data_file, int(self._metadata['Content-Length']),
self._metadata['ETag'], self._disk_chunk_size,
self._manager.keep_cache_size, self._device_path, self._logger,
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fp = None
return dr
def _get_tempfile(self):
fallback_to_mkstemp = False
tmppath = None
if self._use_linkat:
self._dirs_created = makedirs_count(self._datadir)
try:
fd = os.open(self._datadir, O_TMPFILE | os.O_WRONLY)
except OSError as err:
if err.errno in (errno.EOPNOTSUPP, errno.EISDIR, errno.EINVAL):
                    msg = ('open(%s, O_TMPFILE | O_WRONLY) failed: %s '
                           'Falling back to using mkstemp()'
                           % (self._datadir, os.strerror(err.errno)))
self._logger.warning(msg)
fallback_to_mkstemp = True
else:
raise
if not self._use_linkat or fallback_to_mkstemp:
if not exists(self._tmpdir):
mkdirs(self._tmpdir)
fd, tmppath = mkstemp(dir=self._tmpdir)
return fd, tmppath
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
.. note::
An implementation is not required to perform on-disk
preallocations even if the parameter is specified. But if it does
and it fails, it must raise a `DiskFileNoSpace` exception.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
try:
fd, tmppath = self._get_tempfile()
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
# No more inodes in filesystem
raise DiskFileNoSpace()
raise
dfw = None
try:
if size is not None and size > 0:
try:
fallocate(fd, size)
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
raise DiskFileNoSpace()
raise
dfw = self.writer_cls(self._name, self._datadir, fd, tmppath,
bytes_per_sync=self._bytes_per_sync,
diskfile=self,
next_part_power=self.next_part_power)
yield dfw
finally:
try:
os.close(fd)
except OSError:
pass
if (dfw is None) or (not dfw.put_succeeded):
# Try removing the temp file only if put did NOT succeed.
#
# dfw.put_succeeded is set to True after renamer() succeeds in
# DiskFileWriter._finalize_put()
try:
if tmppath:
# when mkstemp() was used
os.unlink(tmppath)
except OSError:
self._logger.exception('Error removing tempfile: %s' %
tmppath)
def write_metadata(self, metadata):
"""
Write a block of metadata to an object without requiring the caller to
create the object first. Supports fast-POST behavior semantics.
:param metadata: dictionary of metadata to be associated with the
object
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
with self.create() as writer:
writer._extension = '.meta'
writer.put(metadata)
def delete(self, timestamp):
"""
Delete the object.
This implementation creates a tombstone file using the given
timestamp, and removes any older versions of the object file. Any
file that has an older timestamp than timestamp will be deleted.
.. note::
An implementation is free to use or ignore the timestamp
parameter.
:param timestamp: timestamp to compare with each file
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
# this is dumb, only tests send in strings
timestamp = Timestamp(timestamp)
with self.create() as deleter:
deleter._extension = '.ts'
deleter.put({'X-Timestamp': timestamp.internal})
class DiskFileReader(BaseDiskFileReader):
pass
class DiskFileWriter(BaseDiskFileWriter):
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
super(DiskFileWriter, self)._put(metadata, True)
class DiskFile(BaseDiskFile):
reader_cls = DiskFileReader
writer_cls = DiskFileWriter
def _get_ondisk_files(self, files):
self._ondisk_info = self.manager.get_ondisk_files(files, self._datadir)
return self._ondisk_info
@DiskFileRouter.register(REPL_POLICY)
class DiskFileManager(BaseDiskFileManager):
diskfile_cls = DiskFile
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Implement replication policy specific handling of .data files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
if exts.get('.data'):
for ext in exts.keys():
if ext == '.data':
# older .data's are obsolete
exts[ext], obsolete = self._split_gte_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
else:
# other files at same or older timestamp as most recent
# data are obsolete
exts[ext], obsolete = self._split_gt_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
results.setdefault('obsolete', []).extend(obsolete)
# set results
results['data_info'] = exts['.data'][0]
# .meta files *may* be ready for reclaim if there is no data
if exts.get('.meta') and not exts.get('.data'):
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
if 'data_info' in ondisk_info:
file_info = ondisk_info['data_info']
hashes[None].update(
file_info['timestamp'].internal + file_info['ext'])
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: md5 of files in suffix
"""
hashes = self._hash_suffix_dir(path)
return hashes[None].hexdigest()
class ECDiskFileReader(BaseDiskFileReader):
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
super(ECDiskFileReader, self).__init__(
fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile, keep_cache)
self.frag_buf = None
self.frag_offset = 0
self.frag_size = self._diskfile.policy.fragment_size
def _init_checks(self):
super(ECDiskFileReader, self)._init_checks()
# for a multi-range GET this will be called at the start of each range;
# only initialise the frag_buf for reads starting at 0.
# TODO: reset frag buf to '' if tell() shows that start is on a frag
# boundary so that we check frags selected by a range not starting at 0
if self._started_at_0:
self.frag_buf = ''
else:
self.frag_buf = None
def _check_frag(self, frag):
if not frag:
return
if not isinstance(frag, six.binary_type):
            # ECInvalidParameter can be returned if the frag violates the
            # input format, so for safety check that the input chunk is
            # binary before probing it, to avoid quarantining a valid
            # fragment archive.
self._diskfile._logger.warn(
_('Unexpected fragment data type (not quarantined)'
'%(datadir)s: %(type)s at offset 0x%(offset)x'),
{'datadir': self._diskfile._datadir,
'type': type(frag),
'offset': self.frag_offset})
return
try:
self._diskfile.policy.pyeclib_driver.get_metadata(frag)
except (ECInvalidFragmentMetadata, ECBadFragmentChecksum,
ECInvalidParameter):
# Any of these exceptions may be returned from ECDriver with a
# corrupted fragment.
msg = 'Invalid EC metadata at offset 0x%x' % self.frag_offset
self._quarantine(msg)
# We have to terminate the response iter with an exception but it
# can't be StopIteration, this will produce a STDERR traceback in
# eventlet.wsgi if you have eventlet_debug turned on; but any
# attempt to finish the iterator cleanly won't trigger the needful
# error handling cleanup - failing to do so, and yet still failing
# to deliver all promised bytes will hang the HTTP connection
raise DiskFileQuarantined(msg)
except ECDriverError as err:
self._diskfile._logger.warn(
_('Problem checking EC fragment %(datadir)s: %(err)s'),
{'datadir': self._diskfile._datadir, 'err': err})
def _update_checks(self, chunk):
super(ECDiskFileReader, self)._update_checks(chunk)
if self.frag_buf is not None:
self.frag_buf += chunk
cursor = 0
while len(self.frag_buf) >= cursor + self.frag_size:
self._check_frag(self.frag_buf[cursor:cursor + self.frag_size])
cursor += self.frag_size
self.frag_offset += self.frag_size
if cursor:
self.frag_buf = self.frag_buf[cursor:]
def _handle_close_quarantine(self):
super(ECDiskFileReader, self)._handle_close_quarantine()
self._check_frag(self.frag_buf)
class ECDiskFileWriter(BaseDiskFileWriter):
def _finalize_durable(self, data_file_path, durable_data_file_path):
exc = None
new_data_file_path = new_durable_data_file_path = None
if self.next_part_power:
new_data_file_path = replace_partition_in_path(
data_file_path, self.next_part_power)
new_durable_data_file_path = replace_partition_in_path(
durable_data_file_path, self.next_part_power)
try:
try:
os.rename(data_file_path, durable_data_file_path)
fsync_dir(self._datadir)
if self.next_part_power and \
data_file_path != new_data_file_path:
try:
os.rename(new_data_file_path,
new_durable_data_file_path)
except OSError as exc:
self.manager.logger.exception(
'Renaming new path %s to %s failed: %s',
new_data_file_path, new_durable_data_file_path,
exc)
except (OSError, IOError) as err:
if err.errno not in (errno.ENOSPC, errno.EDQUOT):
# re-raise to catch all handler
raise
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
_('No space left on device for %(file)s (%(err)s)'),
params)
exc = DiskFileNoSpace(
'No space left on device for %(file)s (%(err)s)' % params)
else:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError as os_err:
self.manager.logger.exception(
_('Problem cleaning up %(datadir)s (%(err)s)'),
{'datadir': self._datadir, 'err': os_err})
self._part_power_cleanup(
durable_data_file_path, new_durable_data_file_path)
except Exception as err:
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
_('Problem making data file durable %(file)s (%(err)s)'),
params)
exc = DiskFileError(
'Problem making data file durable %(file)s (%(err)s)' % params)
if exc:
raise exc
def commit(self, timestamp):
"""
Finalize put by renaming the object data file to include a durable
marker. We do this for EC policy because it requires a 2-phase put
commit confirmation.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:raises DiskFileError: if the diskfile frag_index has not been set
(either during initialisation or a call to put())
"""
data_file_path = join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index))
durable_data_file_path = os.path.join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index, durable=True))
tpool_reraise(
self._finalize_durable, data_file_path, durable_data_file_path)
def put(self, metadata):
"""
The only difference between this method and the replication policy
DiskFileWriter method is adding the frag index to the metadata.
:param metadata: dictionary of metadata to be associated with object
"""
fi = None
cleanup = True
if self._extension == '.data':
# generally we treat the fragment index provided in metadata as
# canon, but if it's unavailable (e.g. tests) it's reasonable to
# use the frag_index provided at instantiation. Either way make
# sure that the fragment index is included in object sysmeta.
fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
self._diskfile._frag_index)
fi = self.manager.validate_fragment_index(fi)
self._diskfile._frag_index = fi
            # defer cleanup until commit() makes the diskfile durable
cleanup = False
super(ECDiskFileWriter, self)._put(metadata, cleanup, frag_index=fi)
class ECDiskFile(BaseDiskFile):
reader_cls = ECDiskFileReader
writer_cls = ECDiskFileWriter
def __init__(self, *args, **kwargs):
super(ECDiskFile, self).__init__(*args, **kwargs)
frag_index = kwargs.get('frag_index')
self._frag_index = None
if frag_index is not None:
self._frag_index = self.manager.validate_fragment_index(frag_index)
self._frag_prefs = self._validate_frag_prefs(kwargs.get('frag_prefs'))
self._durable_frag_set = None
def _validate_frag_prefs(self, frag_prefs):
"""
Validate that frag_prefs is a list of dicts containing expected keys
'timestamp' and 'exclude'. Convert timestamp values to Timestamp
instances and validate that exclude values are valid fragment indexes.
:param frag_prefs: data to validate, should be a list of dicts.
:raise DiskFileError: if the frag_prefs data is invalid.
:return: a list of dicts with converted and validated values.
"""
# We *do* want to preserve an empty frag_prefs list because it
# indicates that a durable file is not required.
if frag_prefs is None:
return None
try:
return [
{'timestamp': Timestamp(pref['timestamp']),
'exclude': [self.manager.validate_fragment_index(fi)
for fi in pref['exclude']]}
for pref in frag_prefs]
except ValueError as e:
raise DiskFileError(
'Bad timestamp in frag_prefs: %r: %s'
% (frag_prefs, e))
except DiskFileError as e:
raise DiskFileError(
'Bad fragment index in frag_prefs: %r: %s'
% (frag_prefs, e))
except (KeyError, TypeError) as e:
raise DiskFileError(
'Bad frag_prefs: %r: %s' % (frag_prefs, e))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest durable file found in the object
directory.
:return: A Timestamp instance, or None if no durable file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._ondisk_info.get('durable_frag_set'):
return self._ondisk_info['durable_frag_set'][0]['timestamp']
return None
@property
def fragments(self):
"""
Provides information about all fragments that were found in the object
directory, including fragments without a matching durable file, and
including any fragment chosen to construct the opened diskfile.
:return: A dict mapping <Timestamp instance> -> <list of frag indexes>,
or None if the diskfile has not been opened or no fragments
were found.
"""
if self._ondisk_info:
frag_sets = self._ondisk_info['frag_sets']
return dict([(ts, [info['frag_index'] for info in frag_set])
for ts, frag_set in frag_sets.items()])
def _get_ondisk_files(self, files):
"""
The only difference between this method and the replication policy
DiskFile method is passing in the frag_index and frag_prefs kwargs to
our manager's get_ondisk_files method.
:param files: list of file names
"""
self._ondisk_info = self.manager.get_ondisk_files(
files, self._datadir, frag_index=self._frag_index,
frag_prefs=self._frag_prefs)
return self._ondisk_info
def purge(self, timestamp, frag_index):
"""
Remove a tombstone file matching the specified timestamp or
datafile matching the specified timestamp and fragment index
from the object directory.
This provides the EC reconstructor/ssync process with a way to
remove a tombstone or fragment from a handoff node after
reverting it to its primary node.
The hash will be invalidated, and if empty or invalid the
hsh_path will be removed on next cleanup_ondisk_files.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param frag_index: fragment archive index, must be
a whole number or None.
"""
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.ts')
remove_file(os.path.join(self._datadir, purge_file))
if frag_index is not None:
# data file may or may not be durable so try removing both filename
# possibilities
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index)
remove_file(os.path.join(self._datadir, purge_file))
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index, durable=True)
remove_file(os.path.join(self._datadir, purge_file))
self.manager.invalidate_hash(dirname(self._datadir))
@DiskFileRouter.register(EC_POLICY)
class ECDiskFileManager(BaseDiskFileManager):
diskfile_cls = ECDiskFile
def validate_fragment_index(self, frag_index):
"""
Return int representation of frag_index, or raise a DiskFileError if
frag_index is not a whole number.
:param frag_index: a fragment archive index
"""
try:
frag_index = int(str(frag_index))
except (ValueError, TypeError) as e:
raise DiskFileError(
'Bad fragment index: %s: %s' % (frag_index, e))
if frag_index < 0:
raise DiskFileError(
'Fragment index must not be negative: %s' % frag_index)
return frag_index
def make_on_disk_filename(self, timestamp, ext=None, frag_index=None,
ctype_timestamp=None, durable=False, *a, **kw):
"""
Returns the EC specific filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param frag_index: a fragment archive index, used with .data extension
only, must be a whole number.
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:param durable: if True then include a durable marker in data filename.
:returns: a file name
:raises DiskFileError: if ext=='.data' and the kwarg frag_index is not
a whole number
"""
if ext == '.data':
# for datafiles only we encode the fragment index in the filename
# to allow archives of different indexes to temporarily be stored
# on the same node in certain situations
frag_index = self.validate_fragment_index(frag_index)
rv = timestamp.internal + '#' + str(frag_index)
if durable:
rv += '#d'
return '%s%s' % (rv, ext)
return super(ECDiskFileManager, self).make_on_disk_filename(
timestamp, ext, ctype_timestamp, *a, **kw)
def parse_on_disk_filename(self, filename):
"""
Returns timestamp(s) and other info extracted from a policy specific
file name. For EC policy the data file name includes a fragment index
        and possibly a durable marker, both of which must be stripped off
to retrieve the timestamp.
:param filename: the file name including extension
:returns: a dict, with keys for timestamp, frag_index, durable, ext and
ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
* frag_index is an int or None
            * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
              None for .meta files; it is always None for other extensions
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension
* durable is a boolean that is True if the filename is a data file
that includes a durable marker
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
frag_index = None
float_frag, ext = splitext(filename)
if ext == '.data':
parts = float_frag.split('#')
try:
timestamp = Timestamp(parts[0])
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
# it is an error for an EC data file to not have a valid
# fragment index
try:
frag_index = parts[1]
except IndexError:
                # expect validate_fragment_index to raise DiskFileError
pass
frag_index = self.validate_fragment_index(frag_index)
try:
durable = parts[2] == 'd'
except IndexError:
durable = False
return {
'timestamp': timestamp,
'frag_index': frag_index,
'ext': ext,
'ctype_timestamp': None,
'durable': durable
}
rv = super(ECDiskFileManager, self).parse_on_disk_filename(filename)
rv['frag_index'] = None
return rv
def _process_ondisk_files(self, exts, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Implement EC policy specific handling of .data and legacy .durable
files.
If a frag_prefs keyword arg is provided then its value may determine
which fragment index at which timestamp is used to construct the
diskfile. The value of frag_prefs should be a list. Each item in the
frag_prefs list should be a dict that describes per-timestamp
preferences using the following items:
* timestamp: An instance of :class:`~swift.common.utils.Timestamp`.
* exclude: A list of valid fragment indexes (i.e. whole numbers)
that should be EXCLUDED when choosing a fragment at the
timestamp. This list may be empty.
For example::
[
{'timestamp': <Timestamp instance>, 'exclude': [1,3]},
{'timestamp': <Timestamp instance>, 'exclude': []}
]
The order of per-timestamp dicts in the frag_prefs list is significant
and indicates descending preference for fragments from each timestamp
i.e. a fragment that satisfies the first per-timestamp preference in
the frag_prefs will be preferred over a fragment that satisfies a
        subsequent per-timestamp preference, and so on.
If a timestamp is not cited in any per-timestamp preference dict then
it is assumed that any fragment index at that timestamp may be used to
construct the diskfile.
When a frag_prefs arg is provided, including an empty list, there is no
requirement for there to be a durable file at the same timestamp as a
        data file that is chosen to construct the disk file.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file.
:param frag_prefs: if set, search for any fragment index .data file
that satisfies the frag_prefs.
"""
durable_info = None
if exts.get('.durable'):
# in older versions, separate .durable files were used to indicate
# the durability of data files having the same timestamp
durable_info = exts['.durable'][0]
# Split the list of .data files into sets of frags having the same
# timestamp, identifying the durable and newest sets (if any) as we go.
# To do this we can take advantage of the list of .data files being
# reverse-time ordered. Keep the resulting per-timestamp frag sets in
# a frag_sets dict mapping a Timestamp instance -> frag_set.
all_frags = exts.get('.data')
frag_sets = {}
durable_frag_set = None
while all_frags:
frag_set, all_frags = self._split_gte_timestamp(
all_frags, all_frags[0]['timestamp'])
# sort the frag set into ascending frag_index order
frag_set.sort(key=lambda info: info['frag_index'])
timestamp = frag_set[0]['timestamp']
frag_sets[timestamp] = frag_set
for frag in frag_set:
# a data file marked as durable may supersede a legacy durable
# file if it is newer
if frag['durable']:
if (not durable_info or
durable_info['timestamp'] < timestamp):
# this frag defines the durable timestamp
durable_info = frag
break
if durable_info and durable_info['timestamp'] == timestamp:
durable_frag_set = frag_set
break # ignore frags that are older than durable timestamp
# Choose which frag set to use
chosen_frag_set = None
if frag_prefs is not None:
candidate_frag_sets = dict(frag_sets)
# For each per-timestamp frag preference dict, do we have any frag
# indexes at that timestamp that are not in the exclusion list for
# that timestamp? If so choose the highest of those frag_indexes.
for ts, exclude_indexes in [
(ts_pref['timestamp'], ts_pref['exclude'])
for ts_pref in frag_prefs
if ts_pref['timestamp'] in candidate_frag_sets]:
available_indexes = [info['frag_index']
for info in candidate_frag_sets[ts]]
acceptable_indexes = list(set(available_indexes) -
set(exclude_indexes))
if acceptable_indexes:
chosen_frag_set = candidate_frag_sets[ts]
# override any frag_index passed in as method param with
# the last (highest) acceptable_index
frag_index = acceptable_indexes[-1]
break
else:
# this frag_set has no acceptable frag index so
# remove it from the candidate frag_sets
candidate_frag_sets.pop(ts)
else:
# No acceptable frag index was found at any timestamp mentioned
# in the frag_prefs. Choose the newest remaining candidate
# frag_set - the proxy can decide if it wants the returned
# fragment with that time.
if candidate_frag_sets:
ts_newest = sorted(candidate_frag_sets.keys())[-1]
chosen_frag_set = candidate_frag_sets[ts_newest]
else:
chosen_frag_set = durable_frag_set
# Select a single chosen frag from the chosen frag_set, by either
# matching against a specified frag_index or taking the highest index.
chosen_frag = None
if chosen_frag_set:
if frag_index is not None:
# search the frag set to find the exact frag_index
for info in chosen_frag_set:
if info['frag_index'] == frag_index:
chosen_frag = info
break
else:
chosen_frag = chosen_frag_set[-1]
# If we successfully found a frag then set results
if chosen_frag:
results['data_info'] = chosen_frag
results['durable_frag_set'] = durable_frag_set
results['chosen_frag_set'] = chosen_frag_set
if chosen_frag_set != durable_frag_set:
# hide meta files older than data file but newer than durable
# file so they don't get marked as obsolete (we already threw
# out .meta's that are older than a .durable)
exts['.meta'], _older = self._split_gt_timestamp(
exts['.meta'], chosen_frag['timestamp'])
results['frag_sets'] = frag_sets
# Mark everything older than most recent durable data as obsolete
# and remove from the exts dict.
if durable_info:
for ext in exts.keys():
exts[ext], older = self._split_gte_timestamp(
exts[ext], durable_info['timestamp'])
results.setdefault('obsolete', []).extend(older)
# Mark any isolated legacy .durable as obsolete
if exts.get('.durable') and not durable_frag_set:
results.setdefault('obsolete', []).extend(exts['.durable'])
exts.pop('.durable')
# Fragments *may* be ready for reclaim, unless they are durable
for frag_set in frag_sets.values():
if frag_set in (durable_frag_set, chosen_frag_set):
continue
results.setdefault('possible_reclaim', []).extend(frag_set)
# .meta files *may* be ready for reclaim if there is no durable data
if exts.get('.meta') and not durable_frag_set:
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _verify_ondisk_files(self, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Verify that the final combination of on disk files complies with the
erasure-coded diskfile contract.
:param results: files that have been found and accepted
:param frag_index: specifies a specific fragment index .data file
:param frag_prefs: if set, indicates that fragment preferences have
been specified and therefore that a selected fragment is not
required to be durable.
:returns: True if the file combination is compliant, False otherwise
"""
if super(ECDiskFileManager, self)._verify_ondisk_files(
results, **kwargs):
have_data_file = results['data_file'] is not None
have_durable = (results.get('durable_frag_set') is not None or
(have_data_file and frag_prefs is not None))
return have_data_file == have_durable
return False
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
The only difference between this method and the replication policy
function is the way that data files update hashes dict. Instead of all
filenames hashed into a single hasher, each data file name will fall
into a bucket keyed by its fragment index.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
for frag_set in ondisk_info['frag_sets'].values():
for file_info in frag_set:
fi = file_info['frag_index']
hashes[fi].update(file_info['timestamp'].internal)
if 'durable_frag_set' in ondisk_info:
# The durable_frag_set may be indicated by a legacy
# <timestamp>.durable file or by a durable <timestamp>#fi#d.data
# file. Either way we update hashes[None] with the string
# <timestamp>.durable which is a consistent representation of the
# abstract state of the object regardless of the actual file set.
# That way if we use a local combination of a legacy t1.durable and
# t1#0.data to reconstruct a remote t1#0#d.data then, when next
# hashed, the local and remote will make identical updates to their
# suffix hashes.
file_info = ondisk_info['durable_frag_set'][0]
hashes[None].update(file_info['timestamp'].internal + '.durable')
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: dict of md5 hex digests
"""
# hash_per_fi instead of single hash for whole suffix
# here we flatten out the hashers hexdigest into a dictionary instead
# of just returning the one hexdigest for the whole suffix
hash_per_fi = self._hash_suffix_dir(path)
return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
| 0 |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from google.datacatalog_connectors.qlik import sync
class Qlik2DataCatalogCli:
__DEFAULT_DATACATALOG_LOCATION_ID = 'us'
__DEFAULT_QLIK_DOMAIN = '.'
@classmethod
def run(cls, argv):
cls.__setup_logging()
args = cls._parse_args(argv)
args.func(args)
@classmethod
def __setup_logging(cls):
logging.basicConfig(level=logging.INFO)
@classmethod
def _parse_args(cls, argv):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--qlik-server', help='Qlik server', required=True)
parser.add_argument('--qlik-ad-domain',
help='Qlik Active Directory domain',
default=cls.__DEFAULT_QLIK_DOMAIN)
parser.add_argument('--qlik-username',
help='Qlik username',
required=True)
parser.add_argument('--qlik-password',
help='Qlik password',
required=True)
parser.add_argument('--datacatalog-project-id',
help='Google Cloud Project ID',
required=True)
parser.add_argument('--datacatalog-location-id',
                            help='Location ID to be used by Google Data Catalog'
                                 ' to store the metadata',
default=cls.__DEFAULT_DATACATALOG_LOCATION_ID)
parser.set_defaults(func=cls.__run_synchronizer)
return parser.parse_args(argv)
@classmethod
def __run_synchronizer(cls, args):
sync.MetadataSynchronizer(
qlik_server_address=args.qlik_server,
qlik_ad_domain=args.qlik_ad_domain,
qlik_username=args.qlik_username,
qlik_password=args.qlik_password,
datacatalog_project_id=args.datacatalog_project_id,
datacatalog_location_id=args.datacatalog_location_id).run()
def main():
argv = sys.argv
Qlik2DataCatalogCli.run(argv[1:] if len(argv) > 0 else argv)
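# Example invocation (illustrative only; the script name and all values are
# placeholders, not taken from this source):
#   python qlik2datacatalog_cli.py \
#     --qlik-server https://qlik.example.com \
#     --qlik-username metadata-reader --qlik-password <password> \
#     --datacatalog-project-id my-gcp-project \
#     --datacatalog-location-id us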
| 0 |
from m1 import f, g, C, stub_only, Gen
def test_overloaded_function(x):
g(<warning descr="Expected type 'dict', got 'int' instead">f(10)</warning>)
g(<warning descr="Expected type 'dict', got 'str' instead">f('foo')</warning>)
g(<warning descr="Expected type 'dict', got 'Union[int, str]' instead">f(<warning descr="Expected type 'int', got 'Dict[int, int]' instead">{1: 2}</warning>)</warning>)
g(<warning descr="Expected type 'dict', got 'Union[int, str]' instead">f(x)</warning>)
def test_overloaded_subscription_operator_parameters():
c = C()
print(c[10])
print(c['foo'])
print(c[<warning descr="Expected type 'int', got 'Dict[int, int]' instead">{1: 2}</warning>])
def test_overloaded_binary_operator_parameters():
c = C()
print(c + 10)
print(c + 'foo')
print(c + <warning descr="Expected type 'int', got 'Dict[int, int]' instead">{1: 2}</warning>)
def test_stub_only_function(x):
g(<warning descr="Expected type 'dict', got 'int' instead">stub_only(10)</warning>)
g(<warning descr="Expected type 'dict', got 'str' instead">stub_only('foo')</warning>)
g(<warning descr="Expected type 'dict', got 'Union[int, str]' instead">stub_only(x)</warning>)
g(<warning descr="Expected type 'dict', got 'Union[int, str]' instead">stub_only(<warning descr="Expected type 'int', got 'Dict[int, int]' instead">{1: 2}</warning>)</warning>)
def test_overloaded_generics(x):
g(<warning descr="Expected type 'dict', got 'int' instead">Gen(10).get(10, 10)</warning>)
g(Gen(10).get(10, <weak_warning descr="Expected type 'int' (matched generic type 'TypeVar('T')'), got 'str' instead">'foo'</weak_warning>))
g(Gen('foo').get(10, <weak_warning descr="Expected type 'str' (matched generic type 'TypeVar('T')'), got 'int' instead">10</weak_warning>))
g(<warning descr="Expected type 'dict', got 'str' instead">Gen('foo').get(10, 'foo')</warning>)
| 0.039625 |
import unittest
from algorithms.sorting.insertion import sort
from helpers.contrived import Contrived
class TestInsertionSort(unittest.TestCase):
def test_sorting_empty_list_returns_empty_list(self):
collection = []
sort(collection)
self.assertFalse(collection)
def test_none_throws_exception(self):
with self.assertRaises(TypeError):
sort(None)
def test_one_element_collection(self):
collection = [-99]
sort(collection)
self.assertEqual(collection[0], -99)
def test_sorting_sorts_in_order(self):
collection = [3, 14, 1]
expected = [1, 3, 14]
sort(collection)
self.assertListEqual(collection, expected)
def test_sorting_sorts_repeating(self):
collection = [-41, 99, 11, -41, 2]
expected = [-41, -41, 2, 11, 99]
sort(collection)
self.assertListEqual(collection, expected)
def test_sorting_sorts_sorted(self):
collection = [1, 2, 3, 4, 5]
expected = [1, 2, 3, 4, 5]
sort(collection)
self.assertEqual(collection, expected)
def test_custom_objects(self):
items = [Contrived("a", 3), Contrived("a", 1), Contrived("b", 13)]
expected = [Contrived("a", 1), Contrived("a", 3), Contrived("b", 13)]
sort(items)
self.assertListEqual(items, expected)
if __name__ == '__main__':
unittest.main()
| 0 |
import unittest
import enos.utils.extra as xenos
from enos.utils.errors import (EnosFilePathError, EnosUnknownProvider)
import enos.utils.constants as C
import contextlib
import os
import pathlib
import shutil
import tempfile
import ddt
class TestMakeProvider(unittest.TestCase):
@staticmethod
def __provider_env(provider_name):
"Returns env with a provider key"
return provider_name
@staticmethod
def __provider_env_ext(provider_name):
"Returns env with an extended provider key that may include options"
return {"type": provider_name}
def test_make_g5k(self):
"Tests the creation of G5k provider"
from enos.provider.g5k import G5k
self.assertIsInstance(
xenos.make_provider(self.__provider_env('g5k')), G5k)
self.assertIsInstance(
xenos.make_provider(self.__provider_env_ext('g5k')), G5k)
def test_make_vbox(self):
"Tests the creation of Vbox provider"
from enos.provider.enos_vagrant import Enos_vagrant
self.assertIsInstance(
xenos.make_provider(self.__provider_env('vagrant')), Enos_vagrant)
self.assertIsInstance(
xenos.make_provider(self.__provider_env_ext('vagrant')),
Enos_vagrant)
def test_make_static(self):
"Tests the creation of Static provider"
from enos.provider.static import Static
self.assertIsInstance(
xenos.make_provider(self.__provider_env('static')), Static)
self.assertIsInstance(
xenos.make_provider(self.__provider_env_ext('static')), Static)
def test_make_unexist(self):
"Tests the raise of error for unknown/unloaded provider"
with self.assertRaises(EnosUnknownProvider):
xenos.make_provider(self.__provider_env('unexist'))
@ddt.ddt
class TestPathLoading(unittest.TestCase):
longMessage = True
def setUp(self):
self.sourcedir = C.ENOS_PATH
self.workdir = os.path.realpath(tempfile.mkdtemp())
def tearDown(self):
shutil.rmtree(self.workdir)
def assertPathEqual(self, p1, p2, msg=None):
self.assertEqual(os.path.normpath(p1),
os.path.normpath(p2),
msg)
@ddt.data(('/abs/path/to/inventory.sample', 'inventory.sample'),
('/abs/path/to/workload/', 'workload/'))
@ddt.unpack
def test_seek_path(self, abspath, relpath):
# Execution from the source directory
with working_directory(self.sourcedir):
self.assertPathEqual(xenos.seekpath(abspath),
abspath,
f"Seeking for an {abspath} defined "
"with an absolute path should always "
"return that path")
self.assertPathEqual(xenos.seekpath(relpath),
os.path.join(C.RSCS_DIR, relpath),
f"Seeking for {relpath} from the source "
"directory should seek into enos source")
# Execution from a working directory
with working_directory(self.workdir):
self.assertPathEqual(xenos.seekpath(abspath),
abspath,
"Seeking for %s defined "
"with an absolute path should always "
"return that path" % abspath)
self.assertPathEqual(xenos.seekpath(relpath),
os.path.join(C.RSCS_DIR, relpath),
"In absence of %s in the working "
"directory, enos should seek for that one "
"in sources" % relpath)
# Build a fake `relpath` in the working directory and
# check seekpath behaviour
_path = pathlib.Path(relpath)
_path.parent.is_dir() or _path.parent.mkdir()
_path.exists() or os.mknod(str(_path))
self.assertPathEqual(xenos.seekpath(relpath),
os.path.join(self.workdir, relpath),
"In presence of %s in the working directory,"
"enos should take this one" % relpath)
def test_seek_unexisting_path(self):
unexisting = 'an/unexisting/path'
with working_directory(self.sourcedir):
with self.assertRaises(EnosFilePathError):
xenos.seekpath(unexisting)
with working_directory(self.workdir):
with self.assertRaises(EnosFilePathError):
xenos.seekpath(unexisting)
@contextlib.contextmanager
def working_directory(path):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
See,
https://code.activestate.com/recipes/576620-changedirectory-context-manager/
"""
prev_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
if __name__ == '__main__':
unittest.main()
| 0 |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
class ReferenceTest( unittest.TestCase ) :
def testLoad( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
b.exportForReference( "/tmp/test.grf" )
s["r"] = Gaffer.Reference()
s["r"].load( "/tmp/test.grf" )
self.assertTrue( "n1" in s["r"] )
self.assertTrue( s["r"]["out"].getInput().isSame( s["r"]["n1"]["sum"] ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
b.promotePlug( b["n1"]["op1"] )
b.exportForReference( "/tmp/test.grf" )
s = Gaffer.ScriptNode()
s["r"] = Gaffer.Reference()
s["r"].load( "/tmp/test.grf" )
self.assertTrue( "n1" in s["r"] )
self.assertTrue( s["r"]["n1"]["op1"].getInput().isSame( s["r"]["user"]["n1_op1"] ) )
self.assertTrue( s["r"]["out"].getInput().isSame( s["r"]["n1"]["sum"] ) )
s["r"]["user"]["n1_op1"].setValue( 25 )
self.assertEqual( s["r"]["out"].getValue(), 25 )
ss = s.serialise()
# referenced nodes should be referenced only, and not
# explicitly mentioned in the serialisation at all.
self.assertTrue( "AddNode" not in ss )
# but the values of user plugs should be stored, so
# they can override the values from the reference.
self.assertTrue( "\"n1_op1\"" in ss )
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertTrue( "n1" in s2["r"] )
self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n1"]["sum"] ) )
self.assertEqual( s2["r"]["out"].getValue(), 25 )
def testReload( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n3"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
s["n3"]["op1"].setInput( s["n2"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n2"] ] ) )
b.promotePlug( b["n2"]["op2"] )
b.exportForReference( "/tmp/test.grf" )
s2 = Gaffer.ScriptNode()
s2["n1"] = GafferTest.AddNode()
s2["n3"] = GafferTest.AddNode()
s2["r"] = Gaffer.Reference()
s2["r"].load( "/tmp/test.grf" )
s2["r"]["in"].setInput( s2["n1"]["sum"] )
s2["r"]["user"]["n2_op2"].setValue( 1001 )
s2["n3"]["op1"].setInput( s2["r"]["out"] )
self.assertTrue( "n2" in s2["r"] )
self.assertTrue( s2["r"]["n2"]["op1"].getInput().isSame( s2["r"]["in"] ) )
self.assertTrue( s2["r"]["n2"]["op2"].getInput().isSame( s2["r"]["user"]["n2_op2"] ) )
self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 1001 )
self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n2"]["sum"] ) )
self.assertTrue( s2["r"]["in"].getInput().isSame( s2["n1"]["sum"] ) )
self.assertTrue( s2["n3"]["op1"].getInput().isSame( s2["r"]["out"] ) )
originalReferencedNames = s2["r"].keys()
b["anotherNode"] = GafferTest.AddNode()
b.promotePlug( b["anotherNode"]["op2"] )
s.serialiseToFile( "/tmp/test.grf", b )
s2["r"].load( "/tmp/test.grf" )
self.assertTrue( "n2" in s2["r"] )
self.assertEqual( set( s2["r"].keys() ), set( originalReferencedNames + [ "anotherNode" ] ) )
self.assertTrue( s2["r"]["n2"]["op1"].getInput().isSame( s2["r"]["in"] ) )
self.assertTrue( s2["r"]["n2"]["op2"].getInput().isSame( s2["r"]["user"]["n2_op2"] ) )
self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 1001 )
self.assertTrue( s2["r"]["anotherNode"]["op2"].getInput().isSame( s2["r"]["user"]["anotherNode_op2"] ) )
self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n2"]["sum"] ) )
self.assertTrue( s2["r"]["in"].getInput().isSame( s2["n1"]["sum"] ) )
self.assertTrue( s2["n3"]["op1"].getInput().isSame( s2["r"]["out"] ) )
def testReloadPreservesPlugIdentities( self ) :
# when reloading a reference, we'd prefer to reuse the old external output plugs rather than
# replace them with new ones - this makes life much easier for observers of those plugs.
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n3"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
s["n3"]["op1"].setInput( s["n2"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n2"] ] ) )
b.exportForReference( "/tmp/test.grf" )
s2 = Gaffer.ScriptNode()
s2["r"] = Gaffer.Reference()
s2["r"].load( "/tmp/test.grf" )
inPlug = s2["r"]["in"]
outPlug = s2["r"]["out"]
s2["r"].load( "/tmp/test.grf" )
self.assertTrue( inPlug.isSame( s2["r"]["in"] ) )
self.assertTrue( outPlug.isSame( s2["r"]["out"] ) )
def testReloadDoesntRemoveCustomPlugs( self ) :
# plugs unrelated to referencing shouldn't disappear when a reference is
# reloaded. various parts of the ui might be using them for other purposes.
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
b.exportForReference( "/tmp/test.grf" )
s2 = Gaffer.ScriptNode()
s2["r"] = Gaffer.Reference()
s2["r"].load( "/tmp/test.grf" )
s2["r"]["mySpecialPlug"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s2["r"].load( "/tmp/test.grf" )
self.assertTrue( "mySpecialPlug" in s2["r"] )
def testLoadScriptWithReference( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n3"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
s["n3"]["op1"].setInput( s["n2"]["sum"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n2"] ] ) )
b.promotePlug( b["n2"]["op2"] )
b.exportForReference( "/tmp/test.grf" )
s2 = Gaffer.ScriptNode()
s2["r"] = Gaffer.Reference()
s2["r"].load( "/tmp/test.grf" )
s2["a"] = GafferTest.AddNode()
s2["r"]["user"]["n2_op2"].setValue( 123 )
s2["r"]["in"].setInput( s2["a"]["sum"] )
self.assertTrue( "n2_op2" in s2["r"]["user"] )
self.assertTrue( "n2" in s2["r"] )
self.assertTrue( "out" in s2["r"] )
self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 123 )
self.assertTrue( s2["r"]["in"].getInput().isSame( s2["a"]["sum"] ) )
s2["fileName"].setValue( "/tmp/test.gfr" )
s2.save()
s3 = Gaffer.ScriptNode()
s3["fileName"].setValue( "/tmp/test.gfr" )
s3.load()
self.assertEqual( s3["r"].keys(), s2["r"].keys() )
self.assertEqual( s3["r"]["user"].keys(), s2["r"]["user"].keys() )
self.assertEqual( s3["r"]["user"]["n2_op2"].getValue(), 123 )
self.assertTrue( s3["r"]["in"].getInput().isSame( s3["a"]["sum"] ) )
def testReferencesDontGetCustomPlugsFromBoxes( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.AddNode()
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
b["myCustomPlug"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
b.exportForReference( "/tmp/test.grf" )
s2 = Gaffer.ScriptNode()
s2["r"] = Gaffer.Reference()
s2["r"].load( "/tmp/test.grf" )
self.assertTrue( "myCustomPlug" not in s2["r"] )
def tearDown( self ) :
for f in (
"/tmp/test.grf",
"/tmp/test.gfr",
) :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
| 0.062836 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os.path
import sys
import time
#
from slughifi import *
import xlrd
#
from rncMongoDriver import rncMongoDB
#
#
voivList = ['pomorskie.xls','dolnoslaskie.xls','kujawsko-pomorskie.xls','lubelskie.xls','malopolskie.xls','opolskie.xls','podlaskie.xls','warminsko-mazurskie.xls','zachodniopomorskie.xls','lodzkie.xls','lubuskie.xls','mazowieckie.xls','podkarpackie.xls','slaskie.xls','swietokrzyskie.xls','wielkopolskie.xls']
#
#=================================================
#
colWOJ = 1
colPOW = 2
colGM = 3
colCity = 4
colSchoolType = 6
colSchool = 8
colPatron = 9
colStreet = 10
colHouseNum = 11
colPostcode = 12
colPhone = 14
colFax = 15
colWWW = 16
#
# The following school types are read:
# 00001 - kindergarten (przedszkole)
# 00003 - primary school (szkoła podstawowa)
# 00004 - middle school (gimnazjum)
# 00014 - general secondary school (liceum ogolnoksztalcace)
# 00015 - specialized secondary school (liceum profilowane)
# 00016 - technical secondary school (technikum)
#
valid_school_types = ['00001','00003', '00004', '00014', '00015', '00016' ]
#
class menData(rncMongoDB):
def __init__(self):
print "menData"
dburi = 'mongodb://uuuuu:ppppp@127.0.1:27017/rnc_production'
rncMongoDB.__init__(self,dburi,"rnc_production")
#
self.collVoiv = self.mongoDB['voivodeships']
self.collConties = self.mongoDB['counties']
self.collCities = self.mongoDB['cities']
self.collSchoolType = self.mongoDB['schooltypes']
self.collServiceGroup = self.mongoDB['servicegroups']
self.collServiceGroupMember = self.mongoDB['servicegroupmemberss']
#
        # recreate the school list collection
#self._recreateSchoolCollection()
self._deleteSchoolsGroupsAndMembers()
#
self.readDataFromXLS()
#
#--------------------------------------------------------------------------
#
def _deleteSchoolsGroupsAndMembers(self):
print "_deleteSchoolsGroupsAndMembers()..."
#
        # read the list of school groups to delete
schoolGroups = self.collServiceGroup.find({"group_type":"school"})
for sg in schoolGroups:
print "Kasuje czlonkow i grupę: ",sg['_id'], sg['name']
self.collServiceGroupMember.remove({"group_id":sg['_id']});
self.collServiceGroup.remove({"_id":sg['_id']});
#print schoolGroups
#--------------------------------------------------------------------------
#
"""
def _recreateSchoolCollection(self):
if "schools" in self.mongoDB.collection_names():
print "Kasuje kolekcje 'schools'..."
collection = self.mongoDB["schools"]
#print(collection.count() == 0) #Check if collection named 'posts' is empty
collection.drop()
print "Tworze nową kolekcje 'schools'..."
#
self.collSchools = self.mongoDB["schools"]
"""
#
#---------------------------------------------------------------------------
def readDataFromXLS(self):
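        # Summary of the loop below: each voivodeship spreadsheet is scanned
        # row by row; rows whose city and school type are already known in
        # MongoDB (and whose type is in valid_school_types) are inserted into
        # the 'servicegroups' collection as groups of type 'school', and the
        # matching city document is flagged with has_school=True.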
for xlsFile in voivList:
tmpFile = os.path.join("./MEN", xlsFile )
print "Czytam dane z pliku:[%s]" % xlsFile
wb = xlrd.open_workbook( tmpFile )
sh = wb.sheet_by_index(0)
schoolCounter=0
#print sh.nrows
# city_id = 0
#county_id_counter = 0
czy_sa_dane = False
for rownum in range(sh.nrows):
#print sh.row_values(rownum)[colCity]
cWOJ = sh.row_values(rownum)[colWOJ]
cPOW = sh.row_values(rownum)[colPOW]
cGM = sh.row_values(rownum)[colGM]
#
                city_name = sh.row_values(rownum)[colCity]  # strip() must not be used here
city_slug = slughifi(unicode(city_name))
#
school_type = sh.row_values(rownum)[colSchoolType]
if czy_sa_dane:
if len(city_slug)>0 and school_type in valid_school_types:
tCity = self.collCities.find_one({"slug":city_slug, "teryt_woj":cWOJ})
#print city_name,city_slug,", CITY=",tCity
tSchoolType = self.collSchoolType.find_one({"type":school_type})
#print "SchoolType=",tSchoolType
#print "SchoolType=",school_type
if tCity==None or tCity=="" or tSchoolType==None:
msg = "Brak miasta [%s] lub typu szkoly [%s] w DB" % (slughifi(city_name),school_type)
#open("./brakujace_miasta.log","a").write("%s -> woj: %s, pow: %s\n" % (msg, tCity['voiv_name'],tCity['county_name']))
open("./brakujace_miasta.log","a").write("%s -> woj: , pow: \n" % (msg))
#sys.exit(msg)
else:
#
#school_name = sh.row_values(rownum)[colSchool]
#
                            # data was found, so save it
#print "SchoolType=",tSchoolType
schoolGroup = {
"id":"",
"name": sh.row_values(rownum)[colSchool],
"slug": slughifi(sh.row_values(rownum)[colSchool]),
"school_info": {
"school_type_id": tSchoolType['_id'],
"school_type": tSchoolType['type'],
"school_type_name": tSchoolType['name'].lower(),
"address": "%s %s" % (sh.row_values(rownum)[colStreet].strip(),sh.row_values(rownum)[colHouseNum].strip()),
"postcode": sh.row_values(rownum)[colPostcode].strip(),
"voivodeship_id": tCity['voiv_id'],
"voivodeship": tCity['voiv_name'],
"county_id": tCity['county_id'],
"county": tCity['county_name'],
"city_id": tCity['_id'],
"city": tCity['name'],
"phone": sh.row_values(rownum)[colPhone].strip().replace(" ",""),
"www": sh.row_values(rownum)[colWWW].strip().replace(" ",""),
"email": "",
"note": "Wczytane dnia %s na podst. listy szkół MEN" % time.strftime("%Y-%m-%d"),
"patron": sh.row_values(rownum)[colPatron].strip(),
},
"is_school_class": False,
"group_type": "school",
"creation_dt": time.strftime("%Y-%m-%d %H:%M:%S"),
"edited_dt": time.strftime("%Y-%m-%d %H:%M:%S")
}
#
#print schoolGroup
#
#self.collSchools.insert(inSchool)
sgID = self.collServiceGroup.insert(schoolGroup)
#print "\n\nsgID=",sgID
self.collServiceGroup.update({"_id": sgID}, {"$set": {"id": sgID}})
schoolCounter+=1
self.collCities.update({"_id":tCity['_id']},{"$set":{"has_school":True}})
"""
#
db.save(inSchool)
#
db.test.update({"x": "y"}, {"has_school": True})
"""
#
if city_slug=="50":
czy_sa_dane=True
#
print "\twczytano szkół: ",schoolCounter
#
#break
#
#
#
#=================================================
#
if __name__ == "__main__":
mnd = menData()
#
# EOF
# | 0.046729 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import shlex
import subprocess
import six
from tempest.lib import base
import tempest.lib.cli.output_parser
from tempest.lib import exceptions
LOG = logging.getLogger(__name__)
def execute(cmd, action, flags='', params='', fail_ok=False,
merge_stderr=False, cli_dir='/usr/bin'):
"""Executes specified command for the given action.
:param cmd: command to be executed
:type cmd: string
:param action: string of the cli command to run
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: string of any optional positional args to use
:type params: string
:param fail_ok: boolean if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param merge_stderr: boolean if True the stderr buffer is merged into
stdout
:type merge_stderr: boolean
:param cli_dir: The path where the cmd can be executed
:type cli_dir: string
"""
cmd = ' '.join([os.path.join(cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
if six.PY2:
cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
result = ''
result_err = ''
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
result, result_err = proc.communicate()
if not fail_ok and proc.returncode != 0:
raise exceptions.CommandFailed(proc.returncode,
cmd,
result,
result_err)
if six.PY2:
return result
else:
return os.fsdecode(result)
class CLIClient(object):
"""Class to use OpenStack official python client CLI's with auth
:param username: The username to authenticate with
:type username: string
:param password: The password to authenticate with
:type password: string
:param tenant_name: The name of the tenant to use with the client calls
:type tenant_name: string
:param uri: The auth uri for the OpenStack Deployment
:type uri: string
:param cli_dir: The path where the python client binaries are installed.
defaults to /usr/bin
:type cli_dir: string
:param insecure: if True, --insecure is passed to python client binaries.
:type insecure: boolean
"""
def __init__(self, username='', password='', tenant_name='', uri='',
cli_dir='', insecure=False, *args, **kwargs):
"""Initialize a new CLIClient object."""
super(CLIClient, self).__init__()
self.cli_dir = cli_dir if cli_dir else '/usr/bin'
self.username = username
self.tenant_name = tenant_name
self.password = password
self.uri = uri
self.insecure = insecure
def nova(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
"""Executes nova command for the given action.
:param action: the cli command to run using nova
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --os-endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'nova', action, flags, params, fail_ok, merge_stderr)
def nova_manage(self, action, flags='', params='', fail_ok=False,
merge_stderr=False):
"""Executes nova-manage command for the given action.
:param action: the cli command to run using nova-manage
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
return execute(
'nova-manage', action, flags, params, fail_ok, merge_stderr,
self.cli_dir)
def keystone(self, action, flags='', params='', fail_ok=False,
merge_stderr=False):
"""Executes keystone command for the given action.
:param action: the cli command to run using keystone
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
return self.cmd_with_auth(
'keystone', action, flags, params, fail_ok, merge_stderr)
def glance(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
"""Executes glance command for the given action.
:param action: the cli command to run using glance
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --os-endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'glance', action, flags, params, fail_ok, merge_stderr)
def ceilometer(self, action, flags='', params='',
fail_ok=False, endpoint_type='publicURL',
merge_stderr=False):
"""Executes ceilometer command for the given action.
:param action: the cli command to run using ceilometer
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --os-endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'ceilometer', action, flags, params, fail_ok, merge_stderr)
def heat(self, action, flags='', params='',
fail_ok=False, endpoint_type='publicURL', merge_stderr=False):
"""Executes heat command for the given action.
:param action: the cli command to run using heat
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --os-endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'heat', action, flags, params, fail_ok, merge_stderr)
def cinder(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
"""Executes cinder command for the given action.
:param action: the cli command to run using cinder
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'cinder', action, flags, params, fail_ok, merge_stderr)
def swift(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
"""Executes swift command for the given action.
:param action: the cli command to run using swift
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --os-endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'swift', action, flags, params, fail_ok, merge_stderr)
def neutron(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
"""Executes neutron command for the given action.
:param action: the cli command to run using neutron
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'neutron', action, flags, params, fail_ok, merge_stderr)
def sahara(self, action, flags='', params='',
fail_ok=False, endpoint_type='publicURL', merge_stderr=True):
"""Executes sahara command for the given action.
:param action: the cli command to run using sahara
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param endpoint_type: the type of endpoint for the service
:type endpoint_type: string
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
flags += ' --endpoint-type %s' % endpoint_type
return self.cmd_with_auth(
'sahara', action, flags, params, fail_ok, merge_stderr)
def openstack(self, action, flags='', params='', fail_ok=False,
merge_stderr=False):
"""Executes openstack command for the given action.
:param action: the cli command to run using openstack
:type action: string
:param flags: any optional cli flags to use
:type flags: string
:param params: any optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the
cli return code is non-zero
:type fail_ok: boolean
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
return self.cmd_with_auth(
'openstack', action, flags, params, fail_ok, merge_stderr)
def cmd_with_auth(self, cmd, action, flags='', params='',
fail_ok=False, merge_stderr=False):
"""Executes given command with auth attributes appended.
:param cmd: command to be executed
:type cmd: string
:param action: command on cli to run
:type action: string
:param flags: optional cli flags to use
:type flags: string
:param params: optional positional args to use
:type params: string
:param fail_ok: if True an exception is not raised when the cli return
code is non-zero
:type fail_ok: boolean
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
'--os-auth-url %s' %
(self.username,
self.tenant_name,
self.password,
self.uri))
if self.insecure:
flags = creds + ' --insecure ' + flags
else:
flags = creds + ' ' + flags
return execute(cmd, action, flags, params, fail_ok, merge_stderr,
self.cli_dir)
class ClientTestBase(base.BaseTestCase):
"""Base test class for testing the OpenStack client CLI interfaces."""
def setUp(self):
super(ClientTestBase, self).setUp()
self.clients = self._get_clients()
self.parser = tempest.lib.cli.output_parser
def _get_clients(self):
"""Abstract method to initialize CLIClient object.
        This method must be overridden in child test classes. It should be
used to initialize the CLIClient object with the appropriate
credentials during the setUp() phase of tests.
"""
raise NotImplementedError
def assertTableStruct(self, items, field_names):
"""Verify that all items has keys listed in field_names.
:param items: items to assert are field names in the output table
:type items: list
:param field_names: field names from the output table of the cmd
:type field_names: list
"""
for item in items:
for field in field_names:
self.assertIn(field, item)
def assertFirstLineStartsWith(self, lines, beginning):
"""Verify that the first line starts with a string
:param lines: strings for each line of output
:type lines: list
:param beginning: verify this is at the beginning of the first line
:type beginning: string
"""
self.assertTrue(lines[0].startswith(beginning),
msg=('Beginning of first line has invalid content: %s'
% lines[:3]))
| 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cpro', '0009_auto_20161001_0532'),
]
operations = [
migrations.AddField(
model_name='idol',
name='_cache_total_cards',
field=models.PositiveIntegerField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='idol',
name='_cache_total_events',
field=models.PositiveIntegerField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='idol',
name='_cache_total_fans',
field=models.PositiveIntegerField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='idol',
name='_cache_totals_last_update',
field=models.DateTimeField(null=True),
preserve_default=True,
),
]
| 0 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import constants as q_const
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import api as db_api
from neutron.db import dhcp_rpc_base
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import uuidutils
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
LOG = log.getLogger(__name__)
TAP_DEVICE_PREFIX = 'tap'
TAP_DEVICE_PREFIX_LENGTH = 3
class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin,
type_tunnel.TunnelRpcCallbackMixin):
RPC_API_VERSION = '1.1'
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
def __init__(self, notifier, type_manager):
# REVISIT(kmestery): This depends on the first three super classes
# not having their own __init__ functions. If an __init__() is added
# to one, this could break. Fix this and add a unit test to cover this
# test in H3.
super(RpcCallbacks, self).__init__(notifier, type_manager)
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def _device_to_port_id(cls, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
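        # e.g. a device named 'tap3f5a2b1c-9d' maps to port id '3f5a2b1c-9d';
        # anything without the 'tap' prefix falls through to the UUID/MAC
        # handling below (the device name here is an illustrative placeholder).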
if device.startswith(TAP_DEVICE_PREFIX):
return device[TAP_DEVICE_PREFIX_LENGTH:]
else:
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC, then remove the 'else' clause
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(device)
if port:
return port.id
return device
@classmethod
def get_port_from_device(cls, device):
port_id = cls._device_to_port_id(device)
port = db.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested by agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
session = db_api.get_session()
with session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device}
segments = db.get_network_segments(session, port.network_id)
if not segments:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s has network %(network_id)s with "
"no segments"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id})
return {'device': device}
binding = db.ensure_port_binding(session, port.id)
if not binding.segment:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s not "
"bound, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id,
'vif_type': binding.vif_type})
return {'device': device}
segment = self._find_segment(segments, binding.segment)
if not segment:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s "
"invalid segment, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id,
'vif_type': binding.vif_type})
return {'device': device}
new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up
else q_const.PORT_STATUS_DOWN)
if port.status != new_status:
plugin = manager.NeutronManager.get_plugin()
plugin.update_port_status(rpc_context,
port_id,
new_status)
port.status = new_status
entry = {'device': device,
'network_id': port.network_id,
'port_id': port.id,
'admin_state_up': port.admin_state_up,
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK]}
LOG.debug(_("Returning: %s"), entry)
return entry
def _find_segment(self, segments, segment_id):
for segment in segments:
if segment[api.ID] == segment_id:
return segment
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug(_("Device %(device)s no longer exists at agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = self._device_to_port_id(device)
port_exists = True
if (host and not plugin.port_bound_to_host(port_id, host)):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
return {'device': device,
'exists': port_exists}
port_exists = plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_DOWN)
return {'device': device,
'exists': port_exists}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug(_("Device %(device)s up at agent %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = self._device_to_port_id(device)
if (host and not plugin.port_bound_to_host(port_id, host)):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
return
plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_ACTIVE)
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin,
type_tunnel.TunnelAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
"""
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
| 0 |
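As a side note on the port lookup above: RpcCallbacks._device_to_port_id simply strips the 'tap' prefix from Linux tap device names before falling back to a UUID check and database lookup. A minimal, self-contained sketch of just that prefix handling (the fallback is omitted here since it needs a running Neutron deployment):
TAP_DEVICE_PREFIX = 'tap'
def device_to_port_id(device):
    # Mirrors only the prefix-stripping branch of _device_to_port_id above;
    # the UUID check and database lookup are intentionally left out.
    if device.startswith(TAP_DEVICE_PREFIX):
        return device[len(TAP_DEVICE_PREFIX):]
    return device
print(device_to_port_id('tap1234abcd-ef'))  # -> '1234abcd-ef'
print(device_to_port_id('1234abcd-ef'))     # unchanged when there is no prefix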
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import numpy as np
from numpy.testing import assert_array_equal
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.io.kit import read_mrk
from mne.utils import (requires_mayavi, run_tests_if_main, traits_test,
modified_env)
mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
mrk_avg_path = os.path.join(kit_data_dir, 'test_mrk.sqd')
@requires_mayavi
@traits_test
def test_combine_markers_model(tmpdir):
"""Test CombineMarkersModel Traits Model."""
from mne.gui._marker_gui import CombineMarkersModel
tempdir = str(tmpdir)
tgt_fname = os.path.join(tempdir, 'test.txt')
model = CombineMarkersModel()
# set one marker file
assert not model.mrk3.can_save
model.mrk1.file = mrk_pre_path
assert model.mrk3.can_save
assert_array_equal(model.mrk3.points, model.mrk1.points)
# setting second marker file
model.mrk2.file = mrk_pre_path
assert_array_equal(model.mrk3.points, model.mrk1.points)
# set second marker
model.mrk2.clear = True
model.mrk2.file = mrk_post_path
assert np.any(model.mrk3.points)
points_interpolate_mrk1_mrk2 = model.mrk3.points
# change interpolation method
model.mrk3.method = 'Average'
mrk_avg = read_mrk(mrk_avg_path)
assert_array_equal(model.mrk3.points, mrk_avg)
# clear second marker
model.mrk2.clear = True
assert_array_equal(model.mrk1.points, model.mrk3.points)
# I/O
model.mrk2.file = mrk_post_path
model.mrk3.save(tgt_fname)
mrk_io = read_mrk(tgt_fname)
assert_array_equal(mrk_io, model.mrk3.points)
# exclude an individual marker
model.mrk1.use = [1, 2, 3, 4]
assert_array_equal(model.mrk3.points[0], model.mrk2.points[0])
assert_array_equal(model.mrk3.points[1:], mrk_avg[1:])
# reset model
model.clear = True
model.mrk1.file = mrk_pre_path
model.mrk2.file = mrk_post_path
assert_array_equal(model.mrk3.points, points_interpolate_mrk1_mrk2)
@requires_mayavi
@traits_test
def test_combine_markers_panel(check_gui_ci):
"""Test CombineMarkersPanel."""
from mne.gui._marker_gui import CombineMarkersPanel
with modified_env(_MNE_GUI_TESTING_MODE='true'):
CombineMarkersPanel()
run_tests_if_main()
| 0 |
# coding=utf-8
"""
This module bundles commonly used utility methods or helper classes that are used in multiple places within
OctoPrint's source code.
"""
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import traceback
import sys
import re
import tempfile
import logging
import shutil
import threading
from functools import wraps
import warnings
import contextlib
logger = logging.getLogger(__name__)
def warning_decorator_factory(warning_type):
def specific_warning(message, stacklevel=1, since=None, includedoc=None, extenddoc=False):
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
# we need to increment the stacklevel by one because otherwise we'll get the location of our
# func_wrapper in the log, instead of our caller (which is the real caller of the wrapped function)
warnings.warn(message, warning_type, stacklevel=stacklevel + 1)
return func(*args, **kwargs)
if includedoc is not None and since is not None:
docstring = "\n.. deprecated:: {since}\n {message}\n\n".format(since=since, message=includedoc)
if extenddoc and hasattr(func_wrapper, "__doc__") and func_wrapper.__doc__ is not None:
docstring = func_wrapper.__doc__ + "\n" + docstring
func_wrapper.__doc__ = docstring
return func_wrapper
return decorator
return specific_warning
deprecated = warning_decorator_factory(DeprecationWarning)
"""
A decorator for deprecated methods. Logs a deprecation warning via Python's :mod:`warnings` module including the
supplied ``message``. The call stack level used (for adding the source location of the offending call to the
warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
pending_deprecation = warning_decorator_factory(PendingDeprecationWarning)
"""
A decorator for methods pending deprecation. Logs a pending deprecation warning via Python's :mod:`warnings` module
including the supplied ``message``. The call stack level used (for adding the source location of the offending call to
the warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
def get_formatted_size(num):
"""
Formats the given byte count as a human readable rounded size expressed in the most pressing unit among B(ytes),
K(ilo)B(ytes), M(ega)B(ytes), G(iga)B(ytes) and T(era)B(ytes), with one decimal place.
Based on http://stackoverflow.com/a/1094933/2028598
Arguments:
num (int): The byte count to format
Returns:
string: The formatted byte count.
"""
for x in ["B","KB","MB","GB"]:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
def is_allowed_file(filename, extensions):
"""
Determines if the provided ``filename`` has one of the supplied ``extensions``. The check is done case-insensitive.
Arguments:
filename (string): The file name to check against the extensions.
extensions (list): The extensions to check against, a list of strings
Return:
boolean: True if the file name's extension matches one of the allowed extensions, False otherwise.
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in map(str.lower, extensions)
def get_formatted_timedelta(d):
"""
Formats a timedelta instance as "HH:MM:ss" and returns the resulting string.
Arguments:
d (datetime.timedelta): The timedelta instance to format
Returns:
string: The timedelta formatted as "HH:MM:ss"
"""
if d is None:
return None
hours = d.days * 24 + d.seconds // 3600
minutes = (d.seconds % 3600) // 60
seconds = d.seconds % 60
return "%02d:%02d:%02d" % (hours, minutes, seconds)
def get_formatted_datetime(d):
"""
Formats a datetime instance as "YYYY-mm-dd HH:MM" and returns the resulting string.
Arguments:
d (datetime.datetime): The datetime instance to format
Returns:
string: The datetime formatted as "YYYY-mm-dd HH:MM"
"""
if d is None:
return None
return d.strftime("%Y-%m-%d %H:%M")
def get_class(name):
"""
Retrieves the class object for a given fully qualified class name.
Taken from http://stackoverflow.com/a/452981/2028598.
Arguments:
name (string): The fully qualified class name, including all modules separated by ``.``
Returns:
type: The class if it could be found.
Raises:
AttributeError: The class could not be found.
"""
parts = name.split(".")
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def get_exception_string():
"""
Retrieves the exception info of the last raised exception and returns it as a string formatted as
``<exception type>: <exception message> @ <source file>:<function name>:<line number>``.
Returns:
string: The formatted exception information.
"""
locationInfo = traceback.extract_tb(sys.exc_info()[2])[0]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
@deprecated("get_free_bytes has been deprecated and will be removed in the future",
includedoc="Replaced by `psutil.disk_usage <http://pythonhosted.org/psutil/#psutil.disk_usage>`_.",
since="1.2.5")
def get_free_bytes(path):
import psutil
return psutil.disk_usage(path).free
def get_dos_filename(origin, existing_filenames=None, extension=None, **kwargs):
"""
Converts the provided input filename to an 8.3 DOS compatible filename. If ``existing_filenames`` is provided, the
conversion result is guaranteed not to collide with any of those filenames.
Uses :func:`find_collision_free_name` internally.
Arguments:
input (string): The original filename incl. extension to convert to the 8.3 format.
existing_filenames (list): A list of existing filenames with which the generated 8.3 name must not collide.
Optional.
extension (string): The .3 file extension to use for the generated filename. If not provided, the extension of
the provided ``filename`` will simply be truncated to 3 characters.
kwargs (dict): Additional keyword arguments to provide to :func:`find_collision_free_name`.
Returns:
string: A 8.3 compatible translation of the original filename, not colliding with the optionally provided
``existing_filenames`` and with the provided ``extension`` or the original extension shortened to
a maximum of 3 characters.
Raises:
ValueError: No 8.3 compatible name could be found that doesn't collide with the provided ``existing_filenames``.
"""
if origin is None:
return None
if existing_filenames is None:
existing_filenames = []
filename, ext = os.path.splitext(origin)
if extension is None:
extension = ext
return find_collision_free_name(filename, extension, existing_filenames, **kwargs)
def find_collision_free_name(filename, extension, existing_filenames, max_power=2):
"""
Tries to find a collision free translation of "<filename>.<extension>" to the 8.3 DOS compatible format,
preventing collisions with any of the ``existing_filenames``.
First strips all of ``."/\\[]:;=,`` from the filename and extensions, converts them to lower case and truncates
the ``extension`` to a maximum length of 3 characters.
If the filename is already equal or less than 8 characters in length after that procedure and "<filename>.<extension>"
are not contained in the ``existing_files``, that concatenation will be returned as the result.
If not, the following algorithm will be applied to try to find a collision free name::
set counter := power := 1
while counter < 10^max_power:
set truncated := substr(filename, 0, 6 - power + 1) + "~" + counter
set result := "<truncated>.<extension>"
if result is collision free:
return result
counter++
if counter >= 10 ** power:
power++
raise ValueError
This will basically -- for a given original filename of ``some_filename`` and an extension of ``gco`` -- iterate
through names of the format ``some_f~1.gco``, ``some_f~2.gco``, ..., ``some_~10.gco``, ``some_~11.gco``, ...,
``<prefix>~<n>.gco`` for ``n`` less than 10 ^ ``max_power``, returning as soon as one is found that is not colliding.
Arguments:
filename (string): The filename without the extension to convert to 8.3.
extension (string): The extension to convert to 8.3 -- will be truncated to 3 characters if it's longer than
that.
existing_filenames (list): A list of existing filenames to prevent name collisions with.
max_power (int): Limits the possible attempts of generating a collision free name to 10 ^ ``max_power``
variations. Defaults to 2, so the name generation will maximally reach ``<name>~99.<ext>`` before
aborting and raising an exception.
Returns:
string: A 8.3 representation of the provided original filename, ensured to not collide with the provided
``existing_filenames``
Raises:
ValueError: No collision free name could be found.
"""
# TODO unit test!
if not isinstance(filename, unicode):
filename = unicode(filename)
if not isinstance(extension, unicode):
extension = unicode(extension)
def make_valid(text):
return re.sub(r"\s+", "_", text.translate({ord(i):None for i in ".\"/\\[]:;=,"})).lower()
filename = make_valid(filename)
extension = make_valid(extension)
extension = extension[:3] if len(extension) > 3 else extension
if len(filename) <= 8 and not filename + "." + extension in existing_filenames:
# early exit
return filename + "." + extension
counter = 1
power = 1
while counter < (10 ** max_power):
result = filename[:(6 - power + 1)] + "~" + str(counter) + "." + extension
if result not in existing_filenames:
return result
counter += 1
if counter >= 10 ** power:
power += 1
raise ValueError("Can't create a collision free filename")
def silent_remove(file):
"""
Silently removes a file. Does not raise an error if the file doesn't exist.
Arguments:
file (string): The path of the file to be removed
"""
try:
os.remove(file)
except OSError:
pass
def sanitize_ascii(line):
if not isinstance(line, basestring):
raise ValueError("Expected either str or unicode but got {} instead".format(line.__class__.__name__ if line is not None else None))
return to_unicode(line, encoding="ascii", errors="replace").rstrip()
def filter_non_ascii(line):
"""
Filter predicate to test if a line contains non ASCII characters.
Arguments:
line (string): The line to test
Returns:
boolean: True if the line contains non ASCII characters, False otherwise.
"""
try:
to_str(to_unicode(line, encoding="ascii"), encoding="ascii")
return False
except ValueError:
return True
def to_str(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a str."""
if isinstance(s_or_u, unicode):
return s_or_u.encode(encoding, errors=errors)
else:
return s_or_u
def to_unicode(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a unicode string."""
if isinstance(s_or_u, str):
return s_or_u.decode(encoding, errors=errors)
else:
return s_or_u
def dict_merge(a, b):
"""
Recursively deep-merges two dictionaries.
Taken from https://www.xormedia.com/recursively-merge-dictionaries-in-python/
Arguments:
a (dict): The dictionary to merge ``b`` into
b (dict): The dictionary to merge into ``a``
Returns:
dict: ``b`` deep-merged into ``a``
"""
from copy import deepcopy
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
def dict_clean(a, b):
"""
Recursively deep-cleans ``b`` from ``a``, removing all keys and corresponding values from ``a`` that appear in
``b``.
Arguments:
a (dict): The dictionary to clean from ``b``.
b (dict): The dictionary to clean ``b`` from.
Results:
dict: A new dict based on ``a`` with all keys (and corresponding values) found in ``b`` removed.
"""
from copy import deepcopy
if not isinstance(b, dict):
return a
result = deepcopy(a)
for k, v in a.iteritems():
if not k in b:
del result[k]
elif isinstance(v, dict):
result[k] = dict_clean(v, b[k])
else:
result[k] = deepcopy(v)
return result
def dict_contains_keys(a, b):
"""
Recursively deep-checks if ``a`` contains all keys found in ``b``.
Example::
>>> dict_contains_keys(dict(foo="bar", fnord=dict(a=1, b=2, c=3)), dict(foo="some_other_bar", fnord=dict(b=100)))
True
>>> dict_contains_keys(dict(foo="bar", fnord=dict(a=1, b=2, c=3)), dict(foo="some_other_bar", fnord=dict(b=100, d=20)))
False
Arguments:
a (dict): The dictionary to check for the keys from ``b``.
b (dict): The dictionary whose keys to check ``a`` for.
Returns:
boolean: True if all keys found in ``b`` are also present in ``a``, False otherwise.
"""
if not isinstance(a, dict) or not isinstance(b, dict):
return False
for k, v in a.iteritems():
if not k in b:
return False
elif isinstance(v, dict):
if not dict_contains_keys(v, b[k]):
return False
return True
class Object(object):
pass
def interface_addresses(family=None):
"""
Retrieves all of the host's network interface addresses.
"""
import netifaces
if not family:
family = netifaces.AF_INET
for interface in netifaces.interfaces():
try:
ifaddresses = netifaces.ifaddresses(interface)
except:
continue
if family in ifaddresses:
for ifaddress in ifaddresses[family]:
if not ifaddress["addr"].startswith("169.254."):
yield ifaddress["addr"]
def address_for_client(host, port):
"""
Determines the address of the network interface on this host needed to connect to the indicated client host and port.
"""
import socket
for address in interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((address, 0))
sock.connect((host, port))
return address
except:
continue
@contextlib.contextmanager
def atomic_write(filename, mode="w+b", prefix="tmp", suffix=""):
temp_config = tempfile.NamedTemporaryFile(mode=mode, prefix=prefix, suffix=suffix, delete=False)
yield temp_config
temp_config.close()
shutil.move(temp_config.name, filename)
class RepeatedTimer(threading.Thread):
"""
This class represents an action that should be run repeatedly in an interval. It is similar to python's
own :class:`threading.Timer` class, but instead of only running once the ``function`` will be run again and again,
sleeping the stated ``interval`` in between.
RepeatedTimers are started, as with threads, by calling their ``start()`` method. The timer can be stopped (in
between runs) by calling the :func:`cancel` method. The time actually waited before each execution of the loop may
not be exactly the same as the interval specified by the user.
For example:
.. code-block:: python
def hello():
print("Hello World!")
t = RepeatedTimer(1.0, hello)
t.start() # prints "Hello World!" every second
Another example with dynamic interval and loop condition:
.. code-block:: python
count = 0
maximum = 5
factor = 1
def interval():
global count
global factor
return count * factor
def condition():
global count
global maximum
return count <= maximum
def hello():
print("Hello World!")
global count
count += 1
t = RepeatedTimer(interval, hello, run_first=True, condition=condition)
t.start() # prints "Hello World!" 5 times, printing the first one
# directly, then waiting 1, 2, 3, 4s in between (adaptive interval)
Arguments:
interval (float or callable): The interval between each ``function`` call, in seconds. Can also be a callable
returning the interval to use, in case the interval is not static.
function (callable): The function to call.
args (list or tuple): The arguments for the ``function`` call. Defaults to an empty list.
kwargs (dict): The keyword arguments for the ``function`` call. Defaults to an empty dict.
run_first (boolean): If set to True, the function will be run for the first time *before* the first wait period.
If set to False (the default), the function will be run for the first time *after* the first wait period.
condition (callable): Condition that needs to be True for loop to continue. Defaults to ``lambda: True``.
daemon (bool): daemon flag to set on underlying thread.
"""
def __init__(self, interval, function, args=None, kwargs=None, run_first=False, condition=None, daemon=True):
threading.Thread.__init__(self)
if args is None:
args = []
if kwargs is None:
kwargs = dict()
if condition is None:
condition = lambda: True
if not callable(interval):
self.interval = lambda: interval
else:
self.interval = interval
self.function = function
self.finished = threading.Event()
self.args = args
self.kwargs = kwargs
self.run_first = run_first
self.condition = condition
self.daemon = daemon
def cancel(self):
self.finished.set()
def run(self):
while self.condition():
if self.run_first:
# if we are to run the function BEFORE waiting for the first time
self.function(*self.args, **self.kwargs)
# make sure our condition is still met before running into the downtime
if not self.condition():
break
# wait, but break if we are cancelled
self.finished.wait(self.interval())
if self.finished.is_set():
break
if not self.run_first:
# if we are to run the function AFTER waiting for the first time
self.function(*self.args, **self.kwargs)
# make sure we set our finished event so we can detect that the loop was finished
self.finished.set()
class CountedEvent(object):
def __init__(self, value=0, max=None, name=None):
logger_name = __name__ + ".CountedEvent" + (".{name}".format(name=name) if name is not None else "")
self._logger = logging.getLogger(logger_name)
self._counter = 0
self._max = max
self._mutex = threading.Lock()
self._event = threading.Event()
self._internal_set(value)
def set(self):
with self._mutex:
self._internal_set(self._counter + 1)
def clear(self, completely=False):
with self._mutex:
if completely:
self._internal_set(0)
else:
self._internal_set(self._counter - 1)
def wait(self, timeout=None):
self._event.wait(timeout)
def blocked(self):
with self._mutex:
return self._counter == 0
def _internal_set(self, value):
self._logger.debug("New counter value: {value}".format(value=value))
self._counter = value
if self._counter <= 0:
self._counter = 0
self._event.clear()
self._logger.debug("Cleared event")
else:
if self._max is not None and self._counter > self._max:
self._counter = self._max
self._event.set()
self._logger.debug("Set event")
class InvariantContainer(object):
def __init__(self, initial_data=None, guarantee_invariant=None):
from collections import Iterable
from threading import RLock
if guarantee_invariant is None:
guarantee_invariant = lambda data: data
self._data = []
self._mutex = RLock()
self._invariant = guarantee_invariant
if initial_data is not None and isinstance(initial_data, Iterable):
for item in initial_data:
self.append(item)
def append(self, item):
with self._mutex:
self._data.append(item)
self._data = self._invariant(self._data)
def remove(self, item):
with self._mutex:
self._data.remove(item)
self._data = self._invariant(self._data)
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.__iter__()
| 0.028764 |
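The ``deprecated``/``pending_deprecation`` factories documented above follow the usual warnings-module pattern. A minimal, self-contained sketch of that pattern (a simplified stand-in, not the OctoPrint implementation itself; the helper names here are made up):
import functools
import warnings
def deprecated(message):
    # Simplified stand-in for the factory above: emit a DeprecationWarning
    # pointing at the caller, then delegate to the wrapped function.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
@deprecated("old_helper is deprecated, use new_helper instead")
def old_helper(x):
    return x * 2
if __name__ == "__main__":
    warnings.simplefilter("always", DeprecationWarning)
    print(old_helper(21))  # prints 42 and emits a DeprecationWarning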
from os.path import dirname, abspath, basename
from mycroft.audio.services import AudioBackend
from mycroft.util.log import getLogger
import vlc
logger = getLogger(abspath(__file__).split('/')[-2])
class VlcService(AudioBackend):
def __init__(self, config, emitter=None, name='vlc'):
self.instance = vlc.Instance()
self.list_player = self.instance.media_list_player_new()
self.player = self.instance.media_player_new()
self.list_player.set_media_player(self.player)
self.config = config
self.emitter = emitter
self.name = name
self.normal_volume = None
def supported_uris(self):
return ['file', 'http']
def clear_list(self):
empty = self.instance.media_list_new()
self.list_player.set_media_list(empty)
def add_list(self, tracks):
logger.info("Track list is " + str(tracks))
vlc_tracks = self.instance.media_list_new()
for t in tracks:
vlc_tracks.add_media(self.instance.media_new(t))
self.list_player.set_media_list(vlc_tracks)
def play(self):
logger.info('VLCService Play')
self.list_player.play()
def stop(self):
logger.info('VLCService Stop')
self.clear_list()
self.list_player.stop()
def pause(self):
self.player.set_pause(1)
def resume(self):
self.player.set_pause(0)
def next(self):
self.list_player.next()
def previous(self):
self.list_player.previous()
def lower_volume(self):
if self.normal_volume is None:
self.normal_volume = self.player.audio_get_volume()
self.player.audio_set_volume(self.normal_volume / 5)
def restore_volume(self):
if self.normal_volume:
self.player.audio_set_volume(self.normal_volume)
self.normal_volume = None
def track_info(self):
ret = {}
meta = vlc.Meta
t = self.player.get_media()
ret['album'] = t.get_meta(meta.Album)
ret['artists'] = [t.get_meta(meta.Artist)]
ret['name'] = t.get_meta(meta.Title)
return ret
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'vlc']
instances = [VlcService(s[1], emitter, s[0]) for s in services]
return instances
| 0 |
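For context on ``load_service`` above: it expects a ``backends`` mapping in the base configuration and instantiates one ``VlcService`` per entry whose ``type`` is ``'vlc'``. A hedged sketch of that configuration shape (the backend name ``local_vlc`` and the extra key are assumptions, not part of the module above):
# Hypothetical configuration passed to load_service(); only the 'type' key
# is actually required by the filtering logic above.
base_config = {
    'backends': {
        'local_vlc': {
            'type': 'vlc',
            'active': True,   # assumed extra key, ignored by load_service()
        }
    }
}
# With python-vlc installed, this would return a list with one VlcService:
# services = load_service(base_config, emitter=None)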
from __future__ import division, absolute_import
import os
import numpy as np
import numpy.testing as npt
from .. import load_icgem_gdf
MODULE_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(MODULE_DIR, 'data')
def test_load_icgem_gdf():
"Check if load_icgem_gdf reads ICGEM test data correctly"
fname = os.path.join(TEST_DATA_DIR, "icgem-sample.gdf")
data = load_icgem_gdf(fname)
area = [14.0, 28.0, 150.0, 164.0]
lon = np.arange(area[2], area[3] + 1, 2.0, dtype="float64")
lat = np.arange(area[0], area[1] + 1, 2.0, dtype="float64")
shape = (lat.size, lon.size)
lon, lat = np.meshgrid(lon, lat)
true_data = np.zeros_like(lat, dtype="float64")
for i in range(true_data.shape[1]):
true_data[:, i] = i
lon, lat, true_data = lon.ravel(), lat.ravel(), true_data.ravel()
assert data['shape'] == shape
assert data['area'] == area
npt.assert_equal(data['longitude'], lon)
npt.assert_equal(data['latitude'], lat)
assert data['sample_data'].size == true_data.size
npt.assert_allclose(true_data, data['sample_data'])
| 0 |
#!/usr/bin/env python
"""Converts X11 color files to RGB formatted Python dicts"""
import sys
import pprint
if len(sys.argv)<2:
print "Usage: %s filename" % sys.argv[0]
sys.exit(1)
colors = {
"black": (0. , 0. , 0. , 1.),
"silver": (0.75, 0.75, 0.75, 1.),
"gray": (0.5 , 0.5 , 0.5 , 1.),
"white": (1. , 1. , 1. , 1.),
"maroon": (0.5 , 0. , 0. , 1.),
"red": (1. , 0. , 0. , 1.),
"purple": (0.5 , 0. , 0.5 , 1.),
"fuchsia": (1. , 0. , 1. , 1.),
"green": (0. , 0.5 , 0. , 1.),
"lime": (0. , 1. , 0. , 1.),
"olive": (0.5 , 0.5 , 0. , 1.),
"yellow": (1. , 1. , 0. , 1.),
"navy": (0. , 0. , 0.5 , 1.),
"blue": (0. , 0. , 1. , 1.),
"teal": (0. , 0.5 , 0.5 , 1.),
"aqua": (0. , 1. , 1. , 1.),
}
f = open(sys.argv[1])
for line in f:
if line[0] == '!': continue
parts = line.strip().split(None, 3)
for x in xrange(3):
parts[x] = float(parts[x])/255.
parts[3:3] = [1.]
colors[parts[4].lower()] = tuple(parts[0:4])
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(colors)
| 0.043127 |
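A worked example of the per-line parsing the converter above performs, using a typical rgb.txt-style line (written with ``range`` so the snippet also runs under Python 3):
line = "255 250 250\t\tsnow"
parts = line.strip().split(None, 3)      # ['255', '250', '250', 'snow']
for x in range(3):
    parts[x] = float(parts[x]) / 255.    # scale channels to 0..1
parts[3:3] = [1.]                        # insert the alpha channel
entry = {parts[4].lower(): tuple(parts[0:4])}
print(entry)  # {'snow': (1.0, 0.9803921568627451, 0.9803921568627451, 1.0)}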
# https://leetcode.com/problems/fruit-into-baskets/
class Solution:
def totalFruit(self, tree):
i = 0
start_position = 0
length = len(tree)
d = {}
new_value_count = 0
max_count = 0
while i < length:
# print(str(i)+'. '+ str(tree[i]))
value = 0
if tree[i] in d:
value = d.get(tree[i])
else:
new_value_count = new_value_count + 1
if new_value_count == 2:
# print('Start position: ' + str(i))
start_position = i
if new_value_count == 3:
new_value_count = 1
# print('Start from here: ' + str(start_position))
i = start_position
count = Solution.get_count_of_fruit(d)
d = {}
if(count > max_count):
max_count = count
value = value + 1
d[tree[i]] = value
i+=1
count = Solution.get_count_of_fruit(d)
if(count > max_count):
max_count = count
print('Output: ' + str(max_count))
return max_count
def get_count_of_fruit(d):
count = 0
for key in d:
count += d[key]
# print('Count of d: ' + str(count))
return count
def main():
list_of_trees = [3,3,3,1,2,1,1,2,3,3,4,1,1,1]
print("Input: " + str(list_of_trees))
Solution.totalFruit(Solution, list_of_trees)
list_of_trees = [3,3,3,1,2,1,1,2,3,3,4]
print("Input: " + str(list_of_trees))
Solution.totalFruit(Solution, list_of_trees)
list_of_trees = [1,1,1,2,2,2,2,3,3,3,3,3,1,1,1]
print("Input: " + str(list_of_trees))
Solution.totalFruit(Solution, list_of_trees)
if __name__ == '__main__':
main()
| 0.034158 |
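The restart-based approach above re-scans from a saved position whenever a third fruit type appears. An alternative sketch of the same problem (longest window containing at most two distinct values) using a standard O(n) sliding window — not the author's method above, just a common simplification:
from collections import defaultdict
def total_fruit(tree):
    counts = defaultdict(int)   # fruit type -> occurrences inside the window
    left = 0
    best = 0
    for right, fruit in enumerate(tree):
        counts[fruit] += 1
        while len(counts) > 2:              # too many types: shrink from the left
            counts[tree[left]] -= 1
            if counts[tree[left]] == 0:
                del counts[tree[left]]
            left += 1
        best = max(best, right - left + 1)
    return best
print(total_fruit([3, 3, 3, 1, 2, 1, 1, 2, 3, 3, 4]))  # 5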
import sys
N = -1
G = None
H = None
vis = None
vis_aux = None
valence = None
flows = {}
answer = []
allowed_flows = {
3 : [-1, 1],
4 : [-1, 1, 2],
5 : [-1, 1, 2, -2]
}
def has_k_flow (graph):
global N, G, H, vis, valence, flows
G = graph
N = len(G)
H = [[0] * N for i in xrange(0, N)]
vis = [False] * N
valence = [0] * N
for v in xrange(0, N):
valence[v] = len(G[v])
if valence[v] not in flows and valence[v] != 0:
flows[valence[v]] = getWeights(valence[v])
for v in xrange(0, N):
G[v] = sorted(G[v], key=lambda u : valence[u], reverse=True)
del answer[:]
v = find_next()
return dfs(v)
def getWeights (VALENCE, e = 0):
global answer, E, edges
if e == 0:
del answer[:]
edges = [0] * VALENCE
elif e >= VALENCE:
return None
isLast = (e == (VALENCE - 1))
weight2 = [0, 2]
for w in xrange(0, 2):
edges[e] = weight2[w]
getWeights(VALENCE, e + 1)
if isLast:
edges2 = sum(edges) / 2
if (VALENCE - edges2) % 2 == 0 and not (edges2 == VALENCE and edges2 % 2 != 0):
answer.append(edges[:])
if e == 0:
return answer[:]
def find_next ():
vertices = xrange(0, N)
vertices = filter(lambda v : not vis[v], vertices)
# pick most constrained variable
vertices = sorted(vertices, key=lambda v : valence[v], reverse=True)
return vertices.pop(0)
def dfs (v = 0):
vis[v] = True
if valence[v] == 0:
sys.stderr.write ('error: vertex "%d" is 0-valent. Have you forgotten it?\n' % v)
exit(1)
constraints, neighbours = getConstraints(v)
weights = flows[valence[v]]
W = select(constraints, weights, v)
isLast = (sum(vis) == N)
if len(W) == 0:
vis[v] = False
return False
for w in W:
clear(v, neighbours)
assign(v, w)
counter = 0
for u in G[v]:
if not vis[u]:
counter += 1
if dfs(u):
return True
else:
break
deadlock = (not isLast and counter == 0)
if deadlock and dfs(find_next()):
return True
elif isLast and checkEulerian():
answer.append(H[:][:])
return True
vis[v] = False
clear(v, neighbours)
return False
def dfs_check(v, one_vertex, component, path):
global vis_aux
vis_aux[v] = component
path.append(v)
recursive_ones = 0
for u in G[v]:
if vis_aux[u] == 0 and H[v][u] == 0:
recursive_ones += dfs_check(u, one_vertex, component, path)
return int(one_vertex[v]) + recursive_ones
def checkEulerian():
global vis_aux
# for v in xrange(0, N):
# weight2 = sum(H[v]) / 2
# if (valence[v] - weight2) % 2 != 0:
# return False
vis_aux = [False] * N
one_vertex = [(sum(H[v]) / 2) % 2 != 0 for v in xrange(0, N)]
components = 0
result = True
paths = {}
for v in xrange(0, N):
if vis_aux[v] == 0:
components += 1
path = []
C_ones = dfs_check(v, one_vertex, components, path)
paths[components] = path
if C_ones % 2 != 0:
result = False
if result and False:
for i in xrange(0, components):
print i + 1, paths[i + 1]
return result
def getConstraints (v):
constraints = {}
neighbours = []
i = 0
for u in G[v]:
if H[v][u] != 0 or H[u][v] != 0:
constraints[i] = 2
neighbours.append(u)
i += 1
return constraints, neighbours
def select (constraints, possibilities, v):
r = []
for p in possibilities:
for field in constraints:
if p[field] != constraints[field]:
break
else:
r.append(p[:])
def valid (vector):
for i in xrange(0, len(vector)):
if vis[G[v][i]] and vector[i] == 2 and i not in constraints:
return False
return True
return [i for i in r if valid(i)]
def assign (v, weights):
for u in G[v]:
w = weights.pop(0)
H[u][v] = H[v][u] = w
def clear (v, neighbours):
for u in G[v]:
if u not in neighbours:
H[u][v] = H[v][u] = 0
| 0.04442 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop INC_R
{
addi reg, reg, 1, flags=(OF, SF, ZF, AF, PF)
};
def macroop INC_M
{
ldst t1, seg, sib, disp
addi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
st t1, seg, sib, disp
};
def macroop INC_P
{
rdip t7
ldst t1, seg, riprel, disp
addi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
st t1, seg, riprel, disp
};
def macroop INC_LOCKED_M
{
mfence
ldstl t1, seg, sib, disp
addi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
stul t1, seg, sib, disp
mfence
};
def macroop INC_LOCKED_P
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
addi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
stul t1, seg, riprel, disp
mfence
};
def macroop DEC_R
{
subi reg, reg, 1, flags=(OF, SF, ZF, AF, PF)
};
def macroop DEC_M
{
ldst t1, seg, sib, disp
subi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
st t1, seg, sib, disp
};
def macroop DEC_P
{
rdip t7
ldst t1, seg, riprel, disp
subi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
st t1, seg, riprel, disp
};
def macroop DEC_LOCKED_M
{
mfence
ldstl t1, seg, sib, disp
subi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
stul t1, seg, sib, disp
mfence
};
def macroop DEC_LOCKED_P
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
subi t1, t1, 1, flags=(OF, SF, ZF, AF, PF)
stul t1, seg, riprel, disp
mfence
};
'''
| 0 |
""" DIRAC Basic Oracle Class
It provides access to the basic Oracle methods in a multithread-safe mode
keeping used connections in a python Queue for further reuse.
These are the coded methods:
__init__( user, passwd, tns, [maxConnsInQueue=10] )
Initializes the Queue and tries to connect to the DB server,
using the _connect method.
"maxConnsInQueue" defines the size of the Queue of open connections
that are kept for reuse. It also defined the maximum number of open
connections available from the object.
maxConnsInQueue = 0 would mean unlimited and is not supported.
_except( methodName, exception, errorMessage )
Helper method for exceptions: the "methodName" and the "errorMessage"
are printed with ERROR level, then the "exception" is printed (with
full description if it is an Oracle Exception) and S_ERROR is returned
with the errorMessage and the exception.
_connect()
Attempts connection to the DB and sets the _connected flag to True upon success.
Returns S_OK or S_ERROR.
_query( cmd, [conn] )
Executes SQL command "cmd".
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the DB is passed as second argument, this connection
is used and is not in the Queue.
Returns S_OK with fetchall() out in Value or S_ERROR upon failure.
_getConnection()
Gets a connection from the Queue (or open a new one if none is available)
Returns S_OK with connection in Value or S_ERROR
the calling method is responsible for closing this connection once it is no
longer needed.
"""
import cx_Oracle
import types
import Queue
import time
import threading
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
gInstancesCount = 0
__RCSID__ = "$Id$"
maxConnectRetry = 100
maxArraysize = 5000 #max allowed
class OracleDB(object):
"""
Basic multithreaded DIRAC Oracle Client Class
"""
def __init__( self, userName, password = '', tnsEntry = '', maxQueueSize = 100 ):
"""
set Oracle connection parameters and try to connect
"""
global gInstancesCount
gInstancesCount += 1
self.__initialized = False
self._connected = False
if 'logger' not in dir( self ):
self.logger = gLogger.getSubLogger( 'Oracle' )
# let the derived class decide what to do if it is not 1
self._threadsafe = cx_Oracle.threadsafety
self.logger.debug( 'thread_safe = %s' % self._threadsafe )
self.__checkQueueSize( maxQueueSize )
self.__userName = str( userName )
self.__passwd = str( password )
self.__tnsName = str( tnsEntry )
# Create the connection Queue to reuse connections
self.__connectionQueue = Queue.Queue( maxQueueSize )
# Create the connection Semaphore to limit total number of open connection
self.__connectionSemaphore = threading.Semaphore( maxQueueSize )
self.__initialized = True
self._connect()
def __del__( self ):
global gInstancesCount
while 1 and self.__initialized:
self.__connectionSemaphore.release()
try:
connection = self.__connectionQueue.get_nowait()
connection.close()
except Queue.Empty:
self.logger.debug( 'No more connection in Queue' )
break
def __checkQueueSize( self, maxQueueSize ):
if maxQueueSize <= 0:
raise Exception( 'OracleDB.__init__: maxQueueSize must be positive' )
try:
test = maxQueueSize - 1
except:
raise Exception( 'OracleDB.__init__: wrong type for maxQueueSize' )
def _except( self, methodName, x, err ):
"""
print Oracle error or exception
return S_ERROR with Exception
"""
try:
raise x
except cx_Oracle.Error, e:
self.logger.debug( '%s: %s' % ( methodName, err ),
'%s' % ( e ) )
return S_ERROR( '%s: ( %s )' % ( err, e ) )
except Exception as x:
self.logger.debug( '%s: %s' % ( methodName, err ), str( x ) )
return S_ERROR( '%s: (%s)' % ( err, str( x ) ) )
def __checkFields( self, inFields, inValues ):
if len( inFields ) != len( inValues ):
return S_ERROR( 'Mismatch between inFields and inValues.' )
return S_OK()
def _connect( self ):
"""
open connection to Oracle DB and put Connection into Queue
set connected flag to True and return S_OK
return S_ERROR upon failure
"""
self.logger.debug( '_connect:', self._connected )
if self._connected:
return S_OK()
self.logger.debug( '_connect: Attempting to access DB',
'by user %s/%s.' %
( self.__userName, self.__passwd ) )
try:
self.__newConnection()
self.logger.debug( '_connect: Connected.' )
self._connected = True
return S_OK()
except Exception as x:
return self._except( '_connect', x, 'Could not connect to DB.' )
def _query( self, cmd, conn = False ):
"""
execute Oracle query command
return S_OK structure with fetchall result as tuple
it returns an empty tuple if no matching rows are found
return S_ERROR upon error
"""
self.logger.debug( '_query:', cmd )
retDict = self.__getConnection( conn = conn )
if not retDict['OK'] :
return retDict
connection = retDict['Value']
try:
cursor = connection.cursor()
cursor.arraysize = maxArraysize
if cursor.execute( cmd ):
res = cursor.fetchall()
else:
res = ()
# Log the result limiting it to just 10 records
if len( res ) < 10:
self.logger.debug( '_query:', res )
else:
self.logger.debug( '_query: Total %d records returned' % len( res ) )
self.logger.debug( '_query: %s ...' % str( res[:10] ) )
retDict = S_OK( res )
except Exception as x:
self.logger.debug( '_query:', cmd )
retDict = self._except( '_query', x, 'Execution failed.' )
self.logger.debug( 'Start rollback transaction!' )
connection.rollback()
self.logger.debug( 'End rollback transaction!' )
try:
connection.commit()
cursor.close()
except Exception:
pass
if not conn:
self.__putConnection( connection )
return retDict
def executeStoredProcedure( self, packageName, parameters, output = True, array = None, conn = False ):
self.logger.debug( '_query:', packageName + "(" + str( parameters ) + ")" )
retDict = self.__getConnection( conn = conn )
if not retDict['OK']:
return retDict
connection = retDict['Value']
try:
cursor = connection.cursor()
result = None
results = None
if array != None and len(array) > 0:
if isinstance( array[0], basestring ):
result = cursor.arrayvar( cx_Oracle.STRING, array )
parameters += [result]
elif type(array[0]) == types.LongType or type(array[0]) == types.IntType:
result = cursor.arrayvar( cx_Oracle.NUMBER, array )
parameters += [result]
else:
return S_ERROR('The array type is not supported!!!')
if output == True:
result = connection.cursor()
result.arraysize = maxArraysize # 500x faster!!
parameters += [result]
cursor.callproc( packageName, parameters )
results = result.fetchall()
else:
cursor.callproc( packageName, parameters )
retDict = S_OK( results )
except Exception as x:
self.logger.debug( '_query:', packageName + "(" + str( parameters ) + ")" )
retDict = self._except( '_query', x, 'Execution failed.' )
connection.rollback()
try:
cursor.close()
except Exception:
pass
if not conn:
self.__putConnection( connection )
return retDict
def executeStoredFunctions( self, packageName, returnType, parameters = None, conn = False ):
if parameters == None:
parameters = []
retDict = self.__getConnection( conn = conn )
if not retDict['OK']:
return retDict
connection = retDict['Value']
try:
cursor = connection.cursor()
cursor.arraysize = maxArraysize
result = cursor.callfunc( packageName, returnType, parameters )
retDict = S_OK( result )
except Exception as x:
self.logger.debug( '_query:', packageName + "(" + str( parameters ) + ")" )
retDict = self._except( '_query', x, 'Execution failed.' )
connection.rollback()
try:
cursor.close()
except Exception:
pass
if not conn:
self.__putConnection( connection )
return retDict
def __newConnection( self ):
"""
Create a New connection and put it in the Queue
"""
self.logger.debug( '__newConnection:' )
connection = cx_Oracle.Connection( self.__userName, self.__passwd, self.__tnsName, threaded = True )
self.__putConnection( connection )
def __putConnection( self, connection ):
"""
Put a connection in the Queue, if the queue is full, the connection is closed
"""
self.logger.debug( '__putConnection:' )
# Release the semaphore first, in case something fails
self.__connectionSemaphore.release()
try:
self.__connectionQueue.put_nowait( connection )
except Queue.Full, x:
self.logger.debug( '__putConnection: Full Queue' )
try:
connection.close()
except Exception:
pass
except Exception as x:
self._except( '__putConnection', x, 'Failed to put Connection in Queue' )
def _getConnection( self ):
"""
Return a new connection to the DB
It uses the private method __getConnection
"""
self.logger.debug( '_getConnection:' )
retDict = self.__getConnection( trial = 0 )
self.__connectionSemaphore.release()
return retDict
def __getConnection( self, conn = False, trial = 0 ):
"""
Return a new connection to the DB,
if conn is provided then just return it.
then try the Queue, if it is empty add a newConnection to the Queue and retry
it will retry maxConnectRetry to open a new connection and will return
an error if it fails.
"""
self.logger.debug( '__getConnection:' )
if conn:
return S_OK( conn )
try:
self.__connectionSemaphore.acquire()
connection = self.__connectionQueue.get_nowait()
self.logger.debug( '__getConnection: Got a connection from Queue' )
if connection:
try:
# This will try to reconnect if the connection has timed out
connection.commit()
except:
# if the ping fails try with a new connection from the Queue
self.__connectionSemaphore.release()
return self.__getConnection()
return S_OK( connection )
except Queue.Empty, x:
self.__connectionSemaphore.release()
self.logger.debug( '__getConnection: Empty Queue' )
try:
if trial == min( 100, maxConnectRetry ):
return S_ERROR( 'Could not get a connection after %s retries.' % maxConnectRetry )
try:
self.__newConnection()
return self.__getConnection()
except Exception as x:
self.logger.debug( '__getConnection: Fails to get connection from Queue', x )
time.sleep( trial * 5.0 )
newtrial = trial + 1
return self.__getConnection( trial = newtrial )
except Exception as x:
return self._except( '__getConnection:', x, 'Failed to get connection from Queue' )
except Exception as x:
return self._except( '__getConnection:', x, 'Failed to get connection from Queue' )
| 0.032745 |
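A hedged usage sketch for the OracleDB wrapper above, illustrating the S_OK/S_ERROR convention its methods return. The credentials and TNS entry are placeholders; actually running the commented-out lines requires cx_Oracle, the DIRAC package providing this class, and a reachable Oracle instance:
def fetch_rows(db, sql):
    # _query() returns the usual DIRAC structure: {'OK': True, 'Value': rows}
    # on success or {'OK': False, 'Message': ...} on failure.
    result = db._query(sql)
    if not result['OK']:
        raise RuntimeError(result['Message'])
    return result['Value']
# Example wiring (placeholder credentials and TNS entry):
# db = OracleDB('reader', password='secret', tnsEntry='MYDB')
# rows = fetch_rows(db, 'SELECT 1 FROM dual')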
from torch import nn as nn
from ._base import FCLayers
class Classifier(nn.Module):
"""
Basic fully-connected NN classifier
Parameters
----------
n_input
Number of input dimensions
n_hidden
Number of hidden nodes in hidden layer
n_labels
Number of output dimensions
n_layers
Number of hidden layers
dropout_rate
dropout_rate for nodes
logits
Return logits or not
use_batch_norm
Whether to use batch norm in layers
use_layer_norm
Whether to use layer norm in layers
activation_fn
Valid activation function from torch.nn
"""
def __init__(
self,
n_input: int,
n_hidden: int = 128,
n_labels: int = 5,
n_layers: int = 1,
dropout_rate: float = 0.1,
logits: bool = False,
use_batch_norm: bool = True,
use_layer_norm: bool = False,
activation_fn: nn.Module = nn.ReLU,
):
super().__init__()
layers = [
FCLayers(
n_in=n_input,
n_out=n_hidden,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
activation_fn=activation_fn,
),
nn.Linear(n_hidden, n_labels),
]
if not logits:
layers.append(nn.Softmax(dim=-1))
self.classifier = nn.Sequential(*layers)
def forward(self, x):
return self.classifier(x)
| 0 |
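Roughly what the Classifier above assembles for, say, n_input=50, n_hidden=64, n_labels=3 and logits=False is a hidden block followed by a linear head and a softmax. A self-contained, torch-only approximation (omitting the FCLayers extras such as batch norm and dropout; the sizes are arbitrary):
import torch
from torch import nn
model = nn.Sequential(
    nn.Linear(50, 64),   # stand-in for the FCLayers hidden block
    nn.ReLU(),
    nn.Linear(64, 3),    # linear head over the labels
    nn.Softmax(dim=-1),  # omitted when logits=True
)
x = torch.randn(8, 50)        # batch of 8 samples with 50 input features
probs = model(x)
print(probs.shape)            # torch.Size([8, 3])
print(probs.sum(dim=-1))      # each row sums to ~1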
class Author:
def __init__(self,aid,aname,pwd,sid,nick_name=''):
self.aid=aid
self.aname = aname
self.pwd=pwd
self.sid=sid
self.nick_name=nick_name
def setauthorname(self,aname):
self.aname= aname
def setpassword(self,password):
self.pwd=password
def setauthorid(self,aid):
self.aid=aid
def setsid(self,sid):
self.sid=sid
def setnickname(self,nick_name):
self.nick_name=nick_name
def getauthorname(self):
return self.aname
def getaid(self):
return self.aid
def getpwd(self):
return self.pwd
def getsid(self):
return self.sid
def getnickname(self):
return self.nick_name
def __eq__(self,other):
return self.aid == other.aid and self.aname == other.aname and self.pwd == other.pwd and self.sid == other.sid
def tojson(self):
import json
return json.dumps({"aid":self.aid,"aname":self.aname,"pwd":self.pwd,"nickname":self.nick_name,"sid":self.sid})
def jsontoauthor(jsonstring):
import json
dic = json.loads(jsonstring)
return Author(dic["aid"],dic["aname"],dic["pwd"],dic["sid"],dic["nickname"])
if __name__ =="__main__":
import json
a = Author("132","123","123","123","sada")
b = a.tojson()
print a == jsontoauthor(b)
| 0.041045 |
import numpy as np
import sys, os
import nrrd
if (len(sys.argv) < 3):
print 'Error: missing arguments!'
print 'e.g. python PreProcess.py image_ch1.nrrd image_ch2.nrrd [C #]' # t or tC = test run with no save.
else:
print 'Processing %s and %s...'% (str(sys.argv[1]), str(sys.argv[2]))
ng = str(sys.argv[1]).replace('1.nrrd', '3.nrrd')
if os.path.exists(ng):
nge = True
print 'Also found and processing %s...'% (ng)
else:
nge = False
if nge:
data1, header1 = nrrd.read(str(sys.argv[1]))
data2, header2 = nrrd.read(str(sys.argv[2]))
data3, header3 = nrrd.read(ng)
size = np.array(data1.shape) -1
print 'Image size is %s pixels.'% str(data2.shape)
c = -1
testrun = False
Mzflip = False
Myflip = False
Mxflip = False
if (len(sys.argv) > 3):
if ('T' in str(sys.argv[3]).upper()):
testrun = True
print 'Test run...'
if ('C' in str(sys.argv[3]).upper()):
try:
c = int(sys.argv[4])
except ValueError:
print 'Problem with given clipping threshold (must be an integer) using 0'
c = 0
else:
testrun = False
s = 2
h = s + 1
d = 10
d1 = int(round(size[0] / s))
d2 = int(round(size[1] / s))
d3 = int(round(size[2] / h))
Rs1 = np.zeros([s])
Rs2 = np.zeros([s])
Rs3 = np.zeros([s])
Rs1[0] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs1[1] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,s*d3:])
Rs2[0] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs2[1] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,s*d3:])
Rs3[0] = np.sum(data3[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs3[1] = np.sum(data3[d1-d:d1+d,d2-d:d2+d,s*d3:])
if testrun:
print 'Results:'
print Rs1
print Rs2
print Rs3
# Clipping edges below threshold value c
if (c > -1):
Cl = 0
Cr = size[0]-1
Ct = 0
Cb = size[1]-1
Cv = 0
Cd = size[2]-1
#left
for y in range(1,d1):
if ((np.max(data1[0:y,0:,0:]) < c ) and (np.max(data2[0:y,0:,0:]) < c ) and (np.max(data3[0:y,0:,0:]) < c )):
Cl = y
else:
if testrun: print Cl
break
#right
for y in range(size[0],d1,-1):
if ((np.max(data1[y:,0:,0:]) < c ) and (np.max(data2[y:,0:,0:]) < c ) and (np.max(data3[y:,0:,0:]) < c )):
Cr = y
else:
if testrun: print Cr
break
#top / anterior
for x in range(1,d2):
if ((np.max(data1[0:,0:x,0:]) < c ) and (np.max(data2[0:,0:x,0:]) < c ) and (np.max(data3[0:,0:x,0:]) < c )):
Ct = x
else:
if testrun: print Ct
break
#bottom / posterior
for x in range(size[1],d2,-1):
if ((np.max(data1[0:,x:,0:]) < c ) and (np.max(data2[0:,x:,0:]) < c ) and (np.max(data3[0:,x:,0:]) < c )):
Cb = x
else:
if testrun: print Cb
break
#ventral
for z in range(1,d3):
if ((np.max(data1[0:,0:,0:z]) < c ) and (np.max(data2[0:,0:,0:z]) < c ) and (np.max(data3[0:,0:,0:z]) < c )):
Cv = z
else:
if testrun: print Cv
break
#dorsal
for z in range(size[2],d3,-1):
if ((np.max(data1[0:,0:,z:]) < c ) and (np.max(data2[0:,0:,z:]) < c ) and (np.max(data3[0:,0:,z:]) < c )):
Cd = z
else:
if testrun: print Cd
break
data1c =data1[Cl:Cr,Ct:Cb,Cv:Cd]
data2c =data2[Cl:Cr,Ct:Cb,Cv:Cd]
data3c =data3[Cl:Cr,Ct:Cb,Cv:Cd]
if testrun:
print 'Clipping both images from %s to %s...'% (str(data1.shape), str(data1c.shape))
print 'Results saved to ClippedImageTestC[1,2].nrrd as test run...'
nrrd.write('ClippedImageTestC1.nrrd', data1c, options=header1)
nrrd.write('ClippedImageTestC2.nrrd', data2c, options=header2)
nrrd.write('ClippedImageTestC3.nrrd', data3c, options=header3)
else:
print 'Clipping both images from %s to %s and saving...'% (str(data1.shape), str(data1c.shape))
header1['sizes']=data1c.shape
header2['sizes']=data2c.shape
header3['sizes']=data3c.shape
nrrd.write(str(sys.argv[1]), data1c, options=header1)
nrrd.write(str(sys.argv[2]), data2c, options=header2)
nrrd.write(ng, data3c, options=header3)
data1 = data1c
data2 = data2c
data3 = data3c
if (np.sum(Rs1) > (1.5 * np.sum(Rs3))): #1.5 times bias required to swap from default
print 'BG: C1\nNG: C2\nSG: C3'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1.nrrd','_BG.nrrd'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2.nrrd','_NG.nrrd'))
os.rename(ng,ng.replace('_C3.nrrd','_SG.nrrd'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
else:
print 'BG: C3\nNG: C2\nSG: C1'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1.nrrd','_SG.nrrd'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2.nrrd','_NG.nrrd'))
os.rename(ng,ng.replace('_C3.nrrd','_BG.nrrd'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
else:
data1, header1 = nrrd.read(str(sys.argv[1]))
data2, header2 = nrrd.read(str(sys.argv[2]))
size = np.array(data1.shape) -1
print 'Image size is %s pixels.'% str(data2.shape)
c = -1
testrun = False
if (len(sys.argv) > 3):
if ('T' in str(sys.argv[3]).upper()):
testrun = True
print 'Test run...'
if ('C' in str(sys.argv[3]).upper()):
try:
c = int(sys.argv[4])
except ValueError:
                print 'Problem with given clipping threshold (must be an integer); using 0'
c = 0
else:
testrun = False
s = 2
h = s + 1
d = 10
d1 = int(round(size[0] / s))
d2 = int(round(size[1] / s))
d3 = int(round(size[2] / h))
Rs1 = np.zeros([s])
Rs2 = np.zeros([s])
Rs1[0] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs1[1] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,s*d3:])
Rs2[0] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs2[1] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,s*d3:])
if testrun:
print 'Results:'
print Rs1
print Rs2
    #Clipping edges below threshold value c
if (c > -1):
Cl = 0
Cr = size[0]-1
Ct = 0
Cb = size[1]-1
Cv = 0
Cd = size[2]-1
#left
for y in range(1,d1):
if ((np.max(data1[0:y,0:,0:]) < c ) and (np.max(data2[0:y,0:,0:]) < c )):
Cl = y
else:
if testrun: print Cl
break
#right
for y in range(size[0],d1,-1):
if ((np.max(data1[y:,0:,0:]) < c ) and (np.max(data2[y:,0:,0:]) < c )):
Cr = y
else:
if testrun: print Cr
break
#top / anterior
for x in range(1,d2):
if ((np.max(data1[0:,0:x,0:]) < c ) and (np.max(data2[0:,0:x,0:]) < c )):
Ct = x
else:
if testrun: print Ct
break
#bottom / posterior
for x in range(size[1],d2,-1):
if ((np.max(data1[0:,x:,0:]) < c ) and (np.max(data2[0:,x:,0:]) < c )):
Cb = x
else:
if testrun: print Cb
break
#ventral
for z in range(1,d3):
if ((np.max(data1[0:,0:,0:z]) < c ) and (np.max(data2[0:,0:,0:z]) < c )):
Cv = z
else:
if testrun: print Cv
break
#dorsal
for z in range(size[2],d3,-1):
if ((np.max(data1[0:,0:,z:]) < c ) and (np.max(data2[0:,0:,z:]) < c )):
Cd = z
else:
if testrun: print Cd
break
        data1c = data1[Cl:Cr,Ct:Cb,Cv:Cd]
        data2c = data2[Cl:Cr,Ct:Cb,Cv:Cd]
if testrun:
print 'Clipping both images from %s to %s...'% (str(data1.shape), str(data1c.shape))
print 'Results saved to ClippedImageTestC[1,2].nrrd as test run...'
nrrd.write('ClippedImageTestC1.nrrd', data1c, options=header1)
nrrd.write('ClippedImageTestC2.nrrd', data2c, options=header2)
else:
print 'Clipping both images from %s to %s and saving...'% (str(data1.shape), str(data1c.shape))
header1['sizes']=data1c.shape
header2['sizes']=data2c.shape
nrrd.write(str(sys.argv[1]), data1c, options=header1)
nrrd.write(str(sys.argv[2]), data2c, options=header2)
data1 = data1c
data2 = data2c
if (np.sum(Rs1) > (1.5 * np.sum(Rs2))): #1.5 times bias required to swap from default
print 'BG: C1\nSG: C2'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1.nrrd','_BG.nrrd'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2.nrrd','_SG.nrrd'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
else:
print 'BG: C2\nSG: C1'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1.nrrd','_SG.nrrd'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2.nrrd','_BG.nrrd'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
print 'Done.'
| 0.028314 |
__author__ = 'msergeyx'
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_homepage()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
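# Hedged usage sketch: "app" is assumed to be the fixture object that owns the webdriver
# ("self.app.wd") and open_homepage(); the credentials below are placeholders, not values
# taken from this project.
#   session = SessionHelper(app)
#   session.ensure_login(username="admin", password="secret")
#   assert session.is_logged_in_as("admin")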
| 0 |
"""Known matrices related to physics"""
from sympy import Matrix, I
def msigma(i):
"""Returns a Pauli matrix sigma_i. i=1,2,3
See also:
http://en.wikipedia.org/wiki/Pauli_matrices
"""
if i==1:
mat=( (
(0, 1),
(1, 0)
) )
elif i==2:
mat=( (
(0, -I),
(I, 0)
) )
elif i==3:
mat=( (
(1, 0),
(0, -1)
) )
else:
raise "Invalid Pauli index"
return Matrix(mat)
def mgamma(mu,lower=False):
"""Returns a Dirac gamma matrix gamma^mu in the standard
(Dirac) representation.
    If you want gamma_mu, use mgamma(mu, True).
We use a convention:
gamma^5 = I * gamma^0 * gamma^1 * gamma^2 * gamma^3
gamma_5 = I * gamma_0 * gamma_1 * gamma_2 * gamma_3 = - gamma^5
See also:
http://en.wikipedia.org/wiki/Gamma_matrices
"""
    if mu not in [0,1,2,3,5]:
        raise ValueError("Invalid Dirac index")
if mu == 0:
mat = (
(1,0,0,0),
(0,1,0,0),
(0,0,-1,0),
(0,0,0,-1)
)
elif mu == 1:
mat = (
(0,0,0,1),
(0,0,1,0),
(0,-1,0,0),
(-1,0,0,0)
)
elif mu == 2:
mat = (
(0,0,0,-I),
(0,0,I,0),
(0,I,0,0),
(-I,0,0,0)
)
elif mu == 3:
mat = (
(0,0,1,0),
(0,0,0,-1),
(-1,0,0,0),
(0,1,0,0)
)
elif mu == 5:
mat = (
(0,0,1,0),
(0,0,0,1),
(1,0,0,0),
(0,1,0,0)
)
    m = Matrix(mat)
if lower:
if mu in [1,2,3,5]:
m = - m
return m
#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field
#Theory
minkowski_tensor = Matrix( (
(1,0,0,0),
(0,-1,0,0),
(0,0,-1,0),
(0,0,0,-1)
))
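# Hedged self-check of the conventions documented above, using only msigma and mgamma
# defined in this module: sigma_1*sigma_2 = I*sigma_3 and
# gamma^5 = I*gamma^0*gamma^1*gamma^2*gamma^3.
if __name__ == '__main__':
    assert msigma(1)*msigma(2) == I*msigma(3)
    assert mgamma(5) == I*mgamma(0)*mgamma(1)*mgamma(2)*mgamma(3)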
| 0.048958 |
"""\
Ints.py Basic routines for integrals in the PyQuante framework
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from CGBF import CGBF,coulomb
#from contracted_gto import coulomb
from NumWrap import zeros,dot,reshape
from PyQuante.cints import ijkl2intindex as intindex
from PyQuante.Basis.Tools import get_basis_data
import settings
import logging
logger = logging.getLogger("pyquante")
sym2powerlist = {
'S' : [(0,0,0)],
'P' : [(1,0,0),(0,1,0),(0,0,1)],
'D' : [(2,0,0),(1,1,0),(1,0,1),(0,2,0),(0,1,1),(0,0,2)],
'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2),
(0,3,0),(0,2,1),(0,1,2), (0,0,3)]
}
sorted = True
jints = {}
kints = {}
def getbasis(atoms,basis_data=None,**opts):
"""\
bfs = getbasis(atoms,basis_data=None)
Given a Molecule object and a basis library, form a basis set
constructed as a list of CGBF basis functions objects.
"""
from PyQuante.Basis.basis import BasisSet
return BasisSet(atoms, basis_data, **opts)
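    # NOTE: everything below the return statement above is unreachable; it is the older
    # list-of-CGBF construction, apparently kept here for reference.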
# Option to omit f basis functions from imported basis sets
omit_f = opts.get('omit_f',False)
if not basis_data:
from PyQuante.Basis.p631ss import basis_data
elif type(basis_data) == type(''):
# Assume this is a name of a basis set, e.g. '6-31g**'
# and import dynamically
basis_data = get_basis_data(basis_data)
bfs = []
for atom in atoms:
bs = basis_data[atom.atno]
for sym,prims in bs:
if omit_f and sym == "F": continue
for power in sym2powerlist[sym]:
bf = CGBF(atom.pos(),power,atom.atid)
for expnt,coef in prims:
bf.add_primitive(expnt,coef)
bf.normalize()
bfs.append(bf)
return bfs
def getints(bfs,atoms):
logger.info("Calculating Integrals...")
S,h = get1ints(bfs,atoms)
Ints = get2ints(bfs)
logger.info("Integrals Calculated.")
return S,h,Ints
def get1ints(bfs,atoms):
"Form the overlap S and h=t+vN one-electron Hamiltonian matrices"
nbf = len(bfs)
S = zeros((nbf,nbf),'d')
h = zeros((nbf,nbf),'d')
for i in xrange(nbf):
bfi = bfs[i]
for j in xrange(nbf):
bfj = bfs[j]
S[i,j] = bfi.overlap(bfj)
h[i,j] = bfi.kinetic(bfj)
for atom in atoms:
h[i,j] = h[i,j] + atom.atno*bfi.nuclear(bfj,atom.pos())
return S,h
def getT(bfs):
"Form the kinetic energy matrix"
nbf = len(bfs)
T = zeros((nbf,nbf),'d')
for i in xrange(nbf):
bfi = bfs[i]
for j in xrange(nbf):
bfj = bfs[j]
T[i,j] = bfi.kinetic(bfj)
return T
def getS(bfs):
"Form the overlap matrix"
nbf = len(bfs)
S = zeros((nbf,nbf),'d')
for i in xrange(nbf):
bfi = bfs[i]
for j in xrange(nbf):
bfj = bfs[j]
S[i,j] = bfi.overlap(bfj)
return S
def getV(bfs,atoms):
"Form the nuclear attraction matrix V"
nbf = len(bfs)
V = zeros((nbf,nbf),'d')
for i in xrange(nbf):
bfi = bfs[i]
for j in xrange(nbf):
bfj = bfs[j]
for atom in atoms:
V[i,j] = V[i,j] + atom.atno*bfi.nuclear(bfj,atom.pos())
return V
if settings.libint_enabled == True:
# Libint Integrals
import numpy as np
import clibint
def get2ints(basis):
lenbasis = len(basis.bfs)
Ints = np.zeros((lenbasis**4),dtype=np.float64)
for i,a in enumerate(basis.shells):
for j,b in enumerate(basis.shells[:i+1]):
for k,c in enumerate(basis.shells):
for l,d in enumerate(basis.shells[:k+1]):
if (i+j)>=(k+l):
clibint.shell_compute_eri(a,b,c,d,Ints)
if sorted:
sortints(lenbasis,Ints)
return Ints
else:
# PyQuante Integrals
def get2ints(bfs):
"""Store integrals in a long array in the form (ij|kl) (chemists
notation. We only need i>=j, k>=l, and ij <= kl"""
from array import array
nbf = len(bfs)
totlen = nbf*(nbf+1)*(nbf*nbf+nbf+2)/8
Ints = array('d',[0]*totlen)
for i in xrange(nbf):
for j in xrange(i+1):
ij = i*(i+1)/2+j
for k in xrange(nbf):
for l in xrange(k+1):
kl = k*(k+1)/2+l
if ij <= kl:
Ints[intindex(i,j,k,l)] = coulomb(bfs[i],bfs[j],
bfs[k],bfs[l])
if sorted:
sortints(nbf,Ints)
return Ints
def sortints(nbf,Ints):
for i in xrange(nbf):
for j in xrange(i+1):
jints[i,j] = fetch_jints(Ints,i,j,nbf)
kints[i,j] = fetch_kints(Ints,i,j,nbf)
return
def fetch_jints(Ints,i,j,nbf):
temp = zeros(nbf*nbf,'d')
kl = 0
for k in xrange(nbf):
for l in xrange(nbf):
index = intindex(i,j,k,l)
temp[kl] = Ints[index]
kl += 1
return temp
def fetch_kints(Ints,i,j,nbf):
temp = zeros(nbf*nbf,'d')
kl = 0
for k in xrange(nbf):
for l in xrange(nbf):
temp[kl] = Ints[intindex(i,k,j,l)]
kl += 1
return temp
def getJ(Ints,D):
"Form the Coulomb operator corresponding to a density matrix D"
nbf = D.shape[0]
D1d = reshape(D,(nbf*nbf,)) #1D version of Dens
J = zeros((nbf,nbf),'d')
for i in xrange(nbf):
for j in xrange(i+1):
if sorted:
temp = jints[i,j]
else:
temp = fetch_jints(Ints,i,j,nbf)
J[i,j] = dot(temp,D1d)
J[j,i] = J[i,j]
return J
def getK(Ints,D):
"Form the exchange operator corresponding to a density matrix D"
nbf = D.shape[0]
D1d = reshape(D,(nbf*nbf,)) #1D version of Dens
K = zeros((nbf,nbf),'d')
for i in xrange(nbf):
for j in xrange(i+1):
if sorted:
temp = kints[i,j]
else:
temp = fetch_kints(Ints,i,j,nbf)
K[i,j] = dot(temp,D1d)
K[j,i] = K[i,j]
return K
def get2JmK(Ints,D):
"Form the 2J-K integrals corresponding to a density matrix D"
nbf = D.shape[0]
D1d = reshape(D,(nbf*nbf,)) #1D version of Dens
G = zeros((nbf,nbf),'d')
for i in xrange(nbf):
for j in xrange(i+1):
if sorted:
temp = 2*jints[i,j]-kints[i,j]
else:
temp = 2*fetch_jints(Ints,i,j,nbf)-fetch_kints(Ints,i,j,nbf)
G[i,j] = dot(temp,D1d)
G[j,i] = G[i,j]
return G
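# Hedged usage sketch tying the routines above together. The Molecule import and the H2
# geometry are assumptions taken from the wider PyQuante package, not from this file; the
# zero density matrix is only there to exercise get2JmK.
if __name__ == '__main__':
    from PyQuante.Molecule import Molecule
    h2 = Molecule('h2', [(1, (0, 0, 0)), (1, (0, 0, 1.4))])  # two H atoms, ~1.4 bohr apart
    bfs = getbasis(h2)                    # default basis set, see getbasis above
    S, h, Ints = getints(bfs, h2)         # overlap, core Hamiltonian, two-electron integrals
    D = zeros((len(bfs), len(bfs)), 'd')  # dummy density matrix
    G = get2JmK(Ints, D)                  # 2J-K built from the cached jints/kints
    print "nbf = %d  S shape = %s  G shape = %s" % (len(bfs), str(S.shape), str(G.shape))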
| 0.030172 |
from boxbranding import getBoxType
from twisted.internet import threads
from enigma import eDBoxLCD, eTimer
from config import config, ConfigSubsection, ConfigSelection, ConfigSlider, ConfigYesNo, ConfigNothing
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists
from Screens.Screen import Screen
import usb
class dummyScreen(Screen):
skin = """<screen position="0,0" size="0,0" transparent="1">
<widget source="session.VideoPicture" render="Pig" position="0,0" size="0,0" backgroundColor="transparent" zPosition="1"/>
</screen>"""
def __init__(self, session, args=None):
Screen.__init__(self, session)
self.close()
def IconCheck(session=None, **kwargs):
if fileExists("/proc/stb/lcd/symbol_network") or fileExists("/proc/stb/lcd/symbol_usb"):
global networklinkpoller
networklinkpoller = IconCheckPoller()
networklinkpoller.start()
class IconCheckPoller:
def __init__(self):
self.timer = eTimer()
def start(self):
if self.iconcheck not in self.timer.callback:
self.timer.callback.append(self.iconcheck)
self.timer.startLongTimer(0)
def stop(self):
if self.iconcheck in self.timer.callback:
self.timer.callback.remove(self.iconcheck)
self.timer.stop()
def iconcheck(self):
try:
threads.deferToThread(self.JobTask)
except:
pass
self.timer.startLongTimer(30)
def JobTask(self):
LinkState = 0
if fileExists('/sys/class/net/wlan0/operstate'):
LinkState = open('/sys/class/net/wlan0/operstate').read()
if LinkState != 'down':
LinkState = open('/sys/class/net/wlan0/operstate').read()
elif fileExists('/sys/class/net/eth0/operstate'):
LinkState = open('/sys/class/net/eth0/operstate').read()
if LinkState != 'down':
LinkState = open('/sys/class/net/eth0/carrier').read()
LinkState = LinkState[:1]
if fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write(str(LinkState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write('0')
f.close()
USBState = 0
busses = usb.busses()
for bus in busses:
devices = bus.devices
for dev in devices:
if dev.deviceClass != 9 and dev.deviceClass != 2 and dev.idVendor > 0:
USBState = 1
if fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write(str(USBState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write('0')
f.close()
self.timer.startLongTimer(30)
class LCD:
def __init__(self):
pass
def setBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
eDBoxLCD.getInstance().setLCDBrightness(value)
def setContrast(self, value):
value *= 63
value /= 20
if value > 63:
value = 63
eDBoxLCD.getInstance().setLCDContrast(value)
def setInverted(self, value):
if value:
value = 255
eDBoxLCD.getInstance().setInverted(value)
def setFlipped(self, value):
eDBoxLCD.getInstance().setFlipped(value)
def isOled(self):
return eDBoxLCD.getInstance().isOled()
def setMode(self, value):
if fileExists("/proc/stb/lcd/show_symbols"):
print 'setLCDMode',value
f = open("/proc/stb/lcd/show_symbols", "w")
f.write(value)
f.close()
if config.lcd.mode.value == "0":
if fileExists("/proc/stb/lcd/symbol_hdd"):
f = open("/proc/stb/lcd/symbol_hdd", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_hddprogress"):
f = open("/proc/stb/lcd/symbol_hddprogress", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_network"):
f = open("/proc/stb/lcd/symbol_network", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_signal"):
f = open("/proc/stb/lcd/symbol_signal", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_timeshift"):
f = open("/proc/stb/lcd/symbol_timeshift", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_tv"):
f = open("/proc/stb/lcd/symbol_tv", "w")
f.write("0")
f.close()
if fileExists("/proc/stb/lcd/symbol_usb"):
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write("0")
f.close()
def setPower(self, value):
if fileExists("/proc/stb/power/vfd"):
print 'setLCDPower',value
f = open("/proc/stb/power/vfd", "w")
f.write(value)
f.close()
elif fileExists("/proc/stb/lcd/vfd"):
print 'setLCDPower',value
f = open("/proc/stb/lcd/vfd", "w")
f.write(value)
f.close()
def setShowoutputresolution(self, value):
if fileExists("/proc/stb/lcd/show_outputresolution"):
print 'setLCDShowoutputresolution',value
f = open("/proc/stb/lcd/show_outputresolution", "w")
f.write(value)
f.close()
def setfblcddisplay(self, value):
print 'setfblcddisplay',value
f = open("/proc/stb/fb/sd_detach", "w")
f.write(value)
f.close()
def setRepeat(self, value):
if fileExists("/proc/stb/lcd/scroll_repeats"):
print 'setLCDRepeat',value
f = open("/proc/stb/lcd/scroll_repeats", "w")
f.write(value)
f.close()
def setScrollspeed(self, value):
if fileExists("/proc/stb/lcd/scroll_delay"):
print 'setLCDScrollspeed',value
f = open("/proc/stb/lcd/scroll_delay", "w")
f.write(str(value))
f.close()
def setLEDNormalState(self, value):
eDBoxLCD.getInstance().setLED(value, 0)
def setLEDDeepStandbyState(self, value):
eDBoxLCD.getInstance().setLED(value, 1)
def setLEDBlinkingTime(self, value):
eDBoxLCD.getInstance().setLED(value, 2)
def setLCDMiniTVMode(self, value):
print 'setLCDMiniTVMode',value
f = open('/proc/stb/lcd/mode', "w")
f.write(value)
f.close()
def setLCDMiniTVPIPMode(self, value):
print 'setLCDMiniTVPIPMode',value
def setLCDMiniTVFPS(self, value):
print 'setLCDMiniTVFPS',value
f = open('/proc/stb/lcd/fps', "w")
f.write("%d \n" % value)
f.close()
def leaveStandby():
config.lcd.bright.apply()
config.lcd.ledbrightness.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def standbyCounterChanged(configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
config.lcd.standby.apply()
config.lcd.ledbrightnessstandby.apply()
config.lcd.ledbrightnessdeepstandby.apply()
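# InitLcd wires the config.lcd.* entries to the display hardware: the nested notifier
# functions below write the chosen values straight to the /proc/stb interfaces (or to
# eDBoxLCD) whenever a setting changes.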
def InitLcd():
if getBoxType() in ('wetekplay', 'xpeedlxcs2', 'xpeedlxcc', 'e4hd' ,'mbmicro', 'beyonwizt2', 'amikomini', 'dynaspark', 'amiko8900', 'sognorevolution', 'arguspingulux', 'arguspinguluxmini', 'arguspinguluxplus', 'sparkreloaded', 'sabsolo', 'sparklx', 'gis8120', 'gb800se', 'gb800solo', 'gb800seplus', 'gbultrase', 'gbipbox', 'tmsingle', 'tmnano2super', 'iqonios300hd', 'iqonios300hdv2', 'optimussos1plus', 'optimussos1', 'vusolo', 'et4x00', 'et5x00', 'et6x00', 'et7000', 'mixosf7', 'mixoslumi', 'gbx1', 'gbx3'):
detected = False
else:
detected = eDBoxLCD.getInstance().detected()
SystemInfo["Display"] = detected
config.lcd = ConfigSubsection();
if fileExists("/proc/stb/lcd/mode"):
f = open("/proc/stb/lcd/mode", "r")
can_lcdmodechecking = f.read().strip().split(" ")
f.close()
else:
can_lcdmodechecking = False
SystemInfo["LCDMiniTV"] = can_lcdmodechecking
if detected:
if can_lcdmodechecking:
def setLCDModeMinitTV(configElement):
try:
f = open("/proc/stb/lcd/mode", "w")
f.write(configElement.value)
f.close()
except:
pass
def setMiniTVFPS(configElement):
try:
f = open("/proc/stb/lcd/fps", "w")
f.write("%d \n" % configElement.value)
f.close()
except:
pass
def setLCDModePiP(configElement):
pass
config.lcd.modepip = ConfigSelection(choices={
"0": _("off"),
"5": _("PIP"),
"7": _("PIP with OSD")},
default = "0")
if config.misc.boxtype.value == 'gbquad' or config.misc.boxtype.value == 'gbquadplus':
config.lcd.modepip.addNotifier(setLCDModePiP)
else:
config.lcd.modepip = ConfigNothing()
config.lcd.modeminitv = ConfigSelection(choices={
"0": _("normal"),
"1": _("MiniTV"),
"2": _("OSD"),
"3": _("MiniTV with OSD")},
default = "0")
config.lcd.fpsminitv = ConfigSlider(default=30, limits=(0, 30))
config.lcd.modeminitv.addNotifier(setLCDModeMinitTV)
config.lcd.fpsminitv.addNotifier(setMiniTVFPS)
else:
config.lcd.modeminitv = ConfigNothing()
config.lcd.fpsminitv = ConfigNothing()
config.lcd.scroll_speed = ConfigSelection(default = "300", choices = [
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
config.lcd.scroll_delay = ConfigSelection(default = "10000", choices = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
def setLCDbright(configElement):
ilcd.setBright(configElement.value);
def setLCDcontrast(configElement):
ilcd.setContrast(configElement.value);
def setLCDinverted(configElement):
ilcd.setInverted(configElement.value);
def setLCDflipped(configElement):
ilcd.setFlipped(configElement.value);
def setLCDmode(configElement):
ilcd.setMode(configElement.value);
def setLCDpower(configElement):
ilcd.setPower(configElement.value);
def setfblcddisplay(configElement):
ilcd.setfblcddisplay(configElement.value);
def setLCDshowoutputresolution(configElement):
ilcd.setShowoutputresolution(configElement.value);
def setLCDminitvmode(configElement):
ilcd.setLCDMiniTVMode(configElement.value)
def setLCDminitvpipmode(configElement):
ilcd.setLCDMiniTVPIPMode(configElement.value)
def setLCDminitvfps(configElement):
ilcd.setLCDMiniTVFPS(configElement.value)
def setLEDnormalstate(configElement):
ilcd.setLEDNormalState(configElement.value);
def setLEDdeepstandby(configElement):
ilcd.setLEDDeepStandbyState(configElement.value);
def setLEDblinkingtime(configElement):
ilcd.setLEDBlinkingTime(configElement.value);
def setPowerLEDstanbystate(configElement):
if fileExists("/proc/stb/power/standbyled"):
f = open("/proc/stb/power/standbyled", "w")
f.write(configElement.value)
f.close()
config.usage.lcd_standbypowerled = ConfigSelection(default = "on", choices = [("off", _("Off")), ("on", _("On"))])
config.usage.lcd_standbypowerled.addNotifier(setPowerLEDstanbystate)
standby_default = 0
ilcd = LCD()
if not ilcd.isOled():
config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20))
config.lcd.contrast.addNotifier(setLCDcontrast);
else:
config.lcd.contrast = ConfigNothing()
standby_default = 1
if getBoxType() in ('mixosf5', 'mixosf5mini', 'gi9196m', 'gi9196lite', 'zgemmas2s', 'zgemmash1', 'zgemmash2'):
config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 4))
config.lcd.bright = ConfigSlider(default=4, limits=(0, 4))
else:
config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10))
config.lcd.bright = ConfigSlider(default=5, limits=(0, 10))
config.lcd.standby.addNotifier(setLCDbright);
config.lcd.standby.apply = lambda : setLCDbright(config.lcd.standby)
config.lcd.bright.addNotifier(setLCDbright);
config.lcd.bright.apply = lambda : setLCDbright(config.lcd.bright)
config.lcd.bright.callNotifiersOnSaveAndCancel = True
config.lcd.invert = ConfigYesNo(default=False)
config.lcd.invert.addNotifier(setLCDinverted);
config.lcd.flip = ConfigYesNo(default=False)
config.lcd.flip.addNotifier(setLCDflipped)
if SystemInfo["LcdLiveTV"]:
def lcdLiveTvChanged(configElement):
open(SystemInfo["LcdLiveTV"], "w").write(configElement.value and "0" or "1")
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
InfoBarInstance and InfoBarInstance.session.open(dummyScreen)
config.lcd.showTv = ConfigYesNo(default = False)
config.lcd.showTv.addNotifier(lcdLiveTvChanged)
if SystemInfo["LCDMiniTV"]:
config.lcd.minitvmode = ConfigSelection([("0", _("normal")), ("1", _("MiniTV")), ("2", _("OSD")), ("3", _("MiniTV with OSD"))], "0")
config.lcd.minitvmode.addNotifier(setLCDminitvmode)
config.lcd.minitvpipmode = ConfigSelection([("0", _("off")), ("5", _("PIP")), ("7", _("PIP with OSD"))], "0")
config.lcd.minitvpipmode.addNotifier(setLCDminitvpipmode)
config.lcd.minitvfps = ConfigSlider(default=30, limits=(0, 30))
config.lcd.minitvfps.addNotifier(setLCDminitvfps)
if SystemInfo["VFD_scroll_repeats"] and getBoxType() not in ('ixussone', 'ixusszero'):
def scroll_repeats(el):
open(SystemInfo["VFD_scroll_repeats"], "w").write(el.value)
choicelist = [("0", _("None")), ("1", _("1X")), ("2", _("2X")), ("3", _("3X")), ("4", _("4X")), ("500", _("Continues"))]
config.usage.vfd_scroll_repeats = ConfigSelection(default = "3", choices = choicelist)
config.usage.vfd_scroll_repeats.addNotifier(scroll_repeats, immediate_feedback = False)
if SystemInfo["VFD_scroll_delay"] and getBoxType() not in ('ixussone', 'ixusszero'):
def scroll_delay(el):
open(SystemInfo["VFD_scroll_delay"], "w").write(str(el.value))
config.usage.vfd_scroll_delay = ConfigSlider(default = 150, increment = 10, limits = (0, 500))
config.usage.vfd_scroll_delay.addNotifier(scroll_delay, immediate_feedback = False)
config.lcd.hdd = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
else:
config.lcd.hdd = ConfigNothing()
if SystemInfo["VFD_initial_scroll_delay"] and getBoxType() not in ('ixussone', 'ixusszero'):
def initial_scroll_delay(el):
open(SystemInfo["VFD_initial_scroll_delay"], "w").write(el.value)
choicelist = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("0", _("no delay"))]
config.usage.vfd_initial_scroll_delay = ConfigSelection(default = "1000", choices = choicelist)
config.usage.vfd_initial_scroll_delay.addNotifier(initial_scroll_delay, immediate_feedback = False)
if SystemInfo["VFD_final_scroll_delay"] and getBoxType() not in ('ixussone', 'ixusszero'):
def final_scroll_delay(el):
open(SystemInfo["VFD_final_scroll_delay"], "w").write(el.value)
choicelist = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("0", _("no delay"))]
config.usage.vfd_final_scroll_delay = ConfigSelection(default = "1000", choices = choicelist)
config.usage.vfd_final_scroll_delay.addNotifier(final_scroll_delay, immediate_feedback = False)
if fileExists("/proc/stb/lcd/show_symbols"):
config.lcd.mode = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
config.lcd.mode.addNotifier(setLCDmode);
else:
config.lcd.mode = ConfigNothing()
if fileExists("/proc/stb/power/vfd") or fileExists("/proc/stb/lcd/vfd"):
config.lcd.power = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
config.lcd.power.addNotifier(setLCDpower);
else:
config.lcd.power = ConfigNothing()
if fileExists("/proc/stb/fb/sd_detach"):
config.lcd.fblcddisplay = ConfigSelection([("1", _("No")), ("0", _("Yes"))], "1")
config.lcd.fblcddisplay.addNotifier(setfblcddisplay);
else:
config.lcd.fblcddisplay = ConfigNothing()
if fileExists("/proc/stb/lcd/show_outputresolution"):
config.lcd.showoutputresolution = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
config.lcd.showoutputresolution.addNotifier(setLCDshowoutputresolution);
else:
config.lcd.showoutputresolution = ConfigNothing()
if getBoxType() == 'vuultimo':
config.lcd.ledblinkingtime = ConfigSlider(default = 5, increment = 1, limits = (0,15))
config.lcd.ledblinkingtime.addNotifier(setLEDblinkingtime);
config.lcd.ledbrightnessdeepstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDnormalstate);
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDdeepstandby);
config.lcd.ledbrightnessdeepstandby.apply = lambda : setLEDdeepstandby(config.lcd.ledbrightnessdeepstandby)
config.lcd.ledbrightnessstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessstandby.addNotifier(setLEDnormalstate);
config.lcd.ledbrightnessstandby.apply = lambda : setLEDnormalstate(config.lcd.ledbrightnessstandby)
config.lcd.ledbrightness = ConfigSlider(default = 3, increment = 1, limits = (0,15))
config.lcd.ledbrightness.addNotifier(setLEDnormalstate);
config.lcd.ledbrightness.apply = lambda : setLEDnormalstate(config.lcd.ledbrightness)
config.lcd.ledbrightness.callNotifiersOnSaveAndCancel = True
else:
def doNothing():
pass
config.lcd.ledbrightness = ConfigNothing()
config.lcd.ledbrightness.apply = lambda : doNothing()
config.lcd.ledbrightnessstandby = ConfigNothing()
config.lcd.ledbrightnessstandby.apply = lambda : doNothing()
config.lcd.ledbrightnessdeepstandby = ConfigNothing()
config.lcd.ledbrightnessdeepstandby.apply = lambda : doNothing()
config.lcd.ledblinkingtime = ConfigNothing()
else:
def doNothing():
pass
config.lcd.contrast = ConfigNothing()
config.lcd.bright = ConfigNothing()
config.lcd.standby = ConfigNothing()
config.lcd.bright.apply = lambda : doNothing()
config.lcd.standby.apply = lambda : doNothing()
config.lcd.power = ConfigNothing()
config.lcd.fblcddisplay = ConfigNothing()
config.lcd.mode = ConfigNothing()
config.lcd.hdd = ConfigNothing()
config.lcd.scroll_speed = ConfigSelection(choices = [("300", _("normal"))])
config.lcd.scroll_delay = ConfigSelection(choices = [("noscrolling", _("off"))])
config.lcd.showoutputresolution = ConfigNothing()
config.lcd.ledbrightness = ConfigNothing()
config.lcd.ledbrightness.apply = lambda : doNothing()
config.lcd.ledbrightnessstandby = ConfigNothing()
config.lcd.ledbrightnessstandby.apply = lambda : doNothing()
config.lcd.ledbrightnessdeepstandby = ConfigNothing()
config.lcd.ledbrightnessdeepstandby.apply = lambda : doNothing()
config.lcd.ledblinkingtime = ConfigNothing()
config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call = False)
| 0.032974 |
#!/usr/bin/python
#specific to snipe 3.5.1
import cStringIO
import os
import pycurl
import sys
import tempfile
import json
import string
import random
import time
from HTMLParser import HTMLParser
import urllib
class FieldsetHtmlParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.fields = False
self.fieldsets = {}
self.temp = None
self.tempb = None
self.tempfieldsets = []
self.ct = 0
def handle_starttag(self, tag, attributes):
if self.fields is False: #look at fieldsets first
if tag == 'a':
#look at fields once we find the a tag of the create new field button
if "/admin/custom_fields/create-field" in str(attributes):
self.fields = {}
else:
for key, value in attributes:
if key == 'href':
if "/admin/custom_fields/" in value and "create" not in value:
t = value.split("/")
self.temp = t[-1]
else:
if tag == 'form':
if self.ct == 5:
self.ct = 0
for key, value in attributes:
if "/admin/custom_fields/delete-field/" in value:
t = value.split("/")
self.fields[t[-1]] = {'name':self.tempb, 'fieldsets':self.tempfieldsets}
self.tempb = None
self.tempfieldsets = []
if tag == 'td':
self.ct = self.ct + 1
if self.ct == 1:
self.temp = True
if tag == 'a':
if self.ct == 4:
for key, value in attributes:
if "/admin/custom_fields" in value:
t = value.split("/")
self.tempfieldsets.append(t[-1])
def handle_data(self, data):
if self.temp is not None:
if self.fields is False:
self.fieldsets[self.temp] = data
else:
self.tempb = data
self.temp = None
def get_fieldsets(self):
return self.fieldsets
def get_fields(self):
return self.fields
class AssetdataHtmlParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.data = []
self.open = False
self.lastkey = None
self.lastvalue = None
self.lastdata = None
self.lasttag = None
self.closed = True
        self.lastchecked = False
self.ischeck = False
def handle_starttag(self, tag, attributes):
if tag == 'option':
if "selected" in str(attributes):
for key, value in attributes:
if key == "value":
self.lastvalue = value
if tag == 'textarea' or tag == 'select' or tag == 'input':
self.lastkey = None
self.lastvalue = None
self.lastdata = None
for key, value in attributes:
if key == "name":
self.lastkey = value
if key == "value":
self.lastvalue = value
if key == "checked":
self.lastchecked = True
if key == "type":
if value.lower() == "checkbox":
self.ischeck = True
#patch for the missing input closing tag on snipeit custom fields parsing
if self.open and self.lastkey is not None and tag == 'input':
if self.lastchecked and self.ischeck:
self.data.append([self.lastkey, "1"])
else:
self.data.append([self.lastkey, str(self.lastvalue).strip()])
self.open = False
self.lastchecked = False
self.ischeck = False
return
if tag == 'textarea':
self.lasttag = 'textarea'
self.open = True
def handle_data(self, data):
if self.open:
if str(self.lasttag) == 'textarea':
if self.lastvalue is not None:
self.lastvalue = self.lastvalue+str(data)
else:
self.lastvalue = data
return
if not data.startswith("Select") and "No custom fields" not in data:
self.lastdata = data
def handle_endtag(self, tag):
if self.lasttag == 'textarea':
self.lasttag = None
elif tag == 'select' or tag == 'input' or tag == 'textarea':
if self.lastdata is not None:
self.data.append([self.lastkey, str(self.lastvalue).strip(), str(self.lastdata).strip()])
elif self.lastvalue is not None:
self.data.append([self.lastkey, str(self.lastvalue).strip()])
else:
self.data.append([self.lastkey])
self.open = False
self.lastchecked = False
self.ischeck = False
def getData(self):
return self.data
class SnipeAPIAdapter():
#timeout in seconds, endpoint is http://snipeurl.com (no final /)
def __init__(self, endpoint, username, password, timeout=10):
self.timeout = timeout
self.username = username
self.password = password
self.endpoint = endpoint
#private variables
self.glob_token = None
self.glob_cookie = None
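    # Hedged usage sketch (endpoint and credentials are placeholders, not taken from this file):
    #   adapter = SnipeAPIAdapter("http://snipe.example.com", "admin", "secret")
    #   status_ids = adapter.getStatusId()
    #   asset_id = adapter.getAssetId(tag="BUS-0001", model_id=1, status_id=status_ids.values()[0])
    #   adapter.cleanup()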
def cleanup(self):
        self.glob_token = None
        #remove the temporary cookie file now that we're done with our login/requests;
        #this must happen before glob_cookie is cleared, otherwise the file is never removed
        if self.glob_cookie is not None:
            os.remove(self.glob_cookie.name)
        self.glob_cookie = None
def curlQuery(self, url_suffix, request_type, post_data=None, header=None):
if self.glob_cookie is None:
#set a cookie to login and such
self.glob_cookie = tempfile.NamedTemporaryFile(delete=False)
response = cStringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, str(self.endpoint)+str(url_suffix))
c.setopt(c.TIMEOUT, self.timeout)
c.setopt(c.COOKIEJAR, self.glob_cookie.name)
c.setopt(c.COOKIEFILE, self.glob_cookie.name)
c.setopt(c.CUSTOMREQUEST, request_type)
c.setopt(c.WRITEFUNCTION, response.write)
if header is not None:
c.setopt(c.HTTPHEADER, header)
if post_data is not None:
#e.g. post_data looks like post_data = {'field': 'value'}
postfields = urllib.urlencode(post_data)
c.setopt(c.POSTFIELDS, postfields)
running = True
while running:
try:
c.perform()
running = False
except:
print("Failed to query "+str(url_suffix)+"... Retrying")
c.close()
return response.getvalue()
def queryAPI(self, api_suffix, post_data_api=None):
if self.glob_token is None:
token_response = self.curlQuery(url_suffix="/login", request_type="GET")
for line in token_response.split("\n"):
if "_token" in line:
parts = line.split("\"")
self.glob_token = parts[5]
break
if self.glob_token is None:
return ""
#actually login
header = ["Accept: text/html,application/json"]
post_data = {'_token':self.glob_token,'username':self.username,'password':self.password}
self.curlQuery(url_suffix="/login", request_type="POST", post_data=post_data, header=header)
#do the api query
header = ["Accept: text/html,application/json", "_token: "+self.glob_token]
if post_data_api is None:
data_reply = self.curlQuery(url_suffix=api_suffix, request_type="GET", header=header)
else:
post_data_api.update({'_token':self.glob_token})
header = ["Accept: text/html,application/json"]
data_reply = self.curlQuery(url_suffix=api_suffix, request_type="POST", header=header, post_data=post_data_api)
return data_reply
#returns all of the current status labels and ids for use in asset editing / creation
def getStatusId(self):
out = {}
response = self.queryAPI(api_suffix="/api/statuslabels/list?sort=asc&limit=1000")
j_resp = json.loads(response)
if len(j_resp['rows']) > 0:
for row in j_resp['rows']:
out[row['name']] = row['id']
return out
#init to deployed/deployable/54b552, fault/pending/ff0000, spare/deployable/ff7a00, repairing/pending/00cfff
#in English: deletes all the starting statuses and initiates our statuses (only should be run once at db init, probably)... this is left as lingering code
def initStatuses(self):
status = {"Deployed":"Deployable/54b552", "Fault":"Pending/ff0000", "Spare":"Deployable/ff7a00", "Repairing":"Pending/00cfff"}
#determine if the statuses are already in place, if not, add the new ones and delete the ones already there that shouldn't be there
unaltered = []
delete = []
response = self.queryAPI(api_suffix="/api/statuslabels/list?sort=asc&limit=1000")
j_resp = json.loads(response)
for row in j_resp['rows']:
if row['name'] in status:
item = status[row['name']]
parts = item.split("/")
#row['color'] in format <div class="pull-left" style="margin-right: 5px; height: 20px; width: 20px; background-color: #54b552"></div>#54b552
if row['type'] in parts[0] and row['color'][-6:] in parts[1]:
unaltered.append(row['name'])
else:
delete.append(row['id'])
else:
delete.append(row['id'])
for key,value in status.iteritems():
if key not in unaltered:
#add these keys
parts = value.split("/")
#status label posted as all lower case...
post_data = {'name':key, 'statuslabel_types':parts[0].lower(), 'color':"#"+parts[1], 'notes':''}
response = self.queryAPI(api_suffix="/admin/settings/statuslabels/create", post_data_api=post_data)
print("added label "+key)
for key in delete:
url = "/admin/settings/statuslabels/"+key+"/delete"
response = self.queryAPI(api_suffix=url)
print("Deleted status "+key)
def getManufacturerId(self, manufacturer=None):
if manufacturer is None:
return False
reply = self.queryAPI(api_suffix="/api/manufacturers/list?sort=asc&limit=1000&search="+manufacturer)
j_reply = json.loads(reply)
for row in j_reply['rows']:
manu = row['name'].split("\">")[1].split("<")[0]
if manu == manufacturer:
return row['id']
post_data = {'name':manufacturer}
response = self.queryAPI(api_suffix="/admin/settings/manufacturers/create", post_data_api=post_data)
return self.getManufacturerId(manufacturer)
#gets the company name from the ID using the listing page (html parsing)
def getCompanyName(self, company_id):
reply = self.queryAPI(api_suffix="/admin/settings/companies")
start = 0
id = ""
for line in reply.split("\n"):
if start > 0:
if "<td>" in line:
if start == 1:
id = line.split(">")[1].split("<")[0]
if start == 2:
name = line.split(">")[1].split("<")[0]
if str(id).strip() == str(company_id).strip():
return name
start = start + 1
if "</tr>" in line:
start = 1
if "Company Name</th>" in line:
start = 1
return False
#creates the company if it doesn't exist
def getCompanyId(self, company=None):
if company is None:
return False
reply = self.queryAPI(api_suffix="/admin/settings/companies")
start = 0
id = ""
for line in reply.split("\n"):
if start > 0:
if "<td>" in line:
if start == 1:
id = line.split(">")[1].split("<")[0]
if start == 2:
name = line.split(">")[1].split("<")[0]
if str(name).strip() == str(company).strip():
return id
start = start + 1
if "</tr>" in line:
start = 1
if "Company Name</th>" in line:
start = 1
#if we make it here, the company doesn't exist
post_data = {'name':company}
response = self.queryAPI(api_suffix="/admin/settings/companies/create", post_data_api=post_data)
return self.getCompanyId(company)
#defaults to asset type, can also be "accessory", "consumable", or "component"
def getCategoryId(self, category=None, category_type="asset", eula_text=""):
if category is None:
return False
reply = self.queryAPI(api_suffix="/api/categories/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
thename = row['name'].split("\">")[1].split("<")[0]
if thename == category:
return row['id']
post_data = {'name':category, 'category_type':category_type, 'eula_text':eula_text}
response = self.queryAPI(api_suffix="/admin/settings/categories/create", post_data_api=post_data)
return self.getCategoryId(category)
def getAssetModelNameFromId(self, asset_id):
#there shouldn't be more than 1000 asset model names, if there are, this should be rethought out
reply = self.queryAPI(api_suffix="/api/models/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
theid = str(row['id'])
thename = row['name'].split("\">")[1].split("<")[0]
if str(asset_id) == theid:
return thename
#this will automatically create a manufacturer and category if one doesn't exist (as defined here)
def getAssetModelId(self, asset_model_name=None, manufacturer=None, category=None, model_number="", notes="", custom_fieldset_id=""):
if asset_model_name is None:
return False
reply = self.queryAPI(api_suffix="/api/models/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
thename = row['name'].split("\">")[1].split("<")[0]
if thename == asset_model_name:
return row['id']
if manufacturer is None or category is None:
return False
#figure out what the category and manufacturer ids are (or create the missing ones)
manufacturer_id = self.getManufacturerId(manufacturer)
if str(category).isdigit():
category_id = category
else:
category_id = self.getCategoryId(category)
if category_id is None or manufacturer_id is None:
return False
post_data = {'name':asset_model_name, 'modelno':model_number, 'note':notes, 'filename':'', 'custom_fieldset':custom_fieldset_id, \
'eol':'', 'depreciation_id':'', 'category_id':category_id, 'manufacturer_id':manufacturer_id}
response = self.queryAPI(api_suffix="/hardware/models/create", post_data_api=post_data)
return self.getAssetModelId(asset_model_name)
def getSupplierId(self, supplier_name=None, contact="", phone="", email="", notes=""):
if supplier_name is None:
return False
reply = self.queryAPI(api_suffix="/api/suppliers/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
thename = row['name'].split("\">")[1].split("<")[0]
if thename == supplier_name:
return row['id']
post_data = {'name':supplier_name, 'contact':contact, 'phone':phone, 'email':email, 'notes':notes, 'address':'', 'address2':'', 'city':'', 'state':'', 'country':'', 'zip':'', 'fax':'', 'url':''}
response = self.queryAPI(api_suffix="/admin/settings/suppliers/create", post_data_api=post_data)
return self.getSupplierId(supplier_name)
def getLocationName(self, location_id):
reply = self.queryAPI(api_suffix="/api/locations/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
thename = row['name'].split("\">")[1].split("<")[0]
if str(location_id).strip() == str(row['id']).strip():
return thename
return False
def getLocationId(self, location_name=None, address="", city="", state="", zip=""):
if location_name is None:
return False
if len(location_name) < 3:
print("The location "+str(location_name)+" is too short, it must be at least 3 characters")
return False
reply = self.queryAPI(api_suffix="/api/locations/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
thename = row['name'].split("\">")[1].split("<")[0]
if thename == location_name:
return row['id']
post_data = {'name':location_name, 'address':address, 'address2':'', 'city':city, 'state':state, 'country':'', 'zip':zip, 'currency':'', 'parent_id':''}
response = self.queryAPI(api_suffix="/admin/settings/locations/create", post_data_api=post_data)
return self.getLocationId(location_name)
def getUserGroupId(self, group_name=None):
if group_name is None:
return False
reply = self.queryAPI(api_suffix="/api/groups/list?sort=asc&limit=1000")
j_reply = json.loads(reply)
for row in j_reply['rows']:
if row['name'] == group_name:
return row['id']
post_data = {'permission[superuser]':0,'permission[admin]':0,'permission[reports.view]':0,'permission[assets.view]':0, \
'permission[assets.create]':0, 'permission[assets.edit]':0, 'permission[assets.delete]':0, 'permission[assets.checkin]':0, \
'permission[assets.checkout]':0, 'permission[assets.view.requestable]':0, 'permission[accessories.view]':0, 'permission[accessories.create]':0, \
'permission[accessories.edit]':0, 'permission[accessories.delete]':0, 'permission[accessories.checkout]':0, 'permission[accessories.checkin]':0, \
'permission[consumables.view]':0, 'permission[consumables.create]':0, 'permission[consumables.edit]':0, 'permission[consumables.delete]':0, \
'permission[consumables.checkout]':0, 'permission[licenses.view]':0, 'permission[licenses.create]':0, 'permission[licenses.edit]':0, \
'permission[licenses.delete]':0, 'permission[licenses.checkout]':0, 'permission[licenses.keys]':0, 'permission[components.view]':0, \
'permission[components.create]':0, 'permission[components.edit]':0, 'permission[components.delete]':0, 'permission[components.checkout]':0, \
'permission[components.checkin]':0, 'permission[users.view]':0, 'permission[users.create]':0, 'permission[users.edit]':0, 'permission[users.delete]':0, \
'name':group_name}
response = self.queryAPI(api_suffix="/admin/groups/create", post_data_api=post_data)
return self.getUserGroupId(group_name)
#gets all the users in the system (100k of them at least... if we have more than this, there is likely an issue)
def getUserIds(self, prefix=""):
#need to loop for this data (1000 max at a time supported by snipe)
offset = 0
total = 1
ids = {}
#doing 1000 at a time
while offset < total:
print ("User ids retrieving query offset "+str(offset)+" of total user count "+str(total))
response = self.queryAPI(api_suffix="/api/users/list?sort=asc&offset="+str(offset)+"&limit=1000&search="+prefix)
j_response = json.loads(response)
total = j_response['total']
offset = offset + 1000
for row in j_response['rows']:
ids.update({row['id']:row['username']})
return ids
#changes the group of the user id in 3 queries (we shouldn't use this often enough to justify using a cached group name)
def editUserGroup(self, username, group):
user_id = False
request = self.queryAPI(api_suffix="/api/users/list?sort=asc&limit=25&search="+username)
j_request = json.loads(request)
for item in j_request['rows']:
if item['username'] == username:
user_id = item['id']
new_group_id = self.getUserGroupId(group_name=group)
if new_group_id is False or user_id is False:
return False
post_data = {'permission[superuser]':0,'permission[admin]':0,'permission[reports.view]':0,'permission[assets.view]':0, \
'permission[assets.create]':0, 'permission[assets.edit]':0, 'permission[assets.delete]':0, 'permission[assets.checkin]':0, \
'permission[assets.checkout]':0, 'permission[assets.view.requestable]':0, 'permission[accessories.view]':0, 'permission[accessories.create]':0, \
'permission[accessories.edit]':0, 'permission[accessories.delete]':0, 'permission[accessories.checkout]':0, 'permission[accessories.checkin]':0, \
'permission[consumables.view]':0, 'permission[consumables.create]':0, 'permission[consumables.edit]':0, 'permission[consumables.delete]':0, \
'permission[consumables.checkout]':0, 'permission[licenses.view]':0, 'permission[licenses.create]':0, 'permission[licenses.edit]':0, \
'permission[licenses.delete]':0, 'permission[licenses.checkout]':0, 'permission[licenses.keys]':0, 'permission[components.view]':0, \
'permission[components.create]':0, 'permission[components.edit]':0, 'permission[components.delete]':0, 'permission[components.checkout]':0, \
'permission[components.checkin]':0, 'permission[users.view]':0, 'permission[users.create]':0, 'permission[users.edit]':0, 'permission[users.delete]':0, \
'first_name':username, 'last_name':'', 'username':username, 'password':'', 'password_confirm':'', 'email':'', 'company_id':0, 'locale':'', \
'employee_num':'', 'jobtitle':'', 'manager_id':'', 'location_id':'', 'phone':'', 'activated':1, 'notes':'', 'groups[]':new_group_id}
response = self.queryAPI(api_suffix="/admin/users/"+str(user_id)+"/edit", post_data_api=post_data)
return True
#creates a user and returns that ID if nothing found
#bus is the default group for new vehicles that hold hardware (users are containers here)
def getUserId(self, username=None, group="bus"):
if username is None:
return False
request = self.queryAPI(api_suffix="/api/users/list?sort=asc&limit=1000&search="+username)
j_request = json.loads(request)
for item in j_request['rows']:
if item['username'] == username:
return item['id']
group_id = self.getUserGroupId(group_name=group)
#create the user
random_pass = ''.join(random.sample(string.ascii_uppercase + string.digits*8, 8))
post_data = {'permission[superuser]':0,'permission[admin]':0,'permission[reports.view]':0,'permission[assets.view]':0, \
'permission[assets.create]':0, 'permission[assets.edit]':0, 'permission[assets.delete]':0, 'permission[assets.checkin]':0, \
'permission[assets.checkout]':0, 'permission[assets.view.requestable]':0, 'permission[accessories.view]':0, 'permission[accessories.create]':0, \
'permission[accessories.edit]':0, 'permission[accessories.delete]':0, 'permission[accessories.checkout]':0, 'permission[accessories.checkin]':0, \
'permission[consumables.view]':0, 'permission[consumables.create]':0, 'permission[consumables.edit]':0, 'permission[consumables.delete]':0, \
'permission[consumables.checkout]':0, 'permission[licenses.view]':0, 'permission[licenses.create]':0, 'permission[licenses.edit]':0, \
'permission[licenses.delete]':0, 'permission[licenses.checkout]':0, 'permission[licenses.keys]':0, 'permission[components.view]':0, \
'permission[components.create]':0, 'permission[components.edit]':0, 'permission[components.delete]':0, 'permission[components.checkout]':0, \
'permission[components.checkin]':0, 'permission[users.view]':0, 'permission[users.create]':0, 'permission[users.edit]':0, 'permission[users.delete]':0, \
'first_name':username, 'last_name':'', 'username':username, 'password':random_pass, 'password_confirm':random_pass, 'email':'', 'company_id':0, 'locale':'', \
'employee_num':'', 'jobtitle':'', 'manager_id':'', 'location_id':'', 'phone':'', 'activated':1, 'notes':'', 'groups[]':group_id}
response = self.queryAPI(api_suffix="/admin/users/create", post_data_api=post_data)
return self.getUserId(username) #simply call this function again to get the user id once we've posted.
#gets all the assets in the system (250k of them at least... if we have more than this, there is likely an issue)
def getAssetIds(self, prefix=""):
ids = {}
#need to loop for this data (1000 max at a time supported by snipe)
offset = 0
total = 1
#doing 1000 at a time
while offset < total:
print ("Asset ids retrieving query offset "+str(offset)+" of total asset count "+str(total))
response = self.queryAPI(api_suffix="/api/hardware/list?sort=asc&limit=1000&offset="+str(offset)+"&search="+prefix)
j_response = json.loads(response)
total = j_response['total']
offset = offset + 1000
for row in j_response['rows']:
thename = row['name'].split("\">")[1].split("<")[0].replace("\\", "")
theid = row['id']
ids.update({theid:thename})
return ids
def getAssetId(self, tag=None, user_id="", model_id=None, status_id=None, serial="", company_id="", supplier_id="", purchase_date="", purchase_cost="", order="", warranty_months="", notes="", location_id="", custom_field_def={}):
if tag is None:
return False
if purchase_cost is not "":
purchase_cost = ("%.2f" % float(purchase_cost))
response = self.queryAPI(api_suffix="/api/hardware/list?sort=asc&limit=25&search="+tag)
j_response = json.loads(response)
for row in j_response['rows']:
thename = row['name'].split("\">")[1].split("<")[0].replace("\\", "")
if thename == tag:
return row['id']
if model_id is None or status_id is None:
return False #return false for these only if we aren't doing a lookup
#purchase_date in yyyy-mm-dd
post_data = {'asset_tag':tag, 'model_id':model_id, 'status_id':status_id, 'assigned_to':user_id, 'serial':serial, 'name':tag, 'company_id':company_id, \
'purchase_date':purchase_date, 'supplier_id':supplier_id, 'order_number':order, 'purchase_cost':purchase_cost, 'warranty_months':warranty_months, \
'notes':notes, 'rtd_location_id':location_id, 'requestable':0, 'image':''}
if len(custom_field_def) > 0:
for key in custom_field_def:
thekey = "_snipeit_"+key.lower()
post_data[thekey] = custom_field_def[key]
response = self.queryAPI(api_suffix="/hardware/create", post_data_api=post_data)
return self.getAssetId(tag)
def getAssetUsername(self, tag=None):
        if not tag:
            return False
response = self.queryAPI(api_suffix="/api/hardware/list?search="+str(tag))
j_response = json.loads(response)
for row in j_response['rows']:
thename = row['name'].split("\">")[1].split("<")[0].replace("\\", "")
if thename == tag:
return row['assigned_to'].split("\">")[1].split("<")[0].replace("\\", "")
return False
def getAllUsersMap(self, prefix):
#get all of the current users in snipe (for vehicle users)
currentusers_byid = self.getUserIds(prefix=prefix)
currentusers = {}
for key in currentusers_byid:
tid = str(key)
tuser = str(currentusers_byid[key]).lower()
if tuser in currentusers:
print("WARN: the user "+tuser+" is already in the current users cache with id "+str(currentusers[tuser])+", it will be replaced with id "+str(tid))
currentusers.update({tuser:tid})
return currentusers
#example method call:
#grabs all of the asset data, similarly to getAssetData with all users/ids, but a full array, memory intensive
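    #   e.g. (hypothetical prefix and field names): data = self.getAllAssetDataForEditing(prefix="BUS", custom_field_def=["mac_address"])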
def getAllAssetDataForEditing(self, prefix="", custom_field_def=[]):
data = {}#assetid:data
#need to loop for this data (1000 max at a time supported by snipe)
offset = 0
total = 1
#get the status ids in the system
status_ids = self.getStatusId()
company_ids = {} #in format "agency":"id"
#doing 1000 at a time
while offset < total:
print ("Asset ids retrieving query offset "+str(offset)+" of total asset count "+str(total))
response = self.queryAPI(api_suffix="/api/hardware/list?sort=asc&limit=1000&offset="+str(offset)+"&search="+prefix)
j_response = json.loads(response)
total = j_response['total']
offset = offset + 1000
for row in j_response['rows']:
                #supplier is not supported by the api to allow bulk editing, this should only change once on create anyways (or modified manually in bulk)
                #warranty is not supported by the api to allow bulk editing, this should only change once on create anyways (or modified manually in bulk)
                #requestable is not supported by the api, so this is also omitted
post_data = {'asset_tag':'', 'model_id':'', 'assigned_to':'', 'serial':'', 'name':'', 'company_id':'', \
'purchase_date':'', 'order_number':'', 'purchase_cost':'', 'notes':'', 'rtd_location_id':'', 'image':''}
if len(custom_field_def) > 0:
for key in custom_field_def:
thekey = "_snipeit_"+key.lower()
post_data[thekey] = ''
if len(str(row['asset_tag']).strip()) > 0:
#status_id is only updated if it's not "deployed"
if str(row['status_label']).strip().lower() != "deployed":
if str(row['status_label']).strip() in status_ids:
post_data.update({'status_id':str(status_ids[str(row['status_label']).strip()]).strip()})
post_data['asset_tag'] = str(row['asset_tag']).split("\">")[1].split("<")[0].replace("\\", "")
post_data['model_id'] = str(row['model']).split("/hardware/models/")[1].split("/view\"")[0]
post_data['assigned_to'] = str(row['assigned_to']).split("admin/users/")[1].split("/view\"")[0]
post_data['serial'] = str(row['serial']).strip()
post_data['name'] = str(row['name']).split("\">")[1].split("<")[0].replace("\\", "")
if str(row['companyName']).strip() not in company_ids:
print("query companyid for "+str(row['companyName']).strip())
company_id = self.getCompanyId(str(row['companyName']).strip()) #returns a company id or creates one
company_ids.update({str(row['companyName']).strip():str(company_id)})
post_data['company_id'] = company_ids[str(row['companyName']).strip()]
if str(row['purchase_date']).strip().lower() != "none":
post_data['purchase_date'] = str(row['purchase_date']).strip()
post_data['order_number'] = str(row['order_number']).strip()
if str(row['purchase_cost']).strip().lower() != "none":
post_data['purchase_cost'] = str(row['purchase_cost']).strip()
post_data['notes'] = str(row['notes']).strip()
post_data['rtd_location_id'] = str(row['location']).split("settings/locations/")[1].split("/view\"")[0]
if len(str(row['image']).strip()) > 0:
post_data['image'] = str(row['image']).strip()
#handle custom fields
if len(custom_field_def) > 0:
for key in custom_field_def:
thekey = "_snipeit_"+key.lower()
post_data[thekey] = str(row[thekey]).strip()
theid = str(row['id']).strip()
data.update({theid:post_data})
return data
def getAssetData(self, id=None, custom_field_def=[]):
if not id:
return False
if not str(id).isdigit():
return False
#get the data from the edit page by parsing the HTML form fields
html = self.queryAPI(api_suffix="/hardware/"+str(id)+"/edit")
parser = AssetdataHtmlParser()
parser.feed(html)
data = parser.getData()
parser.close()
post_data = {'asset_tag':'', 'model_id':'', 'status_id':'', 'assigned_to':'', 'serial':'', 'name':'', 'company_id':'', \
'purchase_date':'', 'supplier_id':'', 'order_number':'', 'purchase_cost':'', 'warranty_months':'', \
'notes':'', 'rtd_location_id':'', 'requestable':'', 'image':''}
if len(custom_field_def) > 0:
for key in custom_field_def:
thekey = "_snipeit_"+key.lower()
post_data[thekey] = ''
for item in data:
if item[0] in post_data:
if len(item) > 1:
if len(item[1]) > 0:
if not item[1].lower().strip() == "none":
post_data[item[0]] = item[1]
return post_data
def editAsset(self, tag=None, data_array=None, asset_id=None, model_id=None, status_id=None, serial=None, company_id=None, supplier_id=None, purchase_date=None, purchase_cost=None, order=None, warranty_months=None, notes=None, location_id=None, custom_field_def={}):
if tag is None:
#to do the data array post (editing from outside source), we must have both the asset id and data array with ALL variables populated correctly
if data_array is None or asset_id is None:
return False
#use the data array to post back to the management system
self.queryAPI(api_suffix="/hardware/"+str(asset_id)+"/edit", post_data_api=data_array)
return True
if purchase_cost is not None:
purchase_cost = ("%.2f" % float(purchase_cost))
asset_id = self.getAssetId(tag)
if not asset_id:
return False
existing_data = self.getAssetData(id=asset_id, custom_field_def=custom_field_def)
#purchase_date in yyyy-mm-dd
edit_data = {'asset_tag':tag, 'model_id':model_id, 'status_id':status_id, 'serial':serial, 'name':tag, 'company_id':company_id, \
'purchase_date':purchase_date, 'supplier_id':supplier_id, 'order_number':order, 'purchase_cost':purchase_cost, 'warranty_months':warranty_months, \
'notes':notes, 'rtd_location_id':location_id, 'requestable':'', 'image':''}
if len(custom_field_def) > 0:
for key in custom_field_def:
thekey = "_snipeit_"+key.lower()
edit_data[thekey] = custom_field_def[key]
changes = []
for key in edit_data:
value = edit_data[key]
if value is not None:
if str(existing_data[key]) == str(edit_data[key]):
continue
changes.append([key, str(existing_data[key]), str(edit_data[key])])
existing_data[key] = edit_data[key]
if len(changes) > 0:
existing_data['notes'] = "ChangesTimeKeyOldNew("+time.strftime("%Y-%m-%d %H:%M:%S")+"): "+str(changes)+"\n"+existing_data['notes']
#publish the data with a new note
self.queryAPI(api_suffix="/hardware/"+str(asset_id)+"/edit", post_data_api=existing_data)
print "Data Edited Successfully on asset "+str(tag)+"("+str(asset_id)+")! Changes(keyoldnew): "+str(changes)
return asset_id
#checkout_date in yyyy-mm-dd
def checkout(self, asset_id=None, user_id=None, checkout_date=None, notes=''):
if asset_id is None or user_id is None:
return False
if checkout_date is None:
checkout_date = time.strftime("%Y-%m-%d")
if not str(asset_id).isdigit() or not str(user_id).isdigit():
return False
post_data = {'assigned_to':user_id, 'checkout_at':checkout_date, 'expected_checkin':'', 'note':notes}
response = self.queryAPI(api_suffix="/hardware/"+str(asset_id)+"/checkout", post_data_api=post_data)
return True
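# Usage sketch (illustrative comment only, not executed): assuming an already
# authenticated instance of this class named `snipe` (hypothetical name), an
# asset could be checked out to a user like this:
#   snipe.checkout(asset_id=42, user_id=7, checkout_date="2017-03-01", notes="new hire")
# checkout_date must be yyyy-mm-dd; both ids must be numeric or the call returns False.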
#create a new maintenance action, maintenancetype is "Repair", "Upgrade", "Maintenance"; start/end are in yyyy-mm-dd; one of supplier_name or supplier_id must be filled in
def createMaintenanceAction(self, asset_id, maintenancetype, name, start, supplier_id="", supplier_name="", end="", warrantyImprovement=False, cost="", notes=""):
types = ["Repair", "Upgrade", "Maintenance"]
if not str(asset_id).isdigit() or maintenancetype not in types or len(name) < 4:
return False
if len(str(supplier_id)) < 1:
if len(supplier_name) < 4:
return False
supplier_id = self.getSupplierId(supplier_name=supplier_name)
post_data = {'asset_id':asset_id, 'supplier_id':supplier_id, 'asset_maintenance_type':maintenancetype, 'title':name, 'start_date':start, \
'completion_date':end, 'cost':cost, 'notes':notes}
if warrantyImprovement is True:
post_data.update({'is_warranty':1})
response = self.queryAPI(api_suffix="/admin/asset_maintenances/create", post_data_api=post_data)
return True
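# Usage sketch (illustrative comment only): with the same hypothetical `snipe`
# instance, a repair entry could be logged as:
#   snipe.createMaintenanceAction(42, "Repair", "Screen replacement", "2017-03-01", supplier_name="Acme Repairs", cost="120.00")
# maintenancetype must be one of "Repair", "Upgrade" or "Maintenance", dates are
# yyyy-mm-dd, and either supplier_id or a supplier_name of 4+ characters is required.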
def getCustomFieldData(self):
#get the data from the edit page by parsing the HTML form fields
html = self.queryAPI(api_suffix="/admin/custom_fields")
parser = FieldsetHtmlParser()
parser.feed(html)
fieldsets = parser.get_fieldsets()
fields = parser.get_fields()
parser.close()
return [fieldsets, fields]
#only add fieldsets that don't exist
def getCustomFieldSets(self, name=None, custom_fields=[]):
if name is None:
return False
[fieldsets, fields] = self.getCustomFieldData()
fieldset_id = None
for key in fieldsets:
if fieldsets[key] == name:
fieldset_id = key
#we submitted only a name to get the id
if len(custom_fields) < 1:
if fieldset_id is not None:
return fieldset_id
elif fieldset_id is not None: #we submitted a name and fields... let's verify our picture is what exists
matches = []
for key in fields:
if str(fieldset_id) in fields[key]['fieldsets']:
matches.append(fieldsets[str(fieldset_id)])
#match found
if len(custom_fields) == len(matches):
return fieldset_id
#no match found
else:
return False
else:
#let's add this fieldset / fields
new_fields = []
existing_fields = []
for key in fields:
existing_fields.append(fields[key]['name'])
for customfield in custom_fields:
if customfield not in existing_fields:
new_fields.append(customfield)
#add new fields
if len(new_fields) > 0:
for thefn in new_fields:
post_data = {'name':thefn, 'element':'text', 'field_values':'', 'format':'ANY', 'custom_format':''}
self.queryAPI(api_suffix="/admin/custom_fields/create-field", post_data_api=post_data)
#add the fieldset
[fieldsets, fields] = self.getCustomFieldData()
assoc_fids = []
for key in fields:
if fields[key]['name'] in custom_fields:
assoc_fids.append(key)
order = 0
post_data = {'name':name}
result = self.queryAPI(api_suffix="/admin/custom_fields", post_data_api=post_data)
for line in result.split("\n"):
if "http-equiv=\"refresh\"" in line:
new_fs_id = line.split("\"")[-2].split("/")[-1]
for fkey in assoc_fids:
order = order + 1
post_data = {'order':str(order), 'field_id':str(fkey)}
self.queryAPI(api_suffix="/admin/custom_fields/"+str(new_fs_id)+"/associate", post_data_api=post_data)
return new_fs_id
return False
| 0.042287 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bundles for Jasmine test runner."""
from invenio.ext.assets import Bundle, RequireJSFilter
from invenio.base.bundles import jquery as _j, invenio as _i
jasmine_js = Bundle(
# es5-shim is needed by PhantomJS
# 'vendors/es5-shim/es5-shim.js',
# 'vendors/es5-shim/es5-sham.js',
"js/jasmine/init.js",
output="jasmine.js",
weight=50,
filters=RequireJSFilter(exclude=[_j, _i]),
bower={
"jasmine": ">=2",
"jasmine-jquery": ">=2",
"jasmine-flight": ">=3",
"jasmine-ajax": ">=2",
}
)
jasmine_styles = Bundle(
'vendors/jasmine/lib/jasmine-core/jasmine.css',
weight=-1,
output='jasmine.css'
)
| 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.addons.message_center_compassion.mappings.base_mapping import \
OnrampMapping
class ICPMapping(OnrampMapping):
ODOO_MODEL = 'compassion.child.ble'
MAPPING_NAME = "new_child_lifecyle"
CONNECT_MAPPING = {
'DateOfDeath': 'child_death_date',
'DeathCategory': 'child_death_category',
'DeathSubcategory': 'child_death_subcategory',
'FamilyImpact': 'family_impact',
'FutureHopes': 'future_hopes',
'IsFinalLetterSent': 'final_letter_sent',
'IsPrimarySchoolComplete': 'primary_school_finished',
'LastAttendedDate': 'last_attended_project',
'NewSituation': 'new_situation',
'SponsorImpact': 'sponsor_impact',
'CurrentICP': None,
'Status': 'Cancelled',
'DeathInterventionInformation': None,
'EffectiveDate': 'date',
'BeneficiaryLifecycleEvent_ID': None,
'ReasonForRequest': 'request_reason',
'RecordType': 'Planned Exit',
'ExpectedArrivalDate': None,
'NewBeneficiaryLocalNumber': ('child_id.local_id', 'compassion.child'),
'NewICPID': None,
'OtherReasonForTransfer': 'other_transfer_reason',
'BeneficiaryTransitionType': 'transition_type',
'NewProgram': None,
'PreviouslyActiveProgram': None,
'BeneficiaryStatus': None,
'Beneficiary_GlobalID': ('child_id.global_id', 'compassion.child'),
'Beneficiary_LocalID': ('child_id.local_id', 'compassion.child'),
# Not used in Odoo
'NewICPName': None,
}
FIELDS_TO_SUBMIT = {
'gpid': None,
}
CONSTANTS = {
'SourceKitName': 'BeneficiaryLifecycleEventKit',
'BeneficiaryStatus': 'Active',
}
| 0 |
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import global_functions as func
from multiprocessing import Process;
def check_server_resource(ip):
command="./check_linux_resource.sh"
os.system("%s %s"%(command,ip))
def main():
func.mysql_exec("insert into linux_resource_history select * from linux_resource",'')
func.mysql_exec("delete from linux_resource",'')
linux_servers_ip = func.get_config('linux_server','server_ip')
servers=linux_servers_ip.split("|")
if servers:
print("%s: check_server_resource controller started." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),));
plist = []
for ip in servers:
if ip != '':
print ip
p = Process(target = check_server_resource, args=(ip,))
plist.append(p)
p.start()
for p in plist:
p.join()
print("%s: check_server_resource controller finished." % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),))
if __name__=='__main__':
main()
| 0.031167 |
import nibabel as nib
import numpy as np
from os import path
from nilearn.input_data import NiftiMasker
from scipy.io import loadmat
from samri.report.utilities import add_roi_data
from joblib import Parallel, delayed
import statsmodels.formula.api as smf
import multiprocessing as mp
import pandas as pd
from copy import deepcopy
def from_img_threshold(image, threshold,
two_tailed=False,
save_as='',
):
"""Create an ROI based on an input volumetric image and a threshold value (absolute).
Parameters
----------
image : str or nibabel.nifti1.Nifti1Image
Either the path to a NIfTI image, or a NiBabel object containing the image data. Image can have any dimensionality.
threshold : int or float
Numeric value to use as threshold
two_tailed : bool, optional
Whether to include voxels with values below the negative of `threshold` in the ROI.
save_as : str, optional
Path to which to save the output.
Returns
-------
nibabel.nifti1.Nifti1Image
`nibabel.nifti1.Nifti1Image` object of the ROI. If `save_as` is specified, the ROI is also written to that path.
Notes
-----
Percentile support is planned, but not currently implemented.
"""
if isinstance(image,str):
image = path.abspath(path.expanduser(image))
image = nib.load(image)
data = image.get_data()
header = image.header
affine = image.affine
roi_pos = np.where(data>threshold,[True],[False])
if two_tailed:
roi_neg = np.where(data<-threshold,[True],[False])
roi = np.logical_or(roi_pos, roi_neg)
else:
roi = roi_pos
roi = 1*roi
roi = nib.Nifti1Image(roi, affine, header)
if save_as:
save_as = path.abspath(path.expanduser(save_as))
nib.save(roi, save_as)
return roi
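# Usage sketch (comment only; file paths are hypothetical): a two-tailed ROI at
# |t| > 3 could be built and saved with:
#   roi = from_img_threshold("~/ni_data/tstat.nii.gz", 3, two_tailed=True, save_as="~/ni_data/roi.nii.gz")
# With save_as set, the ROI is written to disk and the Nifti1Image object is still returned.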
def per_session(substitutions, roi_mask,
filename_template="~/ni_data/ofM.dr/l1/{l1_dir}/sub-{subject}/ses-{session}/sub-{subject}_ses-{session}_trial-{scan}_tstat.nii.gz",
roi_mask_normalize="",
):
"""
roi_mask : str
Path to the ROI mask for which to select the t-values.
roi_mask_normalize : str
Path to a ROI mask by the mean of whose t-values to normalize the t-values in roi_mask.
"""
if isinstance(roi_mask,str):
roi_mask = path.abspath(path.expanduser(roi_mask))
roi_mask = nib.load(roi_mask)
masker = NiftiMasker(mask_img=roi_mask)
n_jobs = mp.cpu_count()-2
roi_data = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(add_roi_data),
[filename_template]*len(substitutions),
[masker]*len(substitutions),
substitutions,
))
subject_dfs, voxel_dfs = zip(*roi_data)
subjectdf = pd.concat(subject_dfs)
voxeldf = pd.concat(voxel_dfs)
if roi_mask_normalize:
#TODO: how to relay this back to plot?
#figure="per-participant"
if isinstance(roi_mask_normalize,str):
mask_normalize = path.abspath(path.expanduser(roi_mask_normalize))
masker_normalize = NiftiMasker(mask_img=mask_normalize)
roi_data = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(add_roi_data),
[filename_template]*len(substitutions),
[masker_normalize]*len(substitutions),
substitutions,
))
subject_dfs_normalize, _ = zip(*roi_data)
subjectdf_normalize = pd.concat(subject_dfs_normalize)
subjectdf['t'] = subjectdf['t']/subjectdf_normalize['t']
#this is a nasty hack to mitigate +/-inf values appearing if the normalization ROI mean is close to 0
subjectdf_ = deepcopy(subjectdf)
subjectdf_= subjectdf_.replace([np.inf, -np.inf], np.nan).dropna(subset=["t"], how="all")
subjectdf=subjectdf.replace([-np.inf], subjectdf_[['t']].min(axis=0)[0])
subjectdf=subjectdf.replace([np.inf], subjectdf_[['t']].max(axis=0)[0])
return subjectdf, voxeldf
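# Usage sketch (comment only; paths and substitution values are hypothetical):
#   substitutions = [{"l1_dir": "generic", "subject": "5667", "session": "ofM", "scan": "EPI_CBV"}]
#   subjectdf, voxeldf = per_session(substitutions, "~/ni_data/templates/roi/dr_mask.nii.gz")
# Each substitution dict must provide every field referenced in filename_template above.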
def mean(img_path, mask_path):
"""Return the mean of the masked region of an image.
"""
mask = path.abspath(path.expanduser(mask_path))
if mask_path.endswith("roi"):
mask = loadmat(mask)["ROI"]
while mask.ndim != 3:
mask=mask[0]
img_path = path.abspath(path.expanduser(img_path))
img = nib.load(img_path)
print(mask)
print(np.shape(mask))
print(np.shape(img))
print(img[mask])
else:
masker = NiftiMasker(mask_img=mask)
add_roi_data(img_path,masker)
| 0.033244 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-get-output-data
# Author : Stuart Paterson
########################################################################
"""
Retrieve the output data files of a DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.registerSwitch( "D:", "Dir=", "Store the output in this directory" )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
outputDir = ''
for sw, v in Script.getUnprocessedSwitches():
if sw in ( 'D', 'Dir' ):
outputDir = v
for job in args:
result = dirac.getJobOutputData( job, destinationDir = outputDir )
if result['OK']:
print 'Job %s output data retrieved' % ( job )
else:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
| 0.027102 |
import tensorflow as tf
from numpy.random import RandomState
batch_size = 10
w1 = tf.Variable(tf.random_normal([2,3],stddev = 1,seed = 1))
w2 = tf.Variable(tf.random_normal([3,1],stddev = 1,seed = 1))
x = tf.placeholder(tf.float32,shape=(None,2))
y = tf.placeholder(tf.float32,shape=(None,1))
a = tf.nn.relu(tf.matmul(x,w1))
yhat = tf.nn.relu(tf.matmul(a,w2))
cross_entropy=-tf.reduce_mean(y*tf.log(tf.clip_by_value(yhat,1e-10,10)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
rdm = RandomState(1)
data_size = 516
X=rdm.rand(data_size,2)
Y=[[int(x1+x2 < 1)] for (x1,x2) in X ]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(w1))
print(sess.run(w2))
steps=11000
for i in range(steps):
start = i * batch_size % data_size
end = min(start +batch_size,data_size)
sess.run(train_step,feed_dict={x:X[start:end],y:Y[start:end]})
if i % 1000 == 0:
training_loss = sess.run(cross_entropy,feed_dict={x:X,y:Y})
print("run time %d after,training loss is %g" %(i,training_loss))
| 0.037205 |
# coding=utf-8
import os, sys, datetime, unicodedata
import xbmc, xbmcgui, xbmcvfs, urllib
import xml.etree.ElementTree as xmltree
from xml.dom.minidom import parse
from xml.sax.saxutils import escape as escapeXML
import thread
from traceback import print_exc
from unicodeutils import try_decode
import calendar
from time import gmtime, strftime
import random
import datafunctions
DATA = datafunctions.DataFunctions()
import library
LIBRARY = library.LibraryFunctions()
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
ADDON = sys.modules[ "__main__" ].ADDON
ADDONID = sys.modules[ "__main__" ].ADDONID
CWD = sys.modules[ "__main__" ].CWD
DATAPATH = os.path.join( xbmc.translatePath( "special://profile/addon_data/" ).decode('utf-8'), ADDONID )
SKINPATH = xbmc.translatePath( "special://skin/shortcuts/" ).decode('utf-8')
DEFAULTPATH = xbmc.translatePath( os.path.join( CWD, 'resources', 'shortcuts').encode("utf-8") ).decode("utf-8")
LANGUAGE = ADDON.getLocalizedString
KODIVERSION = xbmc.getInfoLabel( "System.BuildVersion" ).split(".")[0]
ACTION_CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, )
ACTION_CONTEXT_MENU = ( 117, )
ISEMPTY = "IsEmpty"
if int( KODIVERSION ) >= 17:
ISEMPTY = "String.IsEmpty"
if not xbmcvfs.exists(DATAPATH):
xbmcvfs.mkdir(DATAPATH)
def log(txt):
if ADDON.getSetting( "enable_logging" ) == "true":
try:
if isinstance (txt,str):
txt = txt.decode('utf-8')
message = u'%s: %s' % (ADDONID, txt)
xbmc.log(msg=message.encode('utf-8'), level=xbmc.LOGDEBUG)
except:
pass
def is_hebrew(text):
if type(text) != unicode:
text = text.decode('utf-8')
for chr in text:
if ord(chr) >= 1488 and ord(chr) <= 1514:
return True
return False
class GUI( xbmcgui.WindowXMLDialog ):
def __init__( self, *args, **kwargs ):
self.group = kwargs[ "group" ]
try:
self.defaultGroup = kwargs[ "defaultGroup" ]
if self.defaultGroup == "":
self.defaultGroup = None
except:
self.defaultGroup = None
self.nolabels = kwargs[ "nolabels" ]
self.groupname = kwargs[ "groupname" ]
self.shortcutgroup = 1
# Empty arrays for different shortcut types
self.thumbnailBrowseDefault = None
self.thumbnailNone = None
self.backgroundBrowse = None
self.backgroundBrowseDefault = None
self.widgetPlaylists = False
self.widgetPlaylistsType = None
self.widgetRename = True
# Variables for overrides
self.onBack = {}
self.saveWithProperty = []
# Has skin overridden GUI 308
self.alwaysReset = False
self.alwaysRestore = False
self.allListItems = []
# Additional button ID's we'll handle for setting custom properties
self.customPropertyButtons = {}
self.customToggleButtons = {}
# Context menu
self.contextControls = []
self.contextItems = []
# Onclicks
self.customOnClick = {}
self.windowProperties = {}
self.changeMade = False
log( 'Management module loaded' )
def onInit( self ):
if self.group == '':
self._close()
else:
self.window_id = xbmcgui.getCurrentWindowDialogId()
self.currentWindow = xbmcgui.Window( xbmcgui.getCurrentWindowDialogId() )
xbmcgui.Window(self.window_id).setProperty('groupname', self.group)
if self.groupname is not None:
xbmcgui.Window( self.window_id ).setProperty( 'groupDisplayName', self.groupname )
# Load widget and background names
self._load_overrides()
# Load context menu options
self._load_overrides_context()
# Load additional onclick overrides
self._load_overrides_onclick()
# Load additional button ID's we'll handle for custom properties
self._load_customPropertyButtons()
# Load current shortcuts
self.load_shortcuts()
# Set window title label
try:
if self.getControl( 500 ).getLabel() == "":
if self.group == "mainmenu":
self.getControl( 500 ).setLabel( LANGUAGE(32071) )
elif self.groupname is not None:
self.getControl( 500 ).setLabel( LANGUAGE(32080).replace( "::MENUNAME::", self.groupname ) )
else:
self.getControl( 500 ).setLabel( LANGUAGE(32072) )
except:
pass
# Set enabled condition for various controls
has111 = True
try:
self.getControl( 111 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
has111 = False
try:
self.getControl( 302 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
try:
self.getControl( 307 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
try:
self.getControl( 401 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
# Set button labels
if self.nolabels == "false":
try:
if self.getControl( 301 ).getLabel() == "":
self.getControl( 301 ).setLabel( LANGUAGE(32000) )
except:
log( "No add shortcut button on GUI (id 301)" )
try:
if self.getControl( 302 ).getLabel() == "":
self.getControl( 302 ).setLabel( LANGUAGE(32001) )
except:
log( "No delete shortcut button on GUI (id 302)" )
try:
if self.getControl( 303 ).getLabel() == "":
self.getControl( 303 ).setLabel( LANGUAGE(32002) )
except:
log( "No move shortcut up button on GUI (id 303)" )
try:
if self.getControl( 304 ).getLabel() == "":
self.getControl( 304 ).setLabel( LANGUAGE(32003) )
except:
log( "No move shortcut down button on GUI (id 304)" )
try:
if self.getControl( 305 ).getLabel() == "":
self.getControl( 305 ).setLabel( LANGUAGE(32025) )
except:
log( "Not set label button on GUI (id 305)" )
try:
if self.getControl( 306 ).getLabel() == "":
self.getControl( 306 ).setLabel( LANGUAGE(32026) )
except:
log( "No edit thumbnail button on GUI (id 306)" )
try:
if self.getControl( 307 ).getLabel() == "":
self.getControl( 307 ).setLabel( LANGUAGE(32027) )
except:
log( "Not edit action button on GUI (id 307)" )
try:
if self.getControl( 308 ).getLabel() == "":
self.getControl( 308 ).setLabel( LANGUAGE(32028) )
except:
log( "No reset shortcuts button on GUI (id 308)" )
try:
if self.getControl( 309 ).getLabel() == "":
self.getControl( 309 ).setLabel( LANGUAGE(32044) )
log( "Warning: Deprecated widget button (id 309)" )
except:
pass
try:
if self.getControl( 310 ).getLabel() == "":
self.getControl( 310 ).setLabel( LANGUAGE(32045) )
except:
log( "No background button on GUI (id 310)" )
try:
if self.getControl( 312 ).getLabel() == "":
self.getControl( 312 ).setLabel( LANGUAGE(32044) )
except:
log( "No widget button on GUI (id 309)" )
try:
if self.getControl( 401 ).getLabel() == "":
self.getControl( 401 ).setLabel( LANGUAGE(32048) )
except:
log( "No widget button on GUI (id 401)" )
# Load library shortcuts in thread
thread.start_new_thread( LIBRARY.loadAllLibrary, () )
if has111:
try:
self._display_shortcuts()
except:
pass
# Clear window property indicating we're loading
xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-loading" )
# ======================
# === LOAD/SAVE DATA ===
# ======================
def load_shortcuts( self, includeUserShortcuts = True, addShortcutsToWindow = True ):
log( "Loading shortcuts" )
DATA._clear_labelID()
isSubLevel = False
if "." in self.group and self.group.rsplit( ".", 1)[ 1 ].isdigit() and int( self.group.rsplit( ".", 1 )[ 1 ] ) in range( 1, 6 ):
isSubLevel = True
if includeUserShortcuts:
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, isSubLevel = isSubLevel )
else:
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, defaultsOnly = True )
#listitems = []
for shortcut in shortcuts.getroot().findall( "shortcut" ):
# Parse the shortcut, and add it to the list of shortcuts
item = self._parse_shortcut( shortcut )
self.allListItems.append( item[1] )
# Add all visible shortcuts to control 211
self._display_listitems()
def _display_listitems( self, focus = None ):
# Displays listitems that are visible from self.allListItems
# Initial properties
count = 0
visible = False
DATA._clear_labelID()
listitems = []
for listitem in self.allListItems:
# Get icon overrides
self._get_icon_overrides( listitem )
# Set order index in case its changed
listitem.setProperty( "skinshortcuts-orderindex", str( count ) )
shouldDisplay = True
# Check for a visibility condition
if listitem.getProperty( "visible-condition" ):
shouldDisplay = xbmc.getCondVisibility( listitem.getProperty( "visible-condition" ) )
if shouldDisplay == True:
visible = True
listitems.append( listitem )
# Increase our count
count += 1
# If there are no shortcuts, add a blank one
if visible == False:
listitem = xbmcgui.ListItem( LANGUAGE(32013), iconImage = "DefaultShortcut.png" )
listitem.setProperty( "Path", 'noop' )
listitem.setProperty( "icon", "DefaultShortcut.png" )
listitem.setProperty( "skinshortcuts-orderindex", str( count ) )
listitems.append( listitem )
self.allListItems.append( listitem )
self.getControl( 211 ).reset()
self.getControl( 211 ).addItems( listitems )
if focus is not None:
self.getControl( 211 ).selectItem( focus )
self._add_additional_properties()
def _parse_shortcut( self, item ):
# Parse a shortcut node
localLabel = DATA.local( item.find( "label" ).text )
localLabel2 = DATA.local( item.find( "label2" ).text )
# Get icon and thumb (and set to None if there isn't any)
icon = item.find( "icon" )
if icon is not None and icon.text:
icon = icon.text
else:
icon = "DefaultShortcut.png"
thumb = item.find( "thumb" )
if thumb is not None and thumb.text:
thumb = thumb.text
else:
thumb = ""
# If either localLabel[ 2 ] starts with a $, ask Kodi to parse it for us
if localLabel[ 2 ].startswith( "$" ):
localLabel[ 2 ] = xbmc.getInfoLabel( localLabel[ 2 ] )
if localLabel2[ 2 ].startswith( "$" ):
localLabel2[ 2 ] = xbmc.getInfoLabel( localLabel2[ 2 ] )
# Create the list item
listitem = xbmcgui.ListItem( label=localLabel[2], label2 = localLabel2[2], iconImage = xbmc.getInfoLabel(icon), thumbnailImage = xbmc.getInfoLabel(thumb) )
listitem.setProperty( "localizedString", localLabel[0] )
listitem.setProperty( "icon", icon )
listitem.setProperty( "thumbnail", thumb )
# Set the action
action = item.find( "action" ).text
self._add_additionalproperty( listitem, "translatedPath", action )
if "special://skin/" in action:
translate = xbmc.translatePath( "special://skin/" ).decode( "utf-8" )
action = action.replace( "special://skin/", translate )
listitem.setProperty( "path", action )
listitem.setProperty( "displayPath", action )
# Set the disabled property
if item.find( "disabled" ) is not None:
listitem.setProperty( "skinshortcuts-disabled", "True" )
else:
listitem.setProperty( "skinshortcuts-disabled", "False" )
# If there's an overridden icon, use it
overridenIcon = item.find( "override-icon" )
if overridenIcon is not None:
listitem.setIconImage( overridenIcon.text )
listitem.setProperty( "icon", overridenIcon.text )
listitem.setProperty( "original-icon", icon )
# Set the labelID, displayID, shortcutType
listitem.setProperty( "labelID", item.find( "labelID" ).text )
listitem.setProperty( "defaultID", item.find( "defaultID" ).text )
listitem.setProperty( "shortcutType", localLabel2[0] )
# Set any visible condition
isVisible = True
visibleCondition = item.find( "visible" )
if visibleCondition is not None:
listitem.setProperty( "visible-condition", visibleCondition.text )
isVisible = xbmc.getCondVisibility( visibleCondition.text )
# Check if the shortcut is locked
locked = item.find( "lock" )
if locked is not None:
if locked.text.lower() == "true" or locked.text == xbmc.getSkinDir():
listitem.setProperty( "LOCKED", locked.text )
# Additional properties
additionalProperties = item.find( "additional-properties" )
if additionalProperties is not None:
listitem.setProperty( "additionalListItemProperties", additionalProperties.text )
else:
listitem.setProperty( "additionalListItemProperties", "[]" )
self._add_additional_properties( listitem )
return [ isVisible, listitem ]
def _add_additional_properties( self, listitem = None ):
allProps = {}
backgroundName = None
backgroundPlaylistName = None
# If the listitem is None, grab the current listitem from 211
if listitem is None:
listitem = self.getControl( 211 ).getSelectedItem()
# Process current properties
currentProperties = listitem.getProperty( "skinshortcuts-allproperties" )
if currentProperties != "":
currentProperties = eval( currentProperties )
else:
currentProperties = {}
# Process all custom properties
customProperties = listitem.getProperty( "additionalListItemProperties" )
if customProperties != "":
customProperties = eval( customProperties )
for customProperty in customProperties:
if customProperty[1].startswith("$") and not customProperty[ 1 ].startswith( "$SKIN" ):
#Translate some listItem properties if needed so they're displayed correctly in the gui
allProps[ customProperty[ 0 ] ] = xbmc.getInfoLabel( customProperty[ 1 ] )
else:
allProps[ customProperty[ 0 ] ] = DATA.local( customProperty[ 1 ] )[ 2 ]
if customProperty[ 1 ].isdigit():
allProps[ "%s-NUM" %( customProperty[ 0 ] ) ] = customProperty[ 1 ]
# if this is backgroundName or backgroundPlaylistName, keep them so we can localise them properly
if customProperty[0] == "backgroundName":
backgroundName = customProperty[1]
if customProperty[1] == "backgroundPlaylistName":
backgroundPlaylistName = customProperty[1]
# If we've kept backgroundName, localise it with the updated playlist name
if backgroundName is not None and backgroundPlaylistName is not None:
allProps[ "backgroundName" ] = DATA.local( backgroundName )[2].replace( "::PLAYLIST::", backgroundPlaylistName )
# Get fallback properties
fallbackProperties, fallbacks = DATA._getCustomPropertyFallbacks( self.group )
# Add fallback properties
for key in fallbackProperties:
if key not in allProps.keys():
# Check whether we have a fallback for the value
for propertyMatch in fallbacks[ key ]:
matches = False
if propertyMatch[ 1 ] is None:
# This has no conditions, so it matched
matches = True
elif propertyMatch[ 1 ] in allProps.keys() and allProps[ propertyMatch[ 1 ] ] == propertyMatch[ 2 ]:
matches = True
if matches:
allProps[ key ] = propertyMatch[ 0 ]
break
# Get property requirements
otherProperties, requires, templateOnly = DATA._getPropertyRequires()
# Remove any properties whose requirements haven't been met
for key in otherProperties:
if key in allProps.keys() and key in requires.keys() and requires[ key ] not in allProps.keys():
# This properties requirements aren't met
allProps.pop( key )
if "%s-NUM" %( key ) in allProps.keys():
allProps.pop( "%s-NUM" %( key ) )
# Save the new properties to the listitem
listitem.setProperty( "skinshortcuts-allproperties", repr( allProps ) )
added, removed, changed = self.DictDiffer( allProps, currentProperties )
for key in added:
listitem.setProperty( key, allProps[ key ] )
for key in removed:
if key not in allProps.keys(): continue
listitem.setProperty( key, None )
for key in changed:
listitem.setProperty( key, allProps[ key ] )
# Save the new properties to the window
added, removed, changed = self.DictDiffer( allProps, self.windowProperties )
for key in added:
self.currentWindow.setProperty( key, allProps[ key ] )
for key in removed:
self.currentWindow.clearProperty( key )
for key in changed:
self.currentWindow.setProperty( key, allProps[ key ] )
self.windowProperties = allProps
def DictDiffer( self, current_dict, past_dict ):
# Get differences between dictionaries
self.current_dict, self.past_dict = current_dict, past_dict
set_current, set_past = set(current_dict.keys()), set(past_dict.keys())
intersect = set_current.intersection(set_past)
# Added Removed Changed
return( set_current - intersect, set_past - intersect, set(o for o in intersect if past_dict[o] != current_dict[o]) )
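# Illustrative example (comment only): comparing current {"a": 1, "b": 2} against
# past {"b": 3, "c": 4} yields added = {"a"}, removed = {"c"} and changed = {"b"},
# which is how _add_additional_properties decides which listitem/window
# properties to set, clear or update.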
def _get_icon_overrides( self, listitem, setToDefault = True, labelID = None ):
# Start by getting the labelID
if not labelID:
labelID = listitem.getProperty( "localizedString" )
if labelID == None or labelID == "":
labelID = listitem.getLabel()
labelID = DATA._get_labelID( DATA.local( labelID )[3], listitem.getProperty( "path" ) )
# Retrieve icon
icon = listitem.getProperty( "icon" )
oldicon = None
iconIsVar = False
if listitem.getProperty( "untranslatedIcon" ):
iconIsVar = True
# If the icon is a VAR or an INFO, we're going to translate it and set the untranslatedIcon property
if icon.startswith( "$" ):
listitem.setProperty( "untranslatedIcon", icon )
icon = xbmc.getInfoLabel( icon )
listitem.setProperty( "icon", icon )
listitem.setIconImage( icon )
iconIsVar = True
if icon.startswith("resource://"):
iconIsVar = True
# Check for overrides
tree = DATA._get_overrides_skin()
for elem in tree.findall( "icon" ):
if oldicon is None:
if ("labelID" in elem.attrib and elem.attrib.get( "labelID" ) == labelID) or ("image" in elem.attrib and elem.attrib.get( "image" ) == icon):
# LabelID matched
if "group" in elem.attrib:
if elem.attrib.get( "group" ) == self.group:
# Group also matches - change icon
oldicon = icon
icon = elem.text
elif "grouping" not in elem.attrib:
# No group - change icon
oldicon = icon
icon = elem.text
# If the skin doesn't have the icon, replace it with DefaultShortcut.png
setDefault = False
if ( not xbmc.skinHasImage( icon ) and setToDefault == True ) and not iconIsVar:
if oldicon == None:
oldicon = icon
setDefault = True
icon = "DefaultShortcut.png"
# If we changed the icon, update the listitem
if oldicon is not None:
listitem.setIconImage( icon )
listitem.setProperty( "icon", icon )
listitem.setProperty( "original-icon", oldicon )
if setDefault == True and setToDefault == True:
# We set this to the default icon, so we need to check if /that/ icon is overridden
self._get_icon_overrides( listitem, False, labelID )
def _save_shortcuts( self, weEnabledSystemDebug = False, weEnabledScriptDebug = False ):
# Entry point to save shortcuts - we will call the _save_shortcuts_function and, if it
# fails, enable debug options (if not enabled) + recreate the error, then offer to upload
# debug log (if relevant add-on is installed)
# Save the shortcuts
try:
self._save_shortcuts_function()
return
except:
print_exc()
log( "Failed to save shortcuts" )
# We failed to save the shortcuts
if weEnabledSystemDebug or weEnabledScriptDebug:
# Disable any logging we enabled
if weEnabledSystemDebug:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":false} } ' )
if weEnabledScriptDebug:
ADDON.setSetting( "enable_logging", "false" )
if xbmc.getCondVisibility( "System.HasAddon( script.kodi.loguploader)" ):
# Offer to upload a debug log
ret = xbmcgui.Dialog().yesno( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.kodi.loguploader)" )
else:
# Inform user menu couldn't be saved
xbmcgui.Dialog().ok( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32094 ) )
# We're done
return
# Enable any debug logging needed
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Settings.getSettings" }')
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_response = simplejson.loads(json_query)
enabledSystemDebug = False
enabledScriptDebug = False
if json_response.has_key('result') and json_response['result'].has_key('settings') and json_response['result']['settings'] is not None:
for item in json_response['result']['settings']:
if item["id"] == "debug.showloginfo":
if item["value"] == False:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":true} } ' )
enabledSystemDebug = True
if ADDON.getSetting( "enable_logging" ) != "true":
ADDON.setSetting( "enable_logging", "true" )
enabledScriptDebug = True
if enabledSystemDebug or enabledScriptDebug:
# We enabled one or more of the debug options, re-run this function
self._save_shortcuts( enabledSystemDebug, enabledScriptDebug )
else:
if xbmc.getCondVisibility( "System.HasAddon( script.kodi.loguploader )" ):
# Offer to upload a debug log
ret = xbmcgui.Dialog().yesno( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.kodi.loguploader)" )
else:
# Inform user menu couldn't be saved
xbmcgui.Dialog().ok( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32094 ) )
def _save_shortcuts_function( self ):
# Save shortcuts
if self.changeMade == True:
log( "Saving changes" )
# Create a new tree
tree = xmltree.ElementTree( xmltree.Element( "shortcuts" ) )
root = tree.getroot()
properties = []
labelIDChanges = []
labelIDChangesDict = {}
DATA._clear_labelID()
for listitem in self.allListItems:
# If the item has a label or an action, or a specified property from the override is present
if try_decode( listitem.getLabel() ) != LANGUAGE(32013) or listitem.getProperty( "path" ) != "noop" or self.hasSaveWithProperty( listitem ):
# Generate labelID, and mark if it has changed
labelID = listitem.getProperty( "labelID" )
newlabelID = labelID
# defaultID
defaultID = try_decode( listitem.getProperty( "defaultID" ) )
localizedString = listitem.getProperty( "localizedString" )
if localizedString is None or localizedString == "":
localLabel = DATA.local( listitem.getLabel() )
else:
localLabel = DATA.local( localizedString )
newlabelID = DATA._get_labelID( localLabel[3], listitem.getProperty( "path" ) )
if self.group == "mainmenu":
labelIDChanges.append( [labelID, newlabelID, defaultID] )
labelIDChangesDict[ labelID ] = newlabelID
# We want to save this
shortcut = xmltree.SubElement( root, "shortcut" )
xmltree.SubElement( shortcut, "defaultID" ).text = defaultID
# Label and label2
xmltree.SubElement( shortcut, "label" ).text = localLabel[0]
xmltree.SubElement( shortcut, "label2" ).text = DATA.local( listitem.getLabel2() )[0]
# Icon and thumbnail
if listitem.getProperty( "untranslatedIcon" ):
icon = listitem.getProperty( "untranslatedIcon" )
else:
if listitem.getProperty( "original-icon" ):
icon = listitem.getProperty( "original-icon" )
else:
icon = listitem.getProperty( "icon" )
thumb = listitem.getProperty( "thumbnail" )
xmltree.SubElement( shortcut, "icon" ).text = try_decode( icon )
xmltree.SubElement( shortcut, "thumb" ).text = try_decode( thumb )
# Action
xmltree.SubElement( shortcut, "action" ).text = try_decode( listitem.getProperty( "path" ) )
# Visible
if listitem.getProperty( "visible-condition" ):
xmltree.SubElement( shortcut, "visible" ).text = listitem.getProperty( "visible-condition" )
# Disabled
if listitem.getProperty( "skinshortcuts-disabled" ) == "True":
xmltree.SubElement( shortcut, "disabled" ).text = "True"
# Locked
if listitem.getProperty( "LOCKED" ):
xmltree.SubElement( shortcut, "lock" ).text = listitem.getProperty( "LOCKED" )
# Additional properties
if listitem.getProperty( "additionalListItemProperties" ):
additionalProperties = eval( listitem.getProperty( "additionalListItemProperties" ) )
if icon != "":
additionalProperties.append( [ "icon", icon ] )
if thumb != "":
additionalProperties.append( [ "thumb", thumb ] )
properties.append( [ newlabelID, additionalProperties ] )
# Check whether this is an additional level
isSubLevel = False
if "." in self.group and self.group.rsplit( ".", 1 )[ 1 ].isdigit() and int( self.group.rsplit( ".", 1 )[ 1 ] ) in range( 1, 6 ):
isSubLevel = True
# Save the shortcuts
DATA.indent( root )
path = os.path.join( DATAPATH , DATA.slugify( self.group, True, isSubLevel = isSubLevel ) + ".DATA.xml" )
path = try_decode( path )
tree.write( path.replace( ".shortcuts", ".DATA.xml" ), encoding="UTF-8" )
# Now make any labelID changes
copyDefaultProperties = []
while not len( labelIDChanges ) == 0:
# Get the first labelID change, and check that we're not changing anything from that
labelIDFrom = labelIDChanges[0][0]
labelIDTo = labelIDChanges[0][1]
defaultIDFrom = labelIDChanges[0][2]
# If labelIDFrom is empty, this is a new item so we want to set the From the same as the To
# (this will ensure any default .shortcuts file is copied across)
if labelIDFrom == "" or labelIDFrom is None:
labelIDFrom = labelIDTo
# Check that there isn't another item in the list whose 'From' is the same as our 'To'
# - if so, we're going to move our items elsewhere, and move 'em to the correct place later
# (This ensures we don't overwrite anything incorrectly)
if not len( labelIDChanges ) == 1:
for x in range( 1, len( labelIDChanges ) ):
if labelIDChanges[x][0] == labelIDTo:
tempLocation = str( random.randrange(0,9999999999999999) )
labelIDChanges[0][1] = tempLocation
labelIDChanges.append( [tempLocation, labelIDTo, defaultIDFrom] )
labelIDTo = tempLocation
break
# Make the change (0 - the main sub-menu, 1-5 - additional submenus )
for i in range( 0, 6 ):
if i == 0:
groupName = labelIDFrom
paths = [[os.path.join( DATAPATH, DATA.slugify( labelIDFrom, True ) + ".DATA.xml" ).encode( "utf-8" ), "Move"], [os.path.join( SKINPATH, DATA.slugify( defaultIDFrom ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [os.path.join( DEFAULTPATH, DATA.slugify( defaultIDFrom ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [None, "New"]]
target = os.path.join( DATAPATH, DATA.slugify( labelIDTo, True ) + ".DATA.xml" ).encode( "utf-8" )
else:
groupName = "%s.%s" %( labelIDFrom, str( i ) )
paths = [[os.path.join( DATAPATH, DATA.slugify( "%s.%s" %( labelIDFrom, str( i )), True, isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Move"], [os.path.join( SKINPATH, DATA.slugify( "%s.%s" %( defaultIDFrom, str( i ) ), isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [os.path.join( DEFAULTPATH, DATA.slugify( "%s.%s" %( defaultIDFrom, str( i ) ), isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"]]
target = os.path.join( DATAPATH, DATA.slugify( "%s.%s" %( labelIDTo, str( i ) ), True, isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" )
target = try_decode( target )
for path in paths:
path[0] = try_decode( path[0] )
path[1] = try_decode( path[1] )
if path[1] == "New":
tree = xmltree.ElementTree( xmltree.Element( "shortcuts" ) )
tree.write( target, encoding="UTF-8" )
log( "Creating empty file - %s" %( target ) )
break
elif xbmcvfs.exists( path[0] ):
# The XML file exists
if path[1] == "Move":
if path[0] != target:
# Move the original to the target path
log( "Moving " + path[0] + " > " + target )
xbmcvfs.rename( path[0], target )
else:
# We're copying the file (actually, we'll re-write the file without
# any LOCKED elements and with icons/thumbs adjusted to absolute paths)
newtree = xmltree.parse( path[0] )
for newnode in newtree.getroot().findall( "shortcut" ):
searchNode = newnode.find( "locked" )
if searchNode is not None:
newnode.remove( searchNode )
# Write it to the target
DATA.indent( newtree.getroot() )
newtree.write( target, encoding="utf-8" )
log( "Copying " + path[0] + " > " + target )
# We'll need to import its default properties, so save the groupName
copyDefaultProperties.append( groupName )
break
labelIDChanges.pop( 0 )
# Save widgets, backgrounds and custom properties
self._save_properties( properties, labelIDChangesDict, copyDefaultProperties )
# Note that we've saved stuff
xbmcgui.Window( 10000 ).setProperty( "skinshortcuts-reloadmainmenu", "True" )
def hasSaveWithProperty( self, listitem ):
for propertyName in self.saveWithProperty:
if listitem.getProperty( propertyName ) != "":
return True
return False
def _save_properties( self, properties, labelIDChanges, copyDefaults ):
# Save all additional properties (widgets, backgrounds, custom)
log( "Saving properties" )
currentProperties = []
# Get previously loaded properties
path = os.path.join( DATAPATH , xbmc.getSkinDir().decode('utf-8') + ".properties" )
if xbmcvfs.exists( path ):
# The properties file exists, load from it
listProperties = eval( xbmcvfs.File( path ).read() )
for listProperty in listProperties:
# listProperty[0] = groupname
# listProperty[1] = labelID
# listProperty[2] = property name
# listProperty[3] = property value
currentProperties.append( [listProperty[0], listProperty[1], listProperty[2], listProperty[3]] )
# Copy any items not in the current group to the array we'll save, and
# make any labelID changes whilst we're at it
saveData = []
for property in currentProperties:
#[ groupname, itemLabelID, property, value ]
if not property[0] == self.group:
if property[0] in labelIDChanges.keys():
property[0] = labelIDChanges[property[0]]
elif "." in property[0] and property[ 0 ].rsplit( ".", 1 )[ 1 ].isdigit():
# Additional menu
groupName, groupValue = property[ 0 ].rsplit( ".", 1 )
if groupName in labelIDChanges.keys() and int( groupValue ) in range( 1, 6 ):
property[0] = "%s.%s" %( labelIDChanges[ groupName ], groupValue )
saveData.append( property )
# Add all the properties we've been passed
for property in properties:
# property[0] = labelID
for toSave in property[1]:
# toSave[0] = property name
# toSave[1] = property value
saveData.append( [ self.group, property[0], toSave[0], toSave[1] ] )
# Add any default properties
for group in copyDefaults:
for defaultProperty in DATA.defaultProperties:
#[ groupname, itemLabelID, property, value ]
if defaultProperty[ 0 ] == group:
saveData.append( [ group, defaultProperty[ 1 ], defaultProperty[ 2 ], defaultProperty[ 3 ] ] )
# Try to save the file
try:
f = xbmcvfs.File( os.path.join( DATAPATH , xbmc.getSkinDir().decode('utf-8') + ".properties" ), 'w' )
f.write( repr( saveData ).replace( "],", "],\n" ) )
f.close()
except:
print_exc()
log( "### ERROR could not save file %s" % DATAPATH )
# Clear saved properties in DATA, so it will pick up any new ones next time we load a file
DATA.currentProperties = None
def _load_overrides( self ):
# Load various overrides from the skin, most notably backgrounds and thumbnails
self.backgrounds = "LOADING"
self.thumbnails = "LOADING"
# Load skin overrides
tree = DATA._get_overrides_skin()
# Should we allow the user to select a playlist as a widget...
elem = tree.find('widgetPlaylists')
if elem is not None and elem.text == "True":
self.widgetPlaylists = True
if "type" in elem.attrib:
self.widgetPlaylistsType = elem.attrib.get( "type" )
# Get backgrounds and thumbnails - we do this in a separate thread as the json used to load VFS paths
# is very expensive
thread.start_new_thread( self._load_backgrounds_thumbnails, () )
# Should we allow the user to browse for background images...
elem = tree.find('backgroundBrowse')
if elem is not None and elem.text.lower() in ("true", "single", "multi"):
self.backgroundBrowse = elem.text.lower()
if "default" in elem.attrib:
self.backgroundBrowseDefault = elem.attrib.get( "default" )
# Find the default thumbnail browse directory
elem = tree.find("thumbnailBrowseDefault")
if elem is not None and len(elem.text) > 0:
self.thumbnailBrowseDefault = elem.text
# Should we allow the user to rename a widget?
elem = tree.find( "widgetRename" )
if elem is not None and elem.text.lower() == "false":
self.widgetRename = False
# Does the skin override GUI 308?
elem = tree.find( "alwaysReset" )
if elem is not None and elem.text.lower() == "true":
self.alwaysReset = True
elem = tree.find( "alwaysRestore" )
if elem is not None and elem.text.lower() == "true":
self.alwaysRestore = True
# Do we enable 'Get More...' button when browsing Skin Helper widgets
elem = tree.find( "defaultwidgetsGetMore" )
if elem is not None and elem.text.lower() == "false":
LIBRARY.skinhelperWidgetInstall = False
# Are there any controls we don't close the window on 'back' for?
for elem in tree.findall( "onback" ):
self.onBack[ int( elem.text ) ] = int( elem.attrib.get( "to" ) )
# Are there any custom properties that shortcuts should be saved if present
for elem in tree.findall( "saveWithProperty" ):
self.saveWithProperty.append( elem.text )
def _load_overrides_context( self ):
# Load context menu settings from overrides
# Check we're running Krypton or later - we don't support the context menu on earlier versions
if int( KODIVERSION ) <= 16:
return
for overrideType in [ "skin", "script" ]:
# Load overrides
if overrideType == "skin":
tree = DATA._get_overrides_skin()
else:
tree = DATA._get_overrides_script()
# Check if context menu overrides in tree
elem = tree.find( "contextmenu" )
if elem is None:
# It isn't
continue
# Get which controls the context menu is enabled on
for control in elem.findall( "enableon" ):
self.contextControls.append( int( control.text ) )
# Get the context menu items
for item in elem.findall( "item" ):
if "control" not in item.attrib:
# There's no control specified, so it's no use to us
continue
condition = None
if "condition" in item.attrib:
condition = item.attrib.get( "condition" )
self.contextItems.append( ( int( item.attrib.get( "control" ) ), condition, item.text ) )
# If we get here, we've loaded context options, so we're done
return
def _load_overrides_onclick( self ):
# Load additional onclicks from overrides
# Get overrides
tree = DATA._get_overrides_skin()
# Get additional onclick handlers
for control in tree.findall( "onclick" ):
self.customOnClick[ int( control.get( "id" ) ) ] = control.text
def _load_backgrounds_thumbnails( self ):
# Load backgrounds (done in background thread)
backgrounds = []
thumbnails = []
# Load skin overrides
tree = DATA._get_overrides_skin()
# Get backgrounds
elems = tree.findall('background')
for elem in elems:
if "condition" in elem.attrib:
if not xbmc.getCondVisibility( elem.attrib.get( "condition" ) ):
continue
if elem.text.startswith("||BROWSE||"):
#we want to include images from a VFS path...
images = LIBRARY.getImagesFromVfsPath(elem.text.replace("||BROWSE||",""))
for image in images:
backgrounds.append( [image[0], image[1] ] )
elif "icon" in elem.attrib:
backgrounds.append( [elem.attrib.get( "icon" ), DATA.local( elem.attrib.get( 'label' ) )[2] ] )
else:
backgrounds.append( [elem.text, DATA.local( elem.attrib.get( 'label' ) )[2] ] )
self.backgrounds = backgrounds
# Get thumbnails
elems = tree.findall('thumbnail')
for elem in elems:
if "condition" in elem.attrib:
if not xbmc.getCondVisibility( elem.attrib.get( "condition" ) ):
continue
if elem.text.startswith("||BROWSE||"):
#we want to include images from a VFS path...
images = LIBRARY.getImagesFromVfsPath(elem.text.replace("||BROWSE||",""))
for image in images:
thumbnails.append( [image[0], image[1] ] )
elif elem.text == "::NONE::":
if "label" in elem.attrib:
self.thumbnailNone = elem.attrib.get( "label" )
else:
self.thumbnailNone = "231"
else:
thumbnails.append( [elem.text, DATA.local( elem.attrib.get( 'label' ) )[2] ] )
self.thumbnails = thumbnails
def _load_customPropertyButtons( self ):
# Load a list of addition button IDs we'll handle for setting additional properties
# Load skin overrides
tree = DATA._get_overrides_skin()
for elem in tree.findall( "propertySettings" ):
if "buttonID" in elem.attrib and "property" in elem.attrib:
self.customPropertyButtons[ int( elem.attrib.get( "buttonID" ) ) ] = elem.attrib.get( "property" )
elif "buttonID" in elem.attrib and "toggle" in elem.attrib:
self.customToggleButtons[ int( elem.attrib.get( "buttonID" ) ) ] = elem.attrib.get( "toggle" )
# ========================
# === GUI INTERACTIONS ===
# ========================
def onClick(self, controlID):
if controlID == 102:
# Move to previous type of shortcuts
self.shortcutgroup = self.shortcutgroup - 1
if self.shortcutgroup == 0:
self.shortcutgroup = LIBRARY.flatGroupingsCount()
self._display_shortcuts()
elif controlID == 103:
# Move to next type of shortcuts
self.shortcutgroup = self.shortcutgroup + 1
if self.shortcutgroup > LIBRARY.flatGroupingsCount():
self.shortcutgroup = 1
self._display_shortcuts()
elif controlID == 111:
# User has selected an available shortcut they want in their menu
log( "Select shortcut (111)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
altAction = None
if self.warnonremoval( listControl.getListItem( itemIndex ) ) == False:
return
# Copy the new shortcut
selectedItem = self.getControl( 111 ).getSelectedItem()
listitemCopy = self._duplicate_listitem( selectedItem, listControl.getListItem( itemIndex ) )
path = listitemCopy.getProperty( "path" )
if path.startswith( "||BROWSE||" ):
# If this is a plugin, call our plugin browser
returnVal = LIBRARY.explorer( ["plugin://" + path.replace( "||BROWSE||", "" )], "plugin://" + path.replace( "||BROWSE||", "" ), [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
# Convert backslashes to double-backslashes (windows fix)
newAction = returnVal.getProperty( "Path" )
newAction = newAction.replace( "\\", "\\\\" )
returnVal.setProperty( "path", newAction )
returnVal.setProperty( "displayPath", newAction )
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
elif path == "||UPNP||":
returnVal = LIBRARY.explorer( ["upnp://"], "upnp://", [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
elif path.startswith( "||SOURCE||" ):
returnVal = LIBRARY.explorer( [path.replace( "||SOURCE||", "" )], path.replace( "||SOURCE||", "" ), [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
if "upnp://" in returnVal.getProperty( "Path" ):
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
returnVal = LIBRARY._sourcelink_choice( returnVal )
if returnVal is not None:
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
else:
listitemCopy = None
elif path.startswith( "::PLAYLIST" ):
log( "Selected playlist" )
if not ">" in path or "VideoLibrary" in path:
# Give the user the choice of playing or displaying the playlist
dialog = xbmcgui.Dialog()
userchoice = dialog.yesno( LANGUAGE( 32040 ), LANGUAGE( 32060 ), "", "", LANGUAGE( 32061 ), LANGUAGE( 32062 ) )
# False: Display
# True: Play
if not userchoice:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-show" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-show" ) )
else:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-play" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-play" ) )
elif ">" in path:
# Give the user the choice of playing, displaying or party mode for the playlist
dialog = xbmcgui.Dialog()
userchoice = dialog.select( LANGUAGE( 32060 ), [ LANGUAGE( 32061 ), LANGUAGE( 32062 ), xbmc.getLocalizedString( 589 ) ] )
# 0 - Display
# 1 - Play
# 2 - Party mode
if not userchoice or userchoice == 0:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-show" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-show" ) )
elif userchoice == 1:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-play" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-play" ) )
else:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-party" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-party" ) )
if listitemCopy is None:
# Nothing was selected in the explorer
return
self.changeMade = True
# Replace the allListItems listitem with our new list item
self.allListItems[ orderIndex ] = listitemCopy
# Delete playlist (TO BE REMOVED!)
LIBRARY._delete_playlist( listControl.getListItem( itemIndex ).getProperty( "path" ) )
# Display list items
self._display_listitems( focus = itemIndex )
elif controlID in [301, 1301]:
# Add a new item
log( "Add item (301)" )
self.changeMade = True
listControl = self.getControl( 211 )
num = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( num ).getProperty( "skinshortcuts-orderindex" ) ) + 1
# Set default label and action
listitem = xbmcgui.ListItem( LANGUAGE(32013) )
listitem.setProperty( "Path", 'noop' )
listitem.setProperty( "additionalListItemProperties", "[]" )
# Add fallback custom property values
self._add_additional_properties( listitem )
# Add new item to both displayed list and list kept in memory
self.allListItems.insert( orderIndex, listitem )
self._display_listitems( num + 1 )
# If Control 1301 is used we want to add a new item and immediately select a shortcut
if controlID == 1301:
xbmc.executebuiltin('SendClick(401)')
elif controlID == 302:
# Delete an item
log( "Delete item (302)" )
listControl = self.getControl( 211 )
num = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( num ).getProperty( "skinshortcuts-orderindex" ) )
if self.warnonremoval( listControl.getListItem( num ) ) == False:
return
LIBRARY._delete_playlist( listControl.getListItem( num ).getProperty( "path" ) )
self.changeMade = True
# Remove item from memory list, and reload all list items
self.allListItems.pop( orderIndex )
self._display_listitems( num )
elif controlID == 303:
# Move item up in list
log( "Move up (303)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
if itemIndex == 0:
# Top item, can't move it up
return
self.changeMade = True
while True:
# Move the item one up in the list
self.allListItems[ orderIndex - 1 ], self.allListItems[ orderIndex ] = self.allListItems[ orderIndex ], self.allListItems[ orderIndex - 1 ]
# If we've just moved to the top of the list, break
if orderIndex == 1:
break
# Check if the item we've just swapped is visible
shouldBreak = True
if self.allListItems[ orderIndex ].getProperty( "visible-condition" ):
shouldBreak = xbmc.getCondVisibility( self.allListItems[ orderIndex ].getProperty( "visible-condition" ) )
if shouldBreak:
break
orderIndex -= 1
# Display the updated order
self._display_listitems( itemIndex - 1 )
elif controlID == 304:
# Move item down in list
log( "Move down (304)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
log( str( itemIndex ) + " : " + str( listControl.size() ) )
if itemIndex == listControl.size() - 1:
return
self.changeMade = True
while True:
# Move the item one down in the list
self.allListItems[ orderIndex + 1 ], self.allListItems[ orderIndex ] = self.allListItems[ orderIndex ], self.allListItems[ orderIndex + 1 ]
# If we've just moved to the bottom of the list, break
if orderIndex == len( self.allListItems ) - 1:
break
# Check if the item we've just swapped is visible
shouldBreak = True
if self.allListItems[ orderIndex ].getProperty( "visible-condition" ):
shouldBreak = xbmc.getCondVisibility( self.allListItems[ orderIndex ].getProperty( "visible-condition" ) )
if shouldBreak:
break
orderIndex += 1
# Display the updated order
self._display_listitems( itemIndex + 1 )
elif controlID == 305:
# Change label
log( "Change label (305)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Retrieve current label and labelID
label = listitem.getLabel()
oldlabelID = listitem.getProperty( "labelID" )
# If the item is blank, set the current label to empty
if try_decode( label ) == LANGUAGE(32013):
label = ""
# Get new label from keyboard dialog
if is_hebrew(label):
label = label.decode('utf-8')[::-1]
keyboard = xbmc.Keyboard( label, xbmc.getLocalizedString(528), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
label = keyboard.getText()
if label == "":
label = LANGUAGE(32013)
else:
return
self.changeMade = True
self._set_label( listitem, label )
elif controlID == 306:
# Change thumbnail
log( "Change thumbnail (306)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Get new thumbnail from browse dialog
dialog = xbmcgui.Dialog()
custom_thumbnail = dialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.thumbnailBrowseDefault)
if custom_thumbnail:
# Update the thumbnail
self.changeMade = True
listitem.setThumbnailImage( custom_thumbnail )
listitem.setProperty( "thumbnail", custom_thumbnail )
else:
return
elif controlID == 307:
# Change Action
log( "Change action (307)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
if not self.warnonremoval( listitem ):
return
# Retrieve current action
action = listitem.getProperty( "path" )
if action == "noop":
action = ""
if self.currentWindow.getProperty( "custom-grouping" ):
selectedShortcut = LIBRARY.selectShortcut(custom = True, currentAction = listitem.getProperty("path"), grouping = self.currentWindow.getProperty( "custom-grouping" ))
self.currentWindow.clearProperty( "custom-grouping" )
else:
selectedShortcut = LIBRARY.selectShortcut(custom = True, currentAction = listitem.getProperty("path"))
if not selectedShortcut:
# User cancelled
return
if selectedShortcut.getProperty( "chosenPath" ):
action = try_decode( selectedShortcut.getProperty( "chosenPath" ) )
elif selectedShortcut.getProperty( "path" ):
action = try_decode(selectedShortcut.getProperty( "path" ))
if action == "":
action = "noop"
if listitem.getProperty( "path" ) == action:
return
self.changeMade = True
LIBRARY._delete_playlist( listitem.getProperty( "path" ) )
# Update the action
listitem.setProperty( "path", action )
listitem.setProperty( "displaypath", action )
listitem.setLabel2( LANGUAGE(32024) )
listitem.setProperty( "shortcutType", "32024" )
elif controlID == 308:
# Reset shortcuts
log( "Reset shortcuts (308)" )
# Ask the user if they want to restore a shortcut, or reset to skin defaults
if self.alwaysReset:
# The skin has disabled the restore function, so set the response as if the user has chosen the reset to
# defaults option
response = 1
elif self.alwaysRestore:
# The skin has disabled the reset function, so set response as if the user has chosen to restore
# a skin-default shortcut
response = 0
else:
# No skin override, so let user decide to restore or reset
if not DATA.checkIfMenusShared():
# Also offer to import from another skin
response = xbmcgui.Dialog().select( LANGUAGE(32102), [ LANGUAGE(32103), LANGUAGE(32104), "Import from compatible skin" ] )
else:
response = xbmcgui.Dialog().select( LANGUAGE(32102), [ LANGUAGE(32103), LANGUAGE(32104) ] )
if response == -1:
# User cancelled
return
elif response == 0:
# We're going to restore a particular shortcut
restorePretty = []
restoreItems = []
# Save the labelID list from DATA
originalLabelIDList = DATA.labelIDList
DATA.labelIDList = []
# Get a list of all shortcuts that were originally in the menu and restore labelIDList
DATA._clear_labelID()
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, defaultsOnly = True )
DATA.labelIDList = originalLabelIDList
for shortcut in shortcuts.getroot().findall( "shortcut" ):
# Parse the shortcut
item = self._parse_shortcut( shortcut )
# Check if a shortcuts labelID is already in the list
if item[1].getProperty( "labelID" ) not in DATA.labelIDList:
restorePretty.append( LIBRARY._create(["", item[ 1 ].getLabel(), item[1].getLabel2(), { "icon": item[1].getProperty( "icon" ) }] ) )
restoreItems.append( item[1] )
if len( restoreItems ) == 0:
xbmcgui.Dialog().ok( LANGUAGE(32103), LANGUAGE(32105) )
return
# Let the user select a shortcut to restore
w = library.ShowDialog( "DialogSelect.xml", CWD, listing=restorePretty, windowtitle=LANGUAGE(32103) )
w.doModal()
restoreShortcut = w.result
del w
if restoreShortcut == -1:
# User cancelled
return
# We now have our shortcut to return. Add it to self.allListItems and labelID list
self.allListItems.append( restoreItems[ restoreShortcut ] )
DATA.labelIDList.append( restoreItems[ restoreShortcut ].getProperty( "labelID" ) )
self.changeMade = True
self._display_listitems()
elif response == 1:
# We're going to reset all the shortcuts
self.changeMade = True
# Delete any auto-generated source playlists
for x in range(0, self.getControl( 211 ).size()):
LIBRARY._delete_playlist( self.getControl( 211 ).getListItem( x ).getProperty( "path" ) )
self.getControl( 211 ).reset()
self.allListItems = []
# Call the load shortcuts function, but add that we don't want
# previously saved user shortcuts
self.load_shortcuts( False )
else:
# We're going to offer to import menus from another compatible skin
skinList, sharedFiles = DATA.getSharedSkinList()
if len( skinList ) == 0:
xbmcgui.Dialog().ok( LANGUAGE(32110), LANGUAGE(32109) )
return
# Let the user select a shortcut to restore
importMenu = xbmcgui.Dialog().select( LANGUAGE(32110), skinList )
if importMenu == -1:
# User cancelled
return
# Delete any auto-generated source playlists
for x in range(0, self.getControl( 211 ).size()):
LIBRARY._delete_playlist( self.getControl( 211 ).getListItem( x ).getProperty( "path" ) )
if importMenu == 0 and len( sharedFiles ) != 0:
# User has chosen to import the shared menu
DATA.importSkinMenu( sharedFiles )
else:
# User has chosen to import from a particular skin
DATA.importSkinMenu( DATA.getFilesForSkin( skinList[ importMenu ] ), skinList[ importMenu ] )
self.getControl( 211 ).reset()
self.allListItems = []
# Call the load shortcuts function
self.load_shortcuts( True )
elif controlID == 309:
# Choose widget
log( "Warning: Deprecated control 309 (Choose widget) selected")
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Check that widgets have been loaded
LIBRARY.loadLibrary( "widgets" )
# If we're setting for an additional widget, get its number
widgetID = ""
if self.currentWindow.getProperty( "widgetID" ):
widgetID += "." + self.currentWindow.getProperty( "widgetID" )
self.currentWindow.clearProperty( "widgetID" )
# Get the default widget for this item
defaultWidget = self.find_default( "widget", listitem.getProperty( "labelID" ), listitem.getProperty( "defaultID" ) )
# Generate list of widgets for select dialog
widget = [""]
widgetLabel = [LANGUAGE(32053)]
widgetName = [""]
widgetType = [ None ]
for key in LIBRARY.dictionaryGroupings[ "widgets-classic" ]:
widget.append( key[0] )
widgetName.append( "" )
widgetType.append( key[2] )
if key[0] == defaultWidget:
widgetLabel.append( key[1] + " (%s)" %( LANGUAGE(32050) ) )
else:
widgetLabel.append( key[1] )
# If playlists have been enabled for widgets, add them too
if self.widgetPlaylists:
# Ensure playlists are loaded
LIBRARY.loadLibrary( "playlists" )
# Add them
for playlist in LIBRARY.widgetPlaylistsList:
widget.append( "::PLAYLIST::" + playlist[0] )
widgetLabel.append( playlist[1] )
widgetName.append( playlist[2] )
widgetType.append( self.widgetPlaylistsType )
for playlist in LIBRARY.scriptPlaylists():
widget.append( "::PLAYLIST::" + playlist[0] )
widgetLabel.append( playlist[1] )
widgetName.append( playlist[2] )
widgetType.append( self.widgetPlaylistsType )
# Show the dialog
selectedWidget = xbmcgui.Dialog().select( LANGUAGE(32044), widgetLabel )
if selectedWidget == -1:
# User cancelled
return
elif selectedWidget == 0:
# User selected no widget
self._remove_additionalproperty( listitem, "widget" + widgetID )
self._remove_additionalproperty( listitem, "widgetName" + widgetID )
self._remove_additionalproperty( listitem, "widgetType" + widgetID )
self._remove_additionalproperty( listitem, "widgetPlaylist" + widgetID )
else:
if widget[selectedWidget].startswith( "::PLAYLIST::" ):
self._add_additionalproperty( listitem, "widget" + widgetID, "Playlist" )
self._add_additionalproperty( listitem, "widgetName" + widgetID, widgetName[selectedWidget] )
self._add_additionalproperty( listitem, "widgetPlaylist" + widgetID, widget[selectedWidget].strip( "::PLAYLIST::" ) )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, widgetName[selectedWidget] )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
else:
self._add_additionalproperty( listitem, "widgetName" + widgetID, widgetLabel[selectedWidget].replace( " (%s)" %( LANGUAGE(32050) ), "" ) )
self._add_additionalproperty( listitem, "widget" + widgetID, widget[selectedWidget] )
self._remove_additionalproperty( listitem, "widgetPlaylist" + widgetID )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, widgetLabel[selectedWidget].replace( " (%s)" %( LANGUAGE(32050) ), "" ) )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
if widgetType[ selectedWidget ] is not None:
self._add_additionalproperty( listitem, "widgetType" + widgetID, widgetType[ selectedWidget] )
else:
self._remove_additionalproperty( listitem, "widgetType" + widgetID )
self.changeMade = True
elif controlID == 312:
# Alternative widget select
log( "Choose widget (312)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# If we're setting for an additional widget, get its number
widgetID = ""
if self.currentWindow.getProperty( "widgetID" ):
widgetID = "." + self.currentWindow.getProperty( "widgetID" )
self.currentWindow.clearProperty( "widgetID" )
# Get the default widget for this item
defaultWidget = self.find_default( "widget", listitem.getProperty( "labelID" ), listitem.getProperty( "defaultID" ) )
# Ensure widgets are loaded
LIBRARY.loadLibrary( "widgets" )
# Let user choose widget
if listitem.getProperty( "widgetPath" ) == "":
selectedShortcut = LIBRARY.selectShortcut( grouping = "widget", showNone = True )
else:
selectedShortcut = LIBRARY.selectShortcut( grouping = "widget", showNone = True, custom = True, currentAction = listitem.getProperty( "widgetPath" ) )
if selectedShortcut is None:
# User cancelled
return
if selectedShortcut.getProperty( "Path" ) and selectedShortcut.getProperty( "custom" ) == "true":
# User has manually edited the widget path, so we'll update that property only
self._add_additionalproperty( listitem, "widgetPath" + widgetID, selectedShortcut.getProperty( "Path" ) )
self.changeMade = True
elif selectedShortcut.getProperty( "Path" ):
# User has chosen a widget
# Let user edit widget title, if they want & skin hasn't disabled it
widgetName = selectedShortcut.getProperty( "widgetName" )
if self.widgetRename:
if widgetName.startswith("$"):
widgetTempName = xbmc.getInfoLabel(widgetName)
else:
widgetTempName = DATA.local( widgetName )[2]
if is_hebrew(widgetTempName):
widgetTempName = widgetTempName[::-1]
keyboard = xbmc.Keyboard( widgetTempName, xbmc.getLocalizedString(16105), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ) and keyboard.getText() != "":
if widgetTempName != try_decode( keyboard.getText() ):
widgetName = try_decode( keyboard.getText() )
# Add any necessary reload parameter
widgetPath = LIBRARY.addWidgetReload( selectedShortcut.getProperty( "widgetPath" ) )
self._add_additionalproperty( listitem, "widget" + widgetID, selectedShortcut.getProperty( "widget" ) )
self._add_additionalproperty( listitem, "widgetName" + widgetID, widgetName )
self._add_additionalproperty( listitem, "widgetType" + widgetID, selectedShortcut.getProperty( "widgetType" ) )
self._add_additionalproperty( listitem, "widgetTarget" + widgetID, selectedShortcut.getProperty( "widgetTarget" ) )
self._add_additionalproperty( listitem, "widgetPath" + widgetID, widgetPath )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, selectedShortcut.getProperty( "widgetName" ) )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
self.changeMade = True
else:
# User has selected 'None'
self._remove_additionalproperty( listitem, "widget" + widgetID )
self._remove_additionalproperty( listitem, "widgetName" + widgetID )
self._remove_additionalproperty( listitem, "widgetType" + widgetID )
self._remove_additionalproperty( listitem, "widgetTarget" + widgetID )
self._remove_additionalproperty( listitem, "widgetPath" + widgetID )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, selectedShortcut.getProperty( "widgetName" ) )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
self.changeMade = True
return
elif controlID == 310:
# Choose background
log( "Choose background (310)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
usePrettyDialog = False
# Create lists for the select dialog, with image browse buttons if enabled
if self.backgroundBrowse == "true":
log( "Adding both browse options" )
background = ["", "", ""]
backgroundLabel = [LANGUAGE(32053), LANGUAGE(32051), LANGUAGE(32052)]
backgroundPretty = [ LIBRARY._create(["", LANGUAGE(32053), "", { "icon": "DefaultAddonNone.png" }] ), LIBRARY._create(["", LANGUAGE(32051), "", { "icon": "DefaultFile.png" }] ), LIBRARY._create(["", LANGUAGE(32052), "", { "icon": "DefaultFolder.png" }] ) ]
elif self.backgroundBrowse == "single":
log( "Adding single browse option" )
background = ["", ""]
backgroundLabel = [LANGUAGE(32053), LANGUAGE(32051)]
backgroundPretty = [ LIBRARY._create(["", LANGUAGE(32053), "", { "icon": "DefaultAddonNone.png" }] ), LIBRARY._create(["", LANGUAGE(32051), "", { "icon": "DefaultFile.png" }] ) ]
elif self.backgroundBrowse == "multi":
log( "Adding multi browse option" )
background = ["", ""]
backgroundLabel = [LANGUAGE(32053), LANGUAGE(32052)]
backgroundPretty = [ LIBRARY._create(["", LANGUAGE(32053), "", { "icon": "DefaultAddonNone.png" }] ), LIBRARY._create(["", LANGUAGE(32052), "", { "icon": "DefaultFolder.png" }] ) ]
else:
background = [""]
backgroundLabel = [LANGUAGE(32053)]
backgroundPretty = [ LIBRARY._create(["", LANGUAGE(32053), "", { "icon": "DefaultAddonNone.png" }] ) ]
# Wait to ensure that all backgrounds are loaded
count = 0
while self.backgrounds == "LOADING" and count < 20:
if xbmc.Monitor().waitForAbort(0.1):
return
count = count + 1
if self.backgrounds == "LOADING":
self.backgrounds = []
# Get the default background for this item
defaultBackground = self.find_default( "background", listitem.getProperty( "labelID" ), listitem.getProperty( "defaultID" ) )
# Generate list of backgrounds for the dialog
for key in self.backgrounds:
if "::PLAYLIST::" in key[1]:
for playlist in LIBRARY.widgetPlaylistsList:
background.append( [ key[0], playlist[0], playlist[1] ] )
backgroundLabel.append( key[1].replace( "::PLAYLIST::", playlist[1] ) )
backgroundPretty.append( LIBRARY._create(["", key[1].replace( "::PLAYLIST::", playlist[1] ), "", {}] ) )
for playlist in LIBRARY.scriptPlaylists():
background.append( [ key[0], playlist[0], playlist[1] ] )
backgroundLabel.append( key[1].replace( "::PLAYLIST::", playlist[1] ) )
backgroundPretty.append( LIBRARY._create(["", key[1].replace( "::PLAYLIST::", playlist[1] ), "", {}] ) )
else:
background.append( key[0] )
virtualImage = None
if key[0].startswith("$INFO") or key[0].startswith("$VAR"):
virtualImage = key[0].replace("$INFO[","").replace("$VAR[","").replace("]","")
virtualImage = xbmc.getInfoLabel(virtualImage)
#fix for resource addon images
if key[0].startswith("resource://"):
virtualImage = key[0]
label = key[ 1 ]
if label.startswith( "$INFO" ) or label.startswith( "$VAR" ):
label = xbmc.getInfoLabel( label )
if defaultBackground == key[ 0 ]:
label = "%s (%s)" %( label, LANGUAGE( 32050 ) )
backgroundLabel.append( label )
if xbmc.skinHasImage( key[ 0 ] ) or virtualImage:
usePrettyDialog = True
backgroundPretty.append( LIBRARY._create(["", label, "", { "icon": key[ 0 ] } ] ) )
else:
backgroundPretty.append( LIBRARY._create(["", label, "", {} ] ) )
if usePrettyDialog:
w = library.ShowDialog( "DialogSelect.xml", CWD, listing=backgroundPretty, windowtitle=LANGUAGE(32045) )
w.doModal()
selectedBackground = w.result
del w
else:
# Show the dialog
selectedBackground = xbmcgui.Dialog().select( LANGUAGE(32045), backgroundLabel )
if selectedBackground == -1:
# User cancelled
return
elif selectedBackground == 0:
# User selected no background
self._remove_additionalproperty( listitem, "background" )
self._remove_additionalproperty( listitem, "backgroundName" )
self._remove_additionalproperty( listitem, "backgroundPlaylist" )
self._remove_additionalproperty( listitem, "backgroundPlaylistName" )
self.changeMade = True
return
elif self.backgroundBrowse and (selectedBackground == 1 or (self.backgroundBrowse == "true" and selectedBackground == 2)):
# User has chosen to browse for an image/folder
imagedialog = xbmcgui.Dialog()
if selectedBackground == 1 and self.backgroundBrowse != "multi": # Single image
custom_image = imagedialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.backgroundBrowseDefault)
else: # Multi-image
custom_image = imagedialog.browse( 0 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.backgroundBrowseDefault)
if custom_image:
self._add_additionalproperty( listitem, "background", custom_image )
self._add_additionalproperty( listitem, "backgroundName", custom_image )
self._remove_additionalproperty( listitem, "backgroundPlaylist" )
self._remove_additionalproperty( listitem, "backgroundPlaylistName" )
else:
# User cancelled
return
else:
if isinstance( background[selectedBackground], list ):
# User has selected a playlist background
self._add_additionalproperty( listitem, "background", background[selectedBackground][0] )
self._add_additionalproperty( listitem, "backgroundName", backgroundLabel[selectedBackground].replace("::PLAYLIST::", background[selectedBackground][1]) )
self._add_additionalproperty( listitem, "backgroundPlaylist", background[selectedBackground][1] )
self._add_additionalproperty( listitem, "backgroundPlaylistName", background[selectedBackground][2] )
else:
# User has selected a normal background
self._add_additionalproperty( listitem, "background", background[selectedBackground] )
self._add_additionalproperty( listitem, "backgroundName", backgroundLabel[selectedBackground].replace( " (%s)" %( LANGUAGE(32050) ), "" ) )
self._remove_additionalproperty( listitem, "backgroundPlaylist" )
self._remove_additionalproperty( listitem, "backgroundPlaylistName" )
self.changeMade = True
elif controlID == 311:
# Choose thumbnail
log( "Choose thumbnail (311)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Create lists for the select dialog
thumbnail = [""]
thumbnailLabel = [LIBRARY._create(["", LANGUAGE(32096), "", {}] )]
# Add a None option if the skin specified it
if self.thumbnailNone:
thumbnail.append( "" )
thumbnailLabel.insert( 0, LIBRARY._create(["", self.thumbnailNone, "", {}] ) )
# Ensure thumbnails have been loaded
count = 0
while self.thumbnails == "LOADING" and count < 20:
if xbmc.Monitor().waitForAbort(0.1):
return
count = count + 1
if self.thumbnails == "LOADING":
self.thumbnails = []
# Generate list of thumbnails for the dialog
for key in self.thumbnails:
log( repr( key[ 0 ] ) + " " + repr( key[ 1 ] ) )
thumbnail.append( key[0] )
thumbnailLabel.append( LIBRARY._create(["", key[ 1 ], "", {"icon": key[ 0 ] }] ) )
# Show the dialog
w = library.ShowDialog( "DialogSelect.xml", CWD, listing=thumbnailLabel, windowtitle="Select thumbnail" )
w.doModal()
selectedThumbnail = w.result
del w
if selectedThumbnail == -1:
# User cancelled
return
elif self.thumbnailNone and selectedThumbnail == 0:
# User has chosen 'None'
listitem.setThumbnailImage( None )
listitem.setProperty( "thumbnail", None )
elif (not self.thumbnailNone and selectedThumbnail == 0) or (self.thumbnailNone and selectedThumbnail == 1):
# User has chosen to browse for an image
imagedialog = xbmcgui.Dialog()
if self.thumbnailBrowseDefault:
custom_image = imagedialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.thumbnailBrowseDefault)
else:
custom_image = imagedialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.backgroundBrowseDefault)
if custom_image:
listitem.setThumbnailImage( custom_image )
listitem.setProperty( "thumbnail", custom_image )
else:
# User cancelled
return
else:
# User has selected a normal thumbnail
listitem.setThumbnailImage( thumbnail[ selectedThumbnail ] )
listitem.setProperty( "thumbnail", thumbnail[ selectedThumbnail ] )
self.changeMade = True
elif controlID == 313:
# Toggle disabled
log( "Toggle disabled (313)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Retrieve and toggle current disabled state
disabled = listitem.getProperty( "skinshortcuts-disabled" )
if disabled == "True":
listitem.setProperty( "skinshortcuts-disabled", "False" )
else:
# Display any warning
if not self.warnonremoval( listitem ):
return
# Toggle to true, add highlighting to label
listitem.setProperty( "skinshortcuts-disabled", "True" )
self.changeMade = True
elif controlID == 401:
# Select shortcut
log( "Select shortcut (401)" )
num = self.getControl( 211 ).getSelectedPosition()
orderIndex = int( self.getControl( 211 ).getListItem( num ).getProperty( "skinshortcuts-orderindex" ) )
if not self.warnonremoval( self.getControl( 211 ).getListItem( num ) ):
return
if self.currentWindow.getProperty( "custom-grouping" ):
selectedShortcut = LIBRARY.selectShortcut( grouping = self.currentWindow.getProperty( "custom-grouping" ) )
self.currentWindow.clearProperty( "custom-grouping" )
else:
selectedShortcut = LIBRARY.selectShortcut()
if selectedShortcut is not None:
listitemCopy = self._duplicate_listitem( selectedShortcut, self.getControl( 211 ).getListItem( num ) )
#add a translated version of the path as property
self._add_additionalproperty( listitemCopy, "translatedPath", selectedShortcut.getProperty( "path" ) )
if selectedShortcut.getProperty( "smartShortcutProperties" ):
for listitemProperty in eval( selectedShortcut.getProperty( "smartShortcutProperties" ) ):
self._add_additionalproperty( listitemCopy, listitemProperty[0], listitemProperty[1] )
#set default background for this item (if any)
defaultBackground = self.find_defaultBackground( listitemCopy.getProperty( "labelID" ), listitemCopy.getProperty( "defaultID" ) )
if defaultBackground:
self._add_additionalproperty( listitemCopy, "background", defaultBackground["path"] )
self._add_additionalproperty( listitemCopy, "backgroundName", defaultBackground["label"] )
#set default widget for this item (if any)
defaultWidget = self.find_defaultWidget( listitemCopy.getProperty( "labelID" ), listitemCopy.getProperty( "defaultID" ) )
if defaultWidget:
self._add_additionalproperty( listitemCopy, "widget", defaultWidget["widget"] )
self._add_additionalproperty( listitemCopy, "widgetName", defaultWidget["name"] )
self._add_additionalproperty( listitemCopy, "widgetType", defaultWidget["type"] )
self._add_additionalproperty( listitemCopy, "widgetPath", defaultWidget["path"] )
self._add_additionalproperty( listitemCopy, "widgetTarget", defaultWidget["target"] )
if selectedShortcut.getProperty( "chosenPath" ):
listitemCopy.setProperty( "path", selectedShortcut.getProperty( "chosenPath" ) )
listitemCopy.setProperty( "displayPath", selectedShortcut.getProperty( "chosenPath" ) )
LIBRARY._delete_playlist( self.getControl( 211 ).getListItem( num ).getProperty( "path" ) )
self.changeMade = True
self.allListItems[ orderIndex ] = listitemCopy
self._display_listitems( num )
else:
return
elif controlID == 405 or controlID == 406 or controlID == 407 or controlID == 408 or controlID == 409 or controlID == 410:
# Launch management dialog for submenu
if xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-loading" ) and int( calendar.timegm( gmtime() ) ) - int( xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-loading" ) ) <= 5:
return
log( "Launching management dialog for submenu/additional menu (" + str( controlID ) + ")" )
xbmcgui.Window( 10000 ).setProperty( "skinshortcuts-loading", str( calendar.timegm( gmtime() ) ) )
# Get the group we're about to edit
launchGroup = self.getControl( 211 ).getSelectedItem().getProperty( "labelID" )
launchDefaultGroup = self.getControl( 211 ).getSelectedItem().getProperty( "defaultID" )
groupName = self.getControl( 211 ).getSelectedItem().getLabel()
if launchDefaultGroup is None:
launchDefaultGroup = ""
# If the labelID property is empty, we need to generate one
if launchGroup is None or launchGroup == "":
DATA._clear_labelID()
num = self.getControl( 211 ).getSelectedPosition()
orderIndex = self.getControl( 211 ).getListItem( num )
# Get the labelID's of all other menu items
for listitem in self.allListItems:
if listitem != orderIndex:
DATA._get_labelID( listitem.getProperty( "labelID" ), listitem.getProperty( "path" ) )
# Now generate labelID for this menu item, if it doesn't have one
labelID = self.getControl( 211 ).getListItem( num ).getProperty( "localizedString" )
if labelID is None or labelID == "":
launchGroup = DATA._get_labelID( self.getControl( 211 ).getListItem( num ).getLabel(), self.getControl( 211 ).getListItem( num ).getProperty( "path" ) )
else:
launchGroup = DATA._get_labelID( labelID, self.getControl( 211 ).getListItem( num ).getProperty( "path" ) )
self.getControl( 211 ).getListItem( num ).setProperty( "labelID", launchGroup )
# Check if we're launching a specific additional menu
if controlID == 406:
launchGroup = launchGroup + ".1"
launchDefaultGroup = launchDefaultGroup + ".1"
elif controlID == 407:
launchGroup = launchGroup + ".2"
launchDefaultGroup = launchDefaultGroup + ".2"
elif controlID == 408:
launchGroup = launchGroup + ".3"
launchDefaultGroup = launchDefaultGroup + ".3"
elif controlID == 409:
launchGroup = launchGroup + ".4"
launchDefaultGroup = launchDefaultGroup + ".4"
elif controlID == 410:
launchGroup = launchGroup + ".5"
launchDefaultGroup = launchDefaultGroup + ".5"
# Check if 'level' property has been set
elif self.currentWindow.getProperty("level"):
launchGroup = launchGroup + "." + self.currentWindow.getProperty("level")
self.currentWindow.clearProperty("level")
# Check if 'groupname' property has been set
if self.currentWindow.getProperty( "overrideName" ):
groupName = self.currentWindow.getProperty( "overrideName" )
self.currentWindow.clearProperty( "overrideName" )
# Execute the script
self.currentWindow.setProperty( "additionalDialog", "True" )
import gui
ui = gui.GUI( "script-skinshortcuts.xml", CWD, "default", group=launchGroup, defaultGroup=launchDefaultGroup, nolabels=self.nolabels, groupname=groupName )
ui.doModal()
del ui
self.currentWindow.clearProperty( "additionalDialog" )
if controlID in self.customToggleButtons:
# Toggle a custom property
log( "Toggling custom property (%s)" %( str( controlID ) ) )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
propertyName = self.customToggleButtons[ controlID ]
self.changeMade = True
if listitem.getProperty( propertyName ) == "True":
self._remove_additionalproperty( listitem, propertyName )
else:
self._add_additionalproperty( listitem, propertyName, "True" )
if controlID == 404 or controlID in self.customPropertyButtons:
# Set custom property
# We do this last so that, if the skinner has specified that a default Skin Shortcuts control is used to set the
# property, that control's handling has completed before we get here.
log( "Setting custom property (%s)" %( str( controlID ) ) )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
propertyName = ""
propertyValue = ""
usePrettyDialog = False
# Retrieve the custom property
if self.currentWindow.getProperty( "customProperty" ):
propertyName = self.currentWindow.getProperty( "customProperty" )
self.currentWindow.clearProperty( "customProperty" )
propertyValue = self.currentWindow.getProperty( "customValue" )
self.currentWindow.clearProperty( "customValue" )
if propertyName == "thumb":
# Special treatment if we try to set the thumb with the property method
listitem.setThumbnailImage( xbmc.getInfoLabel(propertyValue) )
listitem.setIconImage( xbmc.getInfoLabel(propertyValue) )
listitem.setProperty( "thumbnail", propertyValue )
listitem.setProperty( "icon", propertyValue )
if not propertyValue:
listitem.setProperty( "original-icon", None )
elif not propertyValue:
# No value set, so remove it from additionalListItemProperties
self._remove_additionalproperty( listitem, propertyName )
else:
# Set the property
self._add_additionalproperty( listitem, propertyName, propertyValue )
# notify that we have changes
self.changeMade = True
elif controlID != 404 or self.currentWindow.getProperty( "chooseProperty" ):
if controlID == 404:
# Button 404, so we get the property from the window property
propertyName = self.currentWindow.getProperty( "chooseProperty" )
self.currentWindow.clearProperty( "chooseProperty" )
else:
# Custom button, so we get the property from the dictionary
propertyName = self.customPropertyButtons[ controlID ]
# Get the overrides
tree = DATA._get_overrides_skin()
# Set options
dialogTitle = LANGUAGE(32101)
showNone = True
imageBrowse = False
browseSingle = False
browseMulti = False
for elem in tree.findall( "propertySettings" ):
# Get property settings based on property value matching
if "property" in elem.attrib and elem.attrib.get( "property" ) == propertyName:
if "title" in elem.attrib:
dialogTitle = elem.attrib.get( "title" )
if "showNone" in elem.attrib and elem.attrib.get( "showNone" ).lower() == "false":
showNone = False
if "imageBrowse" in elem.attrib and elem.attrib.get( "imageBrowse" ).lower() == "true":
imageBrowse = True
# Create lists for the select dialog
property = []
propertyLabel = []
propertyPretty = []
if showNone:
# Add a 'None' option to the list
property.append( "" )
propertyLabel.append( LANGUAGE(32053) )
propertyPretty.append( LIBRARY._create(["", LANGUAGE(32053), "", { "icon": "DefaultAddonNone.png" }] ) )
if imageBrowse:
# Add browse single/multi options to the list
property.extend( [ "", "" ] )
propertyLabel.extend( [ LANGUAGE(32051), LANGUAGE(32052) ] )
propertyPretty.extend( [ LIBRARY._create(["", LANGUAGE(32051), "", { "icon": "DefaultFile.png" }] ), LIBRARY._create(["", LANGUAGE(32052), "", { "icon": "DefaultFolder.png" }] ) ] )
# Get all the skin-defined properties
for elem in tree.findall( "property" ):
if "property" in elem.attrib and elem.attrib.get( "property" ) == propertyName:
if "condition" in elem.attrib and not xbmc.getCondVisibility( elem.attrib.get( "condition" ) ):
continue
foundProperty = elem.text
property.append( foundProperty )
if "icon" in elem.attrib:
usePrettyDialog = True
iconImage = { "icon": elem.attrib.get( "icon" ) }
else:
iconImage = {}
if "label" in elem.attrib:
labelValue = elem.attrib.get( "label" )
if labelValue.startswith( "$INFO" ) or labelValue.startswith( "$VAR" ):
propertyLabel.append( xbmc.getInfoLabel( labelValue ) )
propertyPretty.append( LIBRARY._create( [ "", xbmc.getInfoLabel( labelValue ), "", iconImage ] ) )
else:
propertyLabel.append( DATA.local( labelValue )[ 2 ] )
propertyPretty.append( LIBRARY._create( [ "", labelValue, "", iconImage ] ) )
else:
propertyLabel.append( DATA.local( foundProperty )[2] )
propertyPretty.append( LIBRARY._create( [ "", foundProperty, "", iconImage ] ) )
# Show the dialog
if usePrettyDialog:
w = library.ShowDialog( "DialogSelect.xml", CWD, listing=propertyPretty, windowtitle=dialogTitle )
w.doModal()
selectedProperty = w.result
del w
else:
selectedProperty = xbmcgui.Dialog().select( dialogTitle, propertyLabel )
if selectedProperty == -1:
# User cancelled
return
elif selectedProperty == 0 and showNone:
# User selected no property
self.changeMade = True
self._remove_additionalproperty( listitem, propertyName )
elif ( selectedProperty == 0 and not showNone and imageBrowse ) or ( selectedProperty == 1 and showNone and imageBrowse ):
# User has selected to browse for a single image
browseSingle = True
elif ( selectedProperty == 1 and not showNone and imageBrowse ) or ( selectedProperty == 2 and showNone and imageBrowse ):
# User has selected to browse for a multi-image
browseMulti = True
else:
self.changeMade = True
self._add_additionalproperty( listitem, propertyName, property[ selectedProperty ] )
if browseSingle or browseMulti:
# User has chosen to browse for an image/folder
imagedialog = xbmcgui.Dialog()
if browseSingle: # Single image
custom_image = imagedialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, None )
else: # Multi-image
custom_image = imagedialog.browse( 0 , xbmc.getLocalizedString(1030), 'files', '', True, False, None )
if custom_image:
self.changeMade = True
self._add_additionalproperty( listitem, propertyName, custom_image )
else:
# User cancelled
return
else:
# Neither the customProperty nor the chooseProperty window property was set, so clear any custom value and return
self.currentWindow.clearProperty( "customValue" )
return
# Custom onclick actions
if controlID in self.customOnClick:
xbmc.executebuiltin( self.customOnClick[ controlID ] )
# ========================
# === HELPER FUNCTIONS ===
# ========================
def _display_shortcuts( self ):
# Load the currently selected shortcut group
newGroup = LIBRARY.retrieveGroup( self.shortcutgroup )
label = DATA.local( newGroup[0] )[2]
self.getControl( 111 ).reset()
for item in newGroup[1]:
newItem = self._duplicate_listitem( item )
if item.getProperty( "action-show" ):
newItem.setProperty( "action-show", item.getProperty( "action-show" ) )
newItem.setProperty( "action-play", item.getProperty( "action-play" ) )
newItem.setProperty( "action-party", item.getProperty( "action-party" ) )
self.getControl( 111 ).addItem( newItem )
self.getControl( 101 ).setLabel( label + " (%s)" %self.getControl( 111 ).size() )
def _duplicate_listitem( self, listitem, originallistitem = None ):
# Create a copy of an existing listitem
listitemCopy = xbmcgui.ListItem(label=listitem.getLabel(), label2=listitem.getLabel2(), iconImage=listitem.getProperty("icon"), thumbnailImage=listitem.getProperty("thumbnail"))
listitemCopy.setProperty( "path", listitem.getProperty("path") )
listitemCopy.setProperty( "displaypath", listitem.getProperty("path") )
listitemCopy.setProperty( "icon", listitem.getProperty("icon") )
listitemCopy.setProperty( "thumbnail", listitem.getProperty("thumbnail") )
listitemCopy.setProperty( "localizedString", listitem.getProperty("localizedString") )
listitemCopy.setProperty( "shortcutType", listitem.getProperty("shortcutType") )
listitemCopy.setProperty( "skinshortcuts-disabled", listitem.getProperty( "skinshortcuts-disabled" ) )
if listitem.getProperty( "LOCKED" ):
listitemCopy.setProperty( "LOCKED", listitem.getProperty( "LOCKED" ) )
if listitem.getProperty( "defaultID" ):
listitemCopy.setProperty( "defaultID", listitem.getProperty( "defaultID" ) )
elif listitem.getProperty( "labelID" ):
listitemCopy.setProperty( "defaultID", listitem.getProperty( "labelID" ) )
else:
listitemCopy.setProperty( "defaultID", DATA._get_labelID( DATA.local( listitem.getProperty( "localizedString" ) )[3], listitem.getProperty( "path" ), True ) )
# If the item has an untranslated icon, set the icon image to it
if listitem.getProperty( "untranslatedIcon" ):
icon = listitem.getProperty( "untranslatedIcon" )
listitemCopy.setIconImage( icon )
listitemCopy.setProperty( "icon", icon )
# Revert to original icon (because we'll override it again in a minute!)
if listitem.getProperty( "original-icon" ):
icon = listitem.getProperty( "original-icon" )
if icon == "":
icon = None
listitemCopy.setIconImage( icon )
listitemCopy.setProperty( "icon", icon )
# If we haven't been passed an originallistitem, set the following from the listitem we were passed
foundProperties = []
if originallistitem is None:
listitemCopy.setProperty( "labelID", listitem.getProperty("labelID") )
if listitem.getProperty( "visible-condition" ):
listitemCopy.setProperty( "visible-condition", listitem.getProperty( "visible-condition" ) )
if listitem.getProperty( "additionalListItemProperties" ):
listitemCopy.setProperty( "additionalListItemProperties", listitem.getProperty( "additionalListItemProperties" ) )
else:
# Set these from the original item we were passed (this will keep original labelID and additional properties
# intact)
listitemCopy.setProperty( "labelID", originallistitem.getProperty( "labelID" ) )
if originallistitem.getProperty( "visible-condition" ):
listitemCopy.setProperty( "visible-condition", originallistitem.getProperty( "visible-condition" ) )
if originallistitem.getProperty( "additionalListItemProperties" ):
listitemCopy.setProperty( "additionalListItemProperties", originallistitem.getProperty( "additionalListItemProperties" ) )
# Add custom properties
self._add_additional_properties( listitemCopy )
return listitemCopy
def _add_additionalproperty( self, listitem, propertyName, propertyValue ):
# Add an item to the additional properties of a user item
properties = []
if listitem.getProperty( "additionalListItemProperties" ):
properties = eval( listitem.getProperty( "additionalListItemProperties" ) )
foundProperty = False
for property in properties:
if property[0] == propertyName:
foundProperty = True
property[1] = DATA.local( propertyValue )[0]
if foundProperty == False:
properties.append( [propertyName, DATA.local( propertyValue )[0] ] )
#translate any INFO labels (if needed) so they will be displayed correctly in the gui
if propertyValue:
if propertyValue.startswith("$") and not propertyValue.startswith( "$SKIN" ):
listitem.setProperty( propertyName, xbmc.getInfoLabel(propertyValue) )
else:
listitem.setProperty( propertyName, DATA.local( propertyValue )[2] )
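# If the value is purely numeric, also expose it under a '<name>-NUM' property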
if propertyValue.isdigit():
listitem.setProperty( "%s-NUM" %( propertyName ), propertyValue )
listitem.setProperty( "additionalListItemProperties", repr( properties ) )
self._add_additional_properties( listitem )
def _remove_additionalproperty( self, listitem, propertyName ):
# Remove an item from the additional properties of a user item
properties = []
hasProperties = False
if listitem.getProperty( "additionalListItemProperties" ):
properties = eval( listitem.getProperty( "additionalListItemProperties" ) )
hasProperties = True
# Iterate over a copy so entries can be removed safely while looping
for property in list( properties ):
if property[0] == propertyName or "%s-NUM" %( property[0] ) == "%s-NUM" %( propertyName ):
properties.remove( property )
listitem.setProperty( property[0], None )
listitem.setProperty( "additionalListItemProperties", repr( properties ) )
self._add_additional_properties( listitem )
def warnonremoval( self, item ):
# This function will warn the user before they modify a settings link
# (if the skin has enabled this function)
tree = DATA._get_overrides_skin()
for elem in tree.findall( "warn" ):
if elem.text.lower() == item.getProperty( "displaypath" ).lower():
# We want to show the message :)
message = DATA.local( elem.attrib.get( "message" ) )[2]
heading = DATA.local( elem.attrib.get( "heading" ) )[2]
dialog = xbmcgui.Dialog()
return dialog.yesno( heading, message )
return True
def find_defaultBackground( self, labelID, defaultID ):
# This function finds the default background, including properties
count = 0
while self.backgrounds == "LOADING" and count < 20:
if xbmc.Monitor().waitForAbort(0.1):
return
count = count + 1
if self.backgrounds == "LOADING":
self.backgrounds = []
result = {}
defaultBackground = self.find_default( "background", labelID, defaultID )
if defaultBackground:
for key in self.backgrounds:
if defaultBackground == key[ 0 ]:
result["path"] = key[ 0 ]
result["label"] = key[ 1 ]
elif defaultBackground == key[ 1 ]:
result["path"] = key[ 0 ]
result["label"] = key[ 1 ]
return result
def find_defaultWidget( self, labelID, defaultID ):
# This function finds the default widget, including properties
result = {}
#first look for any widgetdefaultnodes
defaultWidget = self.find_default( "widgetdefaultnode", labelID, defaultID )
if defaultWidget is not None:
result["path"] = defaultWidget.get( "path" )
result["name"] = defaultWidget.get( "label" )
result["widget"] = defaultWidget.text
result["type"] = defaultWidget.get( "type" )
result["target"] = defaultWidget.get( "target" )
else:
#find any classic widgets
defaultWidget = self.find_default( "widget", labelID, defaultID )
for key in LIBRARY.dictionaryGroupings[ "widgets-classic" ]:
if key[0] == defaultWidget:
result["widget"] = key[ 0 ]
result["name"] = key[ 1 ]
result["type"] = key[ 2 ]
result["path"] = key[ 3 ]
result["target"] = key[ 5 ]
break
return result
def find_default( self, backgroundorwidget, labelID, defaultID ):
# This function finds the id of an items default background or widget
if labelID is None:
labelID = defaultID
tree = DATA._get_overrides_skin()
if backgroundorwidget == "background":
elems = tree.getroot().findall( "backgrounddefault" )
elif backgroundorwidget == "widgetdefaultnode":
elems = tree.getroot().findall( "widgetdefaultnode" )
else:
elems = tree.getroot().findall( "widgetdefault" )
if elems is not None:
for elem in elems:
if elem.attrib.get( "labelID" ) == labelID or elem.attrib.get( "defaultID" ) == defaultID:
if "group" in elem.attrib:
if elem.attrib.get( "group" ) == self.group:
if backgroundorwidget == "widgetdefaultnode":
#if it's a widgetdefaultnode, return the whole element
return elem
else:
return elem.text
else:
continue
else:
return elem.text
return None
def _set_label( self, listitem, label ):
# Update the label, local string and labelID
listitem.setLabel( label )
listitem.setProperty( "localizedString", None )
LIBRARY._rename_playlist( listitem.getProperty( "path" ), label )
# If there's no label2, set it to custom shortcut
if not listitem.getLabel2():
listitem.setLabel2( LANGUAGE(32024) )
listitem.setProperty( "shortcutType", "32024" )
def onAction( self, action ):
currentFocus = self.getFocusId()
if action.getId() in ACTION_CANCEL_DIALOG:
# Close action
if currentFocus and currentFocus in self.onBack:
# Action overriden to return to a control
self.setFocusId( self.onBack[ currentFocus ] )
return
# Close window
self._save_shortcuts()
xbmcgui.Window(self.window_id).clearProperty('groupname')
self._close()
elif currentFocus in self.contextControls and action.getId() in ACTION_CONTEXT_MENU:
# Context menu action
self._display_Context_Menu()
if currentFocus == 211:
# Changed highlighted item, update window properties
self._add_additional_properties()
def _display_Context_Menu( self ):
# Displays a context menu
contextActions = []
contextItems = []
# Find active context menu items
for item in self.contextItems:
# Check any condition
if item[ 1 ] is None or xbmc.getCondVisibility( item[ 1 ] ):
# Add the items
contextActions.append( item[ 0 ] )
contextItems.append( item[ 2 ] )
# Check that there are some items to display
if len( contextItems ) == 0:
log( "Context menu called, but no items to display" )
return
# Display the context menu
selectedItem = xbmcgui.Dialog().contextmenu( list=contextItems )
if selectedItem == -1:
# Nothing selected
return
# Call the control associated with the selected item
self.onClick( contextActions[ selectedItem ] )
def _close( self ):
self.close()
| 0.028155 |
# coding=utf8
class Shape:
def draw(self, drawSystem):
pass
class Pane:
BBOX_X_MIN=0x08
BBOX_Y_MIN=0x08
BBOX_X_MAX=0xF7
BBOX_Y_MAX=0xF7
def __init__(self, left, top, right, bottom):
self.left=left
self.top=top
self.right=right
self.bottom=bottom
def __str__(self):
return "%s"%([self.left, self.top, self.right, self.bottom])
def __eq__(self, other):
return isinstance(other, Pane) and (
self.left == other.left and self.top == other.top
and self.right == other.right and self.bottom == other.bottom
)
def __ne__(self, other):
return not self.__eq__(other)
def clone(self):
return Pane(self.left, self.top, self.right, self.bottom)
def translateBy(self, offset):
self.offsetLeftAndRight(offset[0])
self.offsetTopAndBottom(offset[1])
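# Scale all four edges about the given pivot point by the given ratio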
def scale(self, pivot, ratio):
pivotX = pivot[0]
pivotY = pivot[1]
self.left = (self.left - pivotX) * ratio + pivotX
self.right = (self.right - pivotX) * ratio + pivotX
self.top = (self.top - pivotY) * ratio + pivotY
self.bottom = (self.bottom - pivotY) * ratio + pivotY
def offsetLeftAndRight(self, offset):
self.left += offset
self.right += offset
def offsetTopAndBottom(self, offset):
self.top += offset
self.bottom += offset
@property
def width(self):
return self.right-self.left+1
@property
def height(self):
return self.bottom-self.top+1
def containsPoint(self, point):
x, y = point
return (self.left<=x<=self.right) and (self.top<=y<=self.bottom)
def containsPane(self, pane):
return self.containsPoint(pane.getLeftTop()) and self.containsPoint(pane.getRightBottom())
def limitedToPane(self, pane):
left=max(self.left, pane.left)
top=max(self.top, pane.top)
right=min(self.right, pane.right)
bottom=min(self.bottom, pane.bottom)
self.left=left
self.top=top
self.right=right
self.bottom=bottom
def getLeft(self):
return self.left
def getTop(self):
return self.top
def getRight(self):
return self.right
def getBottom(self):
return self.bottom
def getWidth(self):
return self.width
def getHeight(self):
return self.height
def getLeftTop(self):
return (self.left, self.top)
def getRightBottom(self):
return (self.right, self.bottom)
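# Map a point given relative to this pane onto the corresponding position in targetPane (linear rescaling of both axes)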
def transformRelativePointByTargetPane(self, point, targetPane):
(x, y)=point
newX=int((x-self.getLeft())*targetPane.getWidth()/self.getWidth())+targetPane.getLeft()
newY=int((y-self.getTop())*targetPane.getHeight()/self.getHeight())+targetPane.getTop()
return (newX, newY)
def transformRelativePaneByTargetPane(self, relativePane, targetPane):
(left, top)=self.transformRelativePointByTargetPane(relativePane.getLeftTop(), targetPane)
(right, bottom)=self.transformRelativePointByTargetPane(relativePane.getRightBottom(), targetPane)
return Pane(left, top, right, bottom)
# Character face frame (bounding box)
Pane.BBOX=Pane(
Pane.BBOX_X_MIN,
Pane.BBOX_Y_MIN,
Pane.BBOX_X_MAX,
Pane.BBOX_Y_MAX,
)
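# A Pane whose corners are validated on construction (left <= right, top <= bottom)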
class Boundary(Pane):
def __init__(self, left, top, right, bottom):
assert left <= right and top <= bottom
super().__init__(left, top, right, bottom)
self.left=left
self.top=top
self.right=right
self.bottom=bottom
def getLeft(self):
return self.left
def getTop(self):
return self.top
def getRight(self):
return self.right
def getBottom(self):
return self.bottom
def getTopLeft(self):
return (self.left, self.top)
def getTopRight(self):
return (self.right, self.top)
def getBottomLeft(self):
return (self.left, self.bottom)
def getBottomRight(self):
return (self.right, self.bottom)
def getWidth(self):
return self.right - self.left
def getHeight(self):
return self.bottom - self.top
Boundary.Default = Boundary(0, 0, 256, 256)
# def getBoundary(self):
# return (self.left, self.top, self.right, self.bottom)
class Drawable:
def __init__(self, boundary = Boundary.Default):
self.boundary = boundary
def getBoundary(self):
return self.boundary
def draw(self, drawSystem):
pass
class Rectangle(Drawable):
def __init__(self, x=0, y=0, w=0, h=0):
super().__init__()
self.x=x
self.y=y
self.w=w
self.h=h
def setGeometry(self, x, y, w, h):
[self.x, self.y, self.w, self.h,]=[x, y, w, h,]
def getGeometry(self):
return [self.x, self.y, self.w, self.h,]
def __str__(self):
return "(%s, %s, %s, %s)"%(self.x, self.y, self.w, self.h,)
def draw(self, drawSystem):
drawSystem.startDrawing((0, 0))
drawSystem.lineTo((self.w, 0))
drawSystem.lineTo((0, self.h))
drawSystem.lineTo((-self.w, 0))
drawSystem.lineTo((0, -self.h))
drawSystem.endDrawing()
def offsetBoundary(boundary, offset):
return (boundary[0]+offset[0], boundary[1]+offset[1], boundary[2]+offset[0], boundary[3]+offset[1],)
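# Union of two (left, top, right, bottom) boundaries: the smallest boundary containing both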
def mergeBoundary(boundaryA, boundaryB):
return (min(boundaryA[0], boundaryB[0]), min(boundaryA[1], boundaryB[1]),
max(boundaryA[2], boundaryB[2]), max(boundaryA[3], boundaryB[3]),)
| 0.042067 |
from django.contrib import admin
from labourwages.models import *
# Register your models here.
class AgoperationAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Agoperation,AgoperationAdmin)
class WagetypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Wagetype,WagetypeAdmin)
class WorkprogrammeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Workprogramme,WorkprogrammeAdmin)
class ObligationtypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Obligationtype,ObligationtypeAdmin)
class TaskdescriptionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Taskdescription,TaskdescriptionAdmin)
class CollecteditemsAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Collecteditems,CollecteditemsAdmin)
class MagencyAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Magency,MagencyAdmin)
class CheatingfacedAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Cheatingfaced,CheatingfacedAdmin)
class WorkdescriptionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Workdescription,WorkdescriptionAdmin)
class AnimaltypeAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Animaltype,AnimaltypeAdmin)
class AnimalproductionAdmin(admin.ModelAdmin):
fields=('name',)
admin.site.register(Animalproduction,AnimalproductionAdmin)
class LabourdaysAdmin(admin.ModelAdmin):
fields=('household','household_number','labour_deployed','s_no','crop','extent','agricultural_operation','family_labour_days_m','family_labour_days_w','family_labour_days_c','family_labour_hours_m','family_labour_hours_w','family_labour_hours_c','daily_labour_days_m','daily_labour_days_w','daily_labour_days_c','daily_labour_hours_m','daily_labour_hours_w','daily_labour_hours_c','daily_labour_wages_m','daily_labour_wages_w','daily_labour_wages_c','exchange_labour_days_m','exchange_labour_days_w','exchange_labour_days_c','exchange_labour_hours_m','exchange_labour_hours_w','exchange_labour_hours_c','piece_rated_cash','piece_rated_kind','machine_labour_workhours','machine_labourpayment','comments',)
admin.site.register(Labourdays,LabourdaysAdmin)
class WagesAdmin(admin.ModelAdmin):
fields=('household','household_number','is_agricultural_labour','worker_name','crop','operation','type_wage','place_work','labour_days','work_hours','earnings_cash','income','piece_rate_kind','contract_number_acres','contract_remuniration','contract_howmany_workers','contract_total_wage','wagerates_increased','migrations_declined','isthere_change_peasants','has_baragaining_power_increased','comments',)
admin.site.register(Wages,WagesAdmin)
class NonaglabourAdmin(admin.ModelAdmin):
fields=('household','household_number','workedin_nonag_operation','worker_name','description_specify_programme', 'type_wage_contract','place_work','number_days','work_hours','wage_rate' ,'totalearnings_cash','comments')
admin.site.register(Nonaglabour,NonaglabourAdmin)
class EmpfreedomAdmin(admin.ModelAdmin):
fields=('household' , 'household_number', 'comments',)
admin.site.register(Empfreedom,EmpfreedomAdmin)
class IncomeotherAdmin(admin.ModelAdmin):
fields=('household','household_number','worker_name','work_description','work_place','totalnet_earnings','earlier_income_kind','comments',)
admin.site.register(Incomeother,IncomeotherAdmin)
class AnimalsourceAdmin(admin.ModelAdmin):
fields=('household','household_number','animal_owned','type','s_no','nu','age','feed_home_grown','feed_purchased','total_present_value','veternary_charges','maintanence_buildings','insurance','interest_loans_livestock','labour_charges','others','income_production_one','production_work_qty_one','production_work_price_one','income_production_two','production_work_qty_two','production_work_price_two','comments')
admin.site.register(Animalsource,AnimalsourceAdmin)
| 0.048127 |
"""
Low-level utilities for reading in raw MNIST files.
"""
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__email__ = "wardefar@iro"
__maintainer__ = "David Warde-Farley"
import struct
import numpy
from theano.compat import six
MNIST_IMAGE_MAGIC = 2051
MNIST_LABEL_MAGIC = 2049
class open_if_filename(object):
"""
.. todo::
WRITEME
Parameters
----------
f : WRITEME
mode : WRITEME
buffering : WRITEME
"""
def __init__(self, f, mode='r', buffering=-1):
self._f = f
self._mode = mode
self._buffering = buffering
self._handle = None
def __enter__(self):
"""
.. todo::
WRITEME
"""
if isinstance(self._f, six.string_types):
self._handle = open(self._f, self._mode, self._buffering)
else:
self._handle = self._f
return self._handle
def __exit__(self, exc_type, exc_value, traceback):
"""
.. todo::
WRITEME
"""
if self._handle is not self._f:
self._handle.close()
def read_mnist_images(fn, dtype=None):
"""
Read MNIST images from the original ubyte file format.
Parameters
----------
fn : str or object
Filename/path from which to read labels, or an open file
object for the same (will not be closed for you).
dtype : str or object, optional
A NumPy dtype or string that can be converted to one.
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : ndarray, shape (n_images, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was boolean, the resulting array will
be boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float or complex dtype, the values
will be mapped to the unit interval [0, 1], with pixel values
that were 255 in the original unsigned byte representation
equal to 1.0.
"""
with open_if_filename(fn, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError('wrong magic number reading MNIST image file: ' +
fn)
array = numpy.fromfile(f, dtype='uint8').reshape((number, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
# If the user wants booleans, threshold at half the range.
if dtype.kind == 'b':
array = array >= 128
else:
# Otherwise, just convert.
array = array.astype(dtype)
# I don't know why you'd ever turn MNIST into complex,
# but just in case, check for float *or* complex dtypes.
# Either way, map to the unit interval.
if dtype.kind in ('f', 'c'):
array /= 255.
return array
def read_mnist_labels(fn):
"""
Read MNIST labels from the original ubyte file format.
Parameters
----------
fn : str or object
Filename/path from which to read labels, or an open file
object for the same (will not be closed for you).
Returns
-------
labels : ndarray, shape (nlabels,)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with open_if_filename(fn, 'rb') as f:
magic, number = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError('wrong magic number reading MNIST label file: ' +
fn)
array = numpy.fromfile(f, dtype='uint8')
return array
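# Illustrative usage sketch (the file names below are hypothetical examples of the standard MNIST ubyte files):
#     images = read_mnist_images('train-images-idx3-ubyte', dtype='float32')
#     labels = read_mnist_labels('train-labels-idx1-ubyte')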
| 0 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from copy import deepcopy
import re
import numpy as np
from .constants import FIFF
from ..utils import (logger, verbose, _validate_type, fill_doc, _ensure_int,
_check_option)
def get_channel_type_constants(include_defaults=False):
"""Return all known channel types, and associated FIFF constants.
Parameters
----------
include_defaults : bool
Whether to include default values for "unit" and "coil_type" for all
entries (see Notes). Defaults are generally based on values normally
present for a VectorView MEG system. Defaults to ``False``.
Returns
-------
channel_types : dict
The keys are channel type strings, and the values are dictionaries of
FIFF constants for "kind", and possibly "unit" and "coil_type".
Notes
-----
Values which might vary within a channel type across real data
recordings are excluded unless ``include_defaults=True``. For example,
"ref_meg" channels may have coil type
``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc
(depending on the recording system), so no "coil_type" entry is given
for "ref_meg" unless ``include_defaults`` is requested.
"""
base = dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M),
mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T),
ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH),
eeg=dict(kind=FIFF.FIFFV_EEG_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_EEG),
seeg=dict(kind=FIFF.FIFFV_SEEG_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_EEG),
dbs=dict(kind=FIFF.FIFFV_DBS_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_EEG),
ecog=dict(kind=FIFF.FIFFV_ECOG_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_EEG),
eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V),
emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V),
ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V),
resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V),
bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V),
misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V),
stim=dict(kind=FIFF.FIFFV_STIM_CH),
exci=dict(kind=FIFF.FIFFV_EXCI_CH),
syst=dict(kind=FIFF.FIFFV_SYST_CH),
ias=dict(kind=FIFF.FIFFV_IAS_CH),
gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT),
dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE),
chpi=dict(kind=[FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1,
FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G,
FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]),
fnirs_cw_amplitude=dict(
kind=FIFF.FIFFV_FNIRS_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE),
fnirs_fd_ac_amplitude=dict(
kind=FIFF.FIFFV_FNIRS_CH,
unit=FIFF.FIFF_UNIT_V,
coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE),
fnirs_fd_phase=dict(
kind=FIFF.FIFFV_FNIRS_CH,
unit=FIFF.FIFF_UNIT_RAD,
coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE),
fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH,
coil_type=FIFF.FIFFV_COIL_FNIRS_OD),
hbo=dict(kind=FIFF.FIFFV_FNIRS_CH,
unit=FIFF.FIFF_UNIT_MOL,
coil_type=FIFF.FIFFV_COIL_FNIRS_HBO),
hbr=dict(kind=FIFF.FIFFV_FNIRS_CH,
unit=FIFF.FIFF_UNIT_MOL,
coil_type=FIFF.FIFFV_COIL_FNIRS_HBR),
csd=dict(kind=FIFF.FIFFV_EEG_CH,
unit=FIFF.FIFF_UNIT_V_M2,
coil_type=FIFF.FIFFV_COIL_EEG_CSD))
if include_defaults:
coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE)
unit_none = dict(unit=FIFF.FIFF_UNIT_NONE)
defaults = dict(
grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1),
mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3),
ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3,
unit=FIFF.FIFF_UNIT_T),
misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V
stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none),
eog=coil_none,
ecg=coil_none,
emg=coil_none,
bio=coil_none,
fnirs_od=unit_none,
)
for key, value in defaults.items():
base[key].update(value)
return base
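# Illustrative sketch (not part of the original module): with
# ``include_defaults=True`` each entry carries a "kind" plus default "unit"
# and "coil_type" values taken from the tables above::
#
#     >>> consts = get_channel_type_constants(include_defaults=True)
#     >>> consts['mag']['unit'] == FIFF.FIFF_UNIT_T
#     True
#     >>> consts['eog']['coil_type'] == FIFF.FIFFV_COIL_NONE
#     True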
_first_rule = {
FIFF.FIFFV_MEG_CH: 'meg',
FIFF.FIFFV_REF_MEG_CH: 'ref_meg',
FIFF.FIFFV_EEG_CH: 'eeg',
FIFF.FIFFV_STIM_CH: 'stim',
FIFF.FIFFV_EOG_CH: 'eog',
FIFF.FIFFV_EMG_CH: 'emg',
FIFF.FIFFV_ECG_CH: 'ecg',
FIFF.FIFFV_RESP_CH: 'resp',
FIFF.FIFFV_MISC_CH: 'misc',
FIFF.FIFFV_EXCI_CH: 'exci',
FIFF.FIFFV_IAS_CH: 'ias',
FIFF.FIFFV_SYST_CH: 'syst',
FIFF.FIFFV_SEEG_CH: 'seeg',
FIFF.FIFFV_DBS_CH: 'dbs',
FIFF.FIFFV_BIO_CH: 'bio',
FIFF.FIFFV_QUAT_0: 'chpi',
FIFF.FIFFV_QUAT_1: 'chpi',
FIFF.FIFFV_QUAT_2: 'chpi',
FIFF.FIFFV_QUAT_3: 'chpi',
FIFF.FIFFV_QUAT_4: 'chpi',
FIFF.FIFFV_QUAT_5: 'chpi',
FIFF.FIFFV_QUAT_6: 'chpi',
FIFF.FIFFV_HPI_G: 'chpi',
FIFF.FIFFV_HPI_ERR: 'chpi',
FIFF.FIFFV_HPI_MOV: 'chpi',
FIFF.FIFFV_DIPOLE_WAVE: 'dipole',
FIFF.FIFFV_GOODNESS_FIT: 'gof',
FIFF.FIFFV_ECOG_CH: 'ecog',
FIFF.FIFFV_FNIRS_CH: 'fnirs',
}
# How to reduce our categories in channel_type (originally)
_second_rules = {
'meg': ('unit', {FIFF.FIFF_UNIT_T_M: 'grad',
FIFF.FIFF_UNIT_T: 'mag'}),
'fnirs': ('coil_type', {FIFF.FIFFV_COIL_FNIRS_HBO: 'hbo',
FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr',
FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE:
'fnirs_cw_amplitude',
FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE:
'fnirs_fd_ac_amplitude',
FIFF.FIFFV_COIL_FNIRS_FD_PHASE:
'fnirs_fd_phase',
FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od',
}),
'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg',
FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg',
FIFF.FIFFV_COIL_NONE: 'eeg', # MNE-C backward compat
FIFF.FIFFV_COIL_EEG_CSD: 'csd',
})
}
def channel_type(info, idx):
"""Get channel type.
Parameters
----------
info : instance of Info
A measurement info object.
idx : int
Index of channel.
Returns
-------
type : str
Type of channel. Will be one of::
{'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg',
'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs',
'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'}
"""
# This is faster than the original _channel_type_old now in test_pick.py
# because it uses (at most!) two dict lookups plus one conditional
# to get the channel type string.
ch = info['chs'][idx]
try:
first_kind = _first_rule[ch['kind']]
except KeyError:
raise ValueError('Unknown channel type (%s) for channel "%s"'
% (ch['kind'], ch["ch_name"]))
if first_kind in _second_rules:
key, second_rule = _second_rules[first_kind]
first_kind = second_rule[ch[key]]
return first_kind
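# Illustrative sketch (not part of the original module; ``info`` is an assumed,
# user-provided measurement object)::
#
#     >>> channel_type(info, 0)  # doctest: +SKIP
#     'grad'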
def pick_channels(ch_names, include, exclude=[], ordered=False):
"""Pick channels by names.
Returns the indices of ``ch_names`` in ``include`` but not in ``exclude``.
Parameters
----------
ch_names : list of str
List of channels.
include : list of str
List of channels to include (if empty include all available).
.. note:: This is to be treated as a set. The order of this list
is not used or maintained in ``sel``.
exclude : list of str
List of channels to exclude (if empty do not exclude any channel).
Defaults to [].
ordered : bool
        If True (default False), treat ``include`` as an ordered list
        rather than a set, and raise an error if any channels from
        ``include`` are missing in ``ch_names``.
.. versionadded:: 0.18
Returns
-------
sel : array of int
Indices of good channels.
See Also
--------
pick_channels_regexp, pick_types
"""
if len(np.unique(ch_names)) != len(ch_names):
raise RuntimeError('ch_names is not a unique list, picking is unsafe')
_check_excludes_includes(include)
_check_excludes_includes(exclude)
if not ordered:
if not isinstance(include, set):
include = set(include)
if not isinstance(exclude, set):
exclude = set(exclude)
sel = []
for k, name in enumerate(ch_names):
if (len(include) == 0 or name in include) and name not in exclude:
sel.append(k)
else:
if not isinstance(include, list):
include = list(include)
if len(include) == 0:
include = list(ch_names)
if not isinstance(exclude, list):
exclude = list(exclude)
sel, missing = list(), list()
for name in include:
if name in ch_names:
if name not in exclude:
sel.append(ch_names.index(name))
else:
missing.append(name)
if len(missing):
raise ValueError('Missing channels from ch_names required by '
'include:\n%s' % (missing,))
return np.array(sel, int)
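# Illustrative sketch (not part of the original module): with the default
# ``ordered=False`` the result follows the order of ``ch_names``, not the
# order of ``include``::
#
#     >>> pick_channels(['MEG 0111', 'EEG 001', 'STI 014'],
#     ...               include=['STI 014', 'EEG 001'])
#     array([1, 2])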
def pick_channels_regexp(ch_names, regexp):
"""Pick channels using regular expression.
Returns the indices of the good channels in ch_names.
Parameters
----------
ch_names : list of str
List of channels.
regexp : str
The regular expression. See python standard module for regular
expressions.
Returns
-------
sel : array of int
Indices of good channels.
See Also
--------
pick_channels
Examples
--------
>>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
[0]
>>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')
[0, 1, 2]
"""
r = re.compile(regexp)
return [k for k, name in enumerate(ch_names) if r.match(name)]
def _triage_meg_pick(ch, meg):
"""Triage an MEG pick type."""
if meg is True:
return True
elif ch['unit'] == FIFF.FIFF_UNIT_T_M:
if meg == 'grad':
return True
elif meg == 'planar1' and ch['ch_name'].endswith('2'):
return True
elif meg == 'planar2' and ch['ch_name'].endswith('3'):
return True
elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T):
return True
return False
def _triage_fnirs_pick(ch, fnirs, warned):
"""Triage an fNIRS pick type."""
if fnirs is True:
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and fnirs == 'hbo':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and fnirs == 'hbr':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \
fnirs == 'fnirs_cw_amplitude':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \
fnirs == 'fnirs_fd_ac_amplitude':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \
fnirs == 'fnirs_fd_phase':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od':
return True
return False
def _check_meg_type(meg, allow_auto=False):
"""Ensure a valid meg type."""
if isinstance(meg, str):
allowed_types = ['grad', 'mag', 'planar1', 'planar2']
allowed_types += ['auto'] if allow_auto else []
if meg not in allowed_types:
raise ValueError('meg value must be one of %s or bool, not %s'
% (allowed_types, meg))
def _check_info_exclude(info, exclude):
_validate_type(info, "info")
info._check_consistency()
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
elif exclude == 'bads':
exclude = info.get('bads', [])
elif not isinstance(exclude, (list, tuple)):
raise ValueError('exclude must either be "bads" or a list of strings.'
' If only one channel is to be excluded, use '
'[ch_name] instead of passing ch_name.')
return exclude
def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False,
emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
exci=False, ias=False, syst=False, seeg=False, dipole=False,
gof=False, bio=False, ecog=False, fnirs=False, csd=False,
dbs=False, include=(), exclude='bads', selection=None):
"""Pick channels by type and names.
Parameters
----------
info : dict
The measurement info.
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all gradiometers,
or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', reference
channels are included if compensations are present and ``meg`` is not
False. Can also be the string options for the ``meg`` parameter.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can be
'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
csd : bool
Current source density channels.
dbs : bool
Deep brain stimulation channels.
include : list of str
List of additional channels to include. If empty do not include any.
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of str
Restrict sensor channels (MEG, EEG) to this list of channel names.
Returns
-------
sel : array of int
Indices of good channels.
"""
# NOTE: Changes to this function's signature should also be changed in
# PickChannelsMixin
_validate_type(meg, (bool, str), 'meg')
exclude = _check_info_exclude(info, exclude)
nchan = info['nchan']
pick = np.zeros(nchan, dtype=bool)
_check_meg_type(ref_meg, allow_auto=True)
_check_meg_type(meg)
if isinstance(ref_meg, str) and ref_meg == 'auto':
ref_meg = ('comps' in info and info['comps'] is not None and
len(info['comps']) > 0 and meg is not False)
for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci,
ias, syst, seeg, dipole, gof, bio, ecog, csd, dbs):
if not isinstance(param, bool):
w = ('Parameters for all channel types (with the exception of '
'"meg", "ref_meg" and "fnirs") must be of type bool, not {}.')
raise ValueError(w.format(type(param)))
param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole,
gof=gof, bio=bio, ecog=ecog, csd=csd)
# avoid triage if possible
if isinstance(meg, bool):
for key in ('grad', 'mag'):
param_dict[key] = meg
if isinstance(fnirs, bool):
for key in _FNIRS_CH_TYPES_SPLIT:
param_dict[key] = fnirs
warned = [False]
for k in range(nchan):
ch_type = channel_type(info, k)
try:
pick[k] = param_dict[ch_type]
except KeyError: # not so simple
assert ch_type in (
'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT
if ch_type in ('grad', 'mag'):
pick[k] = _triage_meg_pick(info['chs'][k], meg)
elif ch_type == 'ref_meg':
pick[k] = _triage_meg_pick(info['chs'][k], ref_meg)
            else:  # one of the fNIRS channel types
pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs, warned)
# restrict channels to selection if provided
if selection is not None:
# the selection only restricts these types of channels
sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
FIFF.FIFFV_EEG_CH]
for k in np.where(pick)[0]:
if (info['chs'][k]['kind'] in sel_kind and
info['ch_names'][k] not in selection):
pick[k] = False
myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
myinclude += include
if len(myinclude) == 0:
sel = np.array([], int)
else:
sel = pick_channels(info['ch_names'], myinclude, exclude)
return sel
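# Illustrative sketch (not part of the original module; ``info`` is an assumed,
# user-provided measurement object): pick only gradiometers and EEG channels,
# skipping the channels listed in ``info['bads']``::
#
#     >>> sel = pick_types(info, meg='grad', eeg=True, exclude='bads')  # doctest: +SKIP
#     >>> info_sel = pick_info(info, sel)  # doctest: +SKIP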
@verbose
def pick_info(info, sel=(), copy=True, verbose=None):
"""Restrict an info structure to a selection of channels.
Parameters
----------
info : dict
Info structure from evoked or raw data.
sel : list of int | None
Indices of channels to include. If None, all channels
are included.
copy : bool
If copy is False, info is modified inplace.
%(verbose)s
Returns
-------
res : dict
Info structure restricted to a selection of channels.
"""
# avoid circular imports
from .meas_info import _bad_chans_comp
info._check_consistency()
info = info.copy() if copy else info
if sel is None:
return info
elif len(sel) == 0:
raise ValueError('No channels match the selection.')
n_unique = len(np.unique(np.arange(len(info['ch_names']))[sel]))
if n_unique != len(sel):
raise ValueError('Found %d / %d unique names, sel is not unique'
% (n_unique, len(sel)))
    # make sure the required compensation channels are present
if len(info.get('comps', [])) > 0:
ch_names = [info['ch_names'][idx] for idx in sel]
_, comps_missing = _bad_chans_comp(info, ch_names)
if len(comps_missing) > 0:
logger.info('Removing %d compensators from info because '
'not all compensation channels were picked.'
% (len(info['comps']),))
info['comps'] = []
info['chs'] = [info['chs'][k] for k in sel]
info._update_redundant()
info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
if 'comps' in info:
comps = deepcopy(info['comps'])
for c in comps:
row_idx = [k for k, n in enumerate(c['data']['row_names'])
if n in info['ch_names']]
row_names = [c['data']['row_names'][i] for i in row_idx]
rowcals = c['rowcals'][row_idx]
c['rowcals'] = rowcals
c['data']['nrow'] = len(row_names)
c['data']['row_names'] = row_names
c['data']['data'] = c['data']['data'][row_idx]
info['comps'] = comps
info._check_consistency()
return info
def _has_kit_refs(info, picks):
"""Determine if KIT ref channels are chosen.
This is currently only used by make_forward_solution, which cannot
run when KIT reference channels are included.
"""
for p in picks:
if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG:
return True
return False
def pick_channels_evoked(orig, include=[], exclude='bads'):
"""Pick channels from evoked data.
Parameters
----------
orig : Evoked object
One evoked dataset.
include : list of str, (optional)
List of channels to include (if empty, include all available).
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'.
Returns
-------
res : instance of Evoked
Evoked data restricted to selected channels. If include and
exclude are empty it returns orig without copy.
"""
if len(include) == 0 and len(exclude) == 0:
return orig
exclude = _check_excludes_includes(exclude, info=orig.info,
allow_bads=True)
sel = pick_channels(orig.info['ch_names'], include=include,
exclude=exclude)
if len(sel) == 0:
        raise ValueError('No channels match the selection.')
res = deepcopy(orig)
#
# Modify the measurement info
#
res.info = pick_info(res.info, sel)
#
# Create the reduced data set
#
res.data = res.data[sel, :]
return res
@verbose
def pick_channels_forward(orig, include=[], exclude=[], ordered=False,
copy=True, verbose=None):
"""Pick channels from forward operator.
Parameters
----------
orig : dict
A forward solution.
include : list of str
List of channels to include (if empty, include all available).
Defaults to [].
exclude : list of str | 'bads'
Channels to exclude (if empty, do not exclude any). Defaults to [].
If 'bads', then exclude bad channels in orig.
ordered : bool
If true (default False), treat ``include`` as an ordered list
rather than a set.
.. versionadded:: 0.18
copy : bool
If True (default), make a copy.
.. versionadded:: 0.19
%(verbose)s
Returns
-------
res : dict
Forward solution restricted to selected channels. If include and
exclude are empty it returns orig without copy.
"""
orig['info']._check_consistency()
if len(include) == 0 and len(exclude) == 0:
return orig.copy() if copy else orig
exclude = _check_excludes_includes(exclude,
info=orig['info'], allow_bads=True)
# Allow for possibility of channel ordering in forward solution being
# different from that of the M/EEG file it is based on.
sel_sol = pick_channels(orig['sol']['row_names'], include=include,
exclude=exclude, ordered=ordered)
sel_info = pick_channels(orig['info']['ch_names'], include=include,
exclude=exclude, ordered=ordered)
fwd = deepcopy(orig) if copy else orig
# Check that forward solution and original data file agree on #channels
if len(sel_sol) != len(sel_info):
raise ValueError('Forward solution and functional data appear to '
'have different channel names, please check.')
# Do we have something?
nuse = len(sel_sol)
if nuse == 0:
raise ValueError('Nothing remains after picking')
logger.info(' %d out of %d channels remain after picking'
% (nuse, fwd['nchan']))
# Pick the correct rows of the forward operator using sel_sol
fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
fwd['sol']['nrow'] = nuse
ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
fwd['nchan'] = nuse
fwd['sol']['row_names'] = ch_names
# Pick the appropriate channel names from the info-dict using sel_info
fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
fwd['info']._update_redundant()
fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
fwd['sol_grad']['nrow'] = nuse
fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
for k in sel_sol]
return fwd
def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False,
ecog=False, dbs=False, include=[], exclude=[]):
"""Pick by channel type and names from a forward operator.
Parameters
----------
orig : dict
A forward solution.
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all gradiometers,
or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
ref_meg : bool
If True include CTF / 4D reference channels.
seeg : bool
If True include stereotactic EEG channels.
ecog : bool
If True include electrocorticography channels.
dbs : bool
If True include deep brain stimulation channels.
include : list of str
List of additional channels to include. If empty do not include any.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in orig['info']['bads'].
Returns
-------
res : dict
Forward solution restricted to selected channel types.
"""
info = orig['info']
sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg,
ecog=ecog, dbs=dbs, include=include, exclude=exclude)
if len(sel) == 0:
raise ValueError('No valid channels found')
include_ch_names = [info['ch_names'][k] for k in sel]
return pick_channels_forward(orig, include_ch_names)
@fill_doc
def channel_indices_by_type(info, picks=None):
"""Get indices of channels by type.
Parameters
----------
info : instance of Info
A measurement info object.
%(picks_all)s
Returns
-------
idx_by_type : dict
A dictionary that maps each channel type to a (possibly empty) list of
channel indices.
"""
idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if
key not in ('meg', 'fnirs')}
idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(),
fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(),
fnirs_fd_phase=list(), fnirs_od=list())
picks = _picks_to_idx(info, picks,
none='all', exclude=(), allow_empty=True)
for k in picks:
ch_type = channel_type(info, k)
for key in idx_by_type.keys():
if ch_type == key:
idx_by_type[key].append(k)
return idx_by_type
def pick_channels_cov(orig, include=[], exclude='bads', ordered=False,
copy=True):
"""Pick channels from covariance matrix.
Parameters
----------
orig : Covariance
A covariance.
include : list of str, (optional)
List of channels to include (if empty, include all available).
exclude : list of str, (optional) | 'bads'
Channels to exclude (if empty, do not exclude any). Defaults to 'bads'.
ordered : bool
If True (default False), ensure that the order of the channels in the
modified instance matches the order of ``include``.
.. versionadded:: 0.20.0
copy : bool
If True (the default), return a copy of the covariance matrix with the
modified channels. If False, channels are modified in-place.
.. versionadded:: 0.20.0
Returns
-------
res : dict
Covariance solution restricted to selected channels.
"""
if copy:
orig = orig.copy()
# A little peculiarity of the cov objects is that these two fields
# should not be copied over when None.
if 'method' in orig and orig['method'] is None:
del orig['method']
if 'loglik' in orig and orig['loglik'] is None:
del orig['loglik']
exclude = orig['bads'] if exclude == 'bads' else exclude
sel = pick_channels(orig['names'], include=include, exclude=exclude,
ordered=ordered)
data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel]
names = [orig['names'][k] for k in sel]
bads = [name for name in orig['bads'] if name in orig['names']]
orig['data'] = data
orig['names'] = names
orig['bads'] = bads
orig['dim'] = len(data)
return orig
def _mag_grad_dependent(info):
"""Determine of mag and grad should be dealt with jointly."""
# right now just uses SSS, could be computed / checked from cov
# but probably overkill
return any(ph.get('max_info', {}).get('sss_info', {}).get('in_order', 0)
for ph in info.get('proc_history', []))
def _contains_ch_type(info, ch_type):
"""Check whether a certain channel type is in an info object.
Parameters
----------
info : instance of Info
The measurement information.
ch_type : str
the channel type to be checked for
Returns
-------
has_ch_type : bool
Whether the channel type is present or not.
"""
_validate_type(ch_type, 'str', "ch_type")
meg_extras = list(_MEG_CH_TYPES_SPLIT)
fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT)
valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS
if key != 'meg'] + meg_extras + fnirs_extras)
_check_option('ch_type', ch_type, valid_channel_types)
if info is None:
raise ValueError('Cannot check for channels of type "%s" because info '
'is None' % (ch_type,))
return any(ch_type == channel_type(info, ii)
for ii in range(info['nchan']))
def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'):
"""Get data channel indices as separate list of tuples.
Parameters
----------
    info : instance of Info
The info.
meg_combined : bool | 'auto'
Whether to return combined picks for grad and mag.
Can be 'auto' to choose based on Maxwell filtering status.
ref_meg : bool
If True include CTF / 4D reference channels
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in info['bads'].
Returns
-------
picks_list : list of tuples
The list of tuples of picks and the type string.
"""
_validate_type(ref_meg, bool, 'ref_meg')
exclude = _check_info_exclude(info, exclude)
if meg_combined == 'auto':
meg_combined = _mag_grad_dependent(info)
    picks_list = {ch_type: list() for ch_type in _DATA_CH_TYPES_SPLIT}
for k in range(info['nchan']):
if info['chs'][k]['ch_name'] not in exclude:
this_type = channel_type(info, k)
try:
picks_list[this_type].append(k)
except KeyError:
# This annoyance is due to differences in pick_types
# and channel_type behavior
if this_type == 'ref_meg':
ch = info['chs'][k]
if _triage_meg_pick(ch, ref_meg):
if ch['unit'] == FIFF.FIFF_UNIT_T:
picks_list['mag'].append(k)
elif ch['unit'] == FIFF.FIFF_UNIT_T_M:
picks_list['grad'].append(k)
else:
pass # not a data channel type
picks_list = [(ch_type, np.array(picks_list[ch_type], int))
for ch_type in _DATA_CH_TYPES_SPLIT]
assert _DATA_CH_TYPES_SPLIT[:2] == ('mag', 'grad')
if meg_combined and len(picks_list[0][1]) and len(picks_list[1][1]):
picks_list.insert(
0, ('meg', np.unique(np.concatenate([picks_list.pop(0)[1],
picks_list.pop(0)[1]])))
)
picks_list = [p for p in picks_list if len(p[1])]
return picks_list
def _check_excludes_includes(chs, info=None, allow_bads=False):
"""Ensure that inputs to exclude/include are list-like or "bads".
Parameters
----------
chs : any input, should be list, tuple, set, str
The channels passed to include or exclude.
allow_bads : bool
Allow the user to supply "bads" as a string for auto exclusion.
Returns
-------
chs : list
        Channels to be included/excluded. If allow_bads, and chs=="bads",
this will be the bad channels found in 'info'.
"""
from .meas_info import Info
if not isinstance(chs, (list, tuple, set, np.ndarray)):
if allow_bads is True:
if not isinstance(info, Info):
raise ValueError('Supply an info object if allow_bads is true')
elif chs != 'bads':
raise ValueError('If chs is a string, it must be "bads"')
else:
chs = info['bads']
else:
raise ValueError(
'include/exclude must be list, tuple, ndarray, or "bads". ' +
'You provided type {}'.format(type(chs)))
return chs
_PICK_TYPES_DATA_DICT = dict(
meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False,
misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True,
dbs=True)
_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg'])
_MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2')
_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude',
'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od')
_DATA_CH_TYPES_ORDER_DEFAULT = (
'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'resp', 'emg', 'ref_meg',
'misc', 'stim', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog',
'dbs') + _FNIRS_CH_TYPES_SPLIT + ('whitened',)
# Valid data types, ordered for consistency, used in viz/evoked.
_VALID_CHANNEL_TYPES = (
'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'resp', 'emg', 'dipole', 'gof',
'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd')
_DATA_CH_TYPES_SPLIT = (
'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT
def _pick_data_channels(info, exclude='bads', with_ref_meg=True,
with_aux=False):
"""Pick only data channels."""
kwargs = _PICK_TYPES_DATA_DICT
if with_aux:
kwargs = kwargs.copy()
kwargs.update(eog=True, ecg=True, emg=True, bio=True)
return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs)
def _pick_data_or_ica(info, exclude=()):
"""Pick only data or ICA channels."""
if any(ch_name.startswith('ICA') for ch_name in info['ch_names']):
picks = pick_types(info, exclude=exclude, misc=True)
else:
picks = _pick_data_channels(info, exclude=exclude, with_ref_meg=True)
return picks
def _picks_to_idx(info, picks, none='data', exclude='bads', allow_empty=False,
with_ref_meg=True, return_kind=False):
"""Convert and check pick validity."""
from .meas_info import Info
picked_ch_type_or_generic = False
#
# None -> all, data, or data_or_ica (ndarray of int)
#
if isinstance(info, Info):
n_chan = info['nchan']
else:
info = _ensure_int(info, 'info', 'an int or Info')
n_chan = info
assert n_chan >= 0
orig_picks = picks
# We do some extra_repr gymnastics to avoid calling repr(orig_picks) too
# soon as it can be a performance bottleneck (repr on ndarray is slow)
extra_repr = ''
if picks is None:
if isinstance(info, int): # special wrapper for no real info
picks = np.arange(n_chan)
extra_repr = ', treated as range(%d)' % (n_chan,)
else:
picks = none # let _picks_str_to_idx handle it
extra_repr = 'None, treated as "%s"' % (none,)
#
# slice
#
if isinstance(picks, slice):
picks = np.arange(n_chan)[picks]
#
# -> ndarray of int (and make a copy)
#
picks = np.atleast_1d(picks) # this works even for picks == 'something'
picks = np.array([], dtype=int) if len(picks) == 0 else picks
if picks.ndim != 1:
raise ValueError('picks must be 1D, got %sD' % (picks.ndim,))
if picks.dtype.char in ('S', 'U'):
picks = _picks_str_to_idx(info, picks, exclude, with_ref_meg,
return_kind, extra_repr, allow_empty,
orig_picks)
if return_kind:
picked_ch_type_or_generic = picks[1]
picks = picks[0]
if picks.dtype.kind not in ['i', 'u']:
raise TypeError('picks must be a list of int or list of str, got '
'a data type of %s' % (picks.dtype,))
del extra_repr
picks = picks.astype(int)
#
# ensure we have (optionally non-empty) ndarray of valid int
#
if len(picks) == 0 and not allow_empty:
raise ValueError('No appropriate channels found for the given picks '
'(%r)' % (orig_picks,))
if (picks < -n_chan).any():
raise ValueError('All picks must be >= %d, got %r'
% (-n_chan, orig_picks))
if (picks >= n_chan).any():
raise ValueError('All picks must be < n_channels (%d), got %r'
% (n_chan, orig_picks))
picks %= n_chan # ensure positive
if return_kind:
return picks, picked_ch_type_or_generic
return picks
def _picks_str_to_idx(info, picks, exclude, with_ref_meg, return_kind,
extra_repr, allow_empty, orig_picks):
"""Turn a list of str into ndarray of int."""
# special case for _picks_to_idx w/no info: shouldn't really happen
if isinstance(info, int):
raise ValueError('picks as str can only be used when measurement '
'info is available')
#
# first: check our special cases
#
picks_generic = list()
if len(picks) == 1:
if picks[0] in ('all', 'data', 'data_or_ica'):
if picks[0] == 'all':
use_exclude = info['bads'] if exclude == 'bads' else exclude
picks_generic = pick_channels(
info['ch_names'], info['ch_names'], exclude=use_exclude)
elif picks[0] == 'data':
picks_generic = _pick_data_channels(info, exclude=exclude,
with_ref_meg=with_ref_meg)
elif picks[0] == 'data_or_ica':
picks_generic = _pick_data_or_ica(info, exclude=exclude)
if len(picks_generic) == 0 and orig_picks is None and \
not allow_empty:
raise ValueError('picks (%s) yielded no channels, consider '
'passing picks explicitly'
% (repr(orig_picks) + extra_repr,))
#
# second: match all to channel names
#
bad_name = None
picks_name = list()
for pick in picks:
try:
picks_name.append(info['ch_names'].index(pick))
except ValueError:
bad_name = pick
break
#
# third: match all to types
#
bad_type = None
picks_type = list()
kwargs = dict(meg=False)
meg, fnirs = set(), set()
for pick in picks:
if pick in _PICK_TYPES_KEYS:
kwargs[pick] = True
elif pick in _MEG_CH_TYPES_SPLIT:
meg |= {pick}
elif pick in _FNIRS_CH_TYPES_SPLIT:
fnirs |= {pick}
else:
bad_type = pick
break
else:
# triage MEG and FNIRS, which are complicated due to non-bool entries
extra_picks = set()
if len(meg) > 0 and not kwargs.get('meg', False):
# easiest just to iterate
for use_meg in meg:
extra_picks |= set(pick_types(
info, meg=use_meg, ref_meg=False, exclude=exclude))
if len(fnirs) > 0 and not kwargs.get('fnirs', False):
# if it has two entries, it's both, otherwise it's just one
kwargs['fnirs'] = True if len(fnirs) == 2 else list(fnirs)[0]
picks_type = pick_types(info, exclude=exclude, **kwargs)
if len(extra_picks) > 0:
picks_type = sorted(set(picks_type) | set(extra_picks))
#
# finally: ensure we have exactly one usable list
#
all_picks = (picks_generic, picks_name, picks_type)
any_found = [len(p) > 0 for p in all_picks]
if sum(any_found) == 0:
if not allow_empty:
raise ValueError(
'picks (%s) could not be interpreted as '
'channel names (no channel "%s"), channel types (no '
'type "%s"), or a generic type (just "all" or "data")'
% (repr(orig_picks) + extra_repr, bad_name, bad_type))
picks = np.array([], int)
elif sum(any_found) > 1:
raise RuntimeError('Some channel names are ambiguously equivalent to '
'channel types, cannot use string-based '
'picks for these')
else:
picks = np.array(all_picks[np.where(any_found)[0][0]])
if return_kind:
picked_ch_type_or_generic = not len(picks_name)
return picks, picked_ch_type_or_generic
return picks
def _pick_inst(inst, picks, exclude, copy=True):
"""Return an instance with picked and excluded channels."""
if copy is True:
inst = inst.copy()
picks = _picks_to_idx(inst.info, picks, exclude=[])
pick_names = [inst.info['ch_names'][pick] for pick in picks]
inst.pick_channels(pick_names)
if exclude == 'bads':
exclude = [ch for ch in inst.info['bads']
if ch in inst.info['ch_names']]
if exclude is not None:
inst.drop_channels(exclude)
return inst
def _get_channel_types(info, picks=None, unique=False, only_data_chs=False):
"""Get the data channel types in an info instance."""
none = 'data' if only_data_chs else 'all'
picks = _picks_to_idx(info, picks, none, (), allow_empty=False)
ch_types = [channel_type(info, pick) for pick in picks]
if only_data_chs:
ch_types = [ch_type for ch_type in ch_types
if ch_type in _DATA_CH_TYPES_SPLIT]
return set(ch_types) if unique is True else ch_types
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
import time
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
#~ AFTER loading obstacles
rLegId = 'hrp2_rleg_rom'
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,0,-0.105]
rLegNormal = [0,0,1]
rLegx = 0.09; rLegy = 0.05
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 10000, "manipulability", 0.1)
lLegId = 'hrp2_lleg_rom'
lLeg = 'LLEG_JOINT0'
lLegx = 0.09; lLegy = 0.05
lLegOffset = [0,0,-0.105]
lLegNormal = [0,0,1]
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,lLegNormal, lLegx, lLegy, 10000, "manipulability", 0.1)
#~ AFTER loading obstacles
larmId = 'hrp2_larm_rom'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
lArmOffset = [0,0,-0.1075]
lArmNormal = [0,0,1]
lArmx = 0.024; lArmy = 0.024
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, "manipulability", 0.1, "_6_DOF", False,grasp = True)
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, "manipulability", 0.1, "_6_DOF", True)
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, "manipulability", 0.1, "_6_DOF")
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, "manipulability", 0.1, "_6_DOF")
rarmId = 'hrp2_rarm_rom'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
rArmOffset = [0,0,-0.1075]
rArmNormal = [0,0,1]
rArmx = 0.024; rArmy = 0.024
#disabling collision for hook
#~ fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 10000, "manipulability", 0.1, "_6_DOF", False,grasp = True)
#~ fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 10000, "manipulability", 0.1, "_6_DOF", True)
#~ fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 10000, "manipulability", 0.1, "_6_DOF")
fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
limbsCOMConstraints = { rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : 'RLEG_JOINT5'},
lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : 'LLEG_JOINT5'},
rarmId : {'file': "hrp2/RA_com.ineq", 'effector' : rHand},
larmId : {'file': "hrp2/LA_com.ineq", 'effector' : lHand} }
# -*- coding: latin1 -*-
################################################################################################
import datetime, sys, time, json, os, os.path, shutil, struct, random
import subprocess
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
######################################################################################################################################################################
## Status - Version 1 - Compute NMI
##
## # INPUT: Files with the detected communities and the ground truth
## # OUTPUT:
## 		- Files with the NMI scores
######################################################################################################################################################################
######################################################################################################################################################################
#
# Initial computations over the data set that was read.
#
######################################################################################################################################################################
def nmi_alg(communities,output,singletons,net,ground_truth):
communities = communities+singletons+"/"+net+"/"
ground_truth = ground_truth+singletons+"/"
output = output+singletons+"/"
if not os.path.exists(output):
os.makedirs(output)
print
print("######################################################################")
print ("Os arquivos serão armazenados em: "+str(output))
print("######################################################################")
if not os.path.isfile(str(output)+str(net)+".json"):
result={}
		for threshold in range(51): #Algorithm parameter
threshold+=1
			i=0 #Pointer to the ego
if os.path.isdir(str(communities)+str(threshold)+"/"):
print("######################################################################")
				score = [] # Store the NMI of each ego; later saved as one line per threshold in the result.json file
for file in os.listdir(str(communities)+str(threshold)+"/"):
i+=1
if os.path.isfile(str(ground_truth)+str(file)):
try:
execute = subprocess.Popen(["/home/amaury/algoritmos/Metricas/mutual3/mutual", str(communities)+str(threshold)+"/"+str(file),str(ground_truth)+str(file)], stdout=subprocess.PIPE)
value = execute.communicate()[0]
a = value.split('\t')
nmi = float(a[1])
print ("NMI para a rede: "+str(net)+" - THRESHOLD: "+str(threshold)+" - ego: "+str(i)+": "+str(nmi))
score.append(nmi)
except Exception as e:
print e
else:
print ("ERROR - EGO: "+str(i)+" - Arquivo de ground truth não encontrado:" +(str(ground_truth)+str(file)))
print("######################################################################")
result[threshold] = score
else:
print ("Diretório com as comunidades não encontrado: "+str(communities)+str(threshold))
if len(result) > 0:
with open(str(output)+str(net)+".json", 'w') as f:
f.write(json.dumps(result))
else:
print ("Arquivo de destino já existe: "+str(output)+str(net)+".json")
print("######################################################################")
######################################################################################################################################################################
#
# Main method of the program.
# Prompts for the detection algorithm and computes the NMI for every ego network.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
os.system('clear')
print "################################################################################"
print" "
print" Algoritmo para cálculo da métrica NMI "
print" "
print"#################################################################################"
singletons = "full"
#######################################################################
#######################################################################
print("######################################################################")
print
print "Algoritmo utilizado na detecção das comunidades"
print
print" 1 - COPRA - Without Weight - K=10"
print" 2 - COPRA - Without Weight - K=2-20"
print" 3 - OSLOM - Without Weight - K=5,10,50"
print" 4 - OSLOM - Without Weight - K=50"
print" 5 - RAK - Without Weight"
#
# print" 6 - INFOMAP - Partition"
print" 6 - INFOMAP - Partition - Without Weight"
print
op2 = int(raw_input("Escolha uma opção acima: "))
#
if op2 == 1:
alg = "copra_without_weight_k10"
elif op2 == 2:
alg = "copra_without_weight"
elif op2 == 3:
alg = "oslom_without_weight"
elif op2 == 4:
alg = "oslom_without_weight_k50"
elif op2 == 5:
alg = "rak_without_weight"
elif op2 == 6:
alg = "infomap_without_weight"
else:
alg = ""
print("Opção inválida! Saindo...")
sys.exit()
print ("\n")
print
print"#################################################################################"
print
######################################################################################################################
	#####Switch these lines to Dropbox when running in the production environment
ground_truth = "/home/amaury/dataset/ground_truth/lists_users_TXT_hashmap/"
communities1 = "/home/amaury/communities_hashmap/graphs_with_ego/"+str(alg)+"/"
# communities2 = "/home/amaury/communities_hashmap/graphs_without_ego/"+str(alg)+"/"
output1 = "/home/amaury/Dropbox/evaluation_hashmap/with_ground_truth/nmi/graphs_with_ego/"+str(alg)+"/"
# output2 = "/home/amaury/Dropbox/evaluation_hashmap/with_ground_truth/nmi/graphs_without_ego/"+str(alg)+"/"
nets = ["n1","n2","n3","n4"]
	for net in nets: # For each generated ego network
print
print ("Calculando NMI nas comunidades detectadas na rede: "+str(net)+" - COM o ego - Algoritmo: "+str(alg))
nmi_alg(communities1,output1,singletons,net,ground_truth)
print
#	print ("Computing NMI on the communities detected in network: "+str(net)+" - WITHOUT the ego - Algorithm: "+str(alg))
# nmi_alg(communities2,output2,singletons,net,ground_truth)
print("######################################################################")
print
print("######################################################################")
print("Script finalizado!")
print("######################################################################\n")
######################################################################################################################################################################
#
# START OF THE PROGRAM
#
######################################################################################################################################################################
######################################################################################################################
if __name__ == "__main__": main()
"""
Override the default Cmd from cmd2 module with a subcommand feature
"""
from cmd2 import Cmd as Cmd2, EmptyStatement, ParsedString, Statekeeper
# The paste-buffer helpers below are used by redirect_streams/restore_streams;
# they are module-level names in the cmd2 0.6.x series this code was written
# against (assumption about the installed cmd2 version).
from cmd2 import can_clip, get_paste_buffer, write_to_paste_buffer
import pyparsing
import inspect
import sys
import datetime
import re
import StringIO
import subprocess
import os
import tempfile  # used by redirect_streams when redirecting output to the paste buffer
try:
from termcolor import colored
except ImportError:
def colored(string, a=None, b=None, attrs=None):
return string
# Clean up Cmd
class BaseCmd(object):
    pass
for a in dir(Cmd2):
if a.startswith('do_'):
if a != 'do_help':
f = getattr(Cmd2, a)
delattr(Cmd2, a)
setattr(BaseCmd, a, f)
class Cmd(Cmd2, object):
case_insensitive = False
debug = True
def colored(self, val, a=None, b=None, attrs=None):
if self.stdout == self.initial_stdout:
return colored(val, a, b , attrs)
return val
def __init__(self):
Cmd2.__init__(self)
self.stderr = sys.stderr
self.initial_stdin = sys.stdin
self.initial_stderr = sys.stderr
self.doc_leader = colored(self.__class__.__name__, 'white', attrs=['bold']) + ' help'
self.doc_header = "Commands (type help <topic>):"
self.maxcol = 120
# Init functions for a subCmd
def subCmd(class_, obj, command):
if inspect.isclass(obj):
obj = obj()
def object_(self):
return obj
def do_(self, args):
kept_state = Statekeeper(obj, ('stdout', 'stdin', 'stderr',))
try:
obj.stdout = self.stdout
obj.stdin = self.stdin
obj.stderr = self.stderr
if not args:
self.printError('*** No command\n')
return obj.do_help("", stdout = self.stderr)
else:
if isinstance(args, ParsedString):
args = args.parsed.args
return obj.onecmd_plus_hooks(args)
finally:
kept_state.restore()
def complete_(self, text, line, start_index, end_index):
if hasattr(obj, 'subCmds'):
inCmdList=line.split()
for cmd in obj.subCmds:
if inCmdList[len(inCmdList)-1]==cmd:
return obj.subCmds[cmd].completenames(text)
return obj.completenames(text)
else:
return obj.completenames(text)
def help_(self):
kept_state = Statekeeper(obj, ('stdout', 'stdin', 'stderr'))
try:
obj.stdout = self.stdout
obj.stdin = self.stdin
obj.stderr = self.stderr
return obj.onecmd('help')
finally:
kept_state.restore()
setattr(class_, 'do_' + command, do_)
setattr(class_, 'complete_' + command, complete_)
setattr(class_, 'help_' + command, help_)
setattr(class_, 'object_' + command, object_)
return obj
if hasattr(self, 'subCmds'):
for cmd in self.subCmds:
subCmd(self.__class__, self.subCmds[cmd], cmd)
def printError(self, errmsg):
self.stderr.write(self.colored(str(errmsg), 'red'))
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.printError('*** Command not found: %s\n'%line)
def do_help(self, arg, stdout = None):
'List available commands with "help" or detailed help with "help cmd".'
if stdout == None:
stdout = self.stdout
# Call help for command
def callHelp(arg, depth = True, stdout = None, stderr = None):
if stdout == None:
stdout = self.stdout
if stderr == None:
stderr = self.stderr
if not depth:
try:
func = getattr(self, 'object_' + arg)
doc = func().__doc__
if doc:
stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
try:
func = getattr(self, 'help_' + arg)
func()
except AttributeError:
try:
doc = getattr(self, 'do_' + arg).__doc__
if doc:
stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
stderr.write(self.colored("%s\n"%str(self.nohelp % (arg,)), 'red'))
# Create a help sum
def sumHelp(arg, column):
keepstate = Statekeeper(self, ('stdout', 'stderr',))
keepsys = Statekeeper(sys, ('stdout', 'stderr',))
try:
data = None
stdout = StringIO.StringIO()
# Replace stderr and stdout
sys.stdout = self.stdout = stdout
self.stderr = sys.stderr = StringIO.StringIO()
callHelp(arg, False, stdout=stdout)
data = stdout.getvalue()
#if not data or len(data) == 0:
# data = str(self.nohelp % (arg,))
data = data.split('\n\n', 1)[0].replace("\n", " ")
return splitHelp(data, column)
finally:
# Restore
keepstate.restore()
keepsys.restore()
def splitHelp(data, columnSize):
rows = []
for n in range(0, len(data), columnSize):
rows.append(data[n: n + columnSize])
return rows
# Forward help
args = re.split('\s+', arg, 1)
if len(args) > 1:
if hasattr(self, 'object_' + args[0]):
funct = getattr(self, 'object_' + args[0])
obj = funct()
return obj.onecmd('help ' + args[1])
else:
arg = args[0]
if arg:
return callHelp(arg, stdout=stdout)
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
# Print
if self.ruler:
stdout.write("%s\n"%str(self.ruler * self.maxcol))
stdout.write("%s\n"%str(self.doc_leader))
if self.ruler:
stdout.write("%s\n"%str(self.ruler * self.maxcol))
cmdMaxCol = 30
helpMaxCol = self.maxcol - cmdMaxCol - 3
for cmd in cmds_doc:
rows = sumHelp(cmd, helpMaxCol)
stdout.write(str('{0:<'+str(cmdMaxCol)+'}| {1:<' + str(helpMaxCol) + '}\n').format(cmd, rows[0]))
for n in range(1, len(rows)):
stdout.write(str('{0:<'+str(cmdMaxCol)+'}| {1:<' + str(helpMaxCol) + '}\n').format("", rows[n]))
def parsed(self, raw, **kwargs):
if isinstance(raw, ParsedString):
p = raw
else:
# preparse is an overridable hook; default makes no changes
s = self.preparse(raw, **kwargs)
s = self.inputParser.transformString(s.lstrip())
s = self.commentGrammars.transformString(s)
for (shortcut, expansion) in self.shortcuts:
if s.lower().startswith(shortcut):
s = s.replace(shortcut, expansion + ' ', 1)
break
result = self.parser.parseString(s)
if isinstance(result.command, pyparsing.ParseResults):
result.command = result.command[0]
if isinstance(result.multilineCommand, pyparsing.ParseResults):
result.multilineCommand = result.multilineCommand[0]
result['raw'] = raw
result['command'] = result.multilineCommand or result.command
result = self.postparse(result)
p = ParsedString(result.args)
p.parsed = result
p.parser = self.parsed
for (key, val) in kwargs.items():
p.parsed[key] = val
return p
def _init_parser(self):
#outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')
outputParser = (pyparsing.Literal(self.redirector *2) | \
(pyparsing.WordStart() + self.redirector) | \
pyparsing.Regex('[^=]' + self.redirector))('output')
inputMark = pyparsing.Literal('<')('input')
terminatorParser = pyparsing.Or([(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')
stringEnd = pyparsing.stringEnd ^ '\nEOF'
self.multilineCommand = pyparsing.Or([pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')
oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')
pipe = pyparsing.Keyword('|', identChars='|')
self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')
doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString
afterElements = \
pyparsing.Optional(inputMark + pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('inputFrom')) + \
pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \
pyparsing.Optional(outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('outputTo'))
if self.case_insensitive:
self.multilineCommand.setParseAction(lambda x: x[0].lower())
oneLineCommand.setParseAction(lambda x: x[0].lower())
if self.blankLinesAllowed:
self.blankLineTerminationParser = pyparsing.NoMatch
else:
self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')
self.blankLineTerminator.setResultsName('terminator')
self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')
self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('args') + terminatorParser)('statement') +
pyparsing.SkipTo(outputParser ^ inputMark ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('suffix') + afterElements)
self.multilineParser.ignore(self.commentInProgress)
self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser ^ inputMark, ignore=doNotParse).setParseAction(lambda x:x[0].strip())('args'))('statement') +
pyparsing.Optional(terminatorParser) + afterElements)
#self.multilineParser = self.multilineParser.setResultsName('multilineParser')
#self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')
self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')
self.parser = self.prefixParser + (
stringEnd |
self.multilineParser |
self.singleLineParser |
self.blankLineTerminationParser |
self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)
)
self.parser.ignore(self.commentGrammars)
fileName = pyparsing.Word(self.legalChars + '/\\')
inputFrom = fileName('inputFrom')
# a not-entirely-satisfactory way of distinguishing < as in "import from" from <
# as in "lesser than"
self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \
pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')
self.inputParser.ignore(self.commentInProgress)
def redirect_streams(self, statement):
self.kept_state = Statekeeper(self, ('stdout','stdin','stderr',))
self.kept_sys = Statekeeper(sys, ('stdout','stdin','stderr',))
if statement.parsed.pipeTo:
self.redirect = subprocess.Popen(statement.parsed.pipeTo, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
sys.stdout = self.stdout = self.redirect.stdin
elif statement.parsed.output:
if (not statement.parsed.outputTo) and (not can_clip):
raise EnvironmentError('Cannot redirect to paste buffer; install ``xclip`` and re-run to enable')
if statement.parsed.outputTo:
mode = 'w'
if statement.parsed.output == 2 * self.redirector:
mode = 'a'
sys.stdout = self.stdout = open(os.path.expanduser(statement.parsed.outputTo), mode)
else:
sys.stdout = self.stdout = tempfile.TemporaryFile(mode="w+")
if statement.parsed.output == '>>':
self.stdout.write(get_paste_buffer())
if statement.parsed.input:
if (not statement.parsed.inputFrom) and (not can_clip):
raise EnvironmentError('Cannot redirect from paste buffer; install ``xclip`` and re-run to enable')
if statement.parsed.inputFrom:
mode = 'r'
sys.stdin = self.stdin = open(os.path.expanduser(statement.parsed.inputFrom), mode)
else:
self.stdin.write(get_paste_buffer())
def restore_streams(self, statement):
if self.kept_state:
if statement.parsed.output:
if not statement.parsed.outputTo:
self.stdout.seek(0)
write_to_paste_buffer(self.stdout.read())
self.stdout.close()
elif statement.parsed.pipeTo:
for result in self.redirect.communicate():
self.kept_state.stdout.write(result or '')
self.stdout.close()
if statement.parsed.input:
self.stdin.close()
self.kept_state.restore()
self.kept_sys.restore()
self.kept_state = None
def pseudo_raw_input(self, prompt):
"""copied from cmd's cmdloop; like raw_input, but accounts for changed stdin, stdout"""
if self.use_rawinput:
line = raw_input(prompt)
else:
sys.stderr.write(prompt)
sys.stderr.flush()
line = self.stdin.readline()
if not len(line):
raise EOFError()
else:
if line[-1] == '\n': # this was always true in Cmd
line = line[:-1]
return line
def _cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
# An almost perfect copy from Cmd; however, the pseudo_raw_input portion
# has been split out so that it can be called separately
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while (stop!=1 or stop!=True):
try:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
line = self.pseudo_raw_input(self.prompt)
if (self.echo) and (isinstance(self.stdin, file)):
self.stdout.write(line + '\n')
stop = self.onecmd_plus_hooks(line)
except EOFError:
pass
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
return stop
def run_commands_at_invocation(self, callargs):
for initial_command in callargs:
code= self.onecmd_plus_hooks(initial_command + '\n')
if code:
return code
def join_args(self, args):
ret = ""
for arg in args:
if " " in arg:
ret += "'%s' " % arg
else:
ret += "%s " % arg
return ret.strip()
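    # Example (added for clarity): join_args(['ls', '-l', 'my file']) returns
    # "ls -l 'my file'" -- arguments containing spaces are re-quoted with single quotes.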
def onecmd_plus_hooks(self, line):
# The outermost level of try/finally nesting can be condensed once
# Python 2.4 support can be dropped.
stop = 0
try:
try:
statement = None
statement = self.complete_statement(line)
(stop, statement) = self.postparsing_precmd(statement)
if stop:
return self.postparsing_postcmd(stop)
if statement.parsed.command not in self.excludeFromHistory:
self.history.append(statement.parsed.raw)
try:
self.redirect_streams(statement)
timestart = datetime.datetime.now()
statement = self.precmd(statement)
stop = self.onecmd(statement)
stop = self.postcmd(stop, statement)
if self.timing:
self.pfeedback('Elapsed: %s' % str(datetime.datetime.now() - timestart))
except KeyboardInterrupt:
print "\nExiting command..."
except BaseException as e:
self.perror(str(e), statement)
finally:
self.restore_streams(statement)
except pyparsing.ParseException as e:
print "File parsing error with line: "+line.rstrip()
except EmptyStatement:
return 0
except Exception as e:
self.perror(str(e), statement)
except Exception as e:
self.perror(str(e))
finally:
return self.postparsing_postcmd(stop)
class CmdUtils(BaseCmd, Cmd):
"""Utility box"""
def __init__(self):
super(CmdUtils, self).__init__()
self.doc_leader = colored("Utility box", 'white', attrs=['bold']) + ' help'
class CoreGlobal:
def set_globals(self, api, login=None, password=None, apikeys=None):
self.api=api
if login is not None:
self.login=login
if password is not None:
self.password=password
if apikeys is not None:
self.apikeys=apikeys
| 0.03265 |
#! /usr/bin/env python
"""Solitaire game, much like the one that comes with MS Windows.
Limitations:
- No cute graphical images for the playing cards faces or backs.
- No scoring or timer.
- No undo.
- No option to turn 3 cards at a time.
- No keyboard shortcuts.
- Less fancy animation when you win.
- The determination of which stack you drag to is more relaxed.
Apology:
I'm not much of a card player, so my terminology in these comments may
at times be a little unusual. If you have suggestions, please let me
know!
"""
# Imports
import math
import random
from Tkinter import *
from Canvas import Rectangle, CanvasText, Group, Window
# Fix a bug in Canvas.Group as distributed in Python 1.4. The
# distributed bind() method is broken. Rather than asking you to fix
# the source, we fix it here by deriving a subclass:
class Group(Group):
def bind(self, sequence=None, command=None):
return self.canvas.tag_bind(self.id, sequence, command)
# Constants determining the size and lay-out of cards and stacks. We
# work in a "grid" where each card/stack is surrounded by MARGIN
# pixels of space on each side, so adjacent stacks are separated by
# 2*MARGIN pixels. OFFSET is the offset used for displaying the
# face down cards in the row stacks.
CARDWIDTH = 100
CARDHEIGHT = 150
MARGIN = 10
XSPACING = CARDWIDTH + 2*MARGIN
YSPACING = CARDHEIGHT + 4*MARGIN
OFFSET = 5
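# Worked example of the layout numbers above (comment added for clarity): with
# CARDWIDTH = 100 and MARGIN = 10, XSPACING is 120 and YSPACING is 190, so the
# seven row stacks (NROWS = 7, defined below) need a canvas 7 * 120 = 840 pixels wide.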
# The background color, green to look like a playing table. The
# standard green is way too bright, and dark green is way too dark, so
# we use something in between. (There are a few more colors that
# could be customized, but they are less controversial.)
BACKGROUND = '#070'
# Suits and colors. The values of the symbolic suit names are the
# strings used to display them (you change these and VALNAMES to
# internationalize the game). The COLOR dictionary maps suit names to
# colors (red and black) which must be Tk color names. The keys() of
# the COLOR dictionary conveniently provides us with a list of all
# suits (in arbitrary order).
HEARTS = 'Heart'
DIAMONDS = 'Diamond'
CLUBS = 'Club'
SPADES = 'Spade'
RED = 'red'
BLACK = 'black'
COLOR = {}
for s in (HEARTS, DIAMONDS):
COLOR[s] = RED
for s in (CLUBS, SPADES):
COLOR[s] = BLACK
ALLSUITS = COLOR.keys()
NSUITS = len(ALLSUITS)
# Card values are 1-13. We also define symbolic names for the picture
# cards. ALLVALUES is a list of all card values.
ACE = 1
JACK = 11
QUEEN = 12
KING = 13
ALLVALUES = range(1, 14) # (one more than the highest value)
NVALUES = len(ALLVALUES)
# VALNAMES is a list that maps a card value to string. It contains a
# dummy element at index 0 so it can be indexed directly with the card
# value.
VALNAMES = ["", "A"] + map(str, range(2, 11)) + ["J", "Q", "K"]
# Solitaire constants. The only one I can think of is the number of
# row stacks.
NROWS = 7
# The rest of the program consists of class definitions. These are
# further described in their documentation strings.
class Card:
"""A playing card.
A card doesn't record to which stack it belongs; only the stack
records this (it turns out that we always know this from the
context, and this saves a ``double update'' with potential for
inconsistencies).
Public methods:
moveto(x, y) -- move the card to an absolute position
moveby(dx, dy) -- move the card by a relative offset
tkraise() -- raise the card to the top of its stack
showface(), showback() -- turn the card face up or down & raise it
Public read-only instance variables:
suit, value, color -- the card's suit, value and color
face_shown -- true when the card is shown face up, else false
Semi-public read-only instance variables (XXX should be made
private):
group -- the Canvas.Group representing the card
x, y -- the position of the card's top left corner
Private instance variables:
__back, __rect, __text -- the canvas items making up the card
(To show the card face up, the text item is placed in front of
rect and the back is placed behind it. To show it face down, this
is reversed. The card is created face down.)
"""
def __init__(self, suit, value, canvas):
"""Card constructor.
Arguments are the card's suit and value, and the canvas widget.
The card is created at position (0, 0), with its face down
(adding it to a stack will position it according to that
stack's rules).
"""
self.suit = suit
self.value = value
self.color = COLOR[suit]
self.face_shown = 0
self.x = self.y = 0
self.group = Group(canvas)
text = "%s %s" % (VALNAMES[value], suit)
self.__text = CanvasText(canvas, CARDWIDTH/2, 0,
anchor=N, fill=self.color, text=text)
self.group.addtag_withtag(self.__text)
self.__rect = Rectangle(canvas, 0, 0, CARDWIDTH, CARDHEIGHT,
outline='black', fill='white')
self.group.addtag_withtag(self.__rect)
self.__back = Rectangle(canvas, MARGIN, MARGIN,
CARDWIDTH-MARGIN, CARDHEIGHT-MARGIN,
outline='black', fill='blue')
self.group.addtag_withtag(self.__back)
def __repr__(self):
"""Return a string for debug print statements."""
return "Card(%s, %s)" % (`self.suit`, `self.value`)
def moveto(self, x, y):
"""Move the card to absolute position (x, y)."""
self.moveby(x - self.x, y - self.y)
def moveby(self, dx, dy):
"""Move the card by (dx, dy)."""
self.x = self.x + dx
self.y = self.y + dy
self.group.move(dx, dy)
def tkraise(self):
"""Raise the card above all other objects in its canvas."""
self.group.tkraise()
def showface(self):
"""Turn the card's face up."""
self.tkraise()
self.__rect.tkraise()
self.__text.tkraise()
self.face_shown = 1
def showback(self):
"""Turn the card's face down."""
self.tkraise()
self.__rect.tkraise()
self.__back.tkraise()
self.face_shown = 0
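# Usage sketch (added comment; 'canvas' is assumed to be an existing Tkinter Canvas):
#   card = Card(SPADES, ACE, canvas)   # created face down at (0, 0)
#   card.moveto(100, 50)
#   card.showface()                    # flip it face up and raise it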
class Stack:
"""A generic stack of cards.
This is used as a base class for all other stacks (e.g. the deck,
the suit stacks, and the row stacks).
Public methods:
add(card) -- add a card to the stack
delete(card) -- delete a card from the stack
showtop() -- show the top card (if any) face up
deal() -- delete and return the top card, or None if empty
Method that subclasses may override:
position(card) -- move the card to its proper (x, y) position
The default position() method places all cards at the stack's
own (x, y) position.
userclickhandler(), userdoubleclickhandler() -- called to do
subclass specific things on single and double clicks
The default user (single) click handler shows the top card
face up. The default user double click handler calls the user
single click handler.
usermovehandler(cards) -- called to complete a subpile move
The default user move handler moves all moved cards back to
their original position (by calling the position() method).
Private methods:
clickhandler(event), doubleclickhandler(event),
motionhandler(event), releasehandler(event) -- event handlers
The default event handlers turn the top card of the stack with
its face up on a (single or double) click, and also support
moving a subpile around.
startmoving(event) -- begin a move operation
finishmoving() -- finish a move operation
"""
def __init__(self, x, y, game=None):
"""Stack constructor.
Arguments are the stack's nominal x and y position (the top
left corner of the first card placed in the stack), and the
game object (which is used to get the canvas; subclasses use
the game object to find other stacks).
"""
self.x = x
self.y = y
self.game = game
self.cards = []
self.group = Group(self.game.canvas)
self.group.bind('<1>', self.clickhandler)
self.group.bind('<Double-1>', self.doubleclickhandler)
self.group.bind('<B1-Motion>', self.motionhandler)
self.group.bind('<ButtonRelease-1>', self.releasehandler)
self.makebottom()
def makebottom(self):
pass
def __repr__(self):
"""Return a string for debug print statements."""
return "%s(%d, %d)" % (self.__class__.__name__, self.x, self.y)
# Public methods
def add(self, card):
self.cards.append(card)
card.tkraise()
self.position(card)
self.group.addtag_withtag(card.group)
def delete(self, card):
self.cards.remove(card)
card.group.dtag(self.group)
def showtop(self):
if self.cards:
self.cards[-1].showface()
def deal(self):
if not self.cards:
return None
card = self.cards[-1]
self.delete(card)
return card
# Subclass overridable methods
def position(self, card):
card.moveto(self.x, self.y)
def userclickhandler(self):
self.showtop()
def userdoubleclickhandler(self):
self.userclickhandler()
def usermovehandler(self, cards):
for card in cards:
self.position(card)
# Event handlers
def clickhandler(self, event):
self.finishmoving() # In case we lost an event
self.userclickhandler()
self.startmoving(event)
def motionhandler(self, event):
self.keepmoving(event)
def releasehandler(self, event):
self.keepmoving(event)
self.finishmoving()
def doubleclickhandler(self, event):
self.finishmoving() # In case we lost an event
self.userdoubleclickhandler()
self.startmoving(event)
# Move internals
moving = None
def startmoving(self, event):
self.moving = None
tags = self.game.canvas.gettags('current')
for i in range(len(self.cards)):
card = self.cards[i]
if card.group.tag in tags:
break
else:
return
if not card.face_shown:
return
self.moving = self.cards[i:]
self.lastx = event.x
self.lasty = event.y
for card in self.moving:
card.tkraise()
def keepmoving(self, event):
if not self.moving:
return
dx = event.x - self.lastx
dy = event.y - self.lasty
self.lastx = event.x
self.lasty = event.y
if dx or dy:
for card in self.moving:
card.moveby(dx, dy)
def finishmoving(self):
cards = self.moving
self.moving = None
if cards:
self.usermovehandler(cards)
class Deck(Stack):
"""The deck is a stack with support for shuffling.
New methods:
fill() -- create the playing cards
shuffle() -- shuffle the playing cards
A single click moves the top card to the game's open deck and
moves it face up; if we're out of cards, it moves the open deck
back to the deck.
"""
def makebottom(self):
bottom = Rectangle(self.game.canvas,
self.x, self.y,
self.x+CARDWIDTH, self.y+CARDHEIGHT,
outline='black', fill=BACKGROUND)
self.group.addtag_withtag(bottom)
def fill(self):
for suit in ALLSUITS:
for value in ALLVALUES:
self.add(Card(suit, value, self.game.canvas))
def shuffle(self):
n = len(self.cards)
newcards = []
for i in randperm(n):
newcards.append(self.cards[i])
self.cards = newcards
def userclickhandler(self):
opendeck = self.game.opendeck
card = self.deal()
if not card:
while 1:
card = opendeck.deal()
if not card:
break
self.add(card)
card.showback()
else:
self.game.opendeck.add(card)
card.showface()
def randperm(n):
"""Function returning a random permutation of range(n)."""
r = range(n)
x = []
while r:
i = random.choice(r)
x.append(i)
r.remove(i)
return x
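# Note (added): randperm is a simple selection shuffle; e.g. randperm(3) returns one of
# the six orderings of [0, 1, 2]. On later Pythons the same effect could be obtained with
# random.shuffle, but the explicit version is kept here for clarity.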
class OpenStack(Stack):
def acceptable(self, cards):
return 0
def usermovehandler(self, cards):
card = cards[0]
stack = self.game.closeststack(card)
if not stack or stack is self or not stack.acceptable(cards):
Stack.usermovehandler(self, cards)
else:
for card in cards:
self.delete(card)
stack.add(card)
self.game.wincheck()
def userdoubleclickhandler(self):
if not self.cards:
return
card = self.cards[-1]
if not card.face_shown:
self.userclickhandler()
return
for s in self.game.suits:
if s.acceptable([card]):
self.delete(card)
s.add(card)
self.game.wincheck()
break
class SuitStack(OpenStack):
def makebottom(self):
bottom = Rectangle(self.game.canvas,
self.x, self.y,
self.x+CARDWIDTH, self.y+CARDHEIGHT,
outline='black', fill='')
def userclickhandler(self):
pass
def userdoubleclickhandler(self):
pass
def acceptable(self, cards):
if len(cards) != 1:
return 0
card = cards[0]
if not self.cards:
return card.value == ACE
topcard = self.cards[-1]
return card.suit == topcard.suit and card.value == topcard.value + 1
class RowStack(OpenStack):
def acceptable(self, cards):
card = cards[0]
if not self.cards:
return card.value == KING
topcard = self.cards[-1]
if not topcard.face_shown:
return 0
return card.color != topcard.color and card.value == topcard.value - 1
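    # Added comment: this encodes the usual Klondike rule -- a moved card must be one
    # rank lower and of the opposite color, e.g. a red 7 may go on a black 8, and only
    # a King may start an empty row stack.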
def position(self, card):
y = self.y
for c in self.cards:
if c == card:
break
if c.face_shown:
y = y + 2*MARGIN
else:
y = y + OFFSET
card.moveto(self.x, y)
class Solitaire:
def __init__(self, master):
self.master = master
self.canvas = Canvas(self.master,
background=BACKGROUND,
highlightthickness=0,
width=NROWS*XSPACING,
height=3*YSPACING + 20 + MARGIN)
self.canvas.pack(fill=BOTH, expand=TRUE)
self.dealbutton = Button(self.canvas,
text="Deal",
highlightthickness=0,
background=BACKGROUND,
activebackground="green",
command=self.deal)
Window(self.canvas, MARGIN, 3*YSPACING + 20,
window=self.dealbutton, anchor=SW)
x = MARGIN
y = MARGIN
self.deck = Deck(x, y, self)
x = x + XSPACING
self.opendeck = OpenStack(x, y, self)
x = x + XSPACING
self.suits = []
for i in range(NSUITS):
x = x + XSPACING
self.suits.append(SuitStack(x, y, self))
x = MARGIN
y = y + YSPACING
self.rows = []
for i in range(NROWS):
self.rows.append(RowStack(x, y, self))
x = x + XSPACING
self.openstacks = [self.opendeck] + self.suits + self.rows
self.deck.fill()
self.deal()
def wincheck(self):
for s in self.suits:
if len(s.cards) != NVALUES:
return
self.win()
self.deal()
def win(self):
"""Stupid animation when you win."""
cards = []
for s in self.openstacks:
cards = cards + s.cards
while cards:
card = random.choice(cards)
cards.remove(card)
self.animatedmoveto(card, self.deck)
def animatedmoveto(self, card, dest):
for i in range(10, 0, -1):
dx, dy = (dest.x-card.x)/i, (dest.y-card.y)/i
card.moveby(dx, dy)
self.master.update_idletasks()
def closeststack(self, card):
closest = None
cdist = 999999999
# Since we only compare distances,
# we don't bother to take the square root.
for stack in self.openstacks:
dist = (stack.x - card.x)**2 + (stack.y - card.y)**2
if dist < cdist:
closest = stack
cdist = dist
return closest
def deal(self):
self.reset()
self.deck.shuffle()
for i in range(NROWS):
for r in self.rows[i:]:
card = self.deck.deal()
r.add(card)
for r in self.rows:
r.showtop()
def reset(self):
for stack in self.openstacks:
while 1:
card = stack.deal()
if not card:
break
self.deck.add(card)
card.showback()
# Main function, run when invoked as a stand-alone Python program.
def main():
root = Tk()
game = Solitaire(root)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
main()
| 0.030569 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import fmt_money, formatdate, format_time, now_datetime, \
get_url_to_form, get_url_to_list, flt
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from frappe.core.doctype.user.user import STANDARD_USERS
import frappe.desk.notifications
from erpnext.accounts.utils import get_balance_on
user_specific_content = ["calendar_events", "todo_list"]
from frappe.model.document import Document
class EmailDigest(Document):
def __init__(self, arg1, arg2=None):
super(EmailDigest, self).__init__(arg1, arg2)
self.from_date, self.to_date = self.get_from_to_date()
self.set_dates()
self._accounts = {}
self.currency = frappe.db.get_value("Company", self.company,
"default_currency")
def get_users(self):
"""get list of users"""
user_list = frappe.db.sql("""
select name, enabled from tabUser
where name not in ({})
and user_type != "Website User"
order by enabled desc, name asc""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
if self.recipient_list:
recipient_list = self.recipient_list.split("\n")
else:
recipient_list = []
for p in user_list:
p["checked"] = p["name"] in recipient_list and 1 or 0
frappe.response['user_list'] = user_list
def send(self):
# send email only to enabled users
valid_users = [p[0] for p in frappe.db.sql("""select name from `tabUser`
where enabled=1""")]
recipients = filter(lambda r: r in valid_users,
self.recipient_list.split("\n"))
original_user = frappe.session.user
if recipients:
for user_id in recipients:
frappe.set_user(user_id)
msg_for_this_receipient = self.get_msg_html()
if msg_for_this_receipient:
frappe.sendmail(
recipients=user_id,
subject="{frequency} Digest".format(frequency=self.frequency),
message=msg_for_this_receipient,
bulk=True,
reference_doctype = self.doctype,
reference_name = self.name,
unsubscribe_message = _("Unsubscribe from this Email Digest"))
frappe.set_user(original_user)
def get_msg_html(self):
"""Build email digest content"""
frappe.flags.ignore_account_permission = True
from erpnext.setup.doctype.email_digest.quotes import get_random_quote
context = frappe._dict()
context.update(self.__dict__)
self.set_title(context)
self.set_style(context)
self.set_accounting_cards(context)
context.events = self.get_calendar_events()
context.todo_list = self.get_todo_list()
context.notifications = self.get_notifications()
quote = get_random_quote()
context.quote = {"text": quote[0], "author": quote[1]}
if not (context.events or context.todo_list or context.notifications or context.cards):
return None
frappe.flags.ignore_account_permission = False
# style
return frappe.render_template("erpnext/setup/doctype/email_digest/templates/default.html",
context, is_path=True)
def set_title(self, context):
"""Set digest title"""
if self.frequency=="Daily":
context.title = _("Daily Reminders")
context.subtitle = _("Pending activities for today")
elif self.frequency=="Weekly":
context.title = _("This Week's Summary")
context.subtitle = _("Summary for this week and pending activities")
elif self.frequency=="Monthly":
context.title = _("This Month's Summary")
context.subtitle = _("Summary for this month and pending activities")
def set_style(self, context):
"""Set standard digest style"""
context.text_muted = '#8D99A6'
context.text_color = '#36414C'
context.h1 = 'margin-bottom: 30px; margin-top: 40px; font-weight: 400; font-size: 30px;'
context.h2 = 'margin-bottom: 30px; margin-top: -20px; font-weight: 400; font-size: 20px;'
context.label_css = '''display: inline-block; color: {text_muted};
padding: 3px 7px; margin-right: 7px;'''.format(text_muted = context.text_muted)
context.section_head = 'margin-top: 60px; font-size: 16px;'
context.line_item = 'padding: 5px 0px; margin: 0; border-bottom: 1px solid #d1d8dd;'
context.link_css = 'color: {text_color}; text-decoration: none;'.format(text_color = context.text_color)
def get_notifications(self):
"""Get notifications for user"""
notifications = frappe.desk.notifications.get_notifications()
notifications = sorted(notifications.get("open_count_doctype", {}).items(),
lambda a, b: 1 if a[1] < b[1] else -1)
notifications = [{"key": n[0], "value": n[1],
"link": get_url_to_list(n[0])} for n in notifications if n[1]]
return notifications
def get_calendar_events(self):
"""Get calendar events for given user"""
from frappe.desk.doctype.event.event import get_events
events = get_events(self.future_from_date.strftime("%Y-%m-%d"),
self.future_to_date.strftime("%Y-%m-%d")) or []
for i, e in enumerate(events):
e.starts_on_label = format_time(e.starts_on)
e.ends_on_label = format_time(e.ends_on) if e.ends_on else None
			e.date = formatdate(e.starts_on)
e.link = get_url_to_form("Event", e.name)
return events
def get_todo_list(self, user_id=None):
"""Get to-do list"""
if not user_id:
user_id = frappe.session.user
todo_list = frappe.db.sql("""select *
from `tabToDo` where (owner=%s or assigned_by=%s) and status="Open"
order by field(priority, 'High', 'Medium', 'Low') asc, date asc limit 20""",
(user_id, user_id), as_dict=True)
for t in todo_list:
t.link = get_url_to_form("ToDo", t.name)
return todo_list
def set_accounting_cards(self, context):
"""Create accounting cards if checked"""
cache = frappe.cache()
context.cards = []
for key in ("income", "expenses_booked", "income_year_to_date", "expense_year_to_date",
"invoiced_amount", "payables", "bank_balance"):
if self.get(key):
cache_key = "email_digest:card:{0}:{1}".format(self.company, key)
card = cache.get(cache_key)
if card:
card = eval(card)
else:
card = frappe._dict(getattr(self, "get_" + key)())
# format values
if card.last_value:
card.diff = int(flt(card.value - card.last_value) / card.last_value * 100)
if card.diff < 0:
card.diff = str(card.diff)
card.gain = False
else:
card.diff = "+" + str(card.diff)
card.gain = True
card.last_value = self.fmt_money(card.last_value)
card.value = self.fmt_money(card.value)
cache.setex(cache_key, card, 24 * 60 * 60)
context.cards.append(card)
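	# Illustrative example (added comment, figures are made up): if card.value is 1200
	# and card.last_value is 1000, the block above yields card.diff == "+20" and
	# card.gain == True, i.e. a 20% increase over the previous period.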
def get_income(self):
"""Get income for given period"""
income, past_income = self.get_period_amounts(self.get_root_type_accounts("income"))
return {
"label": self.meta.get_label("income"),
"value": income,
"last_value": past_income
}
def get_income_year_to_date(self):
"""Get income to date"""
return self.get_year_to_date_balance("income")
def get_expense_year_to_date(self):
"""Get income to date"""
return self.get_year_to_date_balance("expense")
def get_year_to_date_balance(self, root_type):
"""Get income to date"""
balance = 0.0
for account in self.get_root_type_accounts(root_type):
balance += get_balance_on(account, date = self.future_to_date)
return {
"label": self.meta.get_label(root_type + "_year_to_date"),
"value": balance
}
def get_bank_balance(self):
# account is of type "Bank" or "Cash"
return self.get_type_balance('bank_balance', 'Bank')
def get_payables(self):
return self.get_type_balance('payables', 'Payable')
def get_invoiced_amount(self):
return self.get_type_balance('invoiced_amount', 'Receivable')
def get_expenses_booked(self):
expense, past_expense = self.get_period_amounts(self.get_root_type_accounts("expense"))
return {
"label": self.meta.get_label("expenses_booked"),
"value": expense,
"last_value": past_expense
}
def get_period_amounts(self, accounts):
"""Get amounts for current and past periods"""
balance = past_balance = 0.0
for account in accounts:
balance += (get_balance_on(account, date = self.future_to_date)
- get_balance_on(account, date = self.future_from_date))
past_balance += (get_balance_on(account, date = self.past_to_date)
- get_balance_on(account, date = self.past_from_date))
return balance, past_balance
def get_type_balance(self, fieldname, account_type):
accounts = [d.name for d in \
frappe.db.get_all("Account", filters={"account_type": account_type,
"company": self.company, "is_group": 0})]
balance = prev_balance = 0.0
for account in accounts:
balance += get_balance_on(account, date=self.future_from_date)
prev_balance += get_balance_on(account, date=self.past_from_date)
return {
'label': self.meta.get_label(fieldname),
'value': balance,
'last_value': prev_balance
}
def get_root_type_accounts(self, root_type):
if not root_type in self._accounts:
self._accounts[root_type] = [d.name for d in \
frappe.db.get_all("Account", filters={"root_type": root_type.title(),
"company": self.company, "is_group": 0})]
return self._accounts[root_type]
def get_from_to_date(self):
today = now_datetime().date()
# decide from date based on email digest frequency
if self.frequency == "Daily":
# from date, to_date is yesterday
from_date = to_date = today - timedelta(days=1)
elif self.frequency == "Weekly":
# from date is the previous week's monday
from_date = today - timedelta(days=today.weekday(), weeks=1)
# to date is sunday i.e. the previous day
to_date = from_date + timedelta(days=6)
else:
# from date is the 1st day of the previous month
from_date = today - relativedelta(days=today.day-1, months=1)
# to date is the last day of the previous month
to_date = today - relativedelta(days=today.day)
return from_date, to_date
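	# Worked example (added comment, dates are hypothetical): with frequency "Weekly" and
	# today = Wednesday 2015-06-10, from_date is Monday 2015-06-01 and to_date is
	# Sunday 2015-06-07, i.e. the whole previous calendar week.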
def set_dates(self):
self.future_from_date, self.future_to_date = self.from_date, self.to_date
# decide from date based on email digest frequency
if self.frequency == "Daily":
self.past_from_date = self.past_to_date = self.future_from_date - relativedelta(days = 1)
elif self.frequency == "Weekly":
self.past_from_date = self.future_from_date - relativedelta(weeks=1)
self.past_to_date = self.future_from_date - relativedelta(days=1)
else:
self.past_from_date = self.future_from_date - relativedelta(months=1)
self.past_to_date = self.future_from_date - relativedelta(days=1)
def get_next_sending(self):
from_date, to_date = self.get_from_to_date()
send_date = to_date + timedelta(days=1)
if self.frequency == "Daily":
next_send_date = send_date + timedelta(days=1)
elif self.frequency == "Weekly":
next_send_date = send_date + timedelta(weeks=1)
else:
next_send_date = send_date + relativedelta(months=1)
self.next_send = formatdate(next_send_date) + " at midnight"
return send_date
def onload(self):
self.get_next_sending()
def fmt_money(self, value):
return fmt_money(abs(value), currency = self.currency)
def send():
now_date = now_datetime().date()
for ed in frappe.db.sql("""select name from `tabEmail Digest`
where enabled=1 and docstatus<2""", as_list=1):
ed_obj = frappe.get_doc('Email Digest', ed[0])
if (now_date == ed_obj.get_next_sending()):
ed_obj.send()
@frappe.whitelist()
def get_digest_msg(name):
return frappe.get_doc("Email Digest", name).get_msg_html()
| 0.027848 |
# Copyright 2016 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic_lib.common.i18n import _
from ironic_lib import exception
from ironic_lib import metrics
from ironic_lib import metrics_statsd
metrics_opts = [
cfg.StrOpt('backend',
default='noop',
choices=['noop', 'statsd'],
help='Backend to use for the metrics system.'),
cfg.BoolOpt('prepend_host',
default=False,
help='Prepend the hostname to all metric names. '
'The format of metric names is '
'[global_prefix.][host_name.]prefix.metric_name.'),
cfg.BoolOpt('prepend_host_reverse',
default=True,
help='Split the prepended host value by "." and reverse it '
'(to better match the reverse hierarchical form of '
'domain names).'),
cfg.StrOpt('global_prefix',
help='Prefix all metric names with this value. '
'By default, there is no global prefix. '
'The format of metric names is '
'[global_prefix.][host_name.]prefix.metric_name.')
]
CONF = cfg.CONF
CONF.register_opts(metrics_opts, group='metrics')
def get_metrics_logger(prefix='', backend=None, host=None, delimiter='.'):
"""Return a metric logger with the specified prefix.
The format of the prefix is:
[global_prefix<delim>][host_name<delim>]prefix
where <delim> is the delimiter (default is '.')
:param prefix: Prefix for this metric logger.
Value should be a string or None.
:param backend: Backend to use for the metrics system.
Possible values are 'noop' and 'statsd'.
:param host: Name of this node.
:param delimiter: Delimiter to use for the metrics name.
:return: The new MetricLogger.
"""
if not isinstance(prefix, str):
msg = (_("This metric prefix (%s) is of unsupported type. "
"Value should be a string or None")
% str(prefix))
raise exception.InvalidMetricConfig(msg)
if CONF.metrics.prepend_host and host:
if CONF.metrics.prepend_host_reverse:
host = '.'.join(reversed(host.split('.')))
if prefix:
prefix = delimiter.join([host, prefix])
else:
prefix = host
if CONF.metrics.global_prefix:
if prefix:
prefix = delimiter.join([CONF.metrics.global_prefix, prefix])
else:
prefix = CONF.metrics.global_prefix
backend = backend or CONF.metrics.backend
if backend == 'statsd':
return metrics_statsd.StatsdMetricLogger(prefix, delimiter=delimiter)
elif backend == 'noop':
return metrics.NoopMetricLogger(prefix, delimiter=delimiter)
else:
msg = (_("The backend is set to an unsupported type: "
"%s. Value should be 'noop' or 'statsd'.")
% backend)
raise exception.InvalidMetricConfig(msg)
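# Illustrative example (added comment; names are hypothetical): with
# [metrics] prepend_host = True, prepend_host_reverse = True and
# global_prefix = 'prod', a call such as
#     get_metrics_logger('ironic.api', host='node1.example.com')
# yields metric names of the form
#     prod.com.example.node1.ironic.api.<metric_name>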
def list_opts():
"""Entry point for oslo-config-generator."""
return [('metrics', metrics_opts)]
| 0 |
"""Support for deCONZ switches."""
from homeassistant.components.switch import SwitchDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import NEW_LIGHT, POWER_PLUGS, SIRENS
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up deCONZ switches."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for deCONZ component.
    Switches are based on the same device class as lights in deCONZ.
"""
gateway = get_gateway_from_config_entry(hass, config_entry)
@callback
def async_add_switch(lights):
"""Add switch from deCONZ."""
entities = []
for light in lights:
if light.type in POWER_PLUGS:
entities.append(DeconzPowerPlug(light, gateway))
elif light.type in SIRENS:
entities.append(DeconzSiren(light, gateway))
async_add_entities(entities, True)
gateway.listeners.append(async_dispatcher_connect(
hass, gateway.async_event_new_device(NEW_LIGHT), async_add_switch))
async_add_switch(gateway.api.lights.values())
class DeconzPowerPlug(DeconzDevice, SwitchDevice):
"""Representation of a deCONZ power plug."""
@property
def is_on(self):
"""Return true if switch is on."""
return self._device.state
async def async_turn_on(self, **kwargs):
"""Turn on switch."""
data = {'on': True}
await self._device.async_set_state(data)
async def async_turn_off(self, **kwargs):
"""Turn off switch."""
data = {'on': False}
await self._device.async_set_state(data)
class DeconzSiren(DeconzDevice, SwitchDevice):
"""Representation of a deCONZ siren."""
@property
def is_on(self):
"""Return true if switch is on."""
return self._device.alert == 'lselect'
async def async_turn_on(self, **kwargs):
"""Turn on switch."""
data = {'alert': 'lselect'}
await self._device.async_set_state(data)
async def async_turn_off(self, **kwargs):
"""Turn off switch."""
data = {'alert': 'none'}
await self._device.async_set_state(data)
| 0 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SLA (Service-level agreement) is set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""
from rally.common.i18n import _
from rally.common import streaming_algorithms
from rally import consts
from rally.task import sla
@sla.configure(name="outliers")
class Outliers(sla.SLA):
"""Limit the number of outliers (iterations that take too much time).
The outliers are detected automatically using the computation of the mean
and standard deviation (std) of the data.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"max": {"type": "integer", "minimum": 0},
"min_iterations": {"type": "integer", "minimum": 3},
"sigmas": {"type": "number", "minimum": 0.0,
"exclusiveMinimum": True}
}
}
def __init__(self, criterion_value):
super(Outliers, self).__init__(criterion_value)
self.max_outliers = self.criterion_value.get("max", 0)
# NOTE(msdubov): Having 3 as default is reasonable (need enough data).
self.min_iterations = self.criterion_value.get("min_iterations", 3)
self.sigmas = self.criterion_value.get("sigmas", 3.0)
self.iterations = 0
self.outliers = 0
self.threshold = None
self.mean_comp = streaming_algorithms.MeanComputation()
self.std_comp = streaming_algorithms.StdDevComputation()
def add_iteration(self, iteration):
# NOTE(ikhudoshyn): This method can not be implemented properly.
# After adding a new iteration, both mean and standard deviation
# may change. Hence threshold will change as well. In this case we
# should again compare durations of all accounted iterations
# to the threshold. Unfortunately we can not do it since
# we do not store durations.
# Implementation provided here only gives rough approximation
# of outliers number.
if not iteration.get("error"):
duration = iteration["duration"]
self.iterations += 1
# NOTE(msdubov): First check if the current iteration is an outlier
if ((self.iterations >= self.min_iterations and self.threshold and
duration > self.threshold)):
self.outliers += 1
# NOTE(msdubov): Then update the threshold value
self.mean_comp.add(duration)
self.std_comp.add(duration)
if self.iterations >= 2:
mean = self.mean_comp.result()
std = self.std_comp.result()
self.threshold = mean + self.sigmas * std
self.success = self.outliers <= self.max_outliers
return self.success
def merge(self, other):
# NOTE(ikhudoshyn): This method can not be implemented properly.
# After merge, both mean and standard deviation may change.
# Hence threshold will change as well. In this case we
# should again compare durations of all accounted iterations
# to the threshold. Unfortunately we can not do it since
# we do not store durations.
# Implementation provided here only gives rough approximation
# of outliers number.
self.iterations += other.iterations
self.outliers += other.outliers
self.mean_comp.merge(other.mean_comp)
self.std_comp.merge(other.std_comp)
if self.iterations >= 2:
mean = self.mean_comp.result()
std = self.std_comp.result()
self.threshold = mean + self.sigmas * std
self.success = self.outliers <= self.max_outliers
return self.success
def details(self):
return (_("Maximum number of outliers %i <= %i - %s") %
(self.outliers, self.max_outliers, self.status()))
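# Example configuration (added comment; values are illustrative): an "outliers" SLA such as
#     {"outliers": {"max": 1, "min_iterations": 10, "sigmas": 3.0}}
# marks the task as failed once more than one iteration exceeds mean + 3 * std of the
# observed durations (outliers are only counted after min_iterations samples).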
| 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Stephane Eicher <seicher@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from odoo import models, api, fields
_logger = logging.getLogger(__name__)
class ResUser(models.Model):
_inherit = 'res.users'
connect_agent = fields.Boolean(
string='Connect the Xivo agent after check_in ',
default=True)
@api.multi
def asterisk_connect(self, log=True):
for ast_user in self.filtered('connect_agent'):
try:
user, ast_server, ast_manager = \
self.env['asterisk.server'].sudo(
ast_user.id)._connect_to_asterisk()
channel = '%s/%s' % (
ast_user.asterisk_chan_type, user.resource)
_prefix = '*31' if log else '*32'
extension = _prefix + ast_user.internal_number
ast_manager.Originate(
channel,
context='default',
extension=extension,
priority=1,
timeout=unicode(ast_server.wait_time * 1000),
caller_id=ast_user.internal_number,
account=ast_user.cdraccount)
message = 'Your Xivo agent is now {}.'.format(
'connected' if log else 'disconnected')
ast_user.notify_info(message)
ast_manager.Logoff()
            except Exception as e:
message = "Impossible to connect your Xivo agent\n"
message += unicode(e)
ast_user.notify_info(message)
| 0 |
############################################################################
#
# This file is a part of siple.
#
# Copyright 2010 David Maxwell
#
# siple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
############################################################################
from siple import Parameters
from siple.reporting import msg
from siple.exceptions import IterationCountFailure
from math import sqrt
class KrylovSolver:
"""
Base class for solving a linear ill-posed problem
T(x) = y
via an iterative Krylov method.
"""
@staticmethod
def defaultParameters():
return Parameters('KrylovSolver', ITER_MAX=200, mu=1.1, cg_reset=0, steepest_descent=False)
def __init__(self, params=None):
self.params = self.defaultParameters()
if not params is None: self.params.update(params)
self.iteration_listeners = []
def forwardProblem(self):
"""
Returns the LinearForwardProblem that defines the inverse problem.
"""
raise NotImplementedError()
def solve(self,x0,y,*args):
"""
Run the iterative method starting from the initial point x0.
"""
raise NotImplementedError()
def addIterationListener(self,listener):
"""
Add an object to be called after each iteration.
"""
self.iteration_listeners.append(listener)
def iterationHook(self,count,x,y,d,r,*args):
"""
Called during each iteration with the pertinent computations. Handy for debugging and visualization.
"""
for listener in self.iteration_listeners:
listener(self,count,x,y,d,r,*args)
def initialize(self,x0,y,*args):
"""
This method is a hook called at the beginning of a run. It gives an opportunity for the class to
set up information needed to decide conditions for the final stopping criterion.
    It may also be that the initial data 'x0' expresses the initial data for the problem T(x)=y
indirectly. Or it could be that x0 and y are expressed as some sort of concrete vectors rather than
some invtools.AbstractVector's.
So initialize returns a pair of AbstractVectors (x0,y) which are possibly modified and or wrapped
versions of the input data.
The arguments \*args are passed directly from 'run'.
"""
return (x0,y)
def finalize(self,x,y):
"""
This method is a hook called at the end of a run, and gives the class a way to make adjustments to x and y before
finishing the run.
"""
return (x,y)
def stopConditionMet(self,iter,x,y,r):
"""
Given a current iteration number, current value of x, desired value y of F(X), and current residual,
returns whether the stop condition has been met.
"""
raise NotImplementedError()
class KrylovCG(KrylovSolver):
"""
Base class for solving an ill-posed linear problem
T(x) = y
using a conjugate gradient method. Necessarily, T:X->X must be self adjoint.
"""
def solve(self,x0,y,*args):
(x,y) = self.initialize(x0,y,*args)
cg_reset = x.size()
if( self.params.cg_reset != 0): cg_reset = self.params.cg_reset
forward_problem = self.forwardProblem()
r=y.copy()
Tx = forward_problem.T(x)
r -= Tx
normsq_r = forward_problem.domainIP(r,r)
# d = r
d = r.copy()
# Eventually will contain T(d)
Td = None
count = 0
while True:
if count > self.params.ITER_MAX:
raise IterationCountFailure(self.params.ITER_MAX)
count += 1
if self.stopConditionMet(count,x,y,r):
msg('done at iteration %d', count)
break
if self.params.verbose:
msg('solving linear problem')
Td = forward_problem.T(d,out=Td)
self.iterationHook( count, x, y, d, r, Td )
alpha = normsq_r/forward_problem.domainIP(d,Td)
      if ((count+1) % cg_reset) == 0:
msg('resetting cg via steepest descent')
alpha = 1
# x = x + alpha*d
x.axpy(alpha,d)
# r = r - alpha*Td
r.axpy(-alpha,Td)
prev_normsq_r = normsq_r
normsq_r = forward_problem.domainIP(r,r)
beta = normsq_r / prev_normsq_r
      if ((count+1) % cg_reset) == 0: beta = 0
if(self.params.steepest_descent):
beta = 0
      # d = r + beta*d
d *= beta
d += r
y = forward_problem.T(x)
return self.finalize(x,y)
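  # Added note: each pass of the loop above performs the standard CG updates
  #   alpha_k = <r_k, r_k> / <d_k, T d_k>,  x_{k+1} = x_k + alpha_k d_k,
  #   r_{k+1} = r_k - alpha_k T d_k,        beta_k = <r_{k+1}, r_{k+1}> / <r_k, r_k>,
  #   d_{k+1} = r_{k+1} + beta_k d_k
  # with the inner products taken in the domain space of the forward problem.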
class KrylovCGNE(KrylovSolver):
"""
Base class for solving an ill-posed linear problem
T(x) = y
using a conjugate gradient method applied to the normal equation
T^*T x = T^* y
"""
def solve(self,x0,y,*args):
(x,y) = self.initialize(x0,y,*args)
forward_problem = self.forwardProblem()
Tx = forward_problem.T(x)
r = y.copy()
r -= Tx
cg_reset = x.size()
    if( self.params.cg_reset != 0): cg_reset = self.params.cg_reset
TStarR = forward_problem.TStar(r)
normsq_TStarR = forward_problem.domainIP(TStarR,TStarR)
# d = T^* r
d = TStarR.copy()
# Eventual storage for T(d)
Td = None
count = 0
while True:
if count > self.params.ITER_MAX:
raise IterationCountFailure(self.params.ITER_MAX)
count += 1
if self.stopConditionMet(count,x,y,r):
msg('done at iteration %d', count)
break
Td = forward_problem.T(d,out=Td)
self.iterationHook( count, x, y, d, r, Td, TStarR )
alpha = normsq_TStarR/forward_problem.rangeIP(Td,Td)
      if ((count+1) % cg_reset) == 0:
msg('resetting cg via steepest descent')
alpha = 1
# x = x + alpha*d
x.axpy(alpha,d)
# r = r - alpha*Td
r.axpy(-alpha,Td)
# beta = ||r_{k+1}||^2 / ||r_k||^2
prev_normsq_TStarR = normsq_TStarR
TStarR = forward_problem.TStar(r,out=TStarR)
normsq_TStarR = forward_problem.domainIP(TStarR,TStarR)
beta = normsq_TStarR/prev_normsq_TStarR
      if ((count+1) % cg_reset) == 0: beta = 0
if(self.params.steepest_descent):
beta = 0
      # d = T^* r + beta*d
d *= beta
d += TStarR
Tx = forward_problem.T(x, out=Tx)
return self.finalize(x,Tx)
class BasicKrylovCG(KrylovCG):
"""
Implements the CG regularization method for solving the linear ill posed problem
T(x) = y
using the Morozov discrepancy principle. The discrepancy of 'x' is
||y-T(x)||_Y
and the algorithm is run until a target discrepancy (specified as an argument to solve)
is reached.
The specific problem to solve is specified as an argument to the constructor.
"""
def __init__(self,forward_problem,params=None):
KrylovCG.__init__(self,params)
self.forward_problem = forward_problem
def forwardProblem(self):
"""
Returns the LinearForwardProblem that defines the inverse problem.
"""
return self.forward_problem
def solve(self,x0,y,targetDiscrepancy):
"""
Run the iterative method starting from the initial point x0.
The third argument is the desired value of ||y-T(x)||_Y
"""
return KrylovCG.solve(self,x0,y,targetDiscrepancy)
def initialize(self,x0,y,targetDiscrepancy):
"""
This method is a hook called at the beginning of a run. It gives an opportunity for the class to
set up information needed to decide conditions for the final stopping criterion.
    It may be that the initial data 'x0' expresses the initial data for the problem T(x)=y
    indirectly. Or it could be that x0 and y are expressed as dolfin.Function's rather than dolfin.GenericVectors.
    So initialize returns a pair of vectors (x0,y) which are possibly modified versions of the input data.
The arguments \*args are passed directly from 'run'.
"""
self.targetDiscrepancy = targetDiscrepancy
return (x0,y)
def stopConditionMet(self,iter,x,y,r):
"""
Given a current iteration number, current value of x, desired value y of F(X), and current residual,
returns whether the stop condition has been met.
"""
return sqrt(self.forward_problem.rangeIP(r,r)) <= self.params.mu*self.targetDiscrepancy
class BasicKrylovCGNE(KrylovCGNE):
"""
Implements the CGNE regularization method for solving the linear ill posed problem
T(x) = y
using the Morozov discrepancy principle. The discrepancy of 'x' is
||y-T(x)||_Y
and the algorithm is run until a target discrepancy (specified as an argument to solve)
is reached.
The specific problem to solve is specified as an argument to the constructor.
"""
def __init__(self,forward_problem,params=None):
KrylovCGNE.__init__(self,params)
self.forward_problem = forward_problem
def forwardProblem(self):
"""
Returns the LinearForwardProblem that defines the inverse problem.
"""
return self.forward_problem
def solve(self,x0,y,targetDiscrepancy):
"""
Run the iterative method starting from the initial point x0.
The third argument is the desired value of ||y-T(x)||_Y
"""
return KrylovCGNE.solve(self,x0,y,targetDiscrepancy)
def initialize(self,x0,y,targetDiscrepancy):
"""
This method is a hook called at the beginning of a run. It gives an opportunity for the class to
set up information needed to decide conditions for the final stopping criterion.
    It may be that the initial data 'x0' expresses the initial data for the problem T(x)=y
    indirectly. Or it could be that x0 and y are expressed as dolfin.Function's rather than dolfin.GenericVectors.
    So initialize returns a pair of vectors (x0,y) which are possibly modified versions of the input data.
The arguments \*args are passed directly from 'run'.
"""
self.targetDiscrepancy = targetDiscrepancy
return (x0,y)
def stopConditionMet(self,iter,x,y,r):
"""
Given a current iteration number, current value of x, desired value y of F(X), and current residual,
returns whether the stop condition has been met.
"""
disc = sqrt(self.forward_problem.rangeIP(r,r))
target = self.params.mu*self.targetDiscrepancy
if self.params.verbose:
msg('Iteration %d: discrepancy %g target %g',iter,disc,target)
return disc <= target
| 0.025302 |
# Copyright 2015 Matthew Rogge
#
# This file is part of Retr3d.
#
# Retr3d is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Retr3d is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Retr3d. If not, see <http://www.gnu.org/licenses/>.
#import Math stuff
from __future__ import division # allows floating point division from integers
import math
from itertools import product
#import FreeCAD modules
import FreeCAD as App
import FreeCAD
import Part
import Sketcher
import Draft
#Specific to printer
import globalVars as gv
import utilityFunctions as uf
class XEndstop(object):
def __init__(self):
self.name = "xEndstop"
def assemble(self):
App.ActiveDocument=App.getDocument(self.name)
endstop = App.ActiveDocument.ActiveObject.Shape
App.ActiveDocument=App.getDocument("PrinterAssembly")
App.ActiveDocument.addObject('Part::Feature',self.name).Shape= endstop
#Color Part
#Get the feature and move it into position
objs = App.ActiveDocument.getObjectsByLabel(self.name)
endstop = objs[-1]
#Add cap to assembly
App.ActiveDocument=App.getDocument(self.name+"Cap")
cap = App.ActiveDocument.ActiveObject.Shape
App.ActiveDocument=App.getDocument("PrinterAssembly")
App.ActiveDocument.addObject('Part::Feature',self.name+"Cap").Shape= cap
#Color Part
#Get the feature and move it into position
objs = App.ActiveDocument.getObjectsByLabel(self.name+"Cap")
cap = objs[-1]
#Define shifts and move the left clamp into place
xShift = 0
yShift = 0
zShift = gv.xEndstopHeight
App.ActiveDocument=App.getDocument("PrinterAssembly")
Draft.move([cap],App.Vector(xShift, yShift, zShift),copy=False)
App.ActiveDocument.recompute()
endstopAndCap = [endstop,cap]
#Rotate endstop and cap into correct orientation
rotateAngle = 90
rotateCenter = App.Vector(0,0,0)
rotateAxis = App.Vector(1,0,0)
Draft.rotate(endstopAndCap,rotateAngle,rotateCenter,axis = rotateAxis,copy=False)
#Define shifts and move into place
xShift = (-gv.xRodLength/2
- gv.xRodClampWidth
+ gv.xRodClampPocketDepth
+ gv.xRodClampWidth
- gv.xRodClampMountHoleToEdgePadding
- gv.printedToPrintedDia/2)
yShift = (gv.extruderNozzleStandoff
- gv.zRodStandoff
- gv.xEndZRodHolderFaceThickness
- gv.xEndZRodHolderMaxNutFaceToFace/2
- gv.xMotorMountPlateThickness
- 2*gv.xRodClampThickness
- gv.xRodDiaMax)
zShift = gv.xRodSpacing - gv.xRodAxisToMountHoleDist
App.ActiveDocument=App.getDocument("PrinterAssembly")
Draft.move(endstopAndCap,App.Vector(xShift, yShift, zShift),copy=False)
App.ActiveDocument.recompute()
if endstop not in gv.xAxisParts:
gv.xAxisParts.append(endstop)
if cap not in gv.xAxisParts:
gv.xAxisParts.append(cap)
def draw(self):
#Helper variables
width = 2*gv.xEndstopPadding+2*gv.xEndstopChannelWidth + gv.xEndstopContactSpacing
try:
App.getDocument(self.name).recompute()
App.closeDocument(self.name)
App.setActiveDocument("")
App.ActiveDocument=None
except:
pass
#make document
App.newDocument(self.name)
App.setActiveDocument(self.name)
App.ActiveDocument=App.getDocument(self.name)
#Sketch Points
p1x = -(width/2)
p1y = -(gv.printedToPrintedDia/2+gv.xEndstopPadding)
p2x = p1x
p2y = gv.xEndstopLength+p1y
p3x = -p1x
p3y = p2y
p4x = p3x
p4y = p1y
p5x = 0
p5y = 0
#make endstop body
App.activeDocument().addObject('Sketcher::SketchObject','Sketch')
App.activeDocument().Sketch.Placement = App.Placement(App.Vector(0.000000,0.000000,0.000000),App.Rotation(0.000000,0.000000,0.000000,1.000000))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p1x,p1y,0),App.Vector(p4x,p4y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p4x,p4y,0),App.Vector(p3x,p3y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p3x,p3y,0),App.Vector(p2x,p2y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p2x,p2y,0),App.Vector(p1x,p1y,0)))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',1,2,2,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',3,2,0,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Horizontal',0))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Horizontal',2))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Vertical',1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Vertical',3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Symmetric',1,2,2,2,-2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addGeometry(Part.Circle(App.Vector(p5x,p5y,0),App.Vector(0,0,1),gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',4,3,-1,1))
App.ActiveDocument.recompute()
#Add dimensions
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('DistanceY',1,gv.xEndstopLength))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('DistanceX',0,width))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Radius',4,gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Distance',-1,1,0,gv.xEndstopPadding+gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.getDocument(self.name).recompute()
#Pad sketch
App.activeDocument().addObject("PartDesign::Pad","Pad")
App.activeDocument().Pad.Sketch = App.activeDocument().Sketch
App.activeDocument().Pad.Length = 10.0
App.ActiveDocument.recompute()
App.ActiveDocument.Pad.Length = gv.xEndstopHeight
App.ActiveDocument.Pad.Reversed = 0
App.ActiveDocument.Pad.Midplane = 0
App.ActiveDocument.Pad.Length2 = 100.000000
App.ActiveDocument.Pad.Type = 0
App.ActiveDocument.Pad.UpToFace = None
App.ActiveDocument.recompute()
#Cut Channels
#Sketch Points
p1x = -width/2
p1y = (gv.printedToPrintedDia/2
+ gv.xEndstopPadding
+ gv.xEndstopChannelWidth
+ gv.xEndstopContactSpacing)
p2x = p1x
p2y = (gv.printedToPrintedDia/2
+ gv.xEndstopPadding
+ 2*gv.xEndstopChannelWidth
+ gv.xEndstopContactSpacing)
p3x = p1x+gv.xEndstopPadding
p3y = p2y
p4x = p3x
p4y = gv.xEndstopLength-(gv.printedToPrintedDia/2+gv.xEndstopPadding)
p5x = p4x+gv.xEndstopChannelWidth
p5y = p4y
p6x = p5x
p6y = p1y
p7x = p1x
p7y = (gv.printedToPrintedDia/2
+ gv.xEndstopPadding)
p8x = p1x
p8y = p7y+gv.xEndstopChannelWidth
p9x = p6x+gv.xEndstopContactSpacing
p9y = p8y
p10x = p9x
p10y = p4y
p11x = p9x+gv.xEndstopChannelWidth
p11y = p4y
p12x = p11x
p12y = p7y
#Make Sketch
App.activeDocument().addObject('Sketcher::SketchObject','Sketch001')
App.activeDocument().Sketch001.Support = uf.getFace(App.ActiveDocument.Pad,
None,None,
None, None,
gv.xEndstopHeight, 0)
App.activeDocument().recompute()
App.ActiveDocument.Sketch001.addExternal("Pad",uf.getEdge(App.ActiveDocument.Pad,
0,0,
0,1,
gv.xEndstopHeight, 0))
App.ActiveDocument.Sketch001.addExternal("Pad",uf.getEdge(App.ActiveDocument.Pad,
-width/2,0,
None, None,
gv.xEndstopHeight, 0))
App.ActiveDocument.Sketch001.addExternal("Pad",uf.getEdge(App.ActiveDocument.Pad,
width/2,0,
None, None,
gv.xEndstopHeight, 0))
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p1x,p1y,0),App.Vector(p2x,p2y,0)))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',0,1,-4))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',0))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p2x,p2y,0),App.Vector(p3x,p3y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Horizontal',1))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p3x,p3y,0),App.Vector(p4x,p4y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',1,2,2,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',2,2,-3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p4x,p4y,0),App.Vector(p5x,p5y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',3,2,-3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p5x,p5y,0),App.Vector(p6x,p6y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',3,2,4,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',4))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p6x,p6y,0),App.Vector(p1x,p1y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',4,2,5,1))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',5,2,0,1))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Horizontal',5))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Equal',3,0))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p7x,p7y,0),App.Vector(p8x,p8y,0)))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',6,1,-4))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',6,2,-4))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p8x,p8y,0),App.Vector(p9x,p9y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',6,2,7,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Horizontal',7))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p9x,p9y,0),App.Vector(p10x,p10y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',7,2,8,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',8,2,-3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',8))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p10x,p10y,0),App.Vector(p11x,p11y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',8,2,9,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('PointOnObject',9,2,-3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p11x,p11y,0),App.Vector(p12x,p12y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',9,2,10,1))
App.ActiveDocument.recompute()
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',10))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p12x,p12y,0),App.Vector(p7x,p7y,0)))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',10,2,11,1))
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',11,2,6,1))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Horizontal',11))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Equal',6,9))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Equal',9,3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p5x,p5y,0),App.Vector(p10x,p10y,0)))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',12,1,3,2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',12,2,8,2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p1x,p1y,0),App.Vector(p8x,p8y,0)))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',13,1,0,1))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',13,2,6,2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.toggleConstruction(13)
App.ActiveDocument.Sketch001.toggleConstruction(12)
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Equal',12,13))
App.ActiveDocument.recompute()
#Add dimensions
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('DistanceY',0,gv.xEndstopChannelWidth))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('DistanceY',13,-gv.xEndstopContactSpacing))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Distance',-1,1,11,p7y))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Distance',9,2,-5,2,gv.xEndstopPadding))
App.ActiveDocument.recompute()
App.getDocument(self.name).recompute()
#Cut channels
App.activeDocument().addObject("PartDesign::Pocket","Pocket")
App.activeDocument().Pocket.Sketch = App.activeDocument().Sketch001
App.activeDocument().Pocket.Length = 5.0
App.ActiveDocument.recompute()
App.ActiveDocument.Pocket.Length = 3.000000
App.ActiveDocument.Pocket.Type = 0
App.ActiveDocument.Pocket.UpToFace = None
App.ActiveDocument.recompute()
#Make cap
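	#If a cap document is still open from a previous run, close it first (errors are ignored when it does not exist)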
try:
App.getDocument(self.name+"Cap").recompute()
App.closeDocument(self.name+"Cap")
App.setActiveDocument("")
App.ActiveDocument=None
except:
pass
#make document
App.newDocument(self.name+"Cap")
App.setActiveDocument(self.name+"Cap")
App.ActiveDocument=App.getDocument(self.name+"Cap")
#Sketch Points
p1x = -(width/2)
p1y = -(gv.printedToPrintedDia/2+gv.xEndstopPadding)
p2x = p1x
p2y = gv.xEndstopLength+p1y
p3x = -p1x
p3y = p2y
p4x = p3x
p4y = p1y
p5x = 0
p5y = 0
#make endstop body
App.activeDocument().addObject('Sketcher::SketchObject','Sketch')
App.activeDocument().Sketch.Placement = App.Placement(App.Vector(0.000000,0.000000,0.000000),App.Rotation(0.000000,0.000000,0.000000,1.000000))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p1x,p1y,0),App.Vector(p4x,p4y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p4x,p4y,0),App.Vector(p3x,p3y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p3x,p3y,0),App.Vector(p2x,p2y,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(p2x,p2y,0),App.Vector(p1x,p1y,0)))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',1,2,2,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',3,2,0,1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Horizontal',0))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Horizontal',2))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Vertical',1))
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Vertical',3))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Symmetric',1,2,2,2,-2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addGeometry(Part.Circle(App.Vector(p5x,p5y,0),App.Vector(0,0,1),gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Coincident',4,3,-1,1))
App.ActiveDocument.recompute()
#Add dimensions
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('DistanceY',1,gv.xEndstopLength))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('DistanceX',0,width))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Radius',4,gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.ActiveDocument.Sketch.addConstraint(Sketcher.Constraint('Distance',-1,1,0,gv.xEndstopPadding+gv.printedToPrintedDia/2))
App.ActiveDocument.recompute()
App.getDocument(self.name+"Cap").recompute()
#Pad sketch
App.activeDocument().addObject("PartDesign::Pad","Pad")
App.activeDocument().Pad.Sketch = App.activeDocument().Sketch
App.activeDocument().Pad.Length = 10.0
App.ActiveDocument.recompute()
App.ActiveDocument.Pad.Length = gv.xEndstopCapThickness
App.ActiveDocument.Pad.Reversed = 0
App.ActiveDocument.Pad.Midplane = 0
App.ActiveDocument.Pad.Length2 = 100.000000
App.ActiveDocument.Pad.Type = 0
App.ActiveDocument.Pad.UpToFace = None
App.ActiveDocument.recompute()
| 0.054722 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by Group Policy plugin and drivers."""
from neutron.common import exceptions
class GroupPolicyDriverError(exceptions.NeutronException):
"""Policy driver call failed."""
message = _("%(method)s failed.")
class GroupPolicyException(exceptions.NeutronException):
"""Base for policy driver exceptions returned to user."""
pass
class GroupPolicyDeploymentError(GroupPolicyException):
message = _("Deployment not configured properly. See logs for details.")
class GroupPolicyInternalError(GroupPolicyException):
message = _("Unexpected internal failure. See logs for details.")
class GroupPolicyBadRequest(exceptions.BadRequest, GroupPolicyException):
"""Base for policy driver exceptions returned to user."""
pass
class GroupPolicyNotSupportedError(GroupPolicyBadRequest):
message = _("Operation %(method_name)s for resource "
"%(resource_name)s is not supported by this "
"deployment.")
class PolicyTargetRequiresPolicyTargetGroup(GroupPolicyBadRequest):
message = _("An policy target group was not specified when "
"creating policy_target.")
class PolicyTargetGroupUpdateOfPolicyTargetNotSupported(GroupPolicyBadRequest):
message = _("Updating policy target group of policy target "
"is not supported.")
class PolicyTargetGroupSubnetRemovalNotSupported(GroupPolicyBadRequest):
message = _("Removing a subnet from an policy target group is not "
"supported.")
class L2PolicyUpdateOfPolicyTargetGroupNotSupported(GroupPolicyBadRequest):
message = _("Updating L2 policy of policy target group is not supported.")
class L3PolicyUpdateOfL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Updating L3 policy of L2 policy is not supported.")
class UnsettingInjectDefaultRouteOfL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Unsetting inject_default_route attribute of L2 policy is not "
"supported.")
class L3PolicyMultipleRoutersNotSupported(GroupPolicyBadRequest):
message = _("L3 policy does not support multiple routers.")
class L3PolicyRoutersUpdateNotSupported(GroupPolicyBadRequest):
message = _("Updating L3 policy's routers is not supported.")
class NoSubnetAvailable(exceptions.ResourceExhausted, GroupPolicyException):
message = _("No subnet is available from l3 policy's pool.")
class PolicyTargetGroupInUse(GroupPolicyBadRequest):
message = _("Policy Target Group %(policy_target_group)s is in use")
class InvalidPortForPTG(GroupPolicyBadRequest):
message = _("Subnet %(port_subnet_id)s of port %(port_id)s does not "
"match subnet %(ptg_subnet_id)s of Policy Target Group "
"%(policy_target_group_id)s.")
class InvalidPortExtraAttributes(GroupPolicyBadRequest):
message = _("Port extra attribute %(attribute)s is invalid for the "
"following reason: %(reason)s")
class InvalidSubnetForPTG(GroupPolicyBadRequest):
message = _("Subnet %(subnet_id)s does not belong to network "
"%(network_id)s associated with L2P %(l2p_id)s for PTG "
"%(ptg_id)s.")
class OverlappingIPPoolsInSameTenantNotAllowed(GroupPolicyBadRequest):
message = _("IP Pool %(ip_pool)s overlaps with one of the existing L3P "
"for the same tenant %(overlapping_pools)s.")
class SharedResourceReferenceError(GroupPolicyBadRequest):
message = _("Shared resource of type %(res_type)s with id %(res_id)s "
"can't reference the non shared resource of type "
"%(ref_type)s with id %(ref_id)s")
class InvalidSharedResource(GroupPolicyBadRequest):
message = _("Resource of type %(type)s cannot be shared by driver "
"%(driver)s")
class CrossTenantL2PolicyL3PolicyNotSupported(GroupPolicyBadRequest):
message = _("Cross tenancy not supported between L2Ps and L3Ps")
class CrossTenantPolicyTargetGroupL2PolicyNotSupported(
GroupPolicyBadRequest):
message = _("Cross tenancy not supported between PTGs and L2Ps")
class NonSharedNetworkOnSharedL2PolicyNotSupported(GroupPolicyBadRequest):
message = _("Non Shared Network can't be set for a shared L2 Policy")
class InvalidSharedAttributeUpdate(GroupPolicyBadRequest):
message = _("Invalid shared attribute update. Shared resource %(id)s is "
"referenced by %(rid)s, which is either shared or owned by a "
"different tenant.")
class ExternalRouteOverlapsWithL3PIpPool(GroupPolicyBadRequest):
message = _("Destination %(destination)s for ES %(es_id)s overlaps with "
"L3P %(l3p_id)s.")
class ExternalSegmentSubnetOverlapsWithL3PIpPool(GroupPolicyBadRequest):
message = _("Subnet %(subnet)s for ES %(es_id)s overlaps with "
"L3P %(l3p_id)s.")
class ExternalRouteNextHopNotInExternalSegment(GroupPolicyBadRequest):
message = _("One or more external routes' nexthop are not part of "
"subnet %(cidr)s.")
class InvalidL3PExternalIPAddress(GroupPolicyBadRequest):
message = _("Address %(ip)s allocated for l3p %(l3p_id)s on segment "
"%(es_id)s doesn't belong to the segment subnet %(es_cidr)s")
class InvalidAttributeUpdateForES(GroupPolicyBadRequest):
message = _("Attribute %(attribute)s cannot be updated for External "
"Segment.")
class MultipleESPerEPNotSupported(GroupPolicyBadRequest):
message = _("Multiple External Segments per External Policy is not "
"supported.")
class ESIdRequiredWhenCreatingEP(GroupPolicyBadRequest):
message = _("External Segment ID is required when creating ExternalPolicy")
class ESUpdateNotSupportedForEP(GroupPolicyBadRequest):
message = _("external_segments update for External Policy is not "
"supported.")
class MultipleESPerL3PolicyNotSupported(GroupPolicyBadRequest):
message = _("Only one External Segment per L3 Policy supported.")
class InvalidSubnetForES(GroupPolicyBadRequest):
message = _("External Segment subnet %(sub_id)s is not part of an "
"external network %(net_id)s.")
class OnlyOneEPPerTenantAllowed(GroupPolicyBadRequest):
message = _("Only one External Policy per Tenant is allowed.")
class ImplicitSubnetNotSupported(GroupPolicyBadRequest):
message = _("RMD doesn't support implicit external subnet creation.")
class DefaultL3PolicyAlreadyExists(GroupPolicyBadRequest):
message = _("Default L3 Policy with name %(l3p_name)s already "
"exists and is visible for this tenant.")
class DefaultExternalSegmentAlreadyExists(GroupPolicyBadRequest):
message = _("Default External Segment with name %(es_name)s already "
"exists and is visible for this tenant.")
class InvalidCrossTenantReference(GroupPolicyBadRequest):
message = _("Not supported cross tenant reference: object "
"%(res_type)s:%(res_id)s can't link %(ref_type)s:%(ref_id)s "
"unless it's shared.")
class InvalidNetworkAccess(GroupPolicyBadRequest):
message = _("%(msg)s : Network id %(network_id)s doesn't belong to "
" the tenant id %(tenant_id)s.")
class InvalidRouterAccess(GroupPolicyBadRequest):
message = _("%(msg)s : Router id %(router_id)s does not belong to the "
" tenant id %(tenant_id)s.")
class MultipleRedirectActionsNotSupportedForRule(GroupPolicyBadRequest):
message = _("Resource Mapping Driver does not support multiple redirect "
"actions in a Policy Rule.")
class MultipleRedirectActionsNotSupportedForPRS(GroupPolicyBadRequest):
message = _("Resource Mapping Driver does not support multiple redirect "
"actions in a Policy Rule Set.")
class InvalidNetworkServiceParameters(GroupPolicyBadRequest):
message = _("Resource Mapping Driver currently supports only one "
"parameter of type: ip_single and value: self_subnet and one "
"parameter of type ip_single or ip_pool and value nat_pool")
class ESSubnetRequiredForNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment which "
"has an external subnet specified to create a Nat Pool")
class InvalidESSubnetCidrForNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment which "
"maps to ip pool value specified in the nat pool")
class NSPRequiresES(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment in "
"l3policy to associate a NSP with value nat_pool to a PTG")
class NSPRequiresNatPool(GroupPolicyBadRequest):
message = _("Resource Mapping Driver requires an External Segment in "
"l3policy which has nat_pool associated for associating a NSP "
"with value nat_pool to a PTG")
class L3PEsinUseByNSP(exceptions.InUse, GroupPolicyException):
message = _("The External Segment in L3Policy cannot be updated because "
"it is in use by Network Service Policy")
class NatPoolinUseByNSP(exceptions.InUse, GroupPolicyException):
message = _("The Nat Pool is in use by Network Service Policy")
class OverlappingNATPoolInES(GroupPolicyBadRequest):
message = _("One or more NAT Pools associated with ES %(es_id)s overlaps "
"with NAT Pool %(np_id)s.")
class OverlappingSubnetForNATPoolInES(GroupPolicyBadRequest):
message = _("One or more subnets associated with network %(net_id)s "
"partially overlaps with NAT Pool %(np_id)s.")
class InvalidProxiedGroupL3P(GroupPolicyBadRequest):
message = _("Cannot proxy PTG %(ptg_id)s: it's on a different L3 policy "
"%(l3p_id)s")
class InvalidProxiedGroupL2P(GroupPolicyBadRequest):
message = _("Cannot proxy PTG %(ptg_id)s: it's on the same L2 Policy as "
"the proxy group of type L2.")
class OnlyOneProxyGatewayAllowed(GroupPolicyBadRequest):
message = _("Another proxy gateway PT already exists for group "
"%(group_id)s")
class OnlyOneGroupDefaultGatewayAllowed(GroupPolicyBadRequest):
message = _("Another group default gateway PT already exists for group "
"%(group_id)s")
class PTGAlreadyProvidingRedirectPRS(GroupPolicyBadRequest):
message = _("PTG %(ptg_id)s is already providing a redirect PRS.")
class InvalidClusterId(GroupPolicyBadRequest):
message = _("In RMD and derived drivers, a PT cluster_id should point to "
"an existing PT.")
class PolicyTargetInUse(GroupPolicyBadRequest):
message = _("Cannot delete a PT in use by a cluster.")
class InvalidClusterPtg(GroupPolicyBadRequest):
message = _("Inter PTG clustering disallowed.")
class NatPoolInUseByPort(exceptions.InUse, GroupPolicyException):
message = _("Ports or floating IP addresses are using the subnet "
"corresponding to Nat Pool.")
class IdenticalExternalRoute(GroupPolicyBadRequest):
message = _("External segments %(es1)s and %(es2)s cannot have "
"identical external route CIDR %(cidr)s if associated "
"with a common L3 policy.")
| 0 |
#!/usr/bin/env python
#coding:utf-8
"""
Truncatable primes
The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
"""
def gen_primes():
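	# Incremental sieve of Eratosthenes (unbounded): D maps each known composite
	# to the list of primes that divide it; when q is absent from D it is prime.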
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q+=1
def answer():
p=[]
m=0
a=11
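	# p holds the decimal string of every prime generated so far; a candidate is
	# truncatable only when all of its left and right truncations are already in
	# p, e.g. 3797 needs 797, 97, 7 and 379, 37, 3 to have been seen before it.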
for i in gen_primes():
if i>1000000: break
if a==0: break
s=str(i)
t=0
if i>10:
if s[0] in p and s[-1] in p:
if i<100: t=1
if s[1:] in p and s[:-1] in p:
if i<1000: t=1
if s[2:] in p and s[:-2] in p:
if i<10000: t=1
if s[3:] in p and s[:-3] in p:
if i<100000: t=1
if s[4:] in p and s[:-4] in p:
if i<1000000: t=1
if t:
m+=i
a-=1
p.append(s)
print m
import time
tStart=time.time()
answer()
print 'run time=',time.time()-tStart
# 748317
# run time= 28.1839311123 | 0.04878 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from sqlalchemy import MetaData
from keystone.common.sql import migration_helpers
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
if migrate_engine.name != 'mysql':
# InnoDB / MyISAM only applies to MySQL.
return
# This is a list of all the tables that might have been created with MyISAM
# rather than InnoDB.
tables = [
'credential',
'domain',
'ec2_credential',
'endpoint',
'group',
'group_domain_metadata',
'group_project_metadata',
'policy',
'project',
'role',
'service',
'token',
'trust',
'trust_role',
'user',
'user_domain_metadata',
'user_group_membership',
'user_project_metadata',
]
meta = MetaData()
meta.bind = migrate_engine
domain_table = sql.Table('domain', meta, autoload=True)
endpoint_table = sql.Table('endpoint', meta, autoload=True)
group_table = sql.Table('group', meta, autoload=True)
group_domain_metadata_table = sql.Table('group_domain_metadata', meta,
autoload=True)
group_project_metadata_table = sql.Table('group_project_metadata', meta,
autoload=True)
project_table = sql.Table('project', meta, autoload=True)
service_table = sql.Table('service', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
user_domain_metadata_table = sql.Table('user_domain_metadata', meta,
autoload=True)
user_group_membership_table = sql.Table('user_group_membership', meta,
autoload=True)
# Mapping of table name to the constraints on that table,
# so we can create them.
table_constraints = {
'endpoint': [{'table': endpoint_table,
'fk_column': 'service_id',
'ref_column': service_table.c.id},
],
'group': [{'table': group_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
],
'group_domain_metadata': [{'table': group_domain_metadata_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
],
'group_project_metadata': [{'table': group_project_metadata_table,
'fk_column': 'project_id',
'ref_column': project_table.c.id},
],
'project': [{'table': project_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
],
'user': [{'table': user_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
],
'user_domain_metadata': [{'table': user_domain_metadata_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
],
'user_group_membership': [{'table': user_group_membership_table,
'fk_column': 'user_id',
'ref_column': user_table.c.id},
{'table': user_group_membership_table,
'fk_column': 'group_id',
'ref_column': group_table.c.id},
],
'user_project_metadata': [{'table': group_project_metadata_table,
'fk_column': 'project_id',
'ref_column': project_table.c.id},
],
}
# Maps a table name to the tables that reference it as a FK constraint
# (See the map above).
ref_tables_map = {
'service': ['endpoint', ],
'domain': ['group', 'group_domain_metadata', 'project', 'user',
'user_domain_metadata', ],
'project': ['group_project_metadata', 'user_project_metadata', ],
'user': ['user_group_membership', ],
'group': ['user_group_membership', ],
}
# The names of tables that need to have their FKs added.
fk_table_names = set()
d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';")
for row in d.fetchall():
table_name = row[0]
if table_name not in tables:
# Skip this table since it's not a Keystone table.
continue
migrate_engine.execute("ALTER TABLE `%s` Engine=InnoDB" % table_name)
# Will add the FKs to the table if any of
# a) the table itself was converted
# b) the tables that the table referenced were converted
if table_name in table_constraints:
fk_table_names.add(table_name)
ref_tables = ref_tables_map.get(table_name, [])
for other_table_name in ref_tables:
fk_table_names.add(other_table_name)
# Now add all the FK constraints to those tables
for table_name in fk_table_names:
constraints = table_constraints.get(table_name)
migration_helpers.add_constraints(constraints)
def downgrade(migrate_engine):
pass
| 0 |
#!/bin/env python
import logging
from optparse import OptionParser
import sys
import os
import re
import string
import subprocess
import math
import json
import urllib
sys.path.insert(0, sys.path[0])
from config import *
from funcs import *
class submitSGEJobs:
url = ""
f=""
def __init__(self, url, f ):
self.url = url
self.f = f
def getJobParams(self, servicename,name, wkey, logging):
queue = "short"
cputime = 240
memory = 16096
cpu = 1
if (servicename != name):
data = urllib.urlencode({'func':'getJobParams', 'servicename':servicename, 'name':name, 'wkey':wkey})
jobparams=self.f.queryAPI(self.url, data, servicename, logging)
data = json.loads(jobparams)
cputime_pred = int(math.floor(int(data['cputime'])/60)+60)
memory = int(math.floor(int(data['maxmemory']))+1024)
if(servicename=="stepTophat2" or servicename=="stepRSEM" or servicename=="stepBSMap"):
cpu=4
cputime_pred=cputime_pred*2
if("picard" in servicename.lower() or "rseqc" in servicename.lower()):
cputime_pred = 2000
memory = 64000
# Set cputime and queue
if(cputime_pred>240):
queue = "long"
cputime=cputime_pred
if(cputime_pred>=20000):
cputime = 20000
if(memory>=200000):
memory = 200000
alist = (queue, str(cputime), str(memory), str(cpu))
return list(alist)
def checkJob(self, jobname, wkey, logging):
data = urllib.urlencode({'func':'checkJob', 'jobname':jobname, 'wkey':wkey})
return json.loads(self.f.queryAPI(self.url, data, jobname, logging))['Result']
def runcmd(self, command):
print command
child = os.popen(command)
data = child.read()
print data
err = child.close()
if err:
return 'ERROR: %s failed w/ exit code %d' % (command, err)
return data
def main():
try:
parser = OptionParser()
parser.add_option('-u', '--username', help='defined user in the cluster', dest='username')
parser.add_option('-k', '--key', help='defined key for the workflow', dest='wkey')
parser.add_option('-s', '--servicename', help='service name', dest='servicename')
		parser.add_option('-c', '--command', help='command that is going to be run', dest='com')
parser.add_option('-n', '--name', help='name of the run', dest='name')
parser.add_option('-o', '--outdir', help='output directory', dest='outdir')
parser.add_option('-f', '--config', help='configuration parameter section', dest='config')
		parser.add_option('-r', '--force', help='force submission', dest='force')
(options, args) = parser.parse_args()
except:
print "OptionParser Error:for help use --help"
sys.exit(2)
USERNAME = options.username
WKEY = options.wkey
OUTDIR = options.outdir
SERVICENAME = options.servicename
NAME = options.name
COM = options.com
CONFIG = options.config
FORCE = (options.force if (options.force) else "no" )
python = "python"
config=getConfig(CONFIG)
f = funcs()
submitSGEjobs = submitSGEJobs(config['url'], f)
submitCommand = f.getCommand(sys.argv)
exec_dir=os.path.dirname(os.path.abspath(__file__))
#print "EXECDIR" + exec_dir
sdir=config['tooldir']+"/src"
track=OUTDIR + "/tmp/track"
src=OUTDIR + "/tmp/src"
logs=OUTDIR + "/tmp/logs"
if (NAME == None):
NAME="job";
success_file = track+"/"+str(NAME)+".success";
if (not os.path.exists(success_file) or FORCE != "no"):
os.system("mkdir -p "+track)
os.system("mkdir -p "+src)
os.system("mkdir -p "+logs)
logfile="%s/JOB.%s.log"%(logs, NAME)
print logfile
logging.basicConfig(filename=logfile, filemode='a',format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
logging.info("File Path:%s"%os.getcwd())
print "checkJob\n";
result = submitSGEjobs.checkJob(NAME, WKEY, logging)
print result+"\n"
if (result != "START" and FORCE == "no" ):
sys.exit(0)
print "checkJob[DONE]\n";
print "getJobParams\n";
(QUEUE, TIME, MEMORY, CPU) = submitSGEjobs.getJobParams(SERVICENAME, NAME,WKEY, logging)
resources = "\'{\\\"queue\\\":\\\"%s\\\",\\\"cputime\\\":\\\"%s\\\",\\\"memory\\\":\\\"%s\\\",\\\"cpu\\\":\\\"%s\\\"}\'"%(QUEUE, TIME, MEMORY, CPU)
logging.info("resources => :%s"%(resources))
print "getJobParams["+resources+"]\n";
if (USERNAME==None):
USERNAME=subprocess.check_output("whoami", shell=True).rstrip()
print "USER:"+str(USERNAME)+"\n";
if (OUTDIR == None):
OUTDIR="~/out";
if (QUEUE == None):
queue="-q short"
else:
queue="-q "+str(QUEUE)
COM.replace('\"{','\'{')
COM.replace('}\"','}\'')
print "COMMAND: [" + COM + "]\n"
print "NAME: [" + NAME + "]\n"
print "cpu: [" + CPU + "]\n"
jobstatus_cmd = python + " %(sdir)s/jobStatus.py -f %(CONFIG)s -u %(USERNAME)s -k %(WKEY)s -s %(SERVICENAME)s -t %(TYPE)s -o %(OUTDIR)s -j %(NAME)s -m %(MESSAGE)s -r %(resources)s"
f=open(src+"/"+NAME+".bash", 'w')
f.write("#BEGIN-OF-FILE\n")
f.write("#$ -cwd\n")
f.write("#$ -o "+ logs + "/$JOB_ID.std -j y\n")
f.write("#$ -S /bin/bash\n")
f.write("#$ -V\n")
f.write("#$ -l mem_free=1G\n")
f.write("#$ -q all.q\n")
f.write("date\n")
f.write("cd " + exec_dir + "\n")
MESSAGE="2"
TYPE="dbSetStartTime"
f.write(jobstatus_cmd % locals() + " -n $JOB_ID")
f.write("\n retval=$?\n if [ $retval -ne 0 ]; then\n exit 66\n fi\n")
COMSTR=re.sub(r"'",r"''", COM)
f.write("echo '"+str(COMSTR)+"'\n")
f.write("\n\n"+ str(COM) +"\n\n")
f.write("retval=$?\necho \"[\"$retval\"]\"\nif [ $retval -eq 0 ]; then\n")
if (str(NAME) != str(SERVICENAME)):
f.write("touch "+success_file+"\n")
MESSAGE="3"
TYPE="dbSetEndTime"
f.write(jobstatus_cmd % locals() + " -n $JOB_ID")
f.write("\n retval=$?\n if [ $retval -ne 0 ]; then\n exit 66\n fi\n")
f.write(" echo success\nelse\n echo failed\n")
MESSAGE="0"
f.write(jobstatus_cmd % locals() + " -n $JOB_ID")
f.write("\n retval=$?\n if [ $retval -ne 0 ]; then\n exit 66\n fi\n")
f.write(" exit 127\nfi\ndate\n")
f.write("#END-OF-FILE\n")
f.close();
#CHANGE this submition script according to the system.
#PUT TRY CATCH HERE
command=" qsub "+src+"/"+NAME+".bash"
print command
logging.info("SUBMIT SCRIPT[" + command +"]\n\n")
output = submitSGEjobs.runcmd(command)
logging.info("SUBMIT OUT:[" + str(output) + "]\n")
words = re.split('[\t\s]+', str(output))
num = words[2]
MESSAGE="1"
TYPE="dbSubmitJob"
submitCommand=re.sub(r"'",r"''", submitCommand)
jobstatus_cmd = jobstatus_cmd + " -n %(num)s -c '"+submitCommand+"'"
command = jobstatus_cmd % locals()
logging.info("RUN COMMAND:\n" + str(command) + "\n")
if num>0:
return submitSGEjobs.runcmd(command)
f.close()
if __name__ == "__main__":
main()
| 0.034511 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class to decode Neutron network infos.
:copyright: (c) 2013 by @MIS
"""
import socket,struct
from nova.openstack.common import log as logging
from nova.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class WparNetwork(object):
"""
	This class takes the Neutron network info and
	converts it into useful information for the wparrip driver
FIXME:
- Need to handle the cases where the network does not exist on the Host
	- Need to handle the case where we want to use a specific Host adapter (default to first one)
"""
def __init__(self, network_info=None):
if network_info is None:
self.network_info = None
else:
self.network_info = self._get_network_for_wpar(network_info)
def _find_fixed_ip(self, subnets):
for subnet in subnets:
for ip in subnet['ips']:
if ip['type'] == 'fixed' and ip['address']:
return ip['address']
def _find_dns_ip(self, subnets):
for subnet in subnets:
for dns in subnet['dns']:
if dns['type'] == 'dns' and dns['address']:
return dns['address']
def _find_gateway_ip(self, subnets):
for subnet in subnets:
if subnet['gateway']['type'] == 'gateway' and subnet['gateway']['address']:
return subnet['gateway']['address']
def _find_cidr(self, subnets):
for subnet in subnets:
if 'cidr' in subnet:
#only get the mask
return subnet['cidr'][-2:]
def _get_network_for_wpar(self, network_info):
data = {}
data['ip'] = self._find_fixed_ip(network_info['subnets'])
data['dns'] = self._find_dns_ip(network_info['subnets'])
data['gateway'] = self._find_gateway_ip(network_info['subnets'])
mask = int(self._find_cidr(network_info['subnets']))
data['netmask'] = self._calcDottedNetmask(mask)
return data
def _calcDottedNetmask(self, mask):
bits = 0
for i in xrange(32-mask,32):
bits |= (1 << i)
return socket.inet_ntoa(struct.pack('>I', bits))
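		# Example (sketch): _calcDottedNetmask(24) -> '255.255.255.0',
		# _calcDottedNetmask(16) -> '255.255.0.0'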
| 0.029731 |
# Copyright (C) 2014 VA Linux Systems Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K.
# @author: YAMAMOTO Takashi, VA Linux Systems Japan K.K.
import mock
class _Eq(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __ne__(self, other):
return not self.__eq__(other)
class _Value(_Eq):
def __or__(self, b):
return _Op('|', self, b)
def __ror__(self, a):
return _Op('|', a, self)
class _SimpleValue(_Value):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
class _Op(_Value):
def __init__(self, op, a, b):
self.op = op
self.a = a
self.b = b
def __repr__(self):
return '%s%s%s' % (self.a, self.op, self.b)
def _mkcls(name):
class Cls(_Eq):
_name = name
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
self._hist = []
def __getattr__(self, name):
return self._kwargs[name]
def __repr__(self):
args = map(repr, self._args)
kwargs = sorted(['%s=%s' % (x, y) for x, y in
self._kwargs.items()])
return '%s(%s)' % (self._name, ', '.join(args + kwargs))
return Cls
class _Mod(object):
_cls_cache = {}
def __init__(self, name):
self._name = name
def __getattr__(self, name):
fullname = '%s.%s' % (self._name, name)
if '_' in name: # constants are named like OFPxxx_yyy_zzz
return _SimpleValue(fullname)
try:
return self._cls_cache[fullname]
except KeyError:
pass
cls = _mkcls(fullname)
self._cls_cache[fullname] = cls
return cls
def __repr__(self):
return 'Mod(%s)' % (self._name,)
def patch_fake_oflib_of():
ryu_mod = mock.Mock()
ryu_base_mod = ryu_mod.base
ryu_lib_mod = ryu_mod.lib
ryu_lib_hub = ryu_lib_mod.hub
ryu_ofproto_mod = ryu_mod.ofproto
ofp = _Mod('ryu.ofproto.ofproto_v1_3')
ofpp = _Mod('ryu.ofproto.ofproto_v1_3_parser')
ryu_ofproto_mod.ofproto_v1_3 = ofp
ryu_ofproto_mod.ofproto_v1_3_parser = ofpp
ryu_app_mod = ryu_mod.app
ryu_app_ofctl_mod = ryu_app_mod.ofctl
ryu_ofctl_api = ryu_app_ofctl_mod.api
modules = {'ryu': ryu_mod,
'ryu.base': ryu_base_mod,
'ryu.lib': ryu_lib_mod,
'ryu.lib.hub': ryu_lib_hub,
'ryu.ofproto': ryu_ofproto_mod,
'ryu.ofproto.ofproto_v1_3': ofp,
'ryu.ofproto.ofproto_v1_3_parser': ofpp,
'ryu.app': ryu_app_mod,
'ryu.app.ofctl': ryu_app_ofctl_mod,
'ryu.app.ofctl.api': ryu_ofctl_api}
return mock.patch.dict('sys.modules', modules)
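# Usage sketch (assumption, not part of the original module): tests apply the
# returned patcher before importing code that depends on ryu, e.g.
#   with patch_fake_oflib_of():
#       from ryu.ofproto import ofproto_v1_3  # resolves to the fake _Mod above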
| 0 |
import requests
from bs4 import BeautifulSoup
from time import sleep
from random import uniform, randint
import sys
import dbfunctions
import os
#selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import time
cur_dir = os.path.dirname(os.path.realpath(__file__))
base = 'https://newyork.craigslist.org'
section = '/search'
driver = webdriver.Chrome(executable_path="%s/chromedriver"%cur_dir)
driver.implicitly_wait(10)
mainWin = driver.window_handles[0]
breaking = False
search = ''
totalamount = None
sections = ["cpg", "web", "sad", "sof"]
def hover(element):
hov = ActionChains(driver).move_to_element(element)
hov.perform()
def wait(a, b):
rand=uniform(a, b)
sleep(rand)
def get_total(search, link):
totalamount = 1
url = base+section+"/"+link+'?query='+search.replace(" ", "+")+"&is_paid=all"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
total = soup.find_all('span', attrs={'class':'totalcount'})
totalcount = total[0].get_text()
return int(totalcount)
def scrape_emails(search, totalamount, breaking, skip, link_section):
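    # Craigslist shows at most 120 listings per results page, so the total
    # count is turned into a page count and each page is requested with an
    # &s=<offset> query parameter (assumption based on the arithmetic below).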
if totalamount > 120:
totalamount = (int(totalamount) // 120)+1
else:
totalamount = 1
for a in range(totalamount):
if breaking == True:
break
page_number = a*120
page = "&s={}".format(page_number)
if a == 0:
page = ''
url = base+section+"/"+link_section+'?query='+search.replace(" ", "+")+"&is_paid=all"+page
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
links = soup.find_all('a', attrs={'class':'hdrlnk'})
print("")
print("")
print(link_section)
print(str(len(links))+" results")
print("")
for link in links:
link_url = link.get('href')
if breaking == True:
break
if not dbfunctions.checkifvisited(link.get('href')):
try:
print('trying next link')
driver.get(link.get('href'))
try:
button = driver.find_element_by_class_name('reply-button')
button.click()
# ===================================================================================================================
# CHECK IF THIS WORKS LATER
# ===================================================================================================================
# try:
# captcha = WebDriverWait(driver, 2).until(lambda driver: driver.find_element_by_id('g-recaptcha'))
# if captcha:
# wait(1.0, 1.5)
# recaptchaFrame = WebDriverWait(driver, 1).until(lambda driver: driver.find_element_by_tag_name('iframe'))
# frameName = recaptchaFrame.get_attribute('name')
# # move the driver to the iFrame...
# driver.switch_to_frame(frameName)
# CheckBox = WebDriverWait(driver, 1).until(lambda driver: driver.find_element_by_id("recaptcha-anchor"))
# wait(1.0, 1.5)
# hover(CheckBox)
# wait(0.5, 0.7)
# CheckBox.click()
# wait(2.0, 2.5)
# if skip == 'y':
# sleep(10)
# else:
# try:
# driver.switch_to_window(mainWin)
# html = driver.page_source
# s = BeautifulSoup(html, 'html.parser')
# iframes = s.find_all("iframe", attrs={'title': 'recaptcha challenge'})
# secFrame = iframes[0].get('name')
# if secFrame:
# print 'There is Captcha now, try again later or try setting the solve captcha option as "y"'
# driver.close()
# breaking = True
# except:
# continue
# driver.switch_to_window(mainWin)
# except Exception as error:
# print(error, "error")
# driver.switch_to_window(mainWin)
# ===================================================================================================================
try:
e = WebDriverWait(driver, 20).until(lambda driver: driver.find_element_by_class_name('reply-email-address'))
email = e.text
except Exception as error:
print(error, "getting email")
continue
if dbfunctions.checkifexists(email):
print('Email already saved')
else:
dbfunctions.save_visited(link_url)
dbfunctions.create_email(email)
print('saving email '+email)
except Exception as error:
print(error, "getting the result site")
continue
except Exception as error:
print(error, "trying result link")
continue
else:
print('link already visited')
if __name__=='__main__':
if len(sys.argv) == 1:
driver.close()
print('')
print('')
print('Usage: python sele.py [search]')
# print('solve captcha : y/n')
print('search: word you want to search in the education section')
print('')
print('')
else:
search = sys.argv[1]
skip = ''
if len(sys.argv) > 2:
skip = sys.argv[2]
for link in sections:
try:
totalamount = get_total(search, link)
scrape_emails(search, totalamount, breaking, skip, link)
except Exception as error:
print(error, "getting total ammount")
driver.quit()
| 0.031626 |
# -*- coding: utf-8 -*-
##
## Copyright © 2007-2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
"""\
This code parses a config file.
By itself, it understands nothing whatsoever. This package includes a
"help" command:
help [word...]
- show what "word" does
See the homevent.config module and the test/parser.py script
for typical usage.
"""
from homevent.context import Context
from homevent.event import Event
from homevent.base import Name
from homevent.twist import fix_exception,reraise
from twisted.internet import defer
from homevent.geventreactor import waitForDeferred
import sys
import traceback
class InputEvent(Event):
"""An event that's just a line from the interpreter"""
def _name_check(self,name):
pass
def __str__(self):
try:
return u"‹InputEvent:%s›" % (self.name,)
except Exception:
return "<InputEvent> REPORT_ERROR: "+repr(self.name)
def __unicode__(self):
try:
#return u"⌁."+unicode(self.name)
return unicode(self.name)
except Exception:
return u"⌁ REPORT_ERROR: "+repr(self.name)
def report(self, verbose=False):
try:
yield "IEVENT: "+unicode(self.name)
except Exception:
yield "IEVENT: REPORT_ERROR: "+repr(self.name)
class Processor(object):
"""Base class: Process input lines and do something with them."""
do_prompt = False
def __init__(self, parent=None, ctx=None):
self.ctx = ctx or Context()
self.parent = parent
def lookup(self, args):
me = self.ctx.words
event = InputEvent(self.ctx, *args)
fn = me.lookup(event)
fn = fn(parent=me, ctx=self.ctx)
fn.called(event)
return fn
def simple_statement(self,args):
"""\
A simple statement is a sequence of words. Analyze them.
"""
raise NotImplementedError("I cannot understand simple statements.",args)
def complex_statement(self,args):
"""\
A complex statement is a sequence of words followed by a
colon and at least one sub-statement. This procedure needs
to reply with a new translator which will (one hopes) accept
all the sub-statements.
Needs to return a processor for the sub-statements.
"""
raise NotImplementedError("I cannot understand complex statements.",args)
def done(self):
"""\
Called on a sub-translator to note that there will be no
more statements.
"""
pass
def error(self,parser,err):
reraise(err)
class CollectProcessor(Processor):
"""\
A processor which simply stores all (sub-)statements, recursively.
You need to override .store() in order to specify where;
default is the parent statement.
"""
verify = False
def __init__(self, parent=None, ctx=None, args=None, verify=None):
super(CollectProcessor,self).__init__(parent=parent, ctx=ctx)
self.args = args
self.statements = []
if verify is not None:
self.verify = verify
self.ctx = ctx
def simple_statement(self,args):
fn = self.lookup(args)
if fn.immediate:
res = fn.run(self.ctx)
if isinstance(res,defer.Deferred):
waitForDeferred(res)
return res
self.store(fn)
def complex_statement(self,args):
fn = self.lookup(args)
fn.start_block()
if fn.immediate:
return RunMe(self,fn)
else:
self.store(fn)
return fn.processor
def done(self):
return self.parent.end_block()
def store(self,proc):
self.parent.add(proc)
class RunMe(object):
"""\
This is a wrapper which runs a block as soon as it is finished.
Needed for complex statements which are marked "immediate", and
the top-level interpreter loop.
"""
def __init__(self,proc,fn):
self.proc = proc
self.fn = fn
self.fnp = fn.processor
def simple_statement(self,args):
return self.fnp.simple_statement(args)
def complex_statement(self,args):
return self.fnp.complex_statement(args)
def done(self):
self.fnp.done()
res = self.fn.run(self.proc.ctx)
if isinstance(res,defer.Deferred):
waitForDeferred(res)
class ImmediateProcessor(CollectProcessor):
"""\
A processor which directly executes all (sub-)statements.
"""
def __init__(self, parent=None, ctx=None, args=None, verify=False):
super(ImmediateProcessor,self).__init__(parent=parent, ctx=ctx)
def simple_statement(self,args):
fn = self.lookup(args)
res = fn.run(self.ctx)
if isinstance(res,defer.Deferred):
waitForDeferred(res)
return res
def complex_statement(self,args):
fn = self.lookup(args)
fn.start_block()
return RunMe(self,fn)
class Interpreter(Processor):
"""\
A basic interpreter for the main loop, which runs every
statement immediately.
"""
def __init__(self, ctx=None):
super(Interpreter,self).__init__(ctx)
if "words" not in ctx:
from homevent.statement import global_words
self.ctx = ctx(words=global_words(ctx=ctx))
else:
self.ctx = ctx
def simple_statement(self,args):
fn = self.lookup(args)
try:
fn.run(self.ctx)
except Exception as ex:
fix_exception(ex)
self.error(self,ex)
def complex_statement(self,args):
try:
fn = self.lookup(args)
except TypeError,e:
print >>self.ctx.out,"For",args,"::"
raise
fn.start_block()
return RunMe(self,fn)
def done(self):
#print >>self.ctx.out,"Exiting"
pass
class InteractiveInterpreter(Interpreter):
"""An interpreter which prints a prompt and recovers from errors"""
do_prompt = True
def error(self,parser,err):
from homevent.statement import UnknownWordError
if isinstance(err,(UnknownWordError,SyntaxError)):
print >>parser.ctx.out, "ERROR:", err
else:
print >>parser.ctx.out, "ERROR:"
traceback.print_exception(err.__class__,err,sys.exc_info()[2], file=parser.ctx.out)
if hasattr(parser,'init_state'):
parser.init_state()
return
def done(self):
self.ctx.out.write("\n")
| 0.038647 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class tmtrafficpolicy(base_resource) :
""" Configuration for TM traffic policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._action = ""
self._hits = 0
self.___count = 0
@property
def name(self) :
ur"""Name for the traffic policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the traffic policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
ur"""Expression, against which traffic is evaluated. Written in the classic syntax.
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
ur"""Expression, against which traffic is evaluated. Written in the classic syntax.
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
ur"""Name of the action to apply to requests or connections that match this policy.<br/>Minimum length = 1.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
ur"""Name of the action to apply to requests or connections that match this policy.<br/>Minimum length = 1
"""
try :
self._action = action
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(tmtrafficpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.tmtrafficpolicy
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add tmtrafficpolicy.
"""
try :
if type(resource) is not list :
addresource = tmtrafficpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete tmtrafficpolicy.
"""
try :
if type(resource) is not list :
deleteresource = tmtrafficpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update tmtrafficpolicy.
"""
try :
if type(resource) is not list :
updateresource = tmtrafficpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of tmtrafficpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = tmtrafficpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ tmtrafficpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the tmtrafficpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = tmtrafficpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = tmtrafficpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [tmtrafficpolicy() for _ in range(len(name))]
obj = [tmtrafficpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = tmtrafficpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of tmtrafficpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the tmtrafficpolicy resources configured on NetScaler.
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of tmtrafficpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmtrafficpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
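# Usage sketch (assumption; requires an authenticated nitro_service client and
# an existing traffic action; attribute values below are purely illustrative):
#   policy = tmtrafficpolicy()
#   policy.name = "tm_traffic_pol_example"
#   policy.rule = "ns_true"
#   policy.action = "tm_traffic_act_example"
#   tmtrafficpolicy.add(client, policy)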
class tmtrafficpolicy_response(base_response) :
def __init__(self, length=1) :
self.tmtrafficpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.tmtrafficpolicy = [tmtrafficpolicy() for _ in range(length)]
| 0.034653 |
from django.utils.translation import ugettext as _
from django_digest import HttpDigestAuthenticator
from rest_framework.authentication import get_authorization_header
from rest_framework.authentication import BaseAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework import exceptions
from onadata.apps.api.models.temp_token import TempToken
from django.utils import timezone
from django.conf import settings
def expired(time_token_created):
"""Checks if the time between when time_token_created and current time
is greater than the token expiry time.
:params time_token_created: The time the token we are checking was created.
:returns: Boolean True if not passed expired time, otherwise False.
"""
time_diff = (timezone.now() - time_token_created).total_seconds()
token_expiry_time = settings.DEFAULT_TEMP_TOKEN_EXPIRY_TIME
    return time_diff > token_expiry_time
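# Illustrative sketch (not part of the original module): assuming
# settings.DEFAULT_TEMP_TOKEN_EXPIRY_TIME is 3600 seconds, a token created two
# hours ago is reported as expired while a five-minute-old one is not:
#
#     from datetime import timedelta
#     expired(timezone.now() - timedelta(hours=2))    # -> True
#     expired(timezone.now() - timedelta(minutes=5))  # -> False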
class DigestAuthentication(BaseAuthentication):
def __init__(self):
self.authenticator = HttpDigestAuthenticator()
def authenticate(self, request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b'digest':
return None
if self.authenticator.authenticate(request):
return request.user, None
else:
raise AuthenticationFailed(
_(u"Invalid username/password"))
def authenticate_header(self, request):
response = self.authenticator.build_challenge_response()
return response['WWW-Authenticate']
class TempTokenAuthentication(TokenAuthentication):
model = TempToken
def authenticate(self, request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b'temptoken':
return None
if len(auth) == 1:
m = 'Invalid token header. No credentials provided.'
raise exceptions.AuthenticationFailed(m)
elif len(auth) > 2:
m = 'Invalid token header. Token string should not contain spaces.'
raise exceptions.AuthenticationFailed(m)
return self.authenticate_credentials(auth[1])
def authenticate_credentials(self, key):
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
if not token.user.is_active:
raise exceptions.AuthenticationFailed('User inactive or deleted')
if expired(token.created):
raise exceptions.AuthenticationFailed('Token expired')
return (token.user, token)
def authenticate_header(self, request):
return 'TempToken'
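# Client-side sketch (illustrative): TempTokenAuthentication expects the token
# in the Authorization header using the "TempToken" keyword, e.g.
#
#     Authorization: TempToken <key>
#
# where <key> is the key of a TempToken record; the host and endpoint are
# deployment-specific and not dictated by this module.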
| 0 |
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Cinesite VFX Ltd. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import Gaffer
# Rather than bind the C++ GraphComponent Range classes to
# Python, we just reimplement them in pure Python. This allows
# us to properly support filtering for subclasses defined in
# Python.
def __range( cls, parent ) :
for child in parent.children() :
if isinstance( child, cls ) :
yield child
def __recursiveRange( cls, parent ) :
for child in parent.children() :
if isinstance( child, cls ) :
yield child
for r in __recursiveRange( cls, child ) :
yield r
Gaffer.GraphComponent.Range = classmethod( __range )
Gaffer.GraphComponent.RecursiveRange = classmethod( __recursiveRange )
def __plugRange( cls, parent, direction ) :
for i in range( 0, len( parent ) ) :
child = parent[i]
if isinstance( child, cls ) and child.direction() == direction :
yield child
def __recursivePlugRange( cls, parent, direction = None ) :
for i in range( 0, len( parent ) ) :
child = parent[i]
if isinstance( child, cls ) and ( direction is None or child.direction() == direction ):
yield child
if isinstance( child, Gaffer.Plug ) :
for r in __recursivePlugRange( cls, child, direction ) :
yield r
Gaffer.Plug.InputRange = classmethod( functools.partial( __plugRange, direction = Gaffer.Plug.Direction.In ) )
Gaffer.Plug.OutputRange = classmethod( functools.partial( __plugRange, direction = Gaffer.Plug.Direction.Out ) )
Gaffer.Plug.RecursiveRange = classmethod( __recursivePlugRange )
Gaffer.Plug.RecursiveInputRange = classmethod( functools.partial( __recursivePlugRange, direction = Gaffer.Plug.Direction.In ) )
Gaffer.Plug.RecursiveOutputRange = classmethod( functools.partial( __recursivePlugRange, direction = Gaffer.Plug.Direction.Out ) )
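# Usage sketch (illustrative; `script` and `node` stand for any GraphComponent
# and Plug parent populated elsewhere):
#
#     for child in Gaffer.Node.Range( script ) :              # direct Node children only
#         ...
#     for plug in Gaffer.Plug.RecursiveInputRange( node ) :   # all nested input plugs
#         ...
#
# Because the ranges are implemented in Python, filtering also works for Node
# and Plug subclasses that are themselves defined in Python.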
| 0.031001 |
from django.db import models
from django.forms import ModelForm
from django.forms import Textarea,TextInput,NumberInput, Select, DateInput
from parsley.decorators import parsleyfy
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class Records(models.Model):
CHOICES = (('Individual', 'Individual',), ('Group', 'Group',),('Company','Company'))
category = models.CharField(max_length=50)
datecreated = models.DateField()
amount = models.FloatField()
paid = models.CharField(max_length=50)
unitprice = models.CharField(max_length=50)
quantity = models.IntegerField()
balance = models.FloatField()
description = models.TextField(blank=True, null=True)
customertype = models.CharField(max_length=40,choices=CHOICES)
customername = models.CharField(max_length=50)
customeraddress = models.CharField(max_length=50, blank=True, null=True)
customerphone = models.IntegerField(blank=True, null=True)
customeremail = models.EmailField(blank=True, null=True)
month=models.IntegerField()
year=models.IntegerField()
issuer = models.CharField(max_length=50, default='Null')
created_by = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
def get_absolute_url(self):
return reverse('record-detail', kwargs={'pk':self.pk})
@parsleyfy
class RecordForm(ModelForm):
class Meta:
model = Records
fields = ['paid','quantity','customeremail',
'customeraddress','datecreated','description','customertype','customername','unitprice',
'customerphone']
widgets = {
'paid': TextInput(attrs={'class':'form-control number','required':'required'}),
'quantity': NumberInput(attrs={"class":"form-control","required":"required","id":"quantity"}),
'customeremail': TextInput(attrs={'class':'form-control'}),
'customeraddress': TextInput(attrs={'class':'form-control'}),
'datecreated': DateInput(attrs={'class':'form-control','required':'required', 'type':'date'}),
'description': Textarea(attrs={'class':'form-control',"rows":"4","cols":""}),
'customertype': Select(attrs={'class':'form-control select2','required':'required'}),
'customername': TextInput(attrs={'class':'form-control','required':'required'}),
'unitprice': TextInput(attrs={"class":"form-control number","required":"required",
"onBlur":"AddTotal();","id":"unitprice"}),
'customerphone': NumberInput(attrs={'class':'form-control'})
}
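# View-side sketch (illustrative, not part of this module): RecordForm is a
# plain ModelForm, so a hypothetical view could process it like this; fields
# that exist on Records but are excluded from the form (category, amount,
# balance, month, year, issuer) would need to be filled in before saving:
#
#     form = RecordForm(request.POST or None)
#     if form.is_valid():
#         record = form.save(commit=False)
#         ...  # set the excluded fields here
#         record.save()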
class IncomeCategory(models.Model):
category_name = models.CharField(max_length=255)
def __str__(self):
return self.category_name
class ExpenditureCategory(models.Model):
category_name = models.CharField(max_length=255)
def __str__(self):
return self.category_name
class Credit(models.Model):
amount = models.CharField(max_length=50)
datecreated = models.DateField()
particulars = models.CharField(max_length=200)
def get_absolute_url(self):
return reverse('creditlist')
class CreditForm(ModelForm):
class Meta:
model = Credit
fields = ['amount','datecreated','particulars']
widgets = {
'amount': TextInput(attrs={'class':'form-control number','required':'required'}),
'datecreated': TextInput(attrs={'class':'form-control','id':'datespent','required':'required','type':'date'}),
'particulars': TextInput(attrs={'class':'form-control','required':'required'}),
}
class Debit(models.Model):
amount = models.CharField(max_length=50)
datecreated = models.DateField()
particulars = models.CharField(max_length=200)
def get_absolute_url(self):
return reverse('debit-list')
class DebitForm(ModelForm):
class Meta:
model = Debit
fields = ['amount','datecreated','particulars']
widgets = {
'amount': TextInput(attrs={'class':'form-control number','required':'required'}),
'datecreated': TextInput(attrs={'class':'form-control','id':'datespent2','required':'required','type':'date'}),
'particulars': TextInput(attrs={'class':'form-control','required':'required'}),
}
class Expenses(models.Model):
amountSpent = models.CharField(max_length=50)
dateSpent = models.DateField()
category = models.CharField(max_length=50)
detailSpent = models.TextField(blank=True, null=True)
monthSpent=models.IntegerField()
yearSpent=models.IntegerField()
issuerSpent = models.CharField(max_length=50, default='Null')
#Income = models.ForeignKey(Records, blank=True, null=True,on_delete=models.PROTECT)
def get_absolute_url(self):
return reverse('expense-details', kwargs={'pk':self.pk})
class ExpenseForm(ModelForm):
class Meta:
model = Expenses
fields = ['amountSpent','dateSpent','detailSpent']
widgets = {
'amountSpent': TextInput(attrs={'class':'form-control number','required':'required'}),
'dateSpent': DateInput(attrs={'class':'form-control','type':'date'}),
'detailSpent':Textarea(attrs={'class':'form-control',"rows":"4","cols":""})
}
| 0.04975 |
#Linked List implementation
class node(object):
"List node structure "
def __init__(self):
self.x=None
self.next=None
root=None
def init():
"Initialise list"
global root
root=node()
pass
def insert_end(v):
"Insert element at the end of list"
global root
if root.x is None:
root.x=v
else:
c=root
while not (c.next is None):
c=c.next
c.next=node()
c=c.next
c.x=v
pass
def insert_beg(v):
"Insert element at the root of list"
global root
if root.x is None:
root.x=v
else:
c=root
root=node()
root.x=v
root.next=c
pass
def insert_n(n,v):
"Insert element after nth element of list"
global root
if n==0:
insert_beg(v)
return
c=root
while not (c.next is None or n ==1):
c=c.next
n-=1
t=c.next
c.next=node()
c=c.next
c.x=v
c.next=t
pass
def remove_end():
"Remove element from the end of list"
global root
if root.x is None:
print "Empty list"
elif root.next is None:
root.x=None
else:
c=root
while not (c.next.next is None):
c=c.next
c.next=None
pass
def remove_beg():
"Remove element from the beginning of list"
global root
if root.x is None:
print "Empty list"
elif root.next is None:
root.x=None
else:
root=root.next
pass
def remove_n(n):
"Remove element after nth element of list"
global root
if n==0:
remove_beg()
return
c=root
while not (c.next is None or n ==1):
c=c.next
n-=1
    if n<1 or c.next is None:
print "Invalid n value"
else:
c.next=c.next.next
pass
def rev():
"Reverse the list"
global root
if root.x is None:return
r=root
c=r.next
r.next=None
while not (c is None):
t=c.next
c.next=r
r=c
if t is None:
root=c
c=t
def trav():
"Traverse the list"
global root
if root.x is None:
print "Empty list"
else:
print "The List: ",
c=root
while not (c is None):
print "->",c.x,
c=c.next
print
pass
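# Example session (illustrative, Python 2):
#
#     init(); insert_end(1); insert_end(2); insert_beg(0)
#     trav()   # prints: The List:  -> 0 -> 1 -> 2
#     rev()
#     trav()   # prints: The List:  -> 2 -> 1 -> 0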
if __name__=='__main__':
init()
while True:
print "Enter choice:"
print "0. Re-initialise the list"
print "1. Traverse list"
print "2. Insert at end"
print "3. Insert at root"
print "4. Remove from root"
print "5. Remove from end"
print "6. Insert at middle for a given n"
print "7. Remove from middle for a given n"
print "8. Reverse list"
print "9. Exit"
print "$Linked-list\_",
n=input()
if n==9:print "Exiting...";break
elif n==0:init();print "Initialised..."
elif n==1:trav()
elif n==2:insert_end(input("Enter value : "));print "Inserted "
elif n==3:insert_beg(input("Enter value : "));print "Inserted "
elif n==4:remove_beg();print "Removed "
elif n==5:remove_end();print "Removed "
elif n==6:insert_n(input("Enter n : "),input("Enter value : "));print "Inserted "
elif n==7:remove_n(input("Enter n : "));print "Removed "
elif n==8:rev();print "Reversed "
else:continue
l=raw_input("(Press Enter)")
| 0.035506 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock
import pytest
from google.cloud.ndb import _datastore_types
from google.cloud.ndb import exceptions
class TestBlobKey:
@staticmethod
def test_constructor_bytes():
value = b"abc"
blob_key = _datastore_types.BlobKey(value)
assert blob_key._blob_key is value
@staticmethod
def test_constructor_none():
blob_key = _datastore_types.BlobKey(None)
assert blob_key._blob_key is None
@staticmethod
def test_constructor_too_long():
value = b"a" * 2000
with pytest.raises(exceptions.BadValueError):
_datastore_types.BlobKey(value)
@staticmethod
def test_constructor_bad_type():
value = {"a": "b"}
with pytest.raises(exceptions.BadValueError):
_datastore_types.BlobKey(value)
@staticmethod
def test___eq__():
blob_key1 = _datastore_types.BlobKey(b"abc")
blob_key2 = _datastore_types.BlobKey(b"def")
blob_key3 = _datastore_types.BlobKey(None)
blob_key4 = b"ghi"
blob_key5 = unittest.mock.sentinel.blob_key
assert blob_key1 == blob_key1
assert not blob_key1 == blob_key2
assert not blob_key1 == blob_key3
assert not blob_key1 == blob_key4
assert not blob_key1 == blob_key5
@staticmethod
def test___lt__():
blob_key1 = _datastore_types.BlobKey(b"abc")
blob_key2 = _datastore_types.BlobKey(b"def")
blob_key3 = _datastore_types.BlobKey(None)
blob_key4 = b"ghi"
blob_key5 = unittest.mock.sentinel.blob_key
assert not blob_key1 < blob_key1
assert blob_key1 < blob_key2
with pytest.raises(TypeError):
blob_key1 < blob_key3
assert blob_key1 < blob_key4
with pytest.raises(TypeError):
blob_key1 < blob_key5
@staticmethod
def test___hash__():
value = b"289399038904ndkjndjnd02mx"
blob_key = _datastore_types.BlobKey(value)
assert hash(blob_key) == hash(value)
| 0 |
from core.domain import widget_domain
class FileReadInput(widget_domain.BaseWidget):
"""Definition of a widget.
Do NOT make any changes to this widget definition while the Oppia app is
running, otherwise things will break.
This class represents a widget, whose id is the name of the class. It is
auto-discovered when the default widgets are refreshed.
"""
# The human-readable name of the widget.
name = 'File upload'
# The category the widget falls under in the widget repository.
category = 'Basic Input'
# A description of the widget.
description = 'A widget for uploading files.'
# Customization parameters and their descriptions, types and default
# values. This attribute name MUST be prefixed by '_'.
_params = []
# Actions that the reader can perform on this widget which trigger a
# feedback interaction, and the associated input types. Interactive widgets
# must have at least one of these. This attribute name MUST be prefixed by
# '_'.
_handlers = [{
'name': 'submit', 'obj_type': 'UnicodeString'
}]
# Additional JS library dependencies that should be loaded in pages
# containing this widget. These should correspond to names of files in
# feconf.DEPENDENCIES_TEMPLATES_DIR.
_dependency_ids = []
| 0 |
import time
import logging
from autotest.client.shared import error
@error.context_aware
def run(test, params, env):
"""
    Test NMI injection and verify the guest's response.
    1) Log in to the guest
    2) Add 'nmi_watchdog=1' to the kernel boot options and reboot the guest
    3) Check that the guest's NMI counter increases after NMI injection
:param test: kvm test object
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
get_nmi_cmd = params["get_nmi_cmd"]
kernel_version = session.get_command_output("uname -r").strip()
nmi_watchdog_type = int(params["nmi_watchdog_type"])
update_kernel_cmd = ("grubby --update-kernel=/boot/vmlinuz-%s "
"--args='nmi_watchdog=%d'" %
(kernel_version, nmi_watchdog_type))
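    # For a hypothetical kernel version this expands to, e.g.:
    #   grubby --update-kernel=/boot/vmlinuz-3.10.0-957.el7.x86_64 --args='nmi_watchdog=1'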
error.context("Add 'nmi_watchdog=%d' to guest kernel cmdline and reboot"
% nmi_watchdog_type)
session.cmd(update_kernel_cmd)
time.sleep(int(params.get("sleep_before_reset", 10)))
session = vm.reboot(session, method='shell', timeout=timeout)
try:
error.context("Getting guest's number of vcpus")
guest_cpu_num = session.cmd(params["cpu_chk_cmd"])
error.context("Getting guest's NMI counter")
output = session.cmd(get_nmi_cmd)
logging.debug(output.strip())
nmi_counter1 = output.split()[1:]
logging.info("Waiting 60 seconds to see if guest's NMI counter "
"increases")
time.sleep(60)
error.context("Getting guest's NMI counter 2nd time")
output = session.cmd(get_nmi_cmd)
logging.debug(output.strip())
nmi_counter2 = output.split()[1:]
error.context("")
for i in range(int(guest_cpu_num)):
logging.info("vcpu: %s, nmi_counter1: %s, nmi_counter2: %s" %
(i, nmi_counter1[i], nmi_counter2[i]))
if int(nmi_counter2[i]) <= int(nmi_counter1[i]):
raise error.TestFail("Guest's NMI counter did not increase "
"after 60 seconds")
finally:
session.close()
| 0 |
from __future__ import division, print_function, absolute_import
import six
range = six.moves.range
map = six.moves.map
import dynpy
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises
densemx = dynpy.mx.DenseMatrix.format_mx([[0,1],[2,3]])
sparsemx = dynpy.mx.SparseMatrix.format_mx([[0,1],[2,3]])
def test_from_coords_dense():
rows = np.array([0,0,0])
cols = np.array([0,0,0])
data = np.array([1,1,1])
r = dynpy.mx.DenseMatrix.from_coords(rows, cols, data, shape=(2,2))
assert_array_equal(r, np.array([[3,0],[0,0]]))
def test_from_coords_sparse():
rows = np.array([0,0,0])
cols = np.array([0,0,0])
data = np.array([1,1,1])
r = dynpy.mx.SparseMatrix.from_coords(rows, cols, data, shape=(2,2))
r = r.todense()
assert_array_equal(r, np.array([[3,0],[0,0]]))
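# Note: in both from_coords tests above the three (0, 0) entries are
# accumulated, which is why the resulting matrix holds 3 at position (0, 0).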
def test_array_equal():
a = np.array([0,])
b = np.array([0,])
c = np.array([1,])
assert( dynpy.mx.DenseMatrix.array_equal(a, b) )
assert( not dynpy.mx.DenseMatrix.array_equal(a, c) )
def test_issparse():
assert( dynpy.mx.issparse(densemx) == False)
assert( dynpy.mx.issparse(sparsemx) == True)
@raises(ValueError)
def test_issparse_invalid():
assert( dynpy.mx.issparse('not a matrix') )
def test_todense():
assert( dynpy.mx.issparse(dynpy.mx.todense(densemx)) == False)
assert( dynpy.mx.issparse(dynpy.mx.todense(sparsemx)) == False)
def test_tosparse():
assert( dynpy.mx.issparse(dynpy.mx.tosparse(densemx)) == True)
assert( dynpy.mx.issparse(dynpy.mx.tosparse(sparsemx)) == True)
def test_hashable():
# 1-dimensional
hash(dynpy.mx.hashable_array(np.ravel(densemx[0,:])))
hash(dynpy.mx.hashable_array(densemx[0:1,:]))
# 2-dimensional
hash(dynpy.mx.hashable_array(densemx[0,:]))
# large 2-dimensional
hash(dynpy.mx.hashable_array(np.zeros((1500,1500))))
def test_arrayequal():
assert( dynpy.mx.array_equal(densemx, densemx) )
assert( not dynpy.mx.array_equal(densemx, 1+densemx) )
def test_getdiag():
assert( (dynpy.mx.getdiag(densemx) == [0,3]).all() )
| 0.05342 |