# -*- coding: utf-8 -*-
from __future__ import absolute_import
from email.utils import parseaddr
import functools
import htmlentitydefs
import itertools
import logging
import operator
import re
from ast import literal_eval
from openerp.tools import mute_logger
# Validation Library https://pypi.python.org/pypi/validate_email/1.1
from .validate_email import validate_email
import openerp
from openerp.osv import orm
from openerp.osv import fields
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
pattern = re.compile(r"&(\w+?);")
_logger = logging.getLogger('base.partner.merge')
# http://www.php2python.com/wiki/function.html-entity-decode/
def html_entity_decode_char(m, defs=None):
if defs is None:
defs = htmlentitydefs.entitydefs
try:
return defs[m.group(1)]
except KeyError:
return m.group(0)
def html_entity_decode(string):
return pattern.sub(html_entity_decode_char, string)
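# Hedged example: with the default entity table, html_entity_decode
# turns named HTML entities back into characters, e.g.
#   html_entity_decode('john&amp;jane') -> 'john&jane'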
def sanitize_email(partner_email):
assert isinstance(partner_email, basestring) and partner_email
result = re.subn(r';|/|:', ',',
html_entity_decode(partner_email or ''))[0].split(',')
emails = [parseaddr(email)[1]
for item in result
for email in item.split()]
return [email.lower()
for email in emails
if validate_email(email)]
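# Hedged sketch of sanitize_email() on a messy multi-address field:
# ';', '/' and ':' become ',', each chunk is parsed with parseaddr,
# then validated and lower-cased, e.g.
#   sanitize_email('John <John@Example.COM>; jane@example.org / junk')
#   -> ['john@example.com', 'jane@example.org']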
def is_integer_list(ids):
return all(isinstance(i, (int, long)) for i in ids)
class MergePartnerLine(orm.TransientModel):
_name = 'base.partner.merge.line'
_columns = {
'wizard_id': fields.many2one('base.partner.merge.automatic.wizard',
'Wizard'),
'min_id': fields.integer('MinID'),
'aggr_ids': fields.char('Ids', required=True),
}
_order = 'min_id asc'
class MergePartnerAutomatic(orm.TransientModel):
"""
The idea behind this wizard is to create a list of potential partners to
merge. We use two objects, the first one is the wizard for the end-user.
And the second will contain the partner list to merge.
"""
_name = 'base.partner.merge.automatic.wizard'
_columns = {
# Group by
'group_by_email': fields.boolean('Email'),
'group_by_name': fields.boolean('Name'),
'group_by_is_company': fields.boolean('Is Company'),
'group_by_vat': fields.boolean('VAT'),
'group_by_parent_id': fields.boolean('Parent Company'),
'state': fields.selection([('option', 'Option'),
('selection', 'Selection'),
('finished', 'Finished')],
'State',
readonly=True,
required=True),
'number_group': fields.integer("Group of Contacts", readonly=True),
'current_line_id': fields.many2one('base.partner.merge.line',
'Current Line'),
'line_ids': fields.one2many('base.partner.merge.line',
'wizard_id', 'Lines'),
'partner_ids': fields.many2many('res.partner', string='Contacts'),
'dst_partner_id': fields.many2one('res.partner',
string='Destination Contact'),
'exclude_contact': fields.boolean('A user associated with the contact'),
'exclude_journal_item': fields.boolean('Journal Items associated'
' with the contact'),
'maximum_group': fields.integer("Maximum of Group of Contacts"),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(MergePartnerAutomatic, self
).default_get(cr, uid, fields, context)
if (context.get('active_model') == 'res.partner' and
context.get('active_ids')):
partner_ids = context['active_ids']
res['state'] = 'selection'
res['partner_ids'] = partner_ids
res['dst_partner_id'] = self._get_ordered_partner(cr, uid,
partner_ids,
context=context
)[-1].id
return res
_defaults = {
'state': 'option'
}
def get_fk_on(self, cr, table):
q = """ SELECT cl1.relname as table,
att1.attname as column
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND cl2.relname = %s
AND att2.attname = 'id'
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND con.contype = 'f'
"""
return cr.execute(q, (table,))
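# Hedged note: after get_fk_on(cr, 'res_partner'), cr.fetchall() yields
# (table, column) pairs, one per foreign-key constraint whose first
# column references res_partner.id (e.g. ('res_users', 'partner_id')).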
def _update_foreign_keys(self, cr, uid, src_partners,
dst_partner, context=None):
_logger.debug('_update_foreign_keys for dst_partner: %s for '
'src_partners: %r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
# find the many2one relation to a partner
proxy = self.pool.get('res.partner')
self.get_fk_on(cr, 'res_partner')
# skip the two merge-wizard tables themselves
for table, column in cr.fetchall():
if 'base_partner_merge_' in table:
continue
partner_ids = tuple(map(int, src_partners))
query = ("SELECT column_name FROM information_schema.columns"
" WHERE table_name LIKE '%s'") % (table)
cr.execute(query, ())
columns = []
for data in cr.fetchall():
if data[0] != column:
columns.append(data[0])
query_dic = {
'table': table,
'column': column,
'value': columns[0],
}
if len(columns) <= 1:
# simple relation table: update while avoiding duplicate rows
query = """
UPDATE "%(table)s" as ___tu
SET %(column)s = %%s
WHERE
%(column)s = %%s AND
NOT EXISTS (
SELECT 1
FROM "%(table)s" as ___tw
WHERE
%(column)s = %%s AND
___tu.%(value)s = ___tw.%(value)s
)""" % query_dic
for partner_id in partner_ids:
cr.execute(query, (dst_partner.id, partner_id,
dst_partner.id))
else:
cr.execute("SAVEPOINT recursive_partner_savepoint")
try:
query = ('UPDATE "%(table)s" SET %(column)s = %%s WHERE '
'%(column)s IN %%s') % query_dic
cr.execute(query, (dst_partner.id, partner_ids,))
if (column == proxy._parent_name and
table == 'res_partner'):
query = """
WITH RECURSIVE cycle(id, parent_id) AS (
SELECT id, parent_id FROM res_partner
UNION
SELECT cycle.id, res_partner.parent_id
FROM res_partner, cycle
WHERE res_partner.id = cycle.parent_id
AND cycle.id != cycle.parent_id
)
SELECT id FROM cycle
WHERE id = parent_id AND id = %s
"""
cr.execute(query, (dst_partner.id,))
if cr.fetchall():
cr.execute("ROLLBACK TO SAVEPOINT "
"recursive_partner_savepoint")
finally:
cr.execute("RELEASE SAVEPOINT "
"recursive_partner_savepoint")
def _update_reference_fields(self, cr, uid, src_partners, dst_partner,
context=None):
_logger.debug('_update_reference_fields for dst_partner: %s for '
'src_partners: %r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
def update_records(model, src, field_model='model', field_id='res_id',
context=None):
proxy = self.pool.get(model)
if proxy is None:
return
domain = [(field_model, '=', 'res.partner'),
(field_id, '=', src.id)]
ids = proxy.search(cr, openerp.SUPERUSER_ID,
domain, context=context)
return proxy.write(cr, openerp.SUPERUSER_ID, ids,
{field_id: dst_partner.id}, context=context)
update_records = functools.partial(update_records, context=context)
for partner in src_partners:
update_records('base.calendar', src=partner,
field_model='model_id.model')
update_records('ir.attachment', src=partner,
field_model='res_model')
update_records('mail.followers', src=partner,
field_model='res_model')
update_records('mail.message', src=partner)
update_records('marketing.campaign.workitem', src=partner,
field_model='object_id.model')
update_records('ir.model.data', src=partner)
proxy = self.pool['ir.model.fields']
domain = [('ttype', '=', 'reference')]
record_ids = proxy.search(cr, openerp.SUPERUSER_ID, domain,
context=context)
for record in proxy.browse(cr, openerp.SUPERUSER_ID, record_ids,
context=context):
try:
proxy_model = self.pool[record.model]
except KeyError:
# ignore old tables
continue
if record.model == 'ir.property':
continue
field_type = proxy_model._columns.get(record.name).__class__._type
if field_type == 'function':
continue
for partner in src_partners:
domain = [
(record.name, '=', 'res.partner,%d' % partner.id)
]
model_ids = proxy_model.search(cr, openerp.SUPERUSER_ID,
domain, context=context)
values = {
record.name: 'res.partner,%d' % dst_partner.id,
}
proxy_model.write(cr, openerp.SUPERUSER_ID, model_ids, values,
context=context)
def _update_values(self, cr, uid, src_partners, dst_partner, context=None):
_logger.debug('_update_values for dst_partner: %s for src_partners: '
'%r',
dst_partner.id,
list(map(operator.attrgetter('id'), src_partners)))
columns = dst_partner._columns
def write_serializer(column, item):
if isinstance(item, browse_record):
return item.id
else:
return item
values = dict()
for column, field in columns.iteritems():
if (field._type not in ('many2many', 'one2many') and
not isinstance(field, fields.function)):
for item in itertools.chain(src_partners, [dst_partner]):
if item[column]:
values[column] = write_serializer(column,
item[column])
values.pop('id', None)
parent_id = values.pop('parent_id', None)
dst_partner.write(values)
if parent_id and parent_id != dst_partner.id:
try:
dst_partner.write({'parent_id': parent_id})
except orm.except_orm:
_logger.info('Skip recursive partner hierarchies for '
'parent_id %s of partner: %s',
parent_id, dst_partner.id)
@mute_logger('openerp.osv.expression', 'openerp.osv.orm')
def _merge(self, cr, uid, partner_ids, dst_partner=None, context=None):
proxy = self.pool.get('res.partner')
partner_ids = proxy.exists(cr, uid, list(partner_ids),
context=context)
if len(partner_ids) < 2:
return
if len(partner_ids) > 3:
raise orm.except_orm(
_('Error'),
_("For safety reasons, you cannot merge more than 3 contacts "
"together. You can re-open the wizard several times if "
"needed."))
if (openerp.SUPERUSER_ID != uid and
len(set(partner.email for partner
in proxy.browse(cr, uid, partner_ids,
context=context))) > 1):
raise orm.except_orm(
_('Error'),
_("All contacts must have the same email. Only the "
"Administrator can merge contacts with different emails."))
if dst_partner and dst_partner.id in partner_ids:
src_partners = proxy.browse(cr, uid,
[id for id in partner_ids
if id != dst_partner.id],
context=context)
else:
ordered_partners = self._get_ordered_partner(cr, uid, partner_ids,
context)
dst_partner = ordered_partners[-1]
src_partners = ordered_partners[:-1]
_logger.info("dst_partner: %s", dst_partner.id)
if (openerp.SUPERUSER_ID != uid and
self._model_is_installed(
cr, uid, 'account.move.line', context=context) and
self.pool['account.move.line'].search(
cr, openerp.SUPERUSER_ID,
[('partner_id', 'in', [partner.id for partner
in src_partners])],
context=context)):
raise orm.except_orm(
_('Error'),
_("Only the destination contact may be linked to existing "
"Journal Items. Please ask the Administrator if you need to"
" merge several contacts linked to existing Journal "
"Items."))
self._update_foreign_keys(
cr, uid, src_partners, dst_partner, context=context)
self._update_reference_fields(
cr, uid, src_partners, dst_partner, context=context)
self._update_values(
cr, uid, src_partners, dst_partner, context=context)
_logger.info('(uid = %s) merged the partners %r with %s',
uid,
list(map(operator.attrgetter('id'), src_partners)),
dst_partner.id)
dst_partner.message_post(
body='%s %s' % (
_("Merged with the following partners:"),
", ".join(
'%s<%s>(ID %s)' % (p.name, p.email or 'n/a', p.id)
for p in src_partners
)
)
)
for partner in src_partners:
partner.unlink()
def clean_emails(self, cr, uid, context=None):
"""
Clean the email address of the partner, if there is an email field
with a minimum of two addresses, the system will create a new partner,
with the information of the previous one and will copy the new cleaned
email into the email field.
"""
if context is None:
context = {}
proxy_model = self.pool['ir.model.fields']
field_ids = proxy_model.search(cr, uid,
[('model', '=', 'res.partner'),
('ttype', 'like', '%2many')],
context=context)
fields = proxy_model.read(cr, uid, field_ids, context=context)
reset_fields = dict((field['name'], []) for field in fields)
proxy_partner = self.pool['res.partner']
context['active_test'] = False
ids = proxy_partner.search(cr, uid, [], context=context)
fields = ['name', 'var', 'partner_id', 'is_company', 'email']
partners = proxy_partner.read(cr, uid, ids, fields, context=context)
partners.sort(key=operator.itemgetter('id'))
partners_len = len(partners)
_logger.info('partner_len: %r', partners_len)
for idx, partner in enumerate(partners):
if not partner['email']:
continue
percent = (idx / float(partners_len)) * 100.0
_logger.info('idx: %r', idx)
_logger.info('percent: %r', percent)
try:
emails = sanitize_email(partner['email'])
head, tail = emails[:1], emails[1:]
email = head[0] if head else False
proxy_partner.write(cr, uid, [partner['id']],
{'email': email}, context=context)
for email in tail:
values = dict(reset_fields, email=email)
proxy_partner.copy(cr, uid, partner['id'], values,
context=context)
except Exception:
_logger.exception("There is a problem with this partner: %r",
partner)
raise
return True
def close_cb(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
def _generate_query(self, fields, maximum_group=100):
group_fields = ', '.join(fields)
filters = []
for field in fields:
if field in ['email', 'name']:
filters.append((field, 'IS NOT', 'NULL'))
criteria = ' AND '.join('%s %s %s' % (field, op, value)
for field, op, value in filters)
text = [
"SELECT min(id), array_agg(id)",
"FROM res_partner",
]
if criteria:
text.append('WHERE %s' % criteria)
text.extend([
"GROUP BY %s" % group_fields,
"HAVING COUNT(*) >= 2",
"ORDER BY min(id)",
])
if maximum_group:
text.extend([
"LIMIT %s" % maximum_group,
])
return ' '.join(text)
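# Hedged example: _generate_query(['email', 'name'], 100) returns the
# following as one space-joined string:
#   SELECT min(id), array_agg(id) FROM res_partner
#   WHERE email IS NOT NULL AND name IS NOT NULL
#   GROUP BY email, name HAVING COUNT(*) >= 2
#   ORDER BY min(id) LIMIT 100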
def _compute_selected_groupby(self, this):
group_by_str = 'group_by_'
group_by_len = len(group_by_str)
fields = [
key[group_by_len:]
for key in self._columns.keys()
if key.startswith(group_by_str)
]
groups = [
field
for field in fields
if getattr(this, '%s%s' % (group_by_str, field), False)
]
if not groups:
raise orm.except_orm(_('Error'),
_("You have to specify a filter for your "
"selection"))
return groups
def next_cb(self, cr, uid, ids, context=None):
"""
Don't compute any thing
"""
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def _get_ordered_partner(self, cr, uid, partner_ids, context=None):
partners = self.pool.get('res.partner'
).browse(cr, uid,
list(partner_ids),
context=context)
ordered_partners = sorted(
sorted(
partners,
key=operator.attrgetter('create_date'),
reverse=True
),
key=operator.attrgetter('active'),
reverse=True
)
return ordered_partners
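# Hedged note: the double sort above puts active partners before
# inactive ones and, within each group, newer create_date first; the
# callers that take ordered_partners[-1] therefore pick the oldest
# record (an inactive one if any exists) as the merge destination.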
def _next_screen(self, cr, uid, this, context=None):
this.refresh()
values = {}
if this.line_ids:
# in this case, we try to find the next record.
current_line = this.line_ids[0]
current_partner_ids = literal_eval(current_line.aggr_ids)
values.update({
'current_line_id': current_line.id,
'partner_ids': [(6, 0, current_partner_ids)],
'dst_partner_id': self._get_ordered_partner(
cr, uid,
current_partner_ids,
context
)[-1].id,
'state': 'selection',
})
else:
values.update({
'current_line_id': False,
'partner_ids': [],
'state': 'finished',
})
this.write(values)
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def _model_is_installed(self, cr, uid, model, context=None):
proxy = self.pool.get('ir.model')
domain = [('model', '=', model)]
return proxy.search_count(cr, uid, domain, context=context) > 0
def _partner_use_in(self, cr, uid, aggr_ids, models, context=None):
"""
Check if there is no occurence of this group of partner in the selected
model
"""
for model, field in models.iteritems():
proxy = self.pool.get(model)
domain = [(field, 'in', aggr_ids)]
if proxy.search_count(cr, uid, domain, context=context):
return True
return False
def compute_models(self, cr, uid, ids, context=None):
"""
Compute the different models needed by the system if you want to
exclude some partners.
"""
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
models = {}
if this.exclude_contact:
models['res.users'] = 'partner_id'
if (self._model_is_installed(
cr, uid, 'account.move.line', context=context) and
this.exclude_journal_item):
models['account.move.line'] = 'partner_id'
return models
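# Hedged example: with both exclude options ticked and the accounting
# module installed, compute_models() returns
#   {'res.users': 'partner_id', 'account.move.line': 'partner_id'}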
def _process_query(self, cr, uid, ids, query, context=None):
"""
Execute the select request and write the result in this wizard
"""
proxy = self.pool.get('base.partner.merge.line')
this = self.browse(cr, uid, ids[0], context=context)
models = self.compute_models(cr, uid, ids, context=context)
cr.execute(query)
counter = 0
for min_id, aggr_ids in cr.fetchall():
if models and self._partner_use_in(cr, uid, aggr_ids, models,
context=context):
continue
values = {
'wizard_id': this.id,
'min_id': min_id,
'aggr_ids': aggr_ids,
}
proxy.create(cr, uid, values, context=context)
counter += 1
values = {
'state': 'selection',
'number_group': counter,
}
this.write(values)
_logger.info("counter: %s", counter)
def start_process_cb(self, cr, uid, ids, context=None):
"""
Start the process.
* Compute the selected groups (with duplication)
* If the user has selected the 'exclude_XXX' fields, avoid the
partners.
"""
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
groups = self._compute_selected_groupby(this)
query = self._generate_query(groups, this.maximum_group)
self._process_query(cr, uid, ids, query, context=context)
return self._next_screen(cr, uid, this, context)
def automatic_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
this.start_process_cb()
this.refresh()
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def parent_migration_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
query = """
SELECT
min(p1.id),
array_agg(DISTINCT p1.id)
FROM
res_partner as p1
INNER join
res_partner as p2
ON
p1.email = p2.email AND
p1.name = p2.name AND
(p1.parent_id = p2.id OR p1.id = p2.parent_id)
WHERE
p2.id IS NOT NULL
GROUP BY
p1.email,
p1.name,
CASE WHEN p1.parent_id = p2.id THEN p2.id
ELSE p1.id
END
HAVING COUNT(*) >= 2
ORDER BY
min(p1.id)
"""
self._process_query(cr, uid, ids, query, context=context)
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL,
parent_id = NULL
WHERE
parent_id = id
""")
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def update_all_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
# WITH RECURSIVE cycle(id, parent_id) AS (
# SELECT id, parent_id FROM res_partner
# UNION
# SELECT cycle.id, res_partner.parent_id
# FROM res_partner, cycle
# WHERE res_partner.id = cycle.parent_id AND
# cycle.id != cycle.parent_id
# )
# UPDATE res_partner
# SET parent_id = NULL
# WHERE id in (SELECT id FROM cycle WHERE id = parent_id);
this = self.browse(cr, uid, ids[0], context=context)
self.parent_migration_process_cb(cr, uid, ids, context=None)
list_merge = [
{'group_by_vat': True,
'group_by_email': True,
'group_by_name': True},
# {'group_by_name': True,
# 'group_by_is_company': True,
# 'group_by_parent_id': True},
# {'group_by_email': True,
# 'group_by_is_company': True,
# 'group_by_parent_id': True},
# {'group_by_name': True,
# 'group_by_vat': True,
# 'group_by_is_company': True,
# 'exclude_journal_item': True},
# {'group_by_email': True,
# 'group_by_vat': True,
# 'group_by_is_company': True,
# 'exclude_journal_item': True},
# {'group_by_email': True,
# 'group_by_is_company': True,
# 'exclude_contact': True,
# 'exclude_journal_item': True},
# {'group_by_name': True,
# 'group_by_is_company': True,
# 'exclude_contact': True,
# 'exclude_journal_item': True}
]
for merge_value in list_merge:
id = self.create(cr, uid, merge_value, context=context)
self.automatic_process_cb(cr, uid, [id], context=context)
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL
WHERE
parent_id IS NOT NULL AND
is_company IS NOT NULL
""")
# cr.execute("""
# UPDATE
# res_partner as p1
# SET
# is_company = NULL,
# parent_id = (
# SELECT p2.id
# FROM res_partner as p2
# WHERE p2.email = p1.email AND
# p2.parent_id != p2.id
# LIMIT 1
# )
# WHERE
# p1.parent_id = p1.id
# """)
return self._next_screen(cr, uid, this, context)
def merge_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
partner_ids = set(map(int, this.partner_ids))
if not partner_ids:
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
self._merge(cr, uid, partner_ids, this.dst_partner_id,
context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def auto_set_parent_id(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
# select partners that have at least one invoice
partner_treated = ['@gmail.com']
cr.execute(""" SELECT p.id, p.email
FROM res_partner as p
LEFT JOIN account_invoice as a
ON p.id = a.partner_id AND a.state in ('open','paid')
WHERE p.grade_id is NOT NULL
GROUP BY p.id
ORDER BY COUNT(a.id) DESC
""")
re_email = re.compile(r".*@")
for id, email in cr.fetchall():
# check email domain
email = re_email.sub("@", email or "")
if not email or email in partner_treated:
continue
partner_treated.append(email)
# don't update the partners if more than one of them has
# invoices
cr.execute("""
SELECT *
FROM res_partner as p
WHERE p.id != %s AND p.email LIKE '%%%s' AND
EXISTS (SELECT * FROM account_invoice as a
WHERE p.id = a.partner_id
AND a.state in ('open','paid'))
""" % (id, email))
if len(cr.fetchall()) > 1:
_logger.info("%s MORE OF ONE COMPANY", email)
continue
# to display changed values
cr.execute(""" SELECT id,email
FROM res_partner
WHERE parent_id != %s
AND id != %s AND email LIKE '%%%s'
""" % (id, id, email))
_logger.info("%r", cr.fetchall())
# update the parent_id
cr.execute(""" UPDATE res_partner
SET parent_id = %s
WHERE id != %s AND email LIKE '%%%s'
""" % (id, id, email))
return False
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import aggregates as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
class AggregatesClient(base_compute_client.BaseComputeClient):
def list_aggregates(self):
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
body = json.loads(body)
self.validate_response(schema.list_aggregates, resp, body)
return rest_client.ResponseBody(resp, body)
def show_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % aggregate_id)
body = json.loads(body)
self.validate_response(schema.get_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def create_aggregate(self, **kwargs):
"""Create a new aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createAggregate
"""
post_body = json.dumps({'aggregate': kwargs})
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
self.validate_response(schema.create_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def update_aggregate(self, aggregate_id, **kwargs):
"""Update an aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateAggregate
"""
put_body = json.dumps({'aggregate': kwargs})
resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
body = json.loads(body)
self.validate_response(schema.update_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_aggregate(self, aggregate_id):
"""Delete the given aggregate."""
resp, body = self.delete("os-aggregates/%s" % aggregate_id)
self.validate_response(schema.delete_aggregate, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_aggregate(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Return the primary type of resource this client works with."""
return 'aggregate'
def add_host(self, aggregate_id, **kwargs):
"""Add a host to the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addHost
"""
post_body = json.dumps({'add_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def remove_host(self, aggregate_id, **kwargs):
"""Remove a host from the given aggregate.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#removeAggregateHost
"""
post_body = json.dumps({'remove_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return rest_client.ResponseBody(resp, body)
def set_metadata(self, aggregate_id, **kwargs):
"""Replace the aggregate's existing metadata with new metadata.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#addAggregateMetadata
"""
post_body = json.dumps({'set_metadata': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_set_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
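# Hedged usage sketch (client wiring assumed, not defined here): with
# an authenticated tempest clients.Manager instance,
#   client = manager.aggregates_client
#   agg = client.create_aggregate(name='agg1', availability_zone='nova')
#   client.add_host(agg['aggregate']['id'], host='compute-1')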
"""Create coordinate transforms."""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from ...transforms import combine_transforms, invert_transform, Transform
from ...utils import logger
from ..constants import FIFF
from .constants import CTF
def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa):
"""Make a transform from cardinal landmarks."""
# XXX de-duplicate this with code from Montage somewhere?
diff_1 = r_nasion - r_lpa
ex = r_rpa - r_lpa
alpha = np.dot(diff_1, ex) / np.dot(ex, ex)
ex /= np.sqrt(np.sum(ex * ex))
trans = np.eye(4)
move = (1. - alpha) * r_lpa + alpha * r_rpa
trans[:3, 3] = move
trans[:3, 0] = ex
ey = r_nasion - move
ey /= np.sqrt(np.sum(ey * ey))
trans[:3, 1] = ey
trans[:3, 2] = np.cross(ex, ey) # ez
return Transform(fro, to, trans)
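# Hedged note: the transform above follows the usual cardinal-landmark
# construction: the x axis (ex) runs from LPA to RPA, the origin is the
# projection of nasion onto that axis, the y axis (ey) points from the
# origin toward nasion, and z completes the right-handed frame.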
def _quaternion_align(from_frame, to_frame, from_pts, to_pts):
"""Perform an alignment using the unit quaternions (modifies points)."""
assert from_pts.shape[1] == to_pts.shape[1] == 3
# Calculate the centroids and subtract
from_c, to_c = from_pts.mean(axis=0), to_pts.mean(axis=0)
from_ = from_pts - from_c
to_ = to_pts - to_c
# Compute the dot products
S = np.dot(from_.T, to_)
# Compute the magical N matrix
N = np.array([[S[0, 0] + S[1, 1] + S[2, 2], 0., 0., 0.],
[S[1, 2] - S[2, 1], S[0, 0] - S[1, 1] - S[2, 2], 0., 0.],
[S[2, 0] - S[0, 2], S[0, 1] + S[1, 0],
-S[0, 0] + S[1, 1] - S[2, 2], 0.],
[S[0, 1] - S[1, 0], S[2, 0] + S[0, 2],
S[1, 2] + S[2, 1], -S[0, 0] - S[1, 1] + S[2, 2]]])
# Compute the eigenvalues and eigenvectors
# Use the eigenvector corresponding to the largest eigenvalue as the
# unit quaternion defining the rotation
eig_vals, eig_vecs = linalg.eigh(N, overwrite_a=True)
which = np.argmax(eig_vals)
if eig_vals[which] < 0:
raise RuntimeError('No positive eigenvalues. Cannot do the alignment.')
q = eig_vecs[:, which]
# Write out the rotation
trans = np.eye(4)
trans[0, 0] = q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3]
trans[0, 1] = 2.0 * (q[1] * q[2] - q[0] * q[3])
trans[0, 2] = 2.0 * (q[1] * q[3] + q[0] * q[2])
trans[1, 0] = 2.0 * (q[2] * q[1] + q[0] * q[3])
trans[1, 1] = q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3]
trans[1, 2] = 2.0 * (q[2] * q[3] - q[0] * q[1])
trans[2, 0] = 2.0 * (q[3] * q[1] - q[0] * q[2])
trans[2, 1] = 2.0 * (q[3] * q[2] + q[0] * q[1])
trans[2, 2] = q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]
# Now we need to generate a transformed translation vector
trans[:3, 3] = to_c - np.dot(trans[:3, :3], from_c)
del to_c, from_c
# Test the transformation and print the results
logger.info(' Quaternion matching (desired vs. transformed):')
for fro, to in zip(from_pts, to_pts):
rr = np.dot(trans[:3, :3], fro) + trans[:3, 3]
diff = np.sqrt(np.sum((to - rr) ** 2))
logger.info(' %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm '
'(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm'
% (tuple(1000 * to) + tuple(1000 * rr) +
tuple(1000 * fro) + (1000 * diff,)))
if diff > 1e-4:
raise RuntimeError('Something is wrong: quaternion matching did '
'not work (see above)')
return Transform(from_frame, to_frame, trans)
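# Hedged note: this is Horn's closed-form absolute-orientation method:
# build the 4x4 N matrix from point correlations, take the eigenvector
# of the largest eigenvalue as a unit quaternion, convert it to a
# rotation matrix, and solve the translation from the centroids. The
# final check verifies each point lands within 0.1 mm of its target.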
def _make_ctf_coord_trans_set(res4, coils):
"""Figure out the necessary coordinate transforms."""
# CTF head > Neuromag head
lpa = rpa = nas = T1 = T2 = T3 = T5 = None
if coils is not None:
for p in coils:
if p['valid'] and (p['coord_frame'] ==
FIFF.FIFFV_MNE_COORD_CTF_HEAD):
if lpa is None and p['kind'] == CTF.CTFV_COIL_LPA:
lpa = p
elif rpa is None and p['kind'] == CTF.CTFV_COIL_RPA:
rpa = p
elif nas is None and p['kind'] == CTF.CTFV_COIL_NAS:
nas = p
if lpa is None or rpa is None or nas is None:
raise RuntimeError('Some of the mandatory HPI head-coordinate '
'info was not there.')
t = _make_transform_card(FIFF.FIFFV_COORD_HEAD,
FIFF.FIFFV_MNE_COORD_CTF_HEAD,
lpa['r'], nas['r'], rpa['r'])
T3 = invert_transform(t)
# CTF device -> Neuromag device
#
# Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm
# in z direction to get a coordinate system comparable to the Neuromag one
#
R = np.eye(4)
R[:3, 3] = [0., 0., 0.19]
val = 0.5 * np.sqrt(2.)
R[0, 0] = val
R[0, 1] = -val
R[1, 0] = val
R[1, 1] = val
T4 = Transform(FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_COORD_DEVICE, R)
# CTF device -> CTF head
# We need to make the implicit transform explicit!
h_pts = dict()
d_pts = dict()
kinds = (CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS,
CTF.CTFV_COIL_SPARE)
if coils is not None:
for p in coils:
if p['valid']:
if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
for kind in kinds:
if kind not in h_pts and p['kind'] == kind:
h_pts[kind] = p['r']
elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
for kind in kinds:
if kind not in d_pts and p['kind'] == kind:
d_pts[kind] = p['r']
if any(kind not in h_pts for kind in kinds[:-1]):
raise RuntimeError('Some of the mandatory HPI head-coordinate '
'info was not there.')
if any(kind not in d_pts for kind in kinds[:-1]):
raise RuntimeError('Some of the mandatory HPI device-coordinate '
'info was not there.')
use_kinds = [kind for kind in kinds
if (kind in h_pts and kind in d_pts)]
r_head = np.array([h_pts[kind] for kind in use_kinds])
r_dev = np.array([d_pts[kind] for kind in use_kinds])
T2 = _quaternion_align(FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_MNE_COORD_CTF_HEAD, r_dev, r_head)
# The final missing transform
if T3 is not None and T2 is not None:
T5 = combine_transforms(T2, T3, FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
FIFF.FIFFV_COORD_HEAD)
T1 = combine_transforms(invert_transform(T4), T5,
FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD)
s = dict(t_dev_head=T1, t_ctf_dev_ctf_head=T2, t_ctf_head_head=T3,
t_ctf_dev_dev=T4, t_ctf_dev_head=T5)
logger.info(' Coordinate transformations established.')
return s
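# Hedged summary of the returned transforms:
#   t_dev_head          (T1): Neuromag device -> Neuromag head
#   t_ctf_dev_ctf_head  (T2): CTF device      -> CTF head
#   t_ctf_head_head     (T3): CTF head        -> Neuromag head
#   t_ctf_dev_dev       (T4): CTF device      -> Neuromag device
#   t_ctf_dev_head      (T5): CTF device      -> Neuromag head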
#!/usr/bin/python
#
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
#
# Extract Json Value
#
# A very simple tool for extracting JSON values from the command line.
import os
import sys
import json
def print_usage():
print """
Usage: extract_json_value.py [json file path] [key of value to extract]
For nested keys, use . separator
"""
def help_and_exit(msg=None):
print msg
print_usage()
sys.exit(1)
def parse_and_validate_args():
if len(sys.argv) < 3:
help_and_exit(msg="Error: Invalid Args")
json_path = sys.argv[1]
json_key = sys.argv[2]
if not os.path.isfile(json_path):
help_and_exit("Error: Invalid json file path")
return json_path, json_key
def extract_key(json_path, json_key):
json_data = None
with open(json_path, 'r') as json_file:
json_data = json.load(json_file)
nested_keys = json_key.split('.')
json_context = json_data
for key in nested_keys:
json_context = json_context.get(key, None)
if json_context is None:
help_and_exit("Error: Invalid json key")
return str(json_context)
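# Hedged usage example: given config.json containing
#   {"build": {"version": "1.0"}}
# running
#   python extract_json_value.py config.json build.version
# prints 1.0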
def execute():
json_path, json_key = parse_and_validate_args()
value = extract_key(json_path, json_key)
return value
if __name__ == "__main__":
print execute()
#################################################################
# Program: Toolib
"""
Known issues:
! GetTable call after Destroy leads to a crash
Careful with delegating to table
Example (how to avoid the crash):
def __getattr__(self, name):
if name != '__del__':
return getattr(self.GetTable(), name)
else:
raise AttributeError, name
"""
__author__ = "Oleg Noga"
__date__ = "$Date: 2007/09/17 13:19:12 $"
__version__ = "$Revision: 1.3 $"
# $Source: D:/HOME/cvs/toolib/wx/grid/wxGrid.py,v $
#
#################################################################
import wx.grid
FIX_SETTABLE = 1
if FIX_SETTABLE:
from table.MTableMessaging import MTableMessaging
class NoneTable(wx.grid.PyGridTableBase):
def GetNumberRows(self):
return 0
def GetNumberCols(self):
return 0
def __nonzero__(self):
return False
USE_SETWXTABLE = 0
class DelegatingTable(wx.grid.PyGridTableBase, MTableMessaging):
"""
Since wxGrid SetTable is buggy
using TableDelegator and pythonic tables
"""
def __init__(self, table=None):
wx.grid.PyGridTableBase.__init__(self)
MTableMessaging.__init__(self)
self.__table = table or NoneTable()
if USE_SETWXTABLE:
self.__table._setWxTable(self)
def _setTable(self, table):
self.fireTableStructureChanging()
if USE_SETWXTABLE:
if self.__table is not None:
self.__table._setWxTable(None)
self.__table = table
if USE_SETWXTABLE:
self.__table._setWxTable(self)
self.fireTableStructureChanged()
def _getTable(self):
return self.__table
#######################################################
# BEGIN_AUTO_GENERATED_CODE
def AppendCols(self, *args):
return self.__table.AppendCols(*args)
def AppendRows(self, *args):
return self.__table.AppendRows(*args)
def AttrProvider(self, *args):
return self.__table.AttrProvider(*args)
def CanGetValueAs(self, *args):
return self.__table.CanGetValueAs(*args)
def CanHaveAttributes(self, *args):
return self.__table.CanHaveAttributes(*args)
def CanSetValueAs(self, *args):
return self.__table.CanSetValueAs(*args)
def ClassName(self, *args):
return self.__table.ClassName(*args)
def Clear(self, *args):
return self.__table.Clear(*args)
def DeleteCols(self, *args):
return self.__table.DeleteCols(*args)
def DeleteRows(self, *args):
return self.__table.DeleteRows(*args)
def Destroy(self, *args):
return self.__table.Destroy(*args)
def GetAttr(self, *args):
return self.__table.GetAttr(*args)
def GetAttrProvider(self, *args):
return self.__table.GetAttrProvider(*args)
def GetClassName(self, *args):
return self.__table.GetClassName(*args)
def GetColLabelValue(self, *args):
return self.__table.GetColLabelValue(*args)
def GetNumberCols(self, *args):
return self.__table.GetNumberCols(*args)
def GetNumberRows(self, *args):
return self.__table.GetNumberRows(*args)
def GetRowLabelValue(self, *args):
return self.__table.GetRowLabelValue(*args)
def GetTypeName(self, *args):
return self.__table.GetTypeName(*args)
def GetValue(self, *args):
return self.__table.GetValue(*args)
def GetValueAsBool(self, *args):
return self.__table.GetValueAsBool(*args)
def GetValueAsDouble(self, *args):
return self.__table.GetValueAsDouble(*args)
def GetValueAsLong(self, *args):
return self.__table.GetValueAsLong(*args)
def GetView(self, *args):
return self.__table.GetView(*args)
def InsertCols(self, *args):
return self.__table.InsertCols(*args)
def InsertRows(self, *args):
return self.__table.InsertRows(*args)
def IsEmptyCell(self, *args):
return self.__table.IsEmptyCell(*args)
def IsSameAs(self, *args):
return self.__table.IsSameAs(*args)
def NumberCols(self, *args):
return self.__table.NumberCols(*args)
def NumberRows(self, *args):
return self.__table.NumberRows(*args)
def SetAttr(self, *args):
return self.__table.SetAttr(*args)
def SetAttrProvider(self, *args):
return self.__table.SetAttrProvider(*args)
def SetColAttr(self, *args):
return self.__table.SetColAttr(*args)
def SetColLabelValue(self, *args):
return self.__table.SetColLabelValue(*args)
def SetRowAttr(self, *args):
return self.__table.SetRowAttr(*args)
def SetRowLabelValue(self, *args):
return self.__table.SetRowLabelValue(*args)
def SetValue(self, *args):
return self.__table.SetValue(*args)
def SetValueAsBool(self, *args):
return self.__table.SetValueAsBool(*args)
def SetValueAsDouble(self, *args):
return self.__table.SetValueAsDouble(*args)
def SetValueAsLong(self, *args):
return self.__table.SetValueAsLong(*args)
def SetView(self, *args):
return self.__table.SetView(*args)
def View(self, *args):
return self.__table.View(*args)
# END_AUTO_GENERATED_CODE
#######################################################
__super__ = wx.grid.Grid
class wxGrid(__super__):
def __init__(self, *args, **kwargs):
__super__.__init__(self, *args, **kwargs)
table = DelegatingTable()
__super__.SetTable(self, table, True)
table.addGridTableListener(self)
def GetTable(self):
return __super__.GetTable(self)._getTable()
def SetTable(self, table, ignored_takeOwnership=False):
__super__.GetTable(self)._setTable(table)
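# Hedged usage sketch (assumes a pythonic table object implementing
# the PyGridTableBase interface plus MTableMessaging notifications):
#   grid = wxGrid(parent, -1)
#   grid.SetTable(MyTable())   # swapped behind the DelegatingTable
#   grid.GetTable()            # -> the MyTable instance, not the delegator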
else:
wxGrid = wx.grid.Grid
if __name__ == '__main__':
def inject(f):
d = filter(lambda name: name[0].isupper(), dir(wx.grid.PyGridTableBase))
d.sort()
for i in d:
#if VERBOSE: print "%s", args
print >>f, """\
def %s(self, *args, **kwargs):
return self.__table.%s(*args, **kwargs)
""".replace("%s", i)
f = open(__file__, 'rt')
code = f.readlines()
f.close()
state = "begin"
f = open(__file__, 'wt')
for i in code:
if state == 'begin':
f.write(i)
if i.find('BEGIN_AUTO_GENERATED_CODE') != -1:
inject(f)
state = 'injected'
elif state == 'injected':
if i.find('END_AUTO_GENERATED_CODE') != -1:
f.write(i)
state = 'end'
elif state == 'end':
f.write(i)
f.close()
#
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base class for project."""
import logging
from . import util
@util.export
class Base(object):
"""Base class for all objects."""
_LOG_PREFIX = 'ovirt.engine.'
@property
def logger(self):
"""Logger."""
return self._logger
def __init__(self):
"""Contructor."""
prefix = ''
if not self.__module__.startswith(self._LOG_PREFIX):
prefix = self._LOG_PREFIX
self._logger = logging.getLogger(prefix + self.__module__)
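# Hedged example: a subclass defined in module 'ovirt.engine.setup.foo'
# logs as 'ovirt.engine.setup.foo', while one defined in 'plugins.bar'
# logs as 'ovirt.engine.plugins.bar' thanks to the prefix above.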
# vim: expandtab tabstop=4 shiftwidth=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
class TerminateInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "terminate"
classes = ("btn-danger",)
icon = "remove"
policy_rules = (("compute", "compute:delete"),)
help_text = _("Terminated instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate Instance",
u"Terminate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled termination of Instance",
u"Scheduled termination of Instances",
count
)
def allowed(self, request, instance=None):
"""Allow terminate action if instance not currently being deleted."""
return not is_deleting(instance)
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "reboot"
classes = ('btn-danger', 'btn-reboot')
policy_rules = (("compute", "compute:reboot"),)
help_text = _("Restarted instances will lose any data"
" not saved in persistent storage.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Hard Reboot Instance",
u"Hard Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Hard Rebooted Instance",
u"Hard Rebooted Instances",
count
)
def allowed(self, request, instance=None):
if instance is not None:
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
else:
return True
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Soft Reboot Instance",
u"Soft Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Soft Rebooted Instance",
u"Soft Rebooted Instances",
count
)
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
name = "pause"
icon = "pause"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Pause Instance",
u"Pause Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Paused Instance",
u"Paused Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
policy = (("compute", "compute_extension:admin_actions:unpause"),)
else:
self.current_present_action = PAUSE
policy = (("compute", "compute_extension:admin_actions:pause"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.paused)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
classes = ("btn-suspend",)
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Suspend Instance",
u"Suspend Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Suspended Instance",
u"Suspended Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
policy = (("compute", "compute_extension:admin_actions:resume"),)
else:
self.current_present_action = SUSPEND
policy = (("compute", "compute_extension:admin_actions:suspend"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.suspended)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
name = "shelve"
icon = "shelve"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Shelve Instance",
u"Shelve Instances",
count
),
ungettext_lazy(
u"Unshelve Instance",
u"Unshelve Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Shelved Instance",
u"Shelved Instances",
count
),
ungettext_lazy(
u"Unshelved Instance",
u"Unshelved Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('Shelve', request):
return False
if not instance:
return False
self.shelved = instance.status == "SHELVED_OFFLOADED"
if self.shelved:
self.current_present_action = UNSHELVE
policy = (("compute", "compute_extension:unshelve"),)
else:
self.current_present_action = SHELVE
policy = (("compute", "compute_extension:shelve"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.shelved)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.shelved:
api.nova.server_unshelve(request, obj_id)
self.current_past_action = UNSHELVE
else:
api.nova.server_shelve(request, obj_id)
self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(LaunchLink, self).__init__(attrs, **kwargs)
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except Exception:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
class LaunchLinkNG(LaunchLink):
name = "launch-ng"
url = "horizon:project:instances:index"
ajax = False
classes = ("btn-launch", )
def get_default_attrs(self):
url = urlresolvers.reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{ successUrl: '%s' })" % url
self.attrs.update({
'ng-controller': 'LaunchInstanceModalController as modal',
'ng-click': ngclick
})
return super(LaunchLinkNG, self).get_default_attrs()
def get_link_url(self, datum=None):
return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("compute", "compute:update"),)
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
update_instance.UpdateInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images:snapshots:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("compute", "compute:snapshot"),)
def allowed(self, request, instance=None):
return instance.status in SNAPSHOT_READY_STATES \
and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
policy_rules = (("compute", "compute_extension:consoles"),)
def allowed(self, request, instance=None):
# We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
# not set at all, or if it's set to any value other than None or False.
return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(ConsoleLink, self).get_link_url(datum)
tab_query_string = tabs.ConsoleTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
policy_rules = (("compute", "compute_extension:console_output"),)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(LogLink, self).get_link_url(datum)
tab_query_string = tabs.LogTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "resize"
verbose_name = _("Resize Instance")
url = "horizon:project:instances:resize"
classes = ("ajax-modal", "btn-resize")
policy_rules = (("compute", "compute:resize"),)
def get_link_url(self, project):
return self._get_link_url(project, 'flavor_choice')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
resize_instance.ResizeInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
policy_rules = (("compute", "compute:confirm_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
policy_rules = (("compute", "compute:revert_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rebuild"
verbose_name = _("Rebuild Instance")
classes = ("btn-rebuild", "ajax-modal")
url = "horizon:project:instances:rebuild"
policy_rules = (("compute", "compute:rebuild"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
name = "decryptpassword"
verbose_name = _("Retrieve Password")
classes = ("btn-decrypt", "ajax-modal")
url = "horizon:project:instances:decryptpassword"
def allowed(self, request, instance):
enable = getattr(settings,
'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
False)
return (enable
and (instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and get_keyname(instance) is not None)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
keypair_name = get_keyname(datum)
return urlresolvers.reverse(self.url, args=[instance_id,
keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urlresolvers.reverse(self.url)
next_url = self.table.get_full_url()
params = {
"instance_id": self.table.get_object_id(datum),
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
params = urlencode(params)
return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
name = "associate-simple"
verbose_name = _("Associate Floating IP")
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fip = api.network.tenant_floating_ip_allocate(request)
api.network.floating_ip_associate(request, fip.id, target_id)
messages.success(request,
_("Successfully associated floating IP: %s")
% fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to associate floating IP."))
return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
policy_rules = (("compute", "network:disassociate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return not is_deleting(instance)
return False
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
targets = api.network.floating_ip_target_list_by_instance(
request, instance_id)
target_ids = [t.split('_')[0] for t in targets]
fips = [fip for fip in api.network.tenant_floating_ip_list(request)
if fip.port_id in target_ids]
# Removing multiple floating IPs at once doesn't work, so this pops
# off the first one.
if fips:
fip = fips.pop()
api.network.floating_ip_disassociate(request, fip.id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
else:
messages.info(request, _("No floating IPs to disassociate."))
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
def instance_fault_to_friendly_message(instance):
fault = getattr(instance, 'fault', {})
message = fault.get('message', _("Unknown"))
default_message = _("Please try again later [Error: %s].") % message
fault_map = {
'NoValidHost': _("There is not enough capacity for this "
"flavor in the selected availability zone. "
"Try again later or select a different availability "
"zone.")
}
return fault_map.get(message, default_message)
def get_instance_error(instance):
if instance.status.lower() != 'error':
return None
message = instance_fault_to_friendly_message(instance)
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or instance.id)
message = string_concat(preamble, ': ', message)
return message
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
try:
instance.full_flavor = api.nova.flavor_get(request,
instance.flavor["id"])
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor information '
'for instance "%s".') % instance_id,
ignore=True)
error = get_instance_error(instance)
if error:
messages.error(request, error)
return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "start"
classes = ('btn-confirm',)
policy_rules = (("compute", "compute:start"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Start Instance",
u"Start Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Started Instance",
u"Started Instances",
count
)
def allowed(self, request, instance):
return ((instance is None) or
(instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
def action(self, request, obj_id):
api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "stop"
classes = ('btn-danger',)
policy_rules = (("compute", "compute:stop"),)
help_text = _("The instance(s) will be shut off.")
@staticmethod
def action_present(count):
return npgettext_lazy(
"Action to perform (the instance is currently running)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
@staticmethod
def action_past(count):
return npgettext_lazy(
"Past action (the instance is currently already Shut Off)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
def allowed(self, request, instance):
return ((instance is None)
or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
and not is_deleting(instance)))
def action(self, request, obj_id):
api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "lock"
policy_rules = (("compute", "compute_extension:admin_actions:lock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Lock Instance",
u"Lock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Locked Instance",
u"Locked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow unlocked instances to be locked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "unlock"
policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Unlock Instance",
u"Unlock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Unlocked Instance",
u"Unlocked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow locked instances to be unlocked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_unlock(request, obj_id)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "attach_interface"
verbose_name = _("Attach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:attach_interface"
policy_rules = (("compute", "compute_extension:attach_interfaces"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
# TODO(lyj): the policy for detach interface does not exist in nova.json;
#            once it's added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "detach_interface"
verbose_name = _("Detach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:detach_interface"
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
ip_groups = {}
for ip_group, addresses in six.iteritems(instance.addresses):
ip_groups[ip_group] = {}
ip_groups[ip_group]["floating"] = []
ip_groups[ip_group]["non_floating"] = []
for address in addresses:
if ('OS-EXT-IPS:type' in address and
address['OS-EXT-IPS:type'] == "floating"):
ip_groups[ip_group]["floating"].append(address)
else:
ip_groups[ip_group]["non_floating"].append(address)
context = {
"ip_groups": ip_groups,
}
return template.loader.render_to_string(template_name, context)
def get_size(instance):
if hasattr(instance, "full_flavor"):
template_name = 'project/instances/_instance_flavor.html'
size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
if instance.full_flavor.disk > 0:
size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
else:
size_disk = _("%s GB") % "0"
context = {
"name": instance.full_flavor.name,
"id": instance.id,
"size_disk": size_disk,
"size_ram": size_ram,
"vcpus": instance.full_flavor.vcpus,
"flavor_id": instance.full_flavor.id
}
return template.loader.render_to_string(template_name, context)
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("active", pgettext_lazy("Current status of an Instance", u"Active")),
("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
("suspended", pgettext_lazy("Current status of an Instance",
u"Suspended")),
("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
("error", pgettext_lazy("Current status of an Instance", u"Error")),
("resize", pgettext_lazy("Current status of an Instance",
u"Resize/Migrate")),
("verify_resize", pgettext_lazy("Current status of an Instance",
u"Confirm or Revert Resize/Migrate")),
("revert_resize", pgettext_lazy(
"Current status of an Instance", u"Revert Resize/Migrate")),
("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
("hard_reboot", pgettext_lazy("Current status of an Instance",
u"Hard Reboot")),
("password", pgettext_lazy("Current status of an Instance", u"Password")),
("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
("migrating", pgettext_lazy("Current status of an Instance",
u"Migrating")),
("build", pgettext_lazy("Current status of an Instance", u"Build")),
("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("soft_deleted", pgettext_lazy("Current status of an Instance",
u"Soft Deleted")),
("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
("shelved_offloaded", pgettext_lazy("Current status of an Instance",
u"Shelved Offloaded")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
u"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
u"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
u"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
u"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
u"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
u"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
u"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
u"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
u"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
u"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
u"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
u"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
u"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
u"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
u"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
u"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
u"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
u"Rebooting Hard")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Pending Hard")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Started Hard")),
("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
u"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
u"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
u"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
u"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
u"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", u"Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
u"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
u"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", u"Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
u"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
u"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
u"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
class InstancesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = (('name', _("Instance Name"), True),
('status', _("Status ="), True),
('image', _("Image ID ="), True),
('flavor', _("Flavor ID ="), True))
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.Column("name",
link="horizon:project:instances:detail",
verbose_name=_("Instance Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
size = tables.Column(get_size, sortable=False, verbose_name=_("Size"))
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
launch_actions = (LaunchLink,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (TerminateInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, AttachInterface,
DetachInterface, EditInstance,
DecryptInstancePassword, EditInstanceSecurityGroups,
ConsoleLink, LogLink, TogglePause, ToggleSuspend,
ToggleShelve, ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, TerminateInstance)
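# A hedged configuration sketch: these are the setting names this module
# reads via getattr(); the values shown are illustrative, not defaults.
# Typically placed in openstack_dashboard/local/local_settings.py:
#
#   LAUNCH_INSTANCE_LEGACY_ENABLED = True      # keep the Python workflow
#   LAUNCH_INSTANCE_NG_ENABLED = False         # hide the Angular workflow
#   OPENSTACK_ENABLE_PASSWORD_RETRIEVE = True  # show "Retrieve Password"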
#!/usr/bin/env python
try:
import pacman
except ImportError:
import alpm
pacman = alpm
import os, tempfile, shutil, sys
def pacman_pkg_getgroups(pkg):
i = pacman.void_to_PM_LIST(pacman.pkg_getinfo(pkg, pacman.PKG_GROUPS))
ret = []
while i:
ret.append(pacman.void_to_char(pacman.list_getdata(i)))
i = pacman.list_next(i)
return ret
def any_in(needle, haystack):
"""return true if any of the needle list is found in haystack"""
for i in needle:
if i in haystack:
return True
return False
if len(sys.argv) > 1 and sys.argv[1] == "--help":
print "COREPKGS (core, chroot-core, devel-core) packages which depend on packages outsite COREPKGS (->COREPKGS is needed)"
sys.exit(0)
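# Usage sketch (hypothetical script name; the repo suffix from argv[1] is
# spliced into the file:// URL handed to db_setserver() below):
#
#   python check-corepkgs.py current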
basecats = ['core', 'chroot-core', 'devel-core']
root = tempfile.mkdtemp()
pacman.initialize(root)
if os.getcwd().split('/')[-2] == "frugalware-current":
treename = "frugalware-current"
else:
treename = "frugalware"
db = pacman.db_register(treename)
pacman.db_setserver(db, "file://" + os.getcwd() + "/../frugalware-%s" % sys.argv[1])
pacman.db_update(1, db)
i = pacman.db_getpkgcache(db)
while i:
pkg = pacman.void_to_PM_PKG(pacman.list_getdata(i))
pkgname = pacman.void_to_char(pacman.pkg_getinfo(pkg, pacman.PKG_NAME))
groups = pacman_pkg_getgroups(pkg)
if not any_in(basecats, groups):
i = pacman.list_next(i)
continue
j = pacman.void_to_PM_LIST(pacman.pkg_getinfo(pkg, pacman.PKG_DEPENDS))
while j:
found = False
dep = pacman.void_to_char(pacman.list_getdata(j)).split("<")[0].split(">")[0].split("=")[0]
k = pacman.db_getpkgcache(db)
while not found and k:
p = pacman.void_to_PM_PKG(pacman.list_getdata(k))
if pacman.void_to_char(pacman.pkg_getinfo(p, pacman.PKG_NAME)) == dep:
if any_in(basecats, pacman_pkg_getgroups(p)):
found = True
else:
l = pacman.void_to_PM_LIST(pacman.pkg_getinfo(p, pacman.PKG_PROVIDES))
while not found and l:
pr = pacman.void_to_PM_PKG(pacman.list_getdata(l))
if pacman.void_to_char(pacman.pkg_getinfo(pr, pacman.PKG_NAME)) == dep:
found = True
l = pacman.list_next(l)
k = pacman.list_next(k)
if not found:
            try:
                fb = open("../source/%s/%s/FrugalBuild" % (groups[0], pkgname))
                while True:
                    line = fb.readline()
                    if not line:
                        break
                    if line[:14] != "# Maintainer: ":
                        continue
                    # FIXME: we hardcode the encoding of the FBs here
                    maintainer = line[14:].strip().decode('latin1')
                    break
                fb.close()
            except IOError:
                maintainer = "Unknown"
print "%s should be moved out from COREPKGS (%s is not in COREPKGS; %s)" % (pkgname, pacman.void_to_char(pacman.list_getdata(j)), maintainer)
j = pacman.list_next(j)
i = pacman.list_next(i)
shutil.rmtree(root)
########
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cloudstack_plugin.cloudstack_common import (
get_cloud_driver,
CLOUDSTACK_ID_PROPERTY,
CLOUDSTACK_NAME_PROPERTY,
CLOUDSTACK_TYPE_PROPERTY,
COMMON_RUNTIME_PROPERTIES_KEYS,
USE_EXTERNAL_RESOURCE_PROPERTY,
)
VOLUME_CLOUDSTACK_TYPE = 'volume'
RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
@operation
def create(**kwargs):
""" Create a volume
"""
cloud_driver = get_cloud_driver(ctx)
volume = {}
if ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY] is False:
ctx.logger.debug('reading volume attributes.')
volume.update(copy.deepcopy(ctx.node.properties['volume']))
if 'name' in volume:
volume_name = volume['name']
else:
raise NonRecoverableError("To create a volume, the name of the "
"volume is needed")
if 'size' in volume:
volume_size = volume['size']
else:
raise NonRecoverableError("To create a volume, the size of the "
"volume is needed")
volume = cloud_driver.create_volume(name=volume_name,
size=volume_size)
if volume_exists(cloud_driver, volume.id):
ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY] = volume.id
ctx.instance.runtime_properties[CLOUDSTACK_TYPE_PROPERTY] = \
VOLUME_CLOUDSTACK_TYPE
else:
raise NonRecoverableError("Volume not created")
elif ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY] is True:
if ctx.node.properties['resource_id']:
resource_id = ctx.node.properties['resource_id']
volume = get_volume_by_id(cloud_driver, resource_id)
if volume is not None:
ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY] = \
volume.id
ctx.instance.runtime_properties[CLOUDSTACK_NAME_PROPERTY] = \
volume.name
ctx.instance.runtime_properties[CLOUDSTACK_TYPE_PROPERTY] = \
VOLUME_CLOUDSTACK_TYPE
else:
raise NonRecoverableError("Could not find volume with id {0}".
format(resource_id))
else:
raise NonRecoverableError("Resource_id for volume is not supplied")
return
@operation
def delete(**kwargs):
""" Delete a volume
"""
cloud_driver = get_cloud_driver(ctx)
volume_id = ctx.instance.runtime_properties[CLOUDSTACK_ID_PROPERTY]
volume = get_volume_by_id(cloud_driver, volume_id)
if volume is None:
raise NonRecoverableError('Volume with id {0} not found'
.format(volume_id))
if not ctx.node.properties[USE_EXTERNAL_RESOURCE_PROPERTY]:
ctx.logger.info('Trying to destroy volume {0}'.format(volume))
cloud_driver.destroy_volume(volume=volume)
else:
ctx.logger.info('Volume {0} does not need to be destroyed'.format(
volume))
def volume_exists(cloud_driver, volume_id):
exists = get_volume_by_id(cloud_driver, volume_id)
if not exists:
return False
return True
def get_volume_by_id(cloud_driver, volume_id):
volumes = [volume for volume in cloud_driver.list_volumes()
if volume_id == volume.id]
if not volumes:
ctx.logger.info('Could not find volume with ID {0}'.
format(volume_id))
return None
return volumes[0]
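# A hedged sketch of the node template create() expects. The type name is
# hypothetical, and the 'use_external_resource' key is assumed to be the
# value of USE_EXTERNAL_RESOURCE_PROPERTY, as in other Cloudify plugins:
#
#   my_volume:
#     type: cloudify.cloudstack.nodes.Volume   # hypothetical type name
#     properties:
#       use_external_resource: false
#       volume:
#         name: data-disk
#         size: 100   # passed straight through to create_volume()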
"""Support for Juicenet cloud."""
import logging
import pyjuicenet
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "juicenet"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
extra=vol.ALLOW_EXTRA,
)
JUICENET_COMPONENTS = ["sensor", "switch"]
def setup(hass, config):
"""Set up the Juicenet component."""
hass.data[DOMAIN] = {}
access_token = config[DOMAIN].get(CONF_ACCESS_TOKEN)
hass.data[DOMAIN]["api"] = pyjuicenet.Api(access_token)
for component in JUICENET_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class JuicenetDevice(Entity):
"""Represent a base Juicenet device."""
def __init__(self, device, sensor_type, hass):
"""Initialise the sensor."""
self.hass = hass
self.device = device
self.type = sensor_type
@property
def name(self):
"""Return the name of the device."""
return self.device.name()
def update(self):
"""Update state of the device."""
self.device.update_state()
@property
def _manufacturer_device_id(self):
"""Return the manufacturer device id."""
return self.device.id()
@property
def _token(self):
"""Return the device API token."""
return self.device.token()
@property
def unique_id(self):
"""Return a unique ID."""
return "{}-{}".format(self.device.id(), self.type)
import datetime, os, random, uuid, zipfile
# GLOBAL:
DATES = []
PST = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
YEAR = PST.year - 1
MONTH = PST.month
DAY = PST.day
date = datetime.datetime(YEAR, MONTH, DAY)
patterns = {
"A" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"B" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"C" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
"D" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0],
"E" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1],
"F" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
"G" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
"H" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"I" : [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
"J" : [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
"K" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
"L" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
"M" : [1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"N" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"O" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"P" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
"Q" : [1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
"R" : [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1],
"S" : [1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
"T" : [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
"U" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"V" : [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0],
"W" : [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"X" : [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
"Y" : [1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"Z" : [1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1]
}
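# Each pattern above is a 35-cell bitmap: 5 columns of 7 rows, one column per
# week on the GitHub contribution graph, which gen_pattern() fills one day
# per cell. A small sketch to preview a letter on stdout (index math follows
# that column-major layout):
#
#   for row in range(7):
#       print("".join("#" if patterns["A"][col * 7 + row] else "."
#                     for col in range(5)))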
def gen_pattern(char):
global date
for i in range(35):
check = patterns[char][i]
if check:
DATES.append(date)
date += datetime.timedelta(days=1)
def gen_empty_pattern():
global date
for i in range(7):
date += datetime.timedelta(days=1)
def init(message="ANINDYA", author_name="", author_email=""):
class author:
name = author_name
email = author_email
_author = author()
message = message.upper()
string = message[0:8]
for char in string:
gen_pattern(char)
gen_empty_pattern()
random_dir = str(uuid.uuid4())
dir_path = os.getcwd() + "/gits/" + random_dir
import github
REPO = github.init(dir_path)
for date in DATES:
date = date.strftime("%Y-%m-%d %H:%M:%S")
github.commit(REPO, date, dir_path, _author)
return "./gits/" + random_dir
# -*- coding: utf-8 -*-
import collections
import functools
from requests.compat import urlparse, urlencode
from . import exceptions
from . import models
class GitHubIterator(models.GitHubCore, collections.Iterator):
"""The :class:`GitHubIterator` class powers all of the iter_* methods."""
def __init__(self, count, url, cls, session, params=None, etag=None,
headers=None):
models.GitHubCore.__init__(self, {}, session)
#: Original number of items requested
self.original = count
#: Number of items left in the iterator
self.count = count
        #: URL the class used to make its first GET
self.url = url
#: Last URL that was requested
self.last_url = None
self._api = self.url
#: Class for constructing an item to return
self.cls = cls
#: Parameters of the query string
self.params = params or {}
self._remove_none(self.params)
# We do not set this from the parameter sent. We want this to
# represent the ETag header returned by GitHub no matter what.
# If this is not None, then it won't be set from the response and
# that's not what we want.
#: The ETag Header value returned by GitHub
self.etag = None
#: Headers generated for the GET request
self.headers = headers or {}
#: The last response seen
self.last_response = None
#: Last status code received
self.last_status = 0
if etag:
self.headers.update({'If-None-Match': etag})
self.path = urlparse(self.url).path
def _repr(self):
return '<GitHubIterator [{0}, {1}]>'.format(self.count, self.path)
def __iter__(self):
self.last_url, params = self.url, self.params
headers = self.headers
if 0 < self.count <= 100 and self.count != -1:
params['per_page'] = self.count
if 'per_page' not in params and self.count == -1:
params['per_page'] = 100
cls = self.cls
if issubclass(self.cls, models.GitHubCore):
cls = functools.partial(self.cls, session=self)
while (self.count == -1 or self.count > 0) and self.last_url:
response = self._get(self.last_url, params=params,
headers=headers)
self.last_response = response
self.last_status = response.status_code
if params:
params = None # rel_next already has the params
if not self.etag and response.headers.get('ETag'):
self.etag = response.headers.get('ETag')
json = self._get_json(response)
if json is None:
break
# languages returns a single dict. We want the items.
if isinstance(json, dict):
if issubclass(self.cls, models.GitHubObject):
raise exceptions.UnprocessableResponseBody(
"GitHub's API returned a body that could not be"
" handled", json
)
if json.get('ETag'):
del json['ETag']
if json.get('Last-Modified'):
del json['Last-Modified']
json = json.items()
for i in json:
yield cls(i)
self.count -= 1 if self.count > 0 else 0
if self.count == 0:
break
rel_next = response.links.get('next', {})
self.last_url = rel_next.get('url', '')
def __next__(self):
if not hasattr(self, '__i__'):
self.__i__ = self.__iter__()
return next(self.__i__)
def _get_json(self, response):
return self._json(response, 200)
def refresh(self, conditional=False):
self.count = self.original
if conditional:
self.headers['If-None-Match'] = self.etag
self.etag = None
self.__i__ = self.__iter__()
return self
def next(self):
return self.__next__()
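# A hedged usage sketch: instances are normally built by the iter_* helpers
# rather than constructed directly (method names assumed from the 0.x
# github3.py API this module belongs to):
#
#   import github3
#   gh = github3.login(token='...')        # placeholder credentials
#   for repo in gh.iter_repos(number=10):  # yields through a GitHubIterator
#       print(repo)
#
# Passing number=-1 requests everything; per_page is then pinned to 100.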
class SearchIterator(GitHubIterator):
"""This is a special-cased class for returning iterable search results.
It inherits from :class:`GitHubIterator <github3.structs.GitHubIterator>`.
All members and methods documented here are unique to instances of this
class. For other members and methods, check its parent class.
"""
def __init__(self, count, url, cls, session, params=None, etag=None,
headers=None):
super(SearchIterator, self).__init__(count, url, cls, session, params,
etag, headers)
#: Total count returned by GitHub
self.total_count = 0
#: Items array returned in the last request
self.items = []
def _repr(self):
return '<SearchIterator [{0}, {1}?{2}]>'.format(self.count, self.path,
urlencode(self.params))
def _get_json(self, response):
json = self._json(response, 200)
# I'm not sure if another page will retain the total_count attribute,
# so if it's not in the response, just set it back to what it used to
# be
self.total_count = json.get('total_count', self.total_count)
self.items = json.get('items', [])
# If we return None then it will short-circuit the while loop.
return json.get('items')
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that attaches a correlation id to WSGI request"""
from ceilometer.openstack.common.middleware import base
from ceilometer.openstack.common import uuidutils
class CorrelationIdMiddleware(base.Middleware):
def process_request(self, req):
correlation_id = (req.headers.get("X_CORRELATION_ID") or
uuidutils.generate_uuid())
req.headers['X_CORRELATION_ID'] = correlation_id
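# A minimal sketch of the effect (deployment as a WSGI pipeline filter
# assumed):
#
#   app = CorrelationIdMiddleware(wsgi_app)
#   # a request without X_CORRELATION_ID gets a fresh UUID attached;
#   # a request that already carries the header keeps its value.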
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v2 import endpoints_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestEndpointsClient(base.BaseServiceTest):
FAKE_CREATE_ENDPOINT = {
"endpoint": {
"id": 1,
"tenantId": 1,
"region": "North",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
}
}
FAKE_LIST_ENDPOINTS = {
"endpoints": [
{
"id": 1,
"tenantId": "1",
"region": "North",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
},
{
"id": 2,
"tenantId": "1",
"region": "South",
"type": "compute",
"publicURL": "https://compute.north.public.com/v1",
"internalURL": "https://compute.north.internal.com/v1",
"adminURL": "https://compute.north.internal.com/v1"
}
]
}
def setUp(self):
super(TestEndpointsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = endpoints_client.EndpointsClient(fake_auth,
'identity', 'regionOne')
def _test_create_endpoint(self, bytes_body=False):
self.check_service_client_function(
self.client.create_endpoint,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_ENDPOINT,
bytes_body,
service_id="b344506af7644f6794d9cb316600b020",
region="region-demo",
publicurl="https://compute.north.public.com/v1",
adminurl="https://compute.north.internal.com/v1",
internalurl="https://compute.north.internal.com/v1")
def _test_list_endpoints(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoints,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINTS,
bytes_body)
def test_create_endpoint_with_str_body(self):
self._test_create_endpoint()
def test_create_endpoint_with_bytes_body(self):
self._test_create_endpoint(bytes_body=True)
def test_list_endpoints_with_str_body(self):
self._test_list_endpoints()
def test_list_endpoints_with_bytes_body(self):
self._test_list_endpoints(bytes_body=True)
def test_delete_endpoint(self):
self.check_service_client_function(
self.client.delete_endpoint,
'tempest.lib.common.rest_client.RestClient.delete',
{},
endpoint_id="b344506af7644f6794d9cb316600b020",
status=204)
#
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.cElementTree as etree
import ethtool
import socket
import logging
import utils
logger = logging.getLogger('glustercli')
if hasattr(etree, 'ParseError'):
_etreeExceptions = (etree.ParseError, AttributeError, ValueError)
else:
_etreeExceptions = (SyntaxError, AttributeError, ValueError)
_glusterCommandPath = utils.CommandPath("gluster",
"/usr/sbin/gluster",
)
_TRANS_IN_PROGRESS = "another transaction is in progress"
_peerUUID = ''
_peer = ''
def _getLocalPeer():
global _peer
if _peer:
return _peer
fqdn = socket.getfqdn()
ip = socket.gethostbyname(fqdn)
if not ip.startswith('127.'):
_peer = ip
return ip
for dev in ethtool.get_active_devices():
try:
ip = ethtool.get_ipaddr(dev)
if not ip.startswith('127.'):
_peer = ip
return ip
except IOError as e:
logger.warn('failed to get ipaddr for device %s: %s' % (dev, e))
return fqdn
def _getGlusterVolCmd():
return [_glusterCommandPath.cmd, "--mode=script", "volume"]
def _getGlusterPeerCmd():
return [_glusterCommandPath.cmd, "--mode=script", "peer"]
def _getGlusterSystemCmd():
return [_glusterCommandPath.cmd, "system::"]
def _getGlusterVolGeoRepCmd():
return _getGlusterVolCmd() + ["geo-replication"]
def _getGlusterSnapshotCmd():
return [_glusterCommandPath.cmd, "--mode=script", "snapshot"]
class BrickStatus:
PAUSED = 'PAUSED'
COMPLETED = 'COMPLETED'
RUNNING = 'RUNNING'
UNKNOWN = 'UNKNOWN'
NA = 'NA'
class HostStatus:
CONNECTED = 'CONNECTED'
DISCONNECTED = 'DISCONNECTED'
UNKNOWN = 'UNKNOWN'
class VolumeStatus:
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
class TransportType:
TCP = 'TCP'
RDMA = 'RDMA'
class TaskType:
REBALANCE = 'REBALANCE'
REPLACE_BRICK = 'REPLACE_BRICK'
REMOVE_BRICK = 'REMOVE_BRICK'
class GlusterXMLError(Exception):
message = "XML error"
def __init__(self, cmd, xml):
self.cmd = cmd
self.xml = xml
def __str__(self):
return "%s\ncommand: %s\nXML: %s" % (self.message, self.cmd, self.xml)
class GlusterCmdFailed(utils.CmdExecFailed):
message = "gluster command failed"
class GlusterBusy(utils.CmdExecFailed):
message = "gluster busy"
def _throwIfBusy(cmd, rc, out, err):
o = out + err
if _TRANS_IN_PROGRESS in o.lower():
raise GlusterBusy(cmd, rc, out, err)
def _execGluster(cmd):
rc, out, err = utils.execCmd(cmd)
_throwIfBusy(cmd, rc, out, err)
return rc, out, err
def _execGlusterXml(cmd):
cmd.append('--xml')
rc, out, err = utils.execCmd(cmd)
_throwIfBusy(cmd, rc, out, err)
try:
tree = etree.fromstring(out)
rv = int(tree.find('opRet').text)
msg = tree.find('opErrstr').text
errNo = int(tree.find('opErrno').text)
except _etreeExceptions:
raise GlusterXMLError(cmd, out)
if rv == 0:
return tree
if errNo != 0:
rv = errNo
raise GlusterCmdFailed(cmd, rv, err=msg)
def _getLocalPeerUUID():
global _peerUUID
if _peerUUID:
return _peerUUID
command = _getGlusterSystemCmd() + ["uuid", "get"]
rc, out, err = _execGluster(command)
o = out.strip()
if o.startswith('UUID: '):
_peerUUID = o[6:]
return _peerUUID
def _parseVolumeStatus(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': [],
'nfs': [],
'shd': []}
hostname = _getLocalPeer()
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
if value['path'] == 'localhost':
value['path'] = hostname
if value['status'] == '1':
value['status'] = 'ONLINE'
else:
value['status'] = 'OFFLINE'
if value['hostname'] == 'NFS Server':
status['nfs'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'port': value['port'],
'status': value['status'],
'pid': value['pid']})
elif value['hostname'] == 'Self-heal Daemon':
status['shd'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'status': value['status'],
'pid': value['pid']})
else:
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'port': value['port'],
'status': value['status'],
'pid': value['pid']})
return status
def _parseVolumeStatusDetail(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
sizeTotal = int(value['sizeTotal'])
value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
sizeFree = int(value['sizeFree'])
value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'sizeTotal': '%.3f' % (value['sizeTotal'],),
'sizeFree': '%.3f' % (value['sizeFree'],),
'device': value['device'],
'blockSize': value['blockSize'],
'mntOptions': value['mntOptions'],
'fsName': value['fsName']})
return status
def _parseVolumeStatusClients(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
hostname = el.find('hostname').text
path = el.find('path').text
hostuuid = el.find('peerid').text
clientsStatus = []
for c in el.findall('clientsStatus/client'):
clientValue = {}
for ch in c.getchildren():
clientValue[ch.tag] = ch.text or ''
clientsStatus.append({'hostname': clientValue['hostname'],
'bytesRead': clientValue['bytesRead'],
'bytesWrite': clientValue['bytesWrite']})
status['bricks'].append({'brick': '%s:%s' % (hostname, path),
'hostuuid': hostuuid,
'clientsStatus': clientsStatus})
return status
def _parseVolumeStatusMem(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
brick = {'brick': '%s:%s' % (el.find('hostname').text,
el.find('path').text),
'hostuuid': el.find('peerid').text,
'mallinfo': {},
'mempool': []}
for ch in el.find('memStatus/mallinfo').getchildren():
brick['mallinfo'][ch.tag] = ch.text or ''
for c in el.findall('memStatus/mempool/pool'):
mempool = {}
for ch in c.getchildren():
mempool[ch.tag] = ch.text or ''
brick['mempool'].append(mempool)
status['bricks'].append(brick)
return status
def volumeStatus(volumeName, brick=None, option=None):
command = _getGlusterVolCmd() + ["status", volumeName]
if brick:
command.append(brick)
if option:
command.append(option)
xmltree = _execGlusterXml(command)
try:
if option == 'detail':
return _parseVolumeStatusDetail(xmltree)
elif option == 'clients':
return _parseVolumeStatusClients(xmltree)
elif option == 'mem':
return _parseVolumeStatusMem(xmltree)
else:
return _parseVolumeStatus(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeInfo(tree):
volumes = {}
for el in tree.findall('volInfo/volumes/volume'):
value = {}
value['volumeName'] = el.find('name').text
value['uuid'] = el.find('id').text
value['volumeType'] = el.find('typeStr').text.upper().replace('-', '_')
status = el.find('statusStr').text.upper()
if status == 'STARTED':
value["volumeStatus"] = VolumeStatus.ONLINE
else:
value["volumeStatus"] = VolumeStatus.OFFLINE
value['brickCount'] = el.find('brickCount').text
value['distCount'] = el.find('distCount').text
value['stripeCount'] = el.find('stripeCount').text
value['replicaCount'] = el.find('replicaCount').text
transportType = el.find('transport').text
if transportType == '0':
value['transportType'] = [TransportType.TCP]
elif transportType == '1':
value['transportType'] = [TransportType.RDMA]
else:
value['transportType'] = [TransportType.TCP, TransportType.RDMA]
value['bricks'] = []
value['options'] = {}
value['bricksInfo'] = []
for b in el.findall('bricks/brick'):
value['bricks'].append(b.text)
for o in el.findall('options/option'):
value['options'][o.find('name').text] = o.find('value').text
for d in el.findall('bricks/brick'):
brickDetail = {}
            # this try block is to maintain backward compatibility:
            # it returns an empty list when gluster does not return uuid
try:
brickDetail['name'] = d.find('name').text
brickDetail['hostUuid'] = d.find('hostUuid').text
value['bricksInfo'].append(brickDetail)
except AttributeError:
break
volumes[value['volumeName']] = value
return volumes
def _parseVolumeProfileInfo(tree, nfs):
bricks = []
if nfs:
brickKey = 'nfs'
bricksKey = 'nfsServers'
else:
brickKey = 'brick'
bricksKey = 'bricks'
for brick in tree.findall('volProfile/brick'):
fopCumulative = []
blkCumulative = []
fopInterval = []
blkInterval = []
brickName = brick.find('brickName').text
if brickName == 'localhost':
brickName = _getLocalPeer()
for block in brick.findall('cumulativeStats/blockStats/block'):
blkCumulative.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('cumulativeStats/fopStats/fop'):
fopCumulative.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
for block in brick.findall('intervalStats/blockStats/block'):
blkInterval.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('intervalStats/fopStats/fop'):
fopInterval.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
bricks.append(
{brickKey: brickName,
'cumulativeStats': {
'blockStats': blkCumulative,
'fopStats': fopCumulative,
'duration': brick.find('cumulativeStats/duration').text,
'totalRead': brick.find('cumulativeStats/totalRead').text,
'totalWrite': brick.find('cumulativeStats/totalWrite').text},
'intervalStats': {
'blockStats': blkInterval,
'fopStats': fopInterval,
'duration': brick.find('intervalStats/duration').text,
'totalRead': brick.find('intervalStats/totalRead').text,
'totalWrite': brick.find('intervalStats/totalWrite').text}})
status = {'volumeName': tree.find("volProfile/volname").text,
bricksKey: bricks}
return status
def volumeInfo(volumeName=None, remoteServer=None):
command = _getGlusterVolCmd() + ["info"]
if remoteServer:
command += ['--remote-host=%s' % remoteServer]
if volumeName:
command.append(volumeName)
xmltree = _execGlusterXml(command)
try:
return _parseVolumeInfo(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
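# Sketch of the mapping volumeInfo() returns, keyed by volume name (field
# names as produced by _parseVolumeInfo() above; values abbreviated):
#
#   {'music': {'volumeName': 'music',
#              'uuid': '<volume id>',
#              'volumeType': 'DISTRIBUTED_REPLICATE',
#              'volumeStatus': VolumeStatus.ONLINE,
#              'brickCount': '4',
#              'transportType': [TransportType.TCP],
#              'bricks': ['host1:/export/brick1', ...],
#              'options': {'nfs.disable': 'off', ...},
#              'bricksInfo': [{'name': ..., 'hostUuid': ...}, ...]}}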
def volumeCreate(volumeName, brickList, replicaCount=0, stripeCount=0,
transportList=[], force=False):
command = _getGlusterVolCmd() + ["create", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
if transportList:
command += ["transport", ','.join(transportList)]
command += brickList
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return {'uuid': xmltree.find('volCreate/volume/id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
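# Example call (host and brick paths are placeholders): creating a 2-way
# replicated volume and capturing its UUID from the XML reply:
#
#   volumeCreate('music', ['host1:/export/brick1', 'host2:/export/brick1'],
#                replicaCount=2)
#   # -> {'uuid': '...'}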
def volumeStart(volumeName, force=False):
command = _getGlusterVolCmd() + ["start", volumeName]
if force:
command.append('force')
_execGluster(command)
return True
def volumeStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["stop", volumeName]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeDelete(volumeName):
command = _getGlusterVolCmd() + ["delete", volumeName]
_execGlusterXml(command)
return True
def volumeSet(volumeName, option, value):
command = _getGlusterVolCmd() + ["set", volumeName, option, value]
_execGlusterXml(command)
return True
def _parseVolumeSetHelpXml(out):
optionList = []
tree = etree.fromstring(out)
for el in tree.findall('option'):
option = {}
for ch in el.getchildren():
option[ch.tag] = ch.text or ''
optionList.append(option)
return optionList
def volumeSetHelpXml():
rc, out, err = _execGluster(_getGlusterVolCmd() + ["set", 'help-xml'])
return _parseVolumeSetHelpXml(out)
def volumeReset(volumeName, option='', force=False):
command = _getGlusterVolCmd() + ['reset', volumeName]
if option:
command.append(option)
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeBrickAdd(volumeName, brickList,
replicaCount=0, stripeCount=0, force=False):
command = _getGlusterVolCmd() + ["add-brick", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeRebalanceStart(volumeName, rebalanceType="", force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName]
if rebalanceType:
command.append(rebalanceType)
command.append("start")
if force:
command.append("force")
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volRebalance/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeRebalanceStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
if mode == 'rebalance':
tree = xmltree.find('volRebalance')
elif mode == 'remove-brick':
tree = xmltree.find('volRemoveBrick')
else:
return
st = tree.find('aggregate/statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status = {
'summary': {
'runtime': tree.find('aggregate/runtime').text,
'filesScanned': tree.find('aggregate/lookups').text,
'filesMoved': tree.find('aggregate/files').text,
'filesFailed': tree.find('aggregate/failures').text,
'filesSkipped': tree.find('aggregate/skipped').text,
'totalSizeMoved': tree.find('aggregate/size').text,
'status': statusStr.upper()},
'hosts': []}
for el in tree.findall('node'):
st = el.find('statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status['hosts'].append({'name': el.find('nodeName').text,
'id': el.find('id').text,
'runtime': el.find('runtime').text,
'filesScanned': el.find('lookups').text,
'filesMoved': el.find('files').text,
'filesFailed': el.find('failures').text,
'filesSkipped': el.find('skipped').text,
'totalSizeMoved': el.find('size').text,
'status': statusStr.upper()})
return status
def volumeRebalanceStatus(volumeName):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "start"]
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volReplaceBrick/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "abort"]
_execGlusterXml(command)
return True
def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "pause"]
_execGlusterXml(command)
return True
def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
volumeName,
existingBrick, newBrick,
"status"])
message = "\n".join(out)
statLine = out[0].strip().upper()
if BrickStatus.PAUSED in statLine:
return BrickStatus.PAUSED, message
elif statLine.endswith('MIGRATION COMPLETE'):
return BrickStatus.COMPLETED, message
elif statLine.startswith('NUMBER OF FILES MIGRATED'):
return BrickStatus.RUNNING, message
elif statLine.endswith("UNKNOWN"):
return BrickStatus.UNKNOWN, message
else:
return BrickStatus.NA, message
def volumeReplaceBrickCommit(volumeName, existingBrick, newBrick,
force=False):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "commit"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeBrickRemoveStart(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
xmltree = _execGlusterXml(command)
try:
return {'taskId': xmltree.find('volRemoveBrick/task-id').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveStop(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveStatus(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeBrickRemoveCommit(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
_execGlusterXml(command)
return True
def volumeBrickRemoveForce(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
_execGlusterXml(command)
return True
def peerProbe(hostName):
command = _getGlusterPeerCmd() + ["probe", hostName]
_execGlusterXml(command)
return True
def peerDetach(hostName, force=False):
command = _getGlusterPeerCmd() + ["detach", hostName]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except GlusterCmdFailed as e:
if e.rc == 2:
raise GlusterPeerNotFound(hostName)
raise
def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
hostList = [{'hostname': gHostName,
'uuid': gUuid,
'status': gStatus}]
for el in tree.findall('peerStatus/peer'):
if el.find('state').text != '3':
status = HostStatus.UNKNOWN
elif el.find('connected').text == '1':
status = HostStatus.CONNECTED
else:
status = HostStatus.DISCONNECTED
hostList.append({'hostname': el.find('hostname').text,
'uuid': el.find('uuid').text,
'status': status})
return hostList
def peerStatus():
command = _getGlusterPeerCmd() + ["status"]
xmltree = _execGlusterXml(command)
try:
return _parsePeerStatus(xmltree,
_getLocalPeer(),
_getLocalPeerUUID(), HostStatus.CONNECTED)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeProfileStart(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "start"]
_execGlusterXml(command)
return True
def volumeProfileStop(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "stop"]
_execGlusterXml(command)
return True
def volumeProfileInfo(volumeName, nfs=False):
command = _getGlusterVolCmd() + ["profile", volumeName, "info"]
if nfs:
command += ["nfs"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeProfileInfo(xmltree, nfs)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def _parseVolumeTasks(tree):
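    """Map task ids to task details from 'volume status ... tasks' xml.

    Illustrative return shape:
    {'<task-id>': {'volumeName': 'vol1', 'taskType': TaskType.REBALANCE,
                   'status': 'IN_PROGRESS', 'bricks': []}}
    For REPLACE_BRICK tasks bricks holds [srcBrick, dstBrick]; for
    REBALANCE it stays empty.
    """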
tasks = {}
for el in tree.findall('volStatus/volumes/volume'):
volumeName = el.find('volName').text
for c in el.findall('tasks/task'):
taskType = c.find('type').text
taskType = taskType.upper().replace('-', '_').replace(' ', '_')
taskId = c.find('id').text
bricks = []
if taskType == TaskType.REPLACE_BRICK:
bricks.append(c.find('params/srcBrick').text)
bricks.append(c.find('params/dstBrick').text)
elif taskType == TaskType.REMOVE_BRICK:
for b in c.findall('params/brick'):
bricks.append(b.text)
elif taskType == TaskType.REBALANCE:
pass
statusStr = c.find('statusStr').text.upper() \
.replace('-', '_') \
.replace(' ', '_')
tasks[taskId] = {'volumeName': volumeName,
'taskType': taskType,
'status': statusStr,
'bricks': bricks}
return tasks
def volumeTasks(volumeName="all"):
command = _getGlusterVolCmd() + ["status", volumeName, "tasks"]
xmltree = _execGlusterXml(command)
try:
return _parseVolumeTasks(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeGeoRepSessionStart(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "start"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeGeoRepSessionStop(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "stop"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def _parseGeoRepStatus(tree, detail=False):
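    """Build a per-volume dict of geo-replication sessions from xml output.

    Illustrative return shape:
    {'vol1': {'sessions': [{'sessionKey': '...',
                            'remoteVolumeName': 'rvol',
                            'bricks': [{'host': ..., 'hostUuid': ...,
                                        'brickName': ..., 'remoteHost': ...,
                                        'status': ..., 'checkpointStatus': ...,
                                        'crawlStatus': ...}]}]}}
    When detail is True each brick entry also carries the files/bytes
    pending and synced counters.
    """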
status = {}
for volume in tree.findall('geoRep/volume'):
sessions = []
volumeDetail = {}
for session in volume.findall('sessions/session'):
pairs = []
sessionDetail = {}
sessionDetail['sessionKey'] = session.find('session_slave').text
sessionDetail['remoteVolumeName'] = sessionDetail[
'sessionKey'].split("::")[-1]
for pair in session.findall('pair'):
pairDetail = {}
pairDetail['host'] = pair.find('master_node').text
pairDetail['hostUuid'] = pair.find(
'master_node_uuid').text
pairDetail['brickName'] = pair.find('master_brick').text
pairDetail['remoteHost'] = pair.find(
'slave').text.split("::")[0]
pairDetail['status'] = pair.find('status').text
pairDetail['checkpointStatus'] = pair.find(
'checkpoint_status').text
pairDetail['crawlStatus'] = pair.find('crawl_status').text
if detail:
pairDetail['filesSynced'] = pair.find('files_syncd').text
pairDetail['filesPending'] = pair.find(
'files_pending').text
pairDetail['bytesPending'] = pair.find(
'bytes_pending').text
pairDetail['deletesPending'] = pair.find(
'deletes_pending').text
pairDetail['filesSkipped'] = pair.find(
'files_skipped').text
pairs.append(pairDetail)
sessionDetail['bricks'] = pairs
sessions.append(sessionDetail)
volumeDetail['sessions'] = sessions
status[volume.find('name').text] = volumeDetail
return status
def volumeGeoRepStatus(volumeName=None, remoteHost=None,
remoteVolumeName=None, detail=False):
command = _getGlusterVolGeoRepCmd()
if volumeName:
command.append(volumeName)
if remoteHost and remoteVolumeName:
command.append("%s::%s" % (remoteHost, remoteVolumeName))
command.append("status")
if detail:
command.append("detail")
xmltree = _execGlusterXml(command)
try:
return _parseGeoRepStatus(xmltree, detail)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def volumeGeoRepSessionPause(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "pause"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def volumeGeoRepSessionResume(volumeName, remoteHost, remoteVolumeName,
force=False):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "resume"]
if force:
command.append('force')
_execGlusterXml(command)
return True
def _parseVolumeGeoRepConfig(tree):
conf = tree.find('geoRep/config')
config = {}
for child in conf.getchildren():
config[child.tag] = child.text
return {'geoRepConfig': config}
def volumeGeoRepConfig(volumeName, remoteHost,
remoteVolumeName, optionName=None,
optionValue=None):
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
remoteHost, remoteVolumeName), "config"]
if optionName and optionValue:
command += [optionName, optionValue]
elif optionName:
command += ["!%s" % optionName]
xmltree = _execGlusterXml(command)
if optionName:
return True
try:
return _parseVolumeGeoRepConfig(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def snapshotCreate(volumeName, snapName,
snapDescription=None,
force=False):
command = _getGlusterSnapshotCmd() + ["create", snapName, volumeName]
if snapDescription:
command += ['description', snapDescription]
if force:
command.append('force')
xmltree = _execGlusterXml(command)
try:
return {'uuid': xmltree.find('snapCreate/snapshot/uuid').text}
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
def snapshotDelete(volumeName=None, snapName=None):
command = _getGlusterSnapshotCmd() + ["delete"]
if snapName:
command.append(snapName)
elif volumeName:
command += ["volume", volumeName]
# xml output not used because of BZ:1161416 in gluster cli
rc, out, err = _execGluster(command)
return True
def snapshotActivate(snapName, force=False):
command = _getGlusterSnapshotCmd() + ["activate", snapName]
if force:
command.append('force')
_execGlusterXml(command)
return True
def snapshotDeactivate(snapName):
command = _getGlusterSnapshotCmd() + ["deactivate", snapName]
_execGlusterXml(command)
return True
def _parseRestoredSnapshot(tree):
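    """Extract volume and snapshot identifiers from snapshot restore xml.

    Illustrative return shape:
    {'volumeName': ..., 'volumeUuid': ...,
     'snapshotName': ..., 'snapshotUuid': ...}
    """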
snapshotRestore = {}
snapshotRestore['volumeName'] = tree.find('snapRestore/volume/name').text
snapshotRestore['volumeUuid'] = tree.find('snapRestore/volume/uuid').text
snapshotRestore['snapshotName'] = tree.find(
'snapRestore/snapshot/name').text
snapshotRestore['snapshotUuid'] = tree.find(
'snapRestore/snapshot/uuid').text
return snapshotRestore
def snapshotRestore(snapName):
command = _getGlusterSnapshotCmd() + ["restore", snapName]
xmltree = _execGlusterXml(command)
try:
return _parseRestoredSnapshot(xmltree)
except _etreeExceptions:
raise GlusterXMLError(command, etree.tostring(xmltree))
| 0 |
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine.resources.openstack.neutron import neutron
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
class PoolMember(neutron.NeutronResource):
"""A resource for managing LBaaS v2 Pool Members.
A pool member represents a single backend node.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
PROPERTIES = (
POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
SUBNET,
) = (
'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
'subnet'
)
ATTRIBUTES = (
ADDRESS_ATTR, POOL_ID_ATTR
) = (
'address', 'pool_id'
)
properties_schema = {
POOL: properties.Schema(
properties.Schema.STRING,
_('Name or ID of the load balancing pool.'),
required=True
),
ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the pool member on the pool network.'),
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('Port on which the pool member listens for requests or '
'connections.'),
required=True,
constraints=[
constraints.Range(1, 65535),
]
),
WEIGHT: properties.Schema(
properties.Schema.INTEGER,
_('Weight of pool member in the pool (default to 1).'),
default=1,
constraints=[
constraints.Range(0, 256),
],
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the pool member.'),
default=True,
update_allowed=True,
constraints=[constraints.AllowedValues(['True'])]
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet name or ID of this member.'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
}
attributes_schema = {
ADDRESS_ATTR: attributes.Schema(
_('The IP address of the pool member.'),
type=attributes.Schema.STRING
),
POOL_ID_ATTR: attributes.Schema(
_('The ID of the pool to which the pool member belongs.'),
type=attributes.Schema.STRING
)
}
def __init__(self, name, definition, stack):
super(PoolMember, self).__init__(name, definition, stack)
self._pool_id = None
self._lb_id = None
@property
def pool_id(self):
if self._pool_id is None:
self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
self.POOL,
self.properties[self.POOL],
cmd_resource='lbaas_pool')
return self._pool_id
@property
def lb_id(self):
if self._lb_id is None:
pool = self.client().show_lbaas_pool(self.pool_id)['pool']
listener_id = pool['listeners'][0]['id']
listener = self.client().show_listener(listener_id)['listener']
self._lb_id = listener['loadbalancers'][0]['id']
return self._lb_id
def _check_lb_status(self):
return self.client_plugin().check_lb_status(self.lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_pool(
properties, self.POOL, 'pool_id')
properties.pop('pool_id')
if self.SUBNET in properties:
self.client_plugin().resolve_subnet(
properties, self.SUBNET, 'subnet_id')
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
member = self.client().create_lbaas_member(
self.pool_id, {'member': properties})['member']
self.resource_id_set(member['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
member = self.client().show_lbaas_member(self.resource_id,
self.pool_id)
return member['member']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_lbaas_member(self.resource_id,
self.pool_id,
{'member': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_lbaas_member(self.resource_id,
self.pool_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::PoolMember': PoolMember,
}
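# Illustrative HOT template snippet for this resource (the pool, subnet and
# address values below are made-up placeholders):
#
#   member:
#     type: OS::Neutron::LBaaS::PoolMember
#     properties:
#       pool: my_pool
#       address: 10.0.0.5
#       protocol_port: 80
#       subnet: my_subnet
#       weight: 1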
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Citrix Systems, Inc.
# Copyright 2013 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt import driver
class FakeDriver(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class FakeDriver2(FakeDriver):
pass
class ToDriverRegistryTestCase(test.TestCase):
def assertDriverInstance(self, inst, class_, *args, **kwargs):
self.assertEquals(class_, inst.__class__)
self.assertEquals(args, inst.args)
self.assertEquals(kwargs, inst.kwargs)
def test_driver_dict_from_config(self):
drvs = driver.driver_dict_from_config(
[
'key1=nova.tests.test_driver.FakeDriver',
'key2=nova.tests.test_driver.FakeDriver2',
], 'arg1', 'arg2', param1='value1', param2='value2'
)
self.assertEquals(
sorted(['key1', 'key2']),
sorted(drvs.keys())
)
self.assertDriverInstance(
drvs['key1'],
FakeDriver, 'arg1', 'arg2', param1='value1',
param2='value2')
self.assertDriverInstance(
drvs['key2'],
FakeDriver2, 'arg1', 'arg2', param1='value1',
param2='value2')
| 0 |
#!/usr/bin/python3
import argparse
import sys
from collections import defaultdict
from random import choice
def randnewword(letter_dict, char=None):
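    """Generate one word by walking the first-order Markov chain.

    Starts from a random first letter (or the given char) and repeatedly
    samples a successor from letter_dict until a newline is drawn; the
    trailing newline is kept so the result can be written out directly.
    """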
if not char:
char = choice(letter_dict['first'])
result = char
while char != '\n':
char = choice(letter_dict[char])
result += char
return result
def build_dictionary(infile):
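    """Collect character-bigram statistics from infile, one word per line.

    Illustrative shape for the single input line 'ab\\n':
    {'first': ['a'], 'a': ['b'], 'b': ['\\n']}. Repeated successors keep
    their multiplicity, so random.choice reflects observed frequencies.
    """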
letter_dict = defaultdict(list)
for line in infile:
first = True
for char1, char2 in zip(line, line[1:]):
if first:
first = False
letter_dict['first'].append(char1)
letter_dict[char1].append(char2)
return letter_dict
def main(args):
letter_dict = build_dictionary(args.infile)
while args.numwords > 0:
args.numwords -= 1
new_word = randnewword(letter_dict)
args.outfile.write(new_word)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Create new words using a character-level Markov chain.')
parser.add_argument('infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument('outfile', nargs='?',
type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-n', '--numwords',
type=int,
default=1)
args = parser.parse_args()
main(args)
| 0 |
#!/usr/bin/env python
'''
File: Communicator.py
Author: Samuel Barrett
Description: some classes for socket communication
Created: 2010-11-07
Modified: 2010-11-07
'''
import socket, sys, time
import cPickle as pickle
defaultPort = 5557
class TimeoutError(Exception):
def __init__(self, *args):
self.value = args
def __str__(self):
return repr(self.value)
class Communicator(object):
def __init__(self,host='localhost',port=defaultPort,sock=None):
self._sock = sock
self._storedMsg = ''
self._addr = (host,port)
self.initialize()
def initialize(self):
if self._sock is None:
            raise ValueError('Communicator requires an existing socket')
def close(self):
if self._sock is not None:
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except:
pass
finally:
self._sock = None
def sendMsg(self,msg):
#print 'sending',msg
self._sock.sendto(msg + '\0',self._addr)
def recvMsg(self,event=None,retryCount=None):
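        """Receive one null-terminated message from the socket.

        Incoming data is buffered in self._storedMsg until a '\\0'
        delimiter arrives; anything after the delimiter is kept for the
        next call. If event is set while waiting, None is returned.
        retryCount, when given, allows that many empty reads before
        TimeoutError is raised; with the default None the first empty
        read raises.
        """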
msg = self._storedMsg
while ('\0' not in msg):
if (event is not None) and event.isSet():
return None
newMsg = ''
try:
newMsg,self._addr = self._sock.recvfrom(8192)
msg += newMsg
except socket.error:
#time.sleep(0.1)
pass
if len(newMsg) == 0:
if (retryCount is None) or (retryCount <= 0):
raise TimeoutError
else:
retryCount -= 1
print '[Trainer] waiting for message, retry =', retryCount
time.sleep(0.3)
#raise ValueError('Error while receiving message')
(msg,sep,rest) = msg.partition('\0')
self._storedMsg = rest
#print 'received',msg
return msg
def send(self,obj):
self.sendMsg(pickle.dumps(obj))
def recv(self,event=None):
msg = self.recvMsg(event)
if msg is None:
return None
return self.convertMsg(msg)
def convertMsg(self,msg):
return pickle.loads(msg)
class ClientCommunicator(Communicator):
def initialize(self):
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.settimeout(5)
except:
print >>sys.stderr,'Error creating socket'
raise
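# Illustrative usage sketch (assumes a peer is listening on defaultPort and
# replies with pickled objects; the host and payload below are made up):
#
#   comm = ClientCommunicator(host='localhost', port=defaultPort)
#   comm.send({'cmd': 'ping'})
#   reply = comm.recv()
#   comm.close()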
| 0.025571 |
#!/usr/bin/python
import sys
import argparse
import os
import re
'''
A simple script to create lower-resolution Android drawables from higher-resolution ones.
For example, given a batch of -xhdpi images, you can generate -hdpi and -mdpi images.
This makes it possible to export only the highest-resolution artwork from image
authoring tools and automate the rest.
Usage:
drawable_convert.py -d res/drawable-mdpi -d res/drawable-hdpi res/drawable-xhdpi-v14/select*.png
... will take select*.png from xhdpi and place versions into mdpi and hdpi folders.
Correct resize ratios are computed based on resource directory names.
Actual scaling is done by ImageMagick's convert command.
'''
class Converter:
def __init__(self, dstList, image_magic_path):
# print u'Dst list: {0}'.format(dstList)
self.mDstList = dstList
        self.image_magic_path = image_magic_path
def convert(self, src):
for dstpath in self.mDstList:
(srcpath, srcname) = os.path.split(src)
dst = os.path.join(dstpath, srcname)
self.convertOne(src, dst)
def convertOne(self, src, dst):
# print u'\n*****\n{0} to {1}\n*****\n'.format(src, dst)
'''
Determine relative density
'''
srcDpi = self.getDpi(src)
dstDpi = self.getDpi(dst)
if srcDpi < dstDpi:
print u'NOT converting from {0}dpi to {1}dpi'.format(srcDpi, dstDpi)
else:
factor = dstDpi*100/srcDpi
print u'Converting from {0}dpi to {1}dpi, {2}%'.format(srcDpi, dstDpi, factor)
            image_magic_path = self.image_magic_path + '\\\\'
cmd = u'{0}convert -verbose "{1}" -resize "{3}%x{3}%" "{2}"'.format(image_magic_path, src, dst, factor)
os.system(cmd)
def getDpi(self, f):
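        """Map a drawable directory name to its screen density in dpi.

        Standard Android buckets: mdpi=160, hdpi=240, xhdpi=320,
        xxhdpi=480, xxxhdpi=640.
        """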
p = os.path.dirname(f)
if re.match('.*drawable.*\\-mdpi.*', p):
return 160
elif re.match('.*drawable.*\\-hdpi.*', p):
return 240
elif re.match('.*drawable.*\\-xhdpi.*', p):
return 320
        elif re.match('.*drawable.*\\-xxhdpi.*', p):
            return 480
        elif re.match('.*drawable.*\\-xxxhdpi.*', p):
            return 640
else:
            raise ValueError(u'Cannot determine density for {0}'.format(p))
if __name__ == "__main__":
'''
Parse command line arguments
'''
parser = argparse.ArgumentParser(description='Converts drawable resources in Android applications')
parser.add_argument('-i', dest='image_magic_path', action='append', required=True, help='ImageMagick root directory')
parser.add_argument('-d', dest='DST', action='append', required=True, help='destination directory')
parser.add_argument('src', nargs='+', help='files to convert (one or more)')
args = parser.parse_args()
cv = Converter(args.DST, args.image_magic_path[0])
for src in args.src:
cv.convert(src)
'''
if [ $# -lt 1 ] ; then
echo "Usage: $0 file_list"
exit 1
fi
for f in $*
do
echo "File: ${f}"
convert -verbose "${f}" -resize "75%x75%" "../drawable-hdpi/${f}"
convert -verbose "${f}" -resize "50%x50%" "../drawable-mdpi/${f}"
done
''' | 0.036606 |
"""Viessmann ViCare sensor device."""
import logging
import requests
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_POWER,
BinarySensorDevice,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
CONF_GETTER = "getter"
SENSOR_CIRCULATION_PUMP_ACTIVE = "circulationpump_active"
SENSOR_BURNER_ACTIVE = "burner_active"
SENSOR_COMPRESSOR_ACTIVE = "compressor_active"
SENSOR_TYPES = {
SENSOR_CIRCULATION_PUMP_ACTIVE: {
CONF_NAME: "Circulation pump active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getCirculationPumpActive(),
},
# gas sensors
SENSOR_BURNER_ACTIVE: {
CONF_NAME: "Burner active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getBurnerActive(),
},
# heatpump sensors
SENSOR_COMPRESSOR_ACTIVE: {
CONF_NAME: "Compressor active",
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
CONF_GETTER: lambda api: api.getCompressorActive(),
},
}
SENSORS_GENERIC = [SENSOR_CIRCULATION_PUMP_ACTIVE]
SENSORS_BY_HEATINGTYPE = {
HeatingType.gas: [SENSOR_BURNER_ACTIVE],
HeatingType.heatpump: [SENSOR_COMPRESSOR_ACTIVE],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare sensor devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
sensors = SENSORS_GENERIC.copy()
if heating_type != HeatingType.generic:
sensors.extend(SENSORS_BY_HEATINGTYPE[heating_type])
add_entities(
[
ViCareBinarySensor(
hass.data[VICARE_DOMAIN][VICARE_NAME], vicare_api, sensor
)
for sensor in sensors
]
)
class ViCareBinarySensor(BinarySensorDevice):
"""Representation of a ViCare sensor."""
def __init__(self, name, api, sensor_type):
"""Initialize the sensor."""
self._sensor = SENSOR_TYPES[sensor_type]
self._name = f"{name} {self._sensor[CONF_NAME]}"
self._api = api
self._sensor_type = sensor_type
self._state = None
@property
def available(self):
"""Return True if entity is available."""
return self._state is not None and self._state != PYVICARE_ERROR
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.service.id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor[CONF_DEVICE_CLASS]
def update(self):
"""Update state of sensor."""
try:
self._state = self._sensor[CONF_GETTER](self._api)
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
| 0 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import mail
import handle_incoming_email
def test_handle_bounced_email(testbed):
handler = handle_incoming_email.LogSenderHandler()
handler.request = 'request'
message = mail.EmailMessage(
sender='support@example.com',
subject='Your account has been approved')
message.to = 'Albert Johnson <Albert.Johnson@example.com>'
message.body = 'Dear Albert.'
handler.receive(message)
| 0 |